On Wed, Nov 09, 2022, Paolo Bonzini wrote:
> Allow access to the percpu area via the GS segment base, which is
> needed in order to access the saved host spec_ctrl value.  In
> linux-next FILL_RETURN_BUFFER also needs to access percpu data.
>
> For simplicity, the physical address of the save area is added to
> struct svm_cpu_data.
>
> Cc: stable@vger.kernel.org
> Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
> Reported-by: Nathan Chancellor <nathan@kernel.org>
> Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Tested-by: Nathan Chancellor <nathan@kernel.org>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
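A side note for anyone jumping into the thread here: this relies on
svm_data having been converted to a plain (non-static) percpu variable
earlier in the series, so both C and assembly can reach it through the
GS segment base.  A minimal sketch of the C side, assuming that
conversion (the wrapper function name is made up; vmsave() is the
existing helper from svm_ops.h):

	/* Defined via DEFINE_PER_CPU() in svm.c so assembly can see it. */
	DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

	static void vmsave_host_save_area(void)
	{
		/*
		 * this_cpu_ptr() compiles down to a %gs-relative access
		 * on x86-64, i.e. uses the same GS base that percpu-aware
		 * assembly (e.g. FILL_RETURN_BUFFER in linux-next) needs.
		 */
		struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);

		vmsave(sd->save_area_pa);
	}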
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 2af6a71126c1..83955a4e520e 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -287,6 +287,8 @@ struct svm_cpu_data {
>  	struct kvm_ldttss_desc *tss_desc;
>
>  	struct page *save_area;
> +	unsigned long save_area_pa;
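The hunk that actually fills in the new field is trimmed above; the
idea is to compute the physical address once at CPU init so the
VM-Enter/VM-Exit paths never have to translate the page again.
Roughly (a sketch of the shape, not the actual hunk; __sme_page_pa()
is the existing helper in svm.h):

	static int svm_cpu_init(int cpu)
	{
		struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

		memset(sd, 0, sizeof(struct svm_cpu_data));
		sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!sd->save_area)
			return -ENOMEM;

		/* Tag the C-bit once so assembly can use the PA as-is. */
		sd->save_area_pa = __sme_page_pa(sd->save_area);
		return 0;
	}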
I really dislike storing both the page and the address, but my real
objection is to storing the "struct page" instead of the virtual
address, and that can be cleaned up in a follow-up series.

Specifically, the ugly pointer arithmetic in svm_prepare_switch_to_guest()
can be avoided by updating "struct vmcb" to capture the SEV-ES+ save area
layout, and by tracking the save area as a VMCB (which is what it
actually is).

E.g. as a very partial patch:
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0361626841bc..64ba98d32689 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -513,7 +513,10 @@ static inline void __unused_size_checks(void)
 
 struct vmcb {
 	struct vmcb_control_area control;
-	struct vmcb_save_area save;
+	union {
+		struct sev_es_save_area sev_es_save;
+		struct vmcb_save_area save;
+	};
 } __packed;
 
 #define SVM_CPUID_FUNC 0x8000000a

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9f88c8e6766e..b23b7633033b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1462,12 +1462,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 * or subsequent vmload of host save area.
 	 */
 	vmsave(sd->save_area_pa);
-	if (sev_es_guest(vcpu->kvm)) {
-		struct sev_es_save_area *hostsa;
-		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
-		sev_es_prepare_switch_to_guest(hostsa);
-	}
+	if (sev_es_guest(vcpu->kvm))
+		sev_es_prepare_switch_to_guest(&sd->save_area->sev_es_save);
 
 	if (tsc_scaling)
 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 199a2ecef1ce..802ed393d860 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -286,7 +286,7 @@ struct svm_cpu_data {
 	u32 min_asid;
 	struct kvm_ldttss_desc *tss_desc;
 
-	struct page *save_area;
+	struct vmcb *save_area;
 	unsigned long save_area_pa;
 
 	struct vmcb *current_vmcb;
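The alloc/free side would need a matching tweak, since at that point
nothing needs the "struct page" itself anymore.  Something like this
(again just a sketch to show the shape, not a tested patch):

	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return -ENOMEM;

	sd->save_area = page_address(page);
	sd->save_area_pa = __sme_pa(sd->save_area);

with teardown going through virt_to_page():

	__free_page(virt_to_page(sd->save_area));
	sd->save_area = NULL;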