Restoring the host save area from assembly, rather than from C code after __svm_vcpu_run() returns, is needed so that FILL_RETURN_BUFFER has access to the percpu area via the GS segment base.
Cc: stable@vger.kernel.org
Fixes: f14eec0a3203 ("KVM: SVM: move more vmentry code to assembly")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/svm.c     |  3 +--
 arch/x86/kvm/svm/svm.h     |  2 +-
 arch/x86/kvm/svm/svm_ops.h |  5 -----
 arch/x86/kvm/svm/vmenter.S | 13 +++++++++++++
 4 files changed, 15 insertions(+), 8 deletions(-)
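Note for reviewers (below the "---" cut line, so not part of the commit message): the sketch here only illustrates why the GS base matters for percpu accesses. demo_var and read_demo() are hypothetical names, not code from this patch or from the kernel tree.

#include <linux/percpu.h>

DECLARE_PER_CPU(unsigned long, demo_var);	/* hypothetical percpu variable */

static __always_inline unsigned long read_demo(void)
{
	/*
	 * this_cpu_read() compiles to a %gs-relative load, roughly:
	 *	mov %gs:demo_var, %rax
	 * #VMEXIT does not restore the host GS base; that only happens at
	 * the VMLOAD of the host save area.  Until then, a load like this
	 * would go through the guest's GS base, which is why
	 * FILL_RETURN_BUFFER (and any other percpu user) must run after
	 * the VMLOAD that this patch moves into assembly.
	 */
	return this_cpu_read(demo_var);
}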
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 550a364be8d3..381c7dcffe25 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3923,8 +3923,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
-		__svm_vcpu_run(svm);
-		vmload(__sme_page_pa(sd->save_area));
+		__svm_vcpu_run(svm, __sme_page_pa(sd->save_area));
 	}
 
 	guest_state_exit_irqoff();
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index c5b8ec370108..99410651f2a5 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -484,6 +484,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
 /* vmenter.S */
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm);
-void __svm_vcpu_run(struct vcpu_svm *svm);
+void __svm_vcpu_run(struct vcpu_svm *svm, unsigned long hsave_pa);
 
 #endif
diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
index 9430d6437c9f..36c8af87a707 100644
--- a/arch/x86/kvm/svm/svm_ops.h
+++ b/arch/x86/kvm/svm/svm_ops.h
@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
 	svm_asm1(vmsave, "a" (pa), "memory");
 }
 
-static __always_inline void vmload(unsigned long pa)
-{
-	svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 9738ce41fac9..45a4bd002494 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -35,6 +35,7 @@
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @svm:	struct vcpu_svm *
+ * @hsave_pa:	unsigned long
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
@@ -49,6 +50,9 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 	push %_ASM_BX
 
+	/* @hsave_pa is needed last after vmexit, save it first. */
+	push %_ASM_ARG2
+
 	/* Save @svm. */
 	push %_ASM_ARG1
@@ -124,6 +128,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 5:	vmsave %_ASM_AX
 6:
 
+	/* Pop @hsave_pa and restore GSBASE, allowing access to percpu data. */
+	pop %_ASM_AX
+7:	vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +196,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 50:	cmpb $0, kvm_rebooting
 	jne 6b
 	ud2
+70:	cmpb $0, kvm_rebooting
+	jne 8b
+	ud2
 	_ASM_EXTABLE(1b, 10b)
 	_ASM_EXTABLE(3b, 30b)
 	_ASM_EXTABLE(5b, 50b)
+	_ASM_EXTABLE(7b, 70b)
 
SYM_FUNC_END(__svm_vcpu_run)
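Aside for reviewers, also not part of the patch: the new 7:/70: labels follow the fixup idiom already used by the 1:/3:/5: sites in this file. A generic sketch of the pattern, with made-up label numbers:

	/* The potentially faulting instruction gets a numeric label. */
1:	vmload %_ASM_AX
2:	/* Execution resumes here after a tolerated fault. */

	/*
	 * Out-of-line fixup: a fault is expected (and ignored) only while
	 * kvm_rebooting is set, i.e. when SVM may already have been
	 * disabled by an emergency reboot; anything else is a fatal bug.
	 */
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	/* Tell the exception tables to route faults at 1: to 10:. */
	_ASM_EXTABLE(1b, 10b)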