Two tiny patches for the IBRS code. They should go in through the x86/pti tree and apply to both the 4.9 and 4.14 trees.
Thanks,
Paolo
v1->v2: removed patch 2; the same bug has already been fixed
Paolo Bonzini (2):
  KVM: x86: use native MSR ops for SPEC_CTRL
  KVM: VMX: mark RDMSR path as unlikely
 arch/x86/kvm/svm.c | 9 +++++----
 arch/x86/kvm/vmx.c | 9 +++++----
 2 files changed, 10 insertions(+), 8 deletions(-)
Having a paravirt indirect call in the IBRS restore path is not a good idea, since we are trying to protect from speculative execution of bogus indirect branch targets. It is also slower, so use native_wrmsrl on the vmentry path too.
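For context, here is roughly what the two paths boil down to (a simplified sketch of the 4.14-era <asm/paravirt.h> and <asm/msr.h> definitions, not the verbatim kernel source; exact macro and pv_ops names vary by version):

	/*
	 * With CONFIG_PARAVIRT, wrmsrl() dispatches through a pv_ops
	 * function pointer, i.e. an indirect call, which is exactly
	 * what we do not want on this path:
	 */
	#define wrmsrl(msr, val)					\
		paravirt_write_msr(msr, (u32)((u64)(val)),		\
				   (u32)((u64)(val) >> 32))

	/* native_wrmsrl() emits the WRMSR instruction directly: */
	static inline void native_wrmsrl(unsigned int msr, u64 val)
	{
		asm volatile("wrmsr"
			     : /* no outputs */
			     : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))
			     : "memory");
	}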
Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
Cc: x86@kernel.org
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Jim Mattson <jmattson@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 7 ++++---
 arch/x86/kvm/vmx.c | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..1598beeda11c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
	 * being speculatively taken.
	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
	 * save it.
	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 67b028d8e726..5caeb8dc5bda 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -9453,7 +9454,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * being speculatively taken.
	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9589,10 +9590,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * save it.
	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
vmx_vcpu_run() and svm_vcpu_run() are large functions, and giving the compiler a branch hint can make a substantial cycle difference by keeping the fast path contiguous in memory. Without the hint, the retpoline-guest/retpoline-host case is about 50 cycles slower.
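For reference, unlikely() is simply the kernel's wrapper around GCC's branch-prediction hint, defined in include/linux/compiler.h (modulo the branch-profiling variants):

	/*
	 * Tell the compiler the condition is expected to be false, so it
	 * moves that branch's code out of line and keeps the hot vmexit
	 * path contiguous in the instruction cache.
	 */
	#define unlikely(x)	__builtin_expect(!!(x), 0)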
Cc: x86@kernel.org
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Jim Mattson <jmattson@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 2 +-
 arch/x86/kvm/vmx.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1598beeda11c..24c9521ebc24 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5465,7 +5465,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index af89d377681d..e13fd2a833c4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9589,7 +9589,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
* Paolo Bonzini <pbonzini@redhat.com> wrote:
> Two tiny patches for the IBRS code. They should go in through the x86/pti
> tree and apply to both the 4.9 and 4.14 trees.
>
> Thanks,
>
> Paolo
>
> v1->v2: removed patch 2; the same bug has already been fixed
>
> Paolo Bonzini (2):
>   KVM: x86: use native MSR ops for SPEC_CTRL
>   KVM: VMX: mark RDMSR path as unlikely
>
>  arch/x86/kvm/svm.c | 9 +++++----
>  arch/x86/kvm/vmx.c | 9 +++++----
>  2 files changed, 10 insertions(+), 8 deletions(-)
Applied to tip:x86/pti, with minor tweaks to the titles/changelogs.
If all goes fine in testing I will send all pending tip:x86/pti changes to Linus later today, so the KVM development tree should be able to pull in these changes via upstream pretty soon.
Thanks,
Ingo