From: Fred Griffoul <fgriffo@amazon.co.uk>
Implement state validation for nested virtualization to enable pfncache support for L1 guest pages.
This adds a new nested_ops callback 'is_nested_state_invalid()' that detects when KVM needs to reload nested virtualization state. When the callback reports that the state is invalid, a KVM_REQ_GET_NESTED_STATE_PAGES request is made so that the affected pages are reloaded before L2 execution resumes. The callback monitors L1 guest pages during guest entry/exit while the vCPU runs in IN_GUEST_MODE.
For now, the VMX implementation simply returns false; full support is added in the next patch.
Signed-off-by: Fred Griffoul <fgriffo@amazon.co.uk> --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx/nested.c | 6 ++++++ arch/x86/kvm/x86.c | 14 +++++++++++++- 3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5a3bfa293e8b..c9a1a43fbfde 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1956,6 +1956,7 @@ struct kvm_x86_nested_ops { struct kvm_nested_state __user *user_kvm_nested_state, struct kvm_nested_state *kvm_state); bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu); + bool (*is_nested_state_invalid)(struct kvm_vcpu *vcpu); int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
int (*enable_evmcs)(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f05828aca7e5..a97d02b08ab8 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3541,6 +3541,11 @@ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) return true; }
+static bool vmx_is_nested_state_invalid(struct kvm_vcpu *vcpu) +{ + return false; +} + static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; @@ -7485,6 +7490,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = { .get_state = vmx_get_nested_state, .set_state = vmx_set_nested_state, .get_nested_state_pages = vmx_get_nested_state_pages, + .is_nested_state_invalid = vmx_is_nested_state_invalid, .write_log_dirty = nested_vmx_write_pml_buffer, #ifdef CONFIG_KVM_HYPERV .enable_evmcs = nested_enable_evmcs, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ff8812f3a129..d830770363ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2257,12 +2257,24 @@ int kvm_emulate_monitor(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_monitor);
+static inline bool kvm_invalid_nested_state(struct kvm_vcpu *vcpu) +{ + if (is_guest_mode(vcpu) && + kvm_x86_ops.nested_ops->is_nested_state_invalid && + kvm_x86_ops.nested_ops->is_nested_state_invalid(vcpu)) { + kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); + return true; + } + return false; +} + static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) { xfer_to_guest_mode_prepare();
return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE || - kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending(); + kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending() || + kvm_invalid_nested_state(vcpu); }
static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)