On Mon, Jan 25, 2021, Paolo Bonzini wrote:
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_host_map *map;
- struct page *page;
- u64 hpa;
- if (!nested_get_evmcs_page(vcpu))
return false;
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {

[...]

@@ -3224,6 +3233,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 	return true;
 }

+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
- if (!nested_get_evmcs_page(vcpu))
return false;
- if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;
nested_get_evmcs_page() will get called twice in the common case of is_guest_mode() == true. I can't tell whether that will ever be fatal, but it's definitely weird. Maybe this?
	if (!is_guest_mode(vcpu))
		return nested_get_evmcs_page(vcpu);

	return nested_get_vmcs12_pages(vcpu);
- return true;
+}
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct vmcs12 *vmcs12;

@@ -6602,7 +6622,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
	.hv_timer_pending = nested_vmx_preemption_timer_pending,
	.get_state = vmx_get_nested_state,
	.set_state = vmx_set_nested_state,
-	.get_nested_state_pages = nested_get_vmcs12_pages,
+	.get_nested_state_pages = vmx_get_nested_state_pages,
 	.write_log_dirty = nested_vmx_write_pml_buffer,
 	.enable_evmcs = nested_enable_evmcs,
 	.get_evmcs_version = nested_get_evmcs_version,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9a8969a6dd06..b910aa74ee05 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8802,9 +8802,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (kvm_request_pending(vcpu)) {
 		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-			if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
-				;
-			else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
 				r = 0;
 				goto out;
 			}
--
2.26.2