From: Paul Durrant <pdurrant@amazon.com>
Using the HVA of the shared_info page is more efficient, so if the capability (KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) is present, use that method to do the mapping.
NOTE: Have the juggle_shinfo_state() thread map and unmap using both GFN and HVA, to make sure the older mechanism is not broken.
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: David Woodhouse <dwmw2@infradead.org>

v3:
 - Re-work the juggle_shinfo_state() thread.

v2:
 - New in this version.
---
 .../selftests/kvm/x86_64/xen_shinfo_test.c | 44 +++++++++++++++----
 1 file changed, 35 insertions(+), 9 deletions(-)
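(Illustration only, not part of the applied patch.) A minimal sketch of how a
VMM might prefer the HVA-based shared_info attribute when the capability is
advertised, mirroring the test change below. It assumes a uapi <linux/kvm.h>
that already carries KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA and the
shared_info.hva field introduced earlier in this series; set_shinfo() and its
parameters are hypothetical names for the caller's VM fd, the userspace
pointer to the shared_info page, and its GFN fallback.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Prefer the HVA-based shared_info attribute when KVM advertises it. */
	static int set_shinfo(int vm_fd, void *shinfo_hva, uint64_t shinfo_gfn)
	{
		int xen_caps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);
		struct kvm_xen_hvm_attr ha;

		memset(&ha, 0, sizeof(ha));
		if (xen_caps > 0 && (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA)) {
			ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
			ha.u.shared_info.hva = (unsigned long)shinfo_hva;
		} else {
			ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
			ha.u.shared_info.gfn = shinfo_gfn;
		}

		/* Returns 0 on success, -1 with errno set on failure. */
		return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
	}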
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 9ec9ab60b63e..a61500ff0822 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -389,6 +389,7 @@ static int cmp_timespec(struct timespec *a, struct timespec *b)
 	return 0;
 }
 
+static struct shared_info *shinfo;
 static struct vcpu_info *vinfo;
 static struct kvm_vcpu *vcpu;
 
@@ -404,20 +405,38 @@ static void *juggle_shinfo_state(void *arg)
 {
 	struct kvm_vm *vm = (struct kvm_vm *)arg;
 
-	struct kvm_xen_hvm_attr cache_activate = {
+	struct kvm_xen_hvm_attr cache_activate_gfn = {
 		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 		.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
 	};
 
-	struct kvm_xen_hvm_attr cache_deactivate = {
+	struct kvm_xen_hvm_attr cache_deactivate_gfn = {
 		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 		.u.shared_info.gfn = KVM_XEN_INVALID_GFN
 	};
 
+	struct kvm_xen_hvm_attr cache_activate_hva = {
+		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
+		.u.shared_info.hva = (unsigned long)shinfo
+	};
+
+	struct kvm_xen_hvm_attr cache_deactivate_hva = {
+		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+		.u.shared_info.hva = 0
+	};
+
+	int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+
 	for (;;) {
-		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
-		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
+		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
 		pthread_testcancel();
+		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);
+
+		if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
+			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
+			pthread_testcancel();
+			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
+		}
 	}
 
 	return NULL;
@@ -442,6 +461,7 @@ int main(int argc, char *argv[])
 	bool do_runstate_flag = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG);
 	bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
 	bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
+	bool has_shinfo_hva = !!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA);
 
 	clock_gettime(CLOCK_REALTIME, &min_ts);
 
@@ -452,7 +472,7 @@ int main(int argc, char *argv[])
 				    SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
 	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);
 
-	struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
+	shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
 
 	int zero_fd = open("/dev/zero", O_RDONLY);
 	TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");
@@ -488,10 +508,16 @@ int main(int argc, char *argv[])
 			    "Failed to read back RUNSTATE_UPDATE_FLAG attr");
 	}
 
-	struct kvm_xen_hvm_attr ha = {
-		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
-		.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE,
-	};
+	struct kvm_xen_hvm_attr ha = {};
+
+	if (has_shinfo_hva) {
+		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
+		ha.u.shared_info.hva = (unsigned long)shinfo;
+	} else {
+		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
+		ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
+	}
+
 	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
 
 	/*