On Fri, May 02, 2025, Manali Shukla wrote:
diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c new file mode 100644 index 000000000000..9c081525ac2a --- /dev/null +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/*
- Copyright (C) 2024 Advanced Micro Devices, Inc.
- */
+#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "svm_util.h" +#include "vmx.h"
+#define NR_ITERATIONS 100 +#define L2_GUEST_STACK_SIZE 64
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
Eww.
+struct buslock_test {
- unsigned char pad[PAGE_SIZE - 2];
- atomic_long_t val;
+} __packed;
You don't need an entire page to generate a bus lock, two cache lines will do nicely. And there's certainly no need for __packed.
+struct buslock_test test __aligned(PAGE_SIZE);
+static __always_inline void buslock_atomic_add(int i, atomic_long_t *v) +{
- asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i) : "memory");
+}
If only there were utilities for atomics...
+static void buslock_add(void)
guest_generate_buslocks()
+{
- /*
* Increment a page unaligned variable atomically.
* This should generate a bus lock exit.
Not should, will.
*/
- for (int i = 0; i < NR_ITERATIONS; i++)
buslock_atomic_add(2, &test.val);
Don't do weird and completely arbitrary things like adding '2' instead of '1', it makes readers look for intent and purpose that doesn't exist.
+}
...
+int main(int argc, char *argv[]) +{
- struct kvm_vcpu *vcpu;
- struct kvm_run *run;
- struct kvm_vm *vm;
- vm_vaddr_t nested_test_data_gva;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
There's no reason to make nested support a hard dependency, it's just as easy to make it conditional.
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
- vm = vm_create(1);
- vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
- vcpu = vm_vcpu_add(vm, 0, guest_code);
- if (kvm_cpu_has(X86_FEATURE_SVM))
vcpu_alloc_svm(vm, &nested_test_data_gva);
- else
vcpu_alloc_vmx(vm, &nested_test_data_gva);
- vcpu_args_set(vcpu, 1, nested_test_data_gva);
- run = vcpu->run;
- for (;;) {
struct ucall uc;
vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_IO) {
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
continue;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
}
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);
- }
*sigh*
This doesn't actually ****VERIFY**** that the expected number of bus lock exits were generated. KVM could literally do nothing and the test will pass. E.g. the test passes if I do this:
diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c index 9c081525ac2a..aa65d6be0f13 100644 --- a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c @@ -93,10 +93,10 @@ int main(int argc, char *argv[]) vm_vaddr_t nested_test_data_gva;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX)); - TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT)); +// TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
vm = vm_create(1); - vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT); +// vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT); vcpu = vm_vcpu_add(vm, 0, guest_code);
if (kvm_cpu_has(X86_FEATURE_SVM)) --
The test would also fail to detect if KVM completely skipped the instruction.
This is not rocket science. If you can't make your test fail by introducing bugs in what you're testing, then your test is worthless.
No need for a v6, I'm going to do surgery when I apply, this series has dragged on for far too long.