On Fri, Feb 02, 2024, Shaoqin Huang wrote:
> v2->v3:
>   - Rebase to v6.8-rc2.
>   - Use TEST_ASSERT().

Patch says otherwise.
> @@ -726,6 +728,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>  		return;
>  	}
>  
> +	sem_getvalue(&sem_vcpu_stop, &sem_val);
> +	assert(sem_val == 0);
> +	sem_getvalue(&sem_vcpu_cont, &sem_val);
> +	assert(sem_val == 0);
> +
>  	/*
>  	 * We reserve page table for 2 times of extra dirty mem which
>  	 * will definitely cover the original (1G+) test range.  Here

> @@ -825,6 +832,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>  		sync_global_to_guest(vm, iteration);
>  	}
>  
> +	/*
> +	 *
> +	 * Before we set the host_quit, let the vcpu has time to run, to make
> +	 * sure we consume the sem_vcpu_stop and the vcpu consume the
> +	 * sem_vcpu_cont, to keep the semaphore balance.
> +	 */
> +	usleep(p->interval * 1000);

Sorry, I wasn't as explicit as I should have been. When I said I don't have a better solution, I did not mean to imply that I am ok with busy waiting as a hack-around.
Against my better judgment, I spent half an hour slogging through this test to figure out what's going wrong. IIUC, the problem is essentially that the test instructs the vCPU worker to continue _after_ the last iteration, and _then_ sets host_quit, which results in the vCPU running one extra (unvalidated) iteration.
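In condensed pseudo-C (my paraphrase of run_test()'s main loop, not the literal code), the problematic ordering is:

	while (iteration < p->iterations) {
		/* ... collect the dirty log, wait for the vCPU to stop, verify ... */
		sem_post(&sem_vcpu_cont);	/* vCPU resumes, even after the final verified iteration */
		iteration++;
		sync_global_to_guest(vm, iteration);
	}

	/* Too late: the vCPU worker may already be running an extra iteration. */
	host_quit = true;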
For the other modes, which stop if and only if vcpu_sync_stop_requested is set, the extra iteration is a non-issue. But because the dirty ring variant stops after every exit (to purge the ring), it hangs without an extra "continue".
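On the worker side, again paraphrasing rather than quoting verbatim, the difference between the modes boils down to:

	while (!READ_ONCE(host_quit)) {
		/* ... enter the guest, handle the exit ... */

		if (host_log_mode == LOG_MODE_DIRTY_RING) {
			/* Stops after every exit so the main task can reap the ring. */
			sem_post(&sem_vcpu_stop);
			sem_wait(&sem_vcpu_cont);
		} else if (atomic_read(&vcpu_sync_stop_requested)) {
			/* Other modes stop if and only if a stop was requested. */
			atomic_set(&vcpu_sync_stop_requested, false);
			sem_post(&sem_vcpu_stop);
			sem_wait(&sem_vcpu_cont);
		}
	}

I.e. on the extra iteration, the dirty ring worker parks itself on sem_vcpu_cont and never rechecks host_quit, which is why the old code needed dirty_ring_before_vcpu_join() to post one last "continue".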
So rather than blindly fire off an extra sem_vcpu_cont that may or may not be consumed, I believe the test can simply set host_quit _before_ the final "continue" so that the vCPU worker doesn't run an extra iteration.
I ran the below with 1000 loops of "for (i = 0; i < LOG_MODE_NUM; i++)" and saw no issues. I didn't see the assert you hit, but without the fix, I did see this fire within a few loops (less than 5, I think):
	assert(host_log_mode == LOG_MODE_DIRTY_RING ||
	       atomic_read(&vcpu_sync_stop_requested) == false);
I'll post this as two patches: one to fix the bug, and a second to have the LOG_MODE_DIRTY_RING variant clear vcpu_sync_stop_requested so that the above assert() can be modified as below.
---
 tools/testing/selftests/kvm/dirty_log_test.c | 62 ++++++++++++--------
 1 file changed, 37 insertions(+), 25 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index babea97b31a4..1a93c4231e45 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -376,7 +376,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 
 	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
 
-	/* Cleared pages should be the same as collected */
+	/*
+	 * Cleared pages should be the same as collected, as KVM is supposed to
+	 * clear only the entries that have been harvested.
+	 */
 	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
 		    "with collected (%u)", cleared, count);
 
@@ -402,6 +405,15 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 		/* Update the flag first before pause */
 		WRITE_ONCE(dirty_ring_vcpu_ring_full,
 			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
+
+		/*
+		 * Unconditionally clear the stop request.  This dirty ring
+		 * variant doesn't actually consume the request, because the
+		 * vCPU worker stops after every exit to allow the main task
+		 * to reap the dirty ring.  But the vCPU is still obviously
+		 * stopped, i.e. has honored the request if one was made.
+		 */
+		atomic_set(&vcpu_sync_stop_requested, false);
 		sem_post(&sem_vcpu_stop);
 		pr_info("vcpu stops because %s...\n",
 			dirty_ring_vcpu_ring_full ?
@@ -415,12 +427,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 	}
 }
 
-static void dirty_ring_before_vcpu_join(void)
-{
-	/* Kick another round of vcpu just to make sure it will quit */
-	sem_post(&sem_vcpu_cont);
-}
-
 struct log_mode {
 	const char *name;
 	/* Return true if this mode is supported, otherwise false */
@@ -433,7 +439,6 @@ struct log_mode {
 			    uint32_t *ring_buf_idx);
 	/* Hook to call when after each vcpu run */
 	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
-	void (*before_vcpu_join) (void);
 } log_modes[LOG_MODE_NUM] = {
 	{
 		.name = "dirty-log",
@@ -452,7 +457,6 @@ struct log_mode {
 		.supported = dirty_ring_supported,
 		.create_vm_done = dirty_ring_create_vm_done,
 		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
-		.before_vcpu_join = dirty_ring_before_vcpu_join,
 		.after_vcpu_run = dirty_ring_after_vcpu_run,
 	},
 };
@@ -513,14 +517,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 		mode->after_vcpu_run(vcpu, ret, err);
 }
 
-static void log_mode_before_vcpu_join(void)
-{
-	struct log_mode *mode = &log_modes[host_log_mode];
-
-	if (mode->before_vcpu_join)
-		mode->before_vcpu_join();
-}
-
 static void generate_random_array(uint64_t *guest_array, uint64_t size)
 {
 	uint64_t i;
@@ -719,6 +715,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct kvm_vm *vm;
 	unsigned long *bmap;
 	uint32_t ring_buf_idx = 0;
+	int sem_val;
 
 	if (!log_mode_supported()) {
 		print_skip("Log mode '%s' not supported",
@@ -788,12 +785,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	/* Start the iterations */
 	iteration = 1;
 	sync_global_to_guest(vm, iteration);
-	host_quit = false;
+	WRITE_ONCE(host_quit, false);
 	host_dirty_count = 0;
 	host_clear_count = 0;
 	host_track_next_count = 0;
 	WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
 
+	/*
+	 * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
+	 * that the main task and vCPU worker were synchronized and completed
+	 * verification of all iterations.
+	 */
+	sem_getvalue(&sem_vcpu_stop, &sem_val);
+	TEST_ASSERT_EQ(sem_val, 0);
+	sem_getvalue(&sem_vcpu_cont, &sem_val);
+	TEST_ASSERT_EQ(sem_val, 0);
+
 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
 
 	while (iteration < p->iterations) {
@@ -816,18 +823,23 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		 * the flush of the last page, and since we handle the last
 		 * page specially verification will succeed anyway.
 		 */
-		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
-		       atomic_read(&vcpu_sync_stop_requested) == false);
+		TEST_ASSERT_EQ(atomic_read(&vcpu_sync_stop_requested), false);
 		vm_dirty_log_verify(mode, bmap);
+
+		/*
+		 * Set host_quit before sem_vcpu_cont in the final iteration to
+		 * ensure that the vCPU worker doesn't resume the guest.  As
+		 * above, the dirty ring test may stop and wait even when not
+		 * explicitly requested to do so, i.e. would hang waiting for a
+		 * "continue" if it's allowed to resume the guest.
+		 */
+		if (++iteration == p->iterations)
+			WRITE_ONCE(host_quit, true);
+
 		sem_post(&sem_vcpu_cont);
-
-		iteration++;
 		sync_global_to_guest(vm, iteration);
 	}
 
-	/* Tell the vcpu thread to quit */
-	host_quit = true;
-	log_mode_before_vcpu_join();
 	pthread_join(vcpu_thread, NULL);
 
 	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "

base-commit: 78651ed78b3496b6dbf1fb7d64d9d9736a23edc0