On Wed, May 29, 2024 at 2:51 PM Sean Christopherson <seanjc@google.com> wrote:
On Wed, May 29, 2024, James Houghton wrote:
@@ -686,10 +694,12 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 	return __kvm_handle_hva_range(kvm, &range).ret;
 }
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
-							 unsigned long start,
-							 unsigned long end,
-							 gfn_handler_t handler)
+static __always_inline int kvm_handle_hva_range_no_flush(
+					struct mmu_notifier *mn,
+					unsigned long start,
+					unsigned long end,
+					gfn_handler_t handler,
+					bool lockless)
Unnecessary and unwanted style change.
Sorry -- this will be fixed.
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	const struct kvm_mmu_notifier_range range = {
@@ -699,6 +709,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 		.on_lock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= false,
+		.lockless	= lockless,
Why add @lockless to kvm_handle_hva_range_no_flush()? Both callers immediately pass %false, and conceptually, locking is always optional for a "no flush" variant.
Right, this isn't needed in this patch. But I think I need it eventually (like, in the next patch), so I'll move it where it is really needed.
 	};
 	return __kvm_handle_hva_range(kvm, &range).ret;
@@ -889,7 +900,8 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+	return kvm_handle_hva_range_no_flush(mn, start, end,
+					     kvm_age_gfn, false);
}
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -899,7 +911,7 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 	trace_kvm_test_age_hva(address);
 	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
-					     kvm_test_age_gfn);
+					     kvm_test_age_gfn, false);
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
2.45.1.288.g0e0cd299f1-goog