On Tue, Nov 19, 2024 at 04:34:59PM +0100, Valentin Schneider wrote:
+static void __text_poke_sync(smp_cond_func_t cond_func)
+{
+	on_each_cpu_cond(cond_func, do_sync_core, NULL, 1);
+}
 void text_poke_sync(void)
 {
-	on_each_cpu(do_sync_core, NULL, 1);
+	__text_poke_sync(NULL);
 }
+
+void text_poke_sync_deferrable(void)
+{
+	__text_poke_sync(do_sync_core_defer_cond);
+}
How about we unwrap some of that like so:
@@ -2257,6 +2273,8 @@ static int tp_vec_nr;
 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
 {
 	unsigned char int3 = INT3_INSN_OPCODE;
-	bool force_ipi = false;
-	void (*sync_fn)(void);
+	smp_cond_func_t cond = do_sync_core_defer_cond;
 	unsigned int i;
 	int do_sync;

@@ -2291,11 +2309,18 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
 	/*
 	 * First step: add a int3 trap to the address that will be patched.
 	 */
 	for (i = 0; i < nr_entries; i++) {
-		/*
-		 * Record that we need to send the IPI if at least one location
-		 * in the batch requires it.
-		 */
-		force_ipi |= tp[i].force_ipi;
+		if (tp[i].force_ipi)
+			cond = NULL;
+
 		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
 	}
- text_poke_sync();
- sync_fn = force_ipi ? text_poke_sync : text_poke_sync_deferrable;
- sync_fn();
+	__text_poke_sync(cond);