5.15-stable review patch. If anyone has any objections, please let me know.
------------------
From: Jiri Olsa <jolsa@kernel.org>
commit f19a4050455aad847fb93f18dc1fe502eb60f989 upstream.
Currently we always clean up/decrement the bpf_bprintf_nest_level variable in bpf_bprintf_cleanup if it's > 0.

There's a possible scenario where this could cause a problem: when bpf_bprintf_prepare does not get a bin_args buffer (because num_args is 0), the following bpf_bprintf_cleanup call still decrements the bpf_bprintf_nest_level variable, like:
  in task context:
    bpf_bprintf_prepare(num_args != 0) increments 'bpf_bprintf_nest_level = 1'
    -> first irq:
       bpf_bprintf_prepare(num_args == 0)
       bpf_bprintf_cleanup decrements 'bpf_bprintf_nest_level = 0'
    -> second irq:
       bpf_bprintf_prepare(num_args != 0) bpf_bprintf_nest_level = 1
       gets same buffer as task context above
Add a check to bpf_bprintf_cleanup and do the real cleanup only if we got bin_args data in the first place.
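To make the interleaving easier to follow, here is a minimal user-space sketch of the scenario; prepare(), cleanup() and struct printf_data are hypothetical stand-ins for the kernel helpers, not the real API. With the bin_args check in cleanup(), the second irq no longer reuses the task context's buffer:

/*
 * Stand-in model of the nesting logic (not kernel code): cleanup() only
 * drops the nesting level when prepare() actually handed out a buffer.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NEST 3

static int nest_level;            /* models bpf_bprintf_nest_level */
static char bufs[MAX_NEST][64];   /* models the per-level buffers  */

struct printf_data {              /* models struct bpf_bprintf_data */
	bool get_bin_args;
	char *bin_args;
};

static int prepare(struct printf_data *data)
{
	if (!data->get_bin_args)
		return 0;                 /* no buffer, no nesting bump */
	if (nest_level >= MAX_NEST)
		return -1;
	data->bin_args = bufs[nest_level++];
	return 0;
}

static void cleanup(struct printf_data *data)
{
	if (!data->bin_args)              /* the added check */
		return;
	nest_level--;
}

int main(void)
{
	struct printf_data task = { .get_bin_args = true };
	struct printf_data irq1 = { .get_bin_args = false };
	struct printf_data irq2 = { .get_bin_args = true };

	prepare(&task);   /* task context: nest_level 0 -> 1                  */
	prepare(&irq1);   /* first irq, num_args == 0: no buffer handed out   */
	cleanup(&irq1);   /* without the check this would reset nest_level... */
	prepare(&irq2);   /* ...and the second irq would reuse task's buffer  */

	printf("task buffer %p, second irq buffer %p\n",
	       (void *)task.bin_args, (void *)irq2.bin_args);

	cleanup(&irq2);
	cleanup(&task);
	return 0;
}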
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20221215214430.1336195-3-jolsa@kernel.org
[cascardo: there is no bpf_trace_vprintk in 5.15]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/bpf.h      |  2 +-
 kernel/bpf/helpers.c     | 16 +++++++++-------
 kernel/trace/bpf_trace.c |  4 ++--
 3 files changed, 12 insertions(+), 10 deletions(-)
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2299,6 +2299,6 @@ struct bpf_bprintf_data {
 
 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 			u32 num_args, struct bpf_bprintf_data *data);
-void bpf_bprintf_cleanup(void);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
 
 #endif /* _LINUX_BPF_H */
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -738,12 +738,14 @@ static int try_get_fmt_tmp_buf(char **tm
 	return 0;
 }
 
-void bpf_bprintf_cleanup(void)
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
 {
-	if (this_cpu_read(bpf_bprintf_nest_level)) {
-		this_cpu_dec(bpf_bprintf_nest_level);
-		preempt_enable();
-	}
+	if (!data->bin_args)
+		return;
+	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
+		return;
+	this_cpu_dec(bpf_bprintf_nest_level);
+	preempt_enable();
 }
 
 /*
@@ -975,7 +977,7 @@ nocopy_fmt:
 	err = 0;
 out:
 	if (err)
-		bpf_bprintf_cleanup();
+		bpf_bprintf_cleanup(data);
 	return err;
 }
 
@@ -1001,7 +1003,7 @@ BPF_CALL_5(bpf_snprintf, char *, str, u3
 
 	err = bstr_printf(str, str_size, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return err + 1;
 }
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -387,7 +387,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt
 	trace_bpf_trace_printk(buf);
 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -435,7 +435,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_fi
 
 	seq_bprintf(m, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 }
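
For reference, all three callers touched above keep the same pairing: the bpf_bprintf_data handed to bpf_bprintf_prepare() is passed back to bpf_bprintf_cleanup(). A sketch with a hypothetical caller; example_helper and its use of the get_bin_args field (added earlier in this series) are illustrative, not part of the patch:

/*
 * Hypothetical caller, for illustration only: cleanup() receives the same
 * bpf_bprintf_data as prepare(), so it can tell whether bin_args was
 * actually set up before touching bpf_bprintf_nest_level.
 */
static int example_helper(char *fmt, u32 fmt_size, const u64 *args, u32 num_args)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	/* ... format with bstr_printf()/seq_bprintf() using data.bin_args ... */

	bpf_bprintf_cleanup(&data);
	return err;
}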