From: Hugo Villeneuve <hvilleneuve(a)dimonoff.com>
If an error occurs during probing, the sc16is7xx_lines bitfield may be left
in a state that doesn't represent the correct state of lines allocation.
For example, in a system with two SC16 devices, if an error occurs only
during probing of channel (port) B of the second device, sc16is7xx_lines
final state will be 00001011b instead of the expected 00000011b.
This is caused in part because of the "i--" in the for/loop located in
the out_ports: error path.
Fix this by checking the return value of uart_add_one_port() and setting the
line allocation bit only if that call was successful. This allows refactoring
the obfuscated for(i--...) loop in the error path, properly calling
uart_remove_one_port() only when needed, and properly unsetting the line
allocation bits.
Also use same mechanism in remove() when calling uart_remove_one_port().
Fixes: c64349722d14 ("sc16is7xx: support multiple devices")
Cc: stable(a)vger.kernel.org
Cc: Yury Norov <yury.norov(a)gmail.com>
Signed-off-by: Hugo Villeneuve <hvilleneuve(a)dimonoff.com>
---
There is already a patch by Yury Norov <yury.norov(a)gmail.com> to simplify
sc16is7xx_alloc_line():
https://lore.kernel.org/all/20231212022749.625238-30-yury.norov@gmail.com/
Since my patch gets rid of sc16is7xx_alloc_line() entirely, it would make
Yury's patch unnecessary.
---
drivers/tty/serial/sc16is7xx.c | 44 ++++++++++++++--------------------
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index b585663c1e6e..b92fd01cfeec 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -407,19 +407,6 @@ static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
regmap_update_bits(one->regmap, reg, mask, val);
}
-static int sc16is7xx_alloc_line(void)
-{
- int i;
-
- BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
-
- for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
- if (!test_and_set_bit(i, &sc16is7xx_lines))
- break;
-
- return i;
-}
-
static void sc16is7xx_power(struct uart_port *port, int on)
{
sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
@@ -1550,6 +1537,13 @@ static int sc16is7xx_probe(struct device *dev,
SC16IS7XX_IOCONTROL_SRESET_BIT);
for (i = 0; i < devtype->nr_uart; ++i) {
+ s->p[i].port.line = find_first_zero_bit(&sc16is7xx_lines,
+ SC16IS7XX_MAX_DEVS);
+ if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+ ret = -ERANGE;
+ goto out_ports;
+ }
+
/* Initialize port data */
s->p[i].port.dev = dev;
s->p[i].port.irq = irq;
@@ -1569,14 +1563,8 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
s->p[i].port.ops = &sc16is7xx_ops;
s->p[i].old_mctrl = 0;
- s->p[i].port.line = sc16is7xx_alloc_line();
s->p[i].regmap = regmaps[i];
- if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
- ret = -ENOMEM;
- goto out_ports;
- }
-
mutex_init(&s->p[i].efr_lock);
ret = uart_get_rs485_mode(&s->p[i].port);
@@ -1594,8 +1582,13 @@ static int sc16is7xx_probe(struct device *dev,
kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc);
+
/* Register port */
- uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
+ ret = uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
+ if (ret)
+ goto out_ports;
+
+ set_bit(s->p[i].port.line, &sc16is7xx_lines);
/* Enable EFR */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
@@ -1653,10 +1646,9 @@ static int sc16is7xx_probe(struct device *dev,
#endif
out_ports:
- for (i--; i >= 0; i--) {
- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
- }
+ for (i = 0; i < devtype->nr_uart; i++)
+ if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
+ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
kthread_stop(s->kworker_task);
@@ -1683,8 +1675,8 @@ static void sc16is7xx_remove(struct device *dev)
for (i = 0; i < s->devtype->nr_uart; i++) {
kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+ if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
+ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
sc16is7xx_power(&s->p[i].port, 0);
}
--
2.39.2
Hi Sasha,
Thank you for picking this patch.
On Thu, 21 Dec 2023 09:53:01 -0500 Sasha Levin <sashal(a)kernel.org> wrote:
> This is a note to let you know that I've just added the patch titled
>
> mm/damon/core: use number of passed access sampling as a timer
>
> to the 6.6-stable tree which can be found at:
> http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
>
> The filename of the patch is:
> mm-damon-core-use-number-of-passed-access-sampling-a.patch
> and it can be found in the queue-6.6 subdirectory.
>
> If you, or anyone else, feels it should not be added to the stable tree,
> please let <stable(a)vger.kernel.org> know about it.
>
>
>
> commit dfda8d41e94ee98ebd2ad78c7cb49625a8c92474
> Author: SeongJae Park <sj(a)kernel.org>
> Date: Thu Sep 14 02:15:23 2023 +0000
>
> mm/damon/core: use number of passed access sampling as a timer
>
> [ Upstream commit 4472edf63d6630e6cf65e205b4fc8c3c94d0afe5 ]
>
> DAMON sleeps for sampling interval after each sampling, and check if the
> aggregation interval and the ops update interval have passed using
> ktime_get_coarse_ts64() and baseline timestamps for the intervals. That
> design is for making the operations occur at deterministic timing
> regardless of the time that spend for each work. However, it turned out
> it is not that useful, and incur not-that-intuitive results.
>
> After all, timer functions, and especially sleep functions that DAMON uses
> to wait for specific timing, are not necessarily strictly accurate. It is
> legal design, so no problem. However, depending on such inaccuracies, the
> nr_accesses can be larger than aggregation interval divided by sampling
> interval. For example, with the default setting (5 ms sampling interval
> and 100 ms aggregation interval) we frequently show regions having
> nr_accesses larger than 20. Also, if the execution of a DAMOS scheme
> takes a long time, next aggregation could happen before enough number of
> samples are collected. This is not what usual users would intuitively
> expect.
>
> Since access check sampling is the smallest unit work of DAMON, using the
> number of passed sampling intervals as the DAMON-internal timer can easily
> avoid these problems. That is, convert aggregation and ops update
> intervals to numbers of sampling intervals that need to be passed before
> those operations be executed, count the number of passed sampling
> intervals, and invoke the operations as soon as the specific amount of
> sampling intervals passed. Make the change.
>
> Note that this could make a behavioral change to settings that using
> intervals that not aligned by the sampling interval. For example, if the
> sampling interval is 5 ms and the aggregation interval is 12 ms, DAMON
> effectively uses 15 ms as its aggregation interval, because it checks
> whether the aggregation interval after sleeping the sampling interval.
> This change will make DAMON to effectively use 10 ms as aggregation
> interval, since it uses 'aggregation interval / sampling interval *
> sampling interval' as the effective aggregation interval, and we don't use
> floating point types. Usual users would have used aligned intervals, so
> this behavioral change is not expected to make any meaningful impact, so
> just make this change.
>
> Link: https://lkml.kernel.org/r/20230914021523.60649-1-sj@kernel.org
> Signed-off-by: SeongJae Park <sj(a)kernel.org>
> Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
> Stable-dep-of: 6376a8245956 ("mm/damon/core: make damon_start() waits until kdamond_fn() starts")
I think adding this patch on 6.6.y has no problem. Nonetheless, Greg notified
me the patch that depends on this ("mm/damon/core: make damon_start() waits
until kdamond_fn() starts") cannot be cleanly applied on 6.1.y and 6.6.y[1,2], and
hence I sent conflict-resolved patches for those[3,4] before.
Hence this patch might not really be required, but I also think adding it now
might help with merging future fixes. I don't have a strong opinion on whether
this patch should be added to 6.6.y or not. I hope you select whichever way is
better for minimizing the stable kernels' maintenance overhead.
[1] https://lore.kernel.org/stable/2023121849-ambulance-violate-e5b2@gregkh/
[2] https://lore.kernel.org/stable/2023121843-pension-tactile-868b@gregkh/
[3] https://lore.kernel.org/r/20231218175939.99263-1-sj@kernel.org
[4] https://lore.kernel.org/r/20231218175959.99278-1-sj@kernel.org
Thanks,
SJ
[...]
From: "Steven Rostedt (Google)" <rostedt(a)goodmis.org>
The synth_event_gen_test module can be built in, if someone wants to run
the tests at boot up and not have to load them.
The synth_event_gen_test_init() function creates and enables the synthetic
events and runs its tests.
The synth_event_gen_test_exit() disables the events it created and
destroys the events.
If the module is builtin, the events are never disabled. The issue is, the
events should be disabled after the tests are run. This could be an issue
if the rest of the boot up tests are enabled, as they expect the events to
be in a known state before testing. That known state happens to be
disabled.
When CONFIG_SYNTH_EVENT_GEN_TEST=y and CONFIG_EVENT_TRACE_STARTUP_TEST=y
a warning will trigger:
Running tests on trace events:
Testing event create_synth_test:
Enabled event during self test!
------------[ cut here ]------------
WARNING: CPU: 2 PID: 1 at kernel/trace/trace_events.c:4150 event_trace_self_tests+0x1c2/0x480
Modules linked in:
CPU: 2 PID: 1 Comm: swapper/0 Not tainted 6.7.0-rc2-test-00031-gb803d7c664d5-dirty #276
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
RIP: 0010:event_trace_self_tests+0x1c2/0x480
Code: bb e8 a2 ab 5d fc 48 8d 7b 48 e8 f9 3d 99 fc 48 8b 73 48 40 f6 c6 01 0f 84 d6 fe ff ff 48 c7 c7 20 b6 ad bb e8 7f ab 5d fc 90 <0f> 0b 90 48 89 df e8 d3 3d 99 fc 48 8b 1b 4c 39 f3 0f 85 2c ff ff
RSP: 0000:ffffc9000001fdc0 EFLAGS: 00010246
RAX: 0000000000000029 RBX: ffff88810399ca80 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffffffffb9f19478 RDI: ffff88823c734e64
RBP: ffff88810399f300 R08: 0000000000000000 R09: fffffbfff79eb32a
R10: ffffffffbcf59957 R11: 0000000000000001 R12: ffff888104068090
R13: ffffffffbc89f0a0 R14: ffffffffbc8a0f08 R15: 0000000000000078
FS: 0000000000000000(0000) GS:ffff88823c700000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000000000 CR3: 00000001f6282001 CR4: 0000000000170ef0
Call Trace:
<TASK>
? __warn+0xa5/0x200
? event_trace_self_tests+0x1c2/0x480
? report_bug+0x1f6/0x220
? handle_bug+0x6f/0x90
? exc_invalid_op+0x17/0x50
? asm_exc_invalid_op+0x1a/0x20
? tracer_preempt_on+0x78/0x1c0
? event_trace_self_tests+0x1c2/0x480
? __pfx_event_trace_self_tests_init+0x10/0x10
event_trace_self_tests_init+0x27/0xe0
do_one_initcall+0xd6/0x3c0
? __pfx_do_one_initcall+0x10/0x10
? kasan_set_track+0x25/0x30
? rcu_is_watching+0x38/0x60
kernel_init_freeable+0x324/0x450
? __pfx_kernel_init+0x10/0x10
kernel_init+0x1f/0x1e0
? _raw_spin_unlock_irq+0x33/0x50
ret_from_fork+0x34/0x60
? __pfx_kernel_init+0x10/0x10
ret_from_fork_asm+0x1b/0x30
</TASK>
This is because the synth_event_gen_test_init() left the synthetic events
that it created enabled. By having it disable them after testing, the
other selftests will run fine.
Link: https://lore.kernel.org/linux-trace-kernel/20231220111525.2f0f49b0@gandalf.…
Cc: stable(a)vger.kernel.org
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Cc: Tom Zanussi <zanussi(a)kernel.org>
Fixes: 9fe41efaca084 ("tracing: Add synth event generation test module")
Acked-by: Masami Hiramatsu (Google) <mhiramat(a)kernel.org>
Reported-by: Alexander Graf <graf(a)amazon.com>
Tested-by: Alexander Graf <graf(a)amazon.com>
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
---
kernel/trace/synth_event_gen_test.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index 8dfe85499d4a..354c2117be43 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -477,6 +477,17 @@ static int __init synth_event_gen_test_init(void)
ret = test_trace_synth_event();
WARN_ON(ret);
+
+ /* Disable when done */
+ trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic",
+ "gen_synth_test", false);
+ trace_array_set_clr_event(empty_synth_test->tr,
+ "synthetic",
+ "empty_synth_test", false);
+ trace_array_set_clr_event(create_synth_test->tr,
+ "synthetic",
+ "create_synth_test", false);
out:
return ret;
}
--
2.42.0
From: "Steven Rostedt (Google)" <rostedt(a)goodmis.org>
Dongliang reported:
I found that in the latest version, the nodes of tracefs have been
changed to dynamically created.
This has caused me to encounter a problem where the gid I specified in
the mounting parameters cannot apply to all files, as in the following
situation:
/data/tmp/events # mount | grep tracefs
tracefs on /data/tmp type tracefs (rw,seclabel,relatime,gid=3012)
gid 3012 = readtracefs
/data/tmp # ls -lh
total 0
-r--r----- 1 root readtracefs 0 1970-01-01 08:00 README
-r--r----- 1 root readtracefs 0 1970-01-01 08:00 available_events
ums9621_1h10:/data/tmp/events # ls -lh
total 0
drwxr-xr-x 2 root root 0 2023-12-19 00:56 alarmtimer
drwxr-xr-x 2 root root 0 2023-12-19 00:56 asoc
It will prevent certain applications from accessing tracefs properly, I
try to avoid this issue by making the following modifications.
To fix this, have the files created default to taking the ownership of
the parent dentry unless the ownership was previously set by the user.
Link: https://lore.kernel.org/linux-trace-kernel/1703063706-30539-1-git-send-emai…
Link: https://lore.kernel.org/linux-trace-kernel/20231220105017.1489d790@gandalf.…
Cc: stable(a)vger.kernel.org
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Cc: Hongyu Jin <hongyu.jin(a)unisoc.com>
Fixes: 28e12c09f5aa0 ("eventfs: Save ownership and mode")
Acked-by: Masami Hiramatsu (Google) <mhiramat(a)kernel.org>
Reported-by: Dongliang Cui <cuidongliang390(a)gmail.com>
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
---
fs/tracefs/event_inode.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 43e237864a42..2ccc849a5bda 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -148,7 +148,8 @@ static const struct file_operations eventfs_file_operations = {
.release = eventfs_release,
};
-static void update_inode_attr(struct inode *inode, struct eventfs_attr *attr, umode_t mode)
+static void update_inode_attr(struct dentry *dentry, struct inode *inode,
+ struct eventfs_attr *attr, umode_t mode)
{
if (!attr) {
inode->i_mode = mode;
@@ -162,9 +163,13 @@ static void update_inode_attr(struct inode *inode, struct eventfs_attr *attr, um
if (attr->mode & EVENTFS_SAVE_UID)
inode->i_uid = attr->uid;
+ else
+ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
if (attr->mode & EVENTFS_SAVE_GID)
inode->i_gid = attr->gid;
+ else
+ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
}
/**
@@ -206,7 +211,7 @@ static struct dentry *create_file(const char *name, umode_t mode,
return eventfs_failed_creating(dentry);
/* If the user updated the directory's attributes, use them */
- update_inode_attr(inode, attr, mode);
+ update_inode_attr(dentry, inode, attr, mode);
inode->i_op = &eventfs_file_inode_operations;
inode->i_fop = fop;
@@ -242,7 +247,8 @@ static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent
return eventfs_failed_creating(dentry);
/* If the user updated the directory's attributes, use them */
- update_inode_attr(inode, &ei->attr, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
+ update_inode_attr(dentry, inode, &ei->attr,
+ S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
inode->i_op = &eventfs_root_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
--
2.42.0
Dear Stable,
> Lee pointed out issue found by syscaller [0] hitting BUG in prog array
> map poke update in prog_array_map_poke_run function due to error value
> returned from bpf_arch_text_poke function.
>
> There's race window where bpf_arch_text_poke can fail due to missing
> bpf program kallsym symbols, which is accounted for with check for
> -EINVAL in that BUG_ON call.
>
> The problem is that in such case we won't update the tail call jump
> and cause imbalance for the next tail call update check which will
> fail with -EBUSY in bpf_arch_text_poke.
>
> I'm hitting following race during the program load:
>
> CPU 0 CPU 1
>
> bpf_prog_load
> bpf_check
> do_misc_fixups
> prog_array_map_poke_track
>
> map_update_elem
> bpf_fd_array_map_update_elem
> prog_array_map_poke_run
>
> bpf_arch_text_poke returns -EINVAL
>
> bpf_prog_kallsyms_add
>
> After bpf_arch_text_poke (CPU 1) fails to update the tail call jump, the next
> poke update fails on expected jump instruction check in bpf_arch_text_poke
> with -EBUSY and triggers the BUG_ON in prog_array_map_poke_run.
>
> Similar race exists on the program unload.
>
> Fixing this by moving the update to bpf_arch_poke_desc_update function which
> makes sure we call __bpf_arch_text_poke that skips the bpf address check.
>
> Each architecture has slightly different approach wrt looking up bpf address
> in bpf_arch_text_poke, so instead of splitting the function or adding new
> 'checkip' argument in previous version, it seems best to move the whole
> map_poke_run update as arch specific code.
>
> [0] https://syzkaller.appspot.com/bug?extid=97a4fe20470e9bc30810
>
> Cc: Lee Jones <lee(a)kernel.org>
> Cc: Maciej Fijalkowski <maciej.fijalkowski(a)intel.com>
> Fixes: ebf7d1f508a7 ("bpf, x64: rework pro/epilogue and tailcall handling in JIT")
> Reported-by: syzbot+97a4fe20470e9bc30810(a)syzkaller.appspotmail.com
> Acked-by: Yonghong Song <yonghong.song(a)linux.dev>
> Signed-off-by: Jiri Olsa <jolsa(a)kernel.org>
> ---
> arch/x86/net/bpf_jit_comp.c | 46 +++++++++++++++++++++++++++++
> include/linux/bpf.h | 3 ++
> kernel/bpf/arraymap.c | 58 +++++++------------------------------
> 3 files changed, 59 insertions(+), 48 deletions(-)
Please could we have this backported?
Guided by the Fixes: tag.
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index 8c10d9abc239..e89e415aa743 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -3025,3 +3025,49 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp
> #endif
> WARN(1, "verification of programs using bpf_throw should have failed\n");
> }
> +
> +void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
> + struct bpf_prog *new, struct bpf_prog *old)
> +{
> + u8 *old_addr, *new_addr, *old_bypass_addr;
> + int ret;
> +
> + old_bypass_addr = old ? NULL : poke->bypass_addr;
> + old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
> + new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
> +
> + /*
> + * On program loading or teardown, the program's kallsym entry
> + * might not be in place, so we use __bpf_arch_text_poke to skip
> + * the kallsyms check.
> + */
> + if (new) {
> + ret = __bpf_arch_text_poke(poke->tailcall_target,
> + BPF_MOD_JUMP,
> + old_addr, new_addr);
> + BUG_ON(ret < 0);
> + if (!old) {
> + ret = __bpf_arch_text_poke(poke->tailcall_bypass,
> + BPF_MOD_JUMP,
> + poke->bypass_addr,
> + NULL);
> + BUG_ON(ret < 0);
> + }
> + } else {
> + ret = __bpf_arch_text_poke(poke->tailcall_bypass,
> + BPF_MOD_JUMP,
> + old_bypass_addr,
> + poke->bypass_addr);
> + BUG_ON(ret < 0);
> + /* let other CPUs finish the execution of program
> + * so that it will not possible to expose them
> + * to invalid nop, stack unwind, nop state
> + */
> + if (!ret)
> + synchronize_rcu();
> + ret = __bpf_arch_text_poke(poke->tailcall_target,
> + BPF_MOD_JUMP,
> + old_addr, NULL);
> + BUG_ON(ret < 0);
> + }
> +}
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 6762dac3ef76..cff5bb08820e 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -3175,6 +3175,9 @@ enum bpf_text_poke_type {
> int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
> void *addr1, void *addr2);
>
> +void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
> + struct bpf_prog *new, struct bpf_prog *old);
> +
> void *bpf_arch_text_copy(void *dst, void *src, size_t len);
> int bpf_arch_text_invalidate(void *dst, size_t len);
>
> diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
> index 2058e89b5ddd..c85ff9162a5c 100644
> --- a/kernel/bpf/arraymap.c
> +++ b/kernel/bpf/arraymap.c
> @@ -1012,11 +1012,16 @@ static void prog_array_map_poke_untrack(struct bpf_map *map,
> mutex_unlock(&aux->poke_mutex);
> }
>
> +void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
> + struct bpf_prog *new, struct bpf_prog *old)
> +{
> + WARN_ON_ONCE(1);
> +}
> +
> static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
> struct bpf_prog *old,
> struct bpf_prog *new)
> {
> - u8 *old_addr, *new_addr, *old_bypass_addr;
> struct prog_poke_elem *elem;
> struct bpf_array_aux *aux;
>
> @@ -1025,7 +1030,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
>
> list_for_each_entry(elem, &aux->poke_progs, list) {
> struct bpf_jit_poke_descriptor *poke;
> - int i, ret;
> + int i;
>
> for (i = 0; i < elem->aux->size_poke_tab; i++) {
> poke = &elem->aux->poke_tab[i];
> @@ -1044,21 +1049,10 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
> * activated, so tail call updates can arrive from here
> * while JIT is still finishing its final fixup for
> * non-activated poke entries.
> - * 3) On program teardown, the program's kallsym entry gets
> - * removed out of RCU callback, but we can only untrack
> - * from sleepable context, therefore bpf_arch_text_poke()
> - * might not see that this is in BPF text section and
> - * bails out with -EINVAL. As these are unreachable since
> - * RCU grace period already passed, we simply skip them.
> - * 4) Also programs reaching refcount of zero while patching
> + * 3) Also programs reaching refcount of zero while patching
> * is in progress is okay since we're protected under
> * poke_mutex and untrack the programs before the JIT
> - * buffer is freed. When we're still in the middle of
> - * patching and suddenly kallsyms entry of the program
> - * gets evicted, we just skip the rest which is fine due
> - * to point 3).
> - * 5) Any other error happening below from bpf_arch_text_poke()
> - * is a unexpected bug.
> + * buffer is freed.
> */
> if (!READ_ONCE(poke->tailcall_target_stable))
> continue;
> @@ -1068,39 +1062,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
> poke->tail_call.key != key)
> continue;
>
> - old_bypass_addr = old ? NULL : poke->bypass_addr;
> - old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
> - new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
> -
> - if (new) {
> - ret = bpf_arch_text_poke(poke->tailcall_target,
> - BPF_MOD_JUMP,
> - old_addr, new_addr);
> - BUG_ON(ret < 0 && ret != -EINVAL);
> - if (!old) {
> - ret = bpf_arch_text_poke(poke->tailcall_bypass,
> - BPF_MOD_JUMP,
> - poke->bypass_addr,
> - NULL);
> - BUG_ON(ret < 0 && ret != -EINVAL);
> - }
> - } else {
> - ret = bpf_arch_text_poke(poke->tailcall_bypass,
> - BPF_MOD_JUMP,
> - old_bypass_addr,
> - poke->bypass_addr);
> - BUG_ON(ret < 0 && ret != -EINVAL);
> - /* let other CPUs finish the execution of program
> - * so that it will not possible to expose them
> - * to invalid nop, stack unwind, nop state
> - */
> - if (!ret)
> - synchronize_rcu();
> - ret = bpf_arch_text_poke(poke->tailcall_target,
> - BPF_MOD_JUMP,
> - old_addr, NULL);
> - BUG_ON(ret < 0 && ret != -EINVAL);
> - }
> + bpf_arch_poke_desc_update(poke, new, old);
> }
> }
> }
> --
> 2.43.0
>
--
Lee Jones [李琼斯]