From: Shivani Agarwal <shivania2@vmware.com>
Hi,
To fix CVE-2024-38588, commit e60b613df8b6 is required, but it has a dependency on aebfd12521d9. Therefore, both patches have been backported to v5.10.
Thanks,
Shivani
Shivani Agarwal (2):
  x86/ibt,ftrace: Search for __fentry__ location
  ftrace: Fix possible use-after-free issue in ftrace_location()
 arch/x86/kernel/kprobes/core.c | 11 +-----
 kernel/bpf/trampoline.c        | 20 ++--------
 kernel/kprobes.c               |  8 +---
 kernel/trace/ftrace.c          | 71 ++++++++++++++++++++++++++--------
 4 files changed, 63 insertions(+), 47 deletions(-)
From: Peter Zijlstra <peterz@infradead.org>
[ Upstream commit aebfd12521d9c7d0b502cf6d06314cfbcdccfe3b ]
Currently, a lot of ftrace code assumes __fentry__ is at sym+0. However, with Intel IBT enabled, the first instruction of a function will most likely be ENDBR.
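Concretely (an illustrative layout, not taken from the patch itself), with IBT the __fentry__ call is no longer at the symbol address:

	/*
	 * Without IBT:                With IBT enabled (illustrative):
	 *
	 * sym+0: call __fentry__      sym+0: endbr64
	 *                             sym+4: call __fentry__
	 *
	 * so an exact-match lookup on sym+0 no longer finds the
	 * __fentry__ site.
	 */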
Change ftrace_location() to not only return the __fentry__ location when called for the __fentry__ location, but also when called for the sym+0 location.
Then audit/update all callsites of this function to consistently use these new semantics.
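For callers, the practical effect is that ftrace_location() now returns the __fentry__ address itself, so an exact-match comparison distinguishes the cases. A minimal sketch of the callsite pattern under the new semantics (illustrative only, not part of the patch):

	unsigned long faddr = ftrace_location(addr);

	if (faddr == addr) {
		/* @addr is itself the __fentry__ site */
	} else if (faddr) {
		/* @addr was sym+0; faddr is sym's __fentry__ location */
	} else {
		/* @addr is not a traced location */
	}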
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.227581603@infradead.org
Stable-dep-of: e60b613df8b6 ("ftrace: Fix possible use-after-free issue in ftrace_location()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
[Shivani: Modified to apply on v5.10.y]
Signed-off-by: Shivani Agarwal <shivani.agarwal@broadcom.com>
---
 arch/x86/kernel/kprobes/core.c | 11 ++------
 kernel/bpf/trampoline.c        | 20 +++-----------
 kernel/kprobes.c               |  8 ++----
 kernel/trace/ftrace.c          | 48 ++++++++++++++++++++++++++++------
 4 files changed, 48 insertions(+), 39 deletions(-)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e7edc9e4c6cd..6d59c8e7719b 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -194,17 +194,10 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
 	struct kprobe *kp;
-	unsigned long faddr;
+	bool faddr;
 
 	kp = get_kprobe((void *)addr);
-	faddr = ftrace_location(addr);
-	/*
-	 * Addresses inside the ftrace location are refused by
-	 * arch_check_ftrace_location(). Something went terribly wrong
-	 * if such an address is checked here.
-	 */
-	if (WARN_ON(faddr && faddr != addr))
-		return 0UL;
+	faddr = ftrace_location(addr) == addr;
 	/*
 	 * Use the current code if it is not modified by Kprobe
 	 * and it cannot be modified by ftrace.
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 87becf77cc75..0a14f14d83fe 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -87,18 +87,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 	return tr;
 }
 
-static int is_ftrace_location(void *ip)
-{
-	long addr;
-
-	addr = ftrace_location((long)ip);
-	if (!addr)
-		return 0;
-	if (WARN_ON_ONCE(addr != (long)ip))
-		return -EFAULT;
-	return 1;
-}
-
 static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
 {
 	void *ip = tr->func.addr;
@@ -127,12 +115,12 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
 static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 {
 	void *ip = tr->func.addr;
+	unsigned long faddr;
 	int ret;
 
-	ret = is_ftrace_location(ip);
-	if (ret < 0)
-		return ret;
-	tr->func.ftrace_managed = ret;
+	faddr = ftrace_location((unsigned long)ip);
+	if (faddr)
+		tr->func.ftrace_managed = true;
 
 	if (tr->func.ftrace_managed)
 		ret = register_ftrace_direct((long)ip, (long)new_addr);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c8e62458d323..551ac118159f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1609,14 +1609,10 @@ static inline int check_kprobe_rereg(struct kprobe *p)
 
 int __weak arch_check_ftrace_location(struct kprobe *p)
 {
-	unsigned long ftrace_addr;
+	unsigned long addr = (unsigned long)p->addr;
 
-	ftrace_addr = ftrace_location((unsigned long)p->addr);
-	if (ftrace_addr) {
+	if (ftrace_location(addr) == addr) {
 #ifdef CONFIG_KPROBES_ON_FTRACE
-		/* Given address is not on the instruction boundary */
-		if ((unsigned long)p->addr != ftrace_addr)
-			return -EILSEQ;
 		p->flags |= KPROBE_FLAG_FTRACE;
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
 		return -EINVAL;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 31fec924b7c4..a781733b2a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1575,17 +1575,34 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 }
 
 /**
- * ftrace_location - return true if the ip giving is a traced location
+ * ftrace_location - return the ftrace location
  * @ip: the instruction pointer to check
  *
- * Returns rec->ip if @ip given is a pointer to a ftrace location.
- * That is, the instruction that is either a NOP or call to
- * the function tracer. It checks the ftrace internal tables to
- * determine if the address belongs or not.
+ * If @ip matches the ftrace location, return @ip.
+ * If @ip matches sym+0, return sym's ftrace location.
+ * Otherwise, return 0.
  */
 unsigned long ftrace_location(unsigned long ip)
 {
-	return ftrace_location_range(ip, ip);
+	struct dyn_ftrace *rec;
+	unsigned long offset;
+	unsigned long size;
+
+	rec = lookup_rec(ip, ip);
+	if (!rec) {
+		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+			goto out;
+
+		/* map sym+0 to __fentry__ */
+		if (!offset)
+			rec = lookup_rec(ip, ip + size - 1);
+	}
+
+	if (rec)
+		return rec->ip;
+
+out:
+	return 0;
 }
 
 /**
@@ -4948,7 +4965,8 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 {
 	struct ftrace_func_entry *entry;
 
-	if (!ftrace_location(ip))
+	ip = ftrace_location(ip);
+	if (!ip)
 		return -EINVAL;
 
 	if (remove) {
@@ -5096,11 +5114,16 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
 	struct ftrace_func_entry *entry;
 	struct ftrace_hash *free_hash = NULL;
 	struct dyn_ftrace *rec;
-	int ret = -EBUSY;
+	int ret = -ENODEV;
 
 	mutex_lock(&direct_mutex);
 
+	ip = ftrace_location(ip);
+	if (!ip)
+		goto out_unlock;
+
 	/* See if there's a direct function at @ip already */
+	ret = -EBUSY;
 	if (ftrace_find_rec_direct(ip))
 		goto out_unlock;
 
@@ -5229,6 +5252,10 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
 
 	mutex_lock(&direct_mutex);
 
+	ip = ftrace_location(ip);
+	if (!ip)
+		goto out_unlock;
+
 	entry = find_direct_entry(&ip, NULL);
 	if (!entry)
 		goto out_unlock;
@@ -5360,6 +5387,11 @@ int modify_ftrace_direct(unsigned long ip,
 	mutex_lock(&direct_mutex);
 
 	mutex_lock(&ftrace_lock);
+
+	ip = ftrace_location(ip);
+	if (!ip)
+		goto out_unlock;
+
 	entry = find_direct_entry(&ip, &rec);
 	if (!entry)
 		goto out_unlock;
From: Zheng Yejian <zhengyejian1@huawei.com>
[ Upstream commit e60b613df8b6253def41215402f72986fee3fc8d ]
KASAN reports a bug:
BUG: KASAN: use-after-free in ftrace_location+0x90/0x120
Read of size 8 at addr ffff888141d40010 by task insmod/424
CPU: 8 PID: 424 Comm: insmod Tainted: G        W          6.9.0-rc2+
[...]
Call Trace:
 <TASK>
 dump_stack_lvl+0x68/0xa0
 print_report+0xcf/0x610
 kasan_report+0xb5/0xe0
 ftrace_location+0x90/0x120
 register_kprobe+0x14b/0xa40
 kprobe_init+0x2d/0xff0 [kprobe_example]
 do_one_initcall+0x8f/0x2d0
 do_init_module+0x13a/0x3c0
 load_module+0x3082/0x33d0
 init_module_from_file+0xd2/0x130
 __x64_sys_finit_module+0x306/0x440
 do_syscall_64+0x68/0x140
 entry_SYSCALL_64_after_hwframe+0x71/0x79
The root cause is that, in lookup_rec(), the ftrace record for an address is searched in the ftrace pages of some module while, at the same time, those ftrace pages are being freed in ftrace_release_mod() as the corresponding module is deleted:
               CPU1                      |      CPU2
  register_kprobes() {                   | delete_module() {
    check_kprobe_address_safe() {        |
      arch_check_ftrace_location() {     |
        ftrace_location() {              |
          lookup_rec() // USE!           |   ftrace_release_mod() // Free!
To fix this issue:
  1. Hold the RCU read lock while accessing ftrace pages in
     ftrace_location_range();
  2. Use ftrace_location_range() instead of lookup_rec() in
     ftrace_location();
  3. Call synchronize_rcu() before freeing any ftrace pages, in
     ftrace_process_locs()/ftrace_release_mod()/ftrace_free_mem()
     (see the sketch below).
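Illustratively, this is the standard RCU retire pattern; a minimal sketch of the resulting ordering (unlink_pages() is a hypothetical helper, not the exact kernel code):

	/* Reader side, as in ftrace_location_range(): */
	rcu_read_lock();
	rec = lookup_rec(start, end);	/* may walk module ftrace pages */
	if (rec)
		ip = rec->ip;		/* copy the result out under the lock */
	rcu_read_unlock();

	/* Updater side, as in ftrace_release_mod()/ftrace_free_mem(): */
	tmp_page = unlink_pages();	/* hypothetical: detach pages under ftrace_lock */
	synchronize_rcu();		/* wait for all in-flight readers to finish */
	ftrace_free_pages(tmp_page);	/* no reader can still reference the pages */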
Link: https://lore.kernel.org/linux-trace-kernel/20240509192859.1273558-1-zhengyej...
Cc: stable@vger.kernel.org
Cc: mhiramat@kernel.org
Cc: mark.rutland@arm.com
Cc: mathieu.desnoyers@efficios.com
Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization")
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
[Shivani: Modified to apply on v5.10.y]
Signed-off-by: Shivani Agarwal <shivani.agarwal@broadcom.com>
---
 kernel/trace/ftrace.c | 39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a781733b2a01..36182e7e0cd7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1566,12 +1566,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 {
 	struct dyn_ftrace *rec;
+	unsigned long ip = 0;
 
+	rcu_read_lock();
 	rec = lookup_rec(start, end);
 	if (rec)
-		return rec->ip;
+		ip = rec->ip;
+	rcu_read_unlock();
 
-	return 0;
+	return ip;
 }
 
 /**
@@ -1584,25 +1587,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
  */
 unsigned long ftrace_location(unsigned long ip)
 {
-	struct dyn_ftrace *rec;
+	unsigned long loc;
 	unsigned long offset;
 	unsigned long size;
 
-	rec = lookup_rec(ip, ip);
-	if (!rec) {
+	loc = ftrace_location_range(ip, ip);
+	if (!loc) {
 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
 			goto out;
 
 		/* map sym+0 to __fentry__ */
 		if (!offset)
-			rec = lookup_rec(ip, ip + size - 1);
+			loc = ftrace_location_range(ip, ip + size - 1);
 	}
 
-	if (rec)
-		return rec->ip;
-
 out:
-	return 0;
+	return loc;
 }
 
 /**
@@ -6331,6 +6331,8 @@ static int ftrace_process_locs(struct module *mod,
 	/* We should have used all pages unless we skipped some */
 	if (pg_unuse) {
 		WARN_ON(!skipped);
+		/* Need to synchronize with ftrace_location_range() */
+		synchronize_rcu();
 		ftrace_free_pages(pg_unuse);
 	}
 	return ret;
@@ -6513,6 +6515,9 @@ void ftrace_release_mod(struct module *mod)
  out_unlock:
 	mutex_unlock(&ftrace_lock);
 
+	/* Need to synchronize with ftrace_location_range() */
+	if (tmp_page)
+		synchronize_rcu();
 	for (pg = tmp_page; pg; pg = tmp_page) {
 
 		/* Needs to be called outside of ftrace_lock */
@@ -6835,6 +6840,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 	unsigned long start = (unsigned long)(start_ptr);
 	unsigned long end = (unsigned long)(end_ptr);
 	struct ftrace_page **last_pg = &ftrace_pages_start;
+	struct ftrace_page *tmp_page = NULL;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
 	struct dyn_ftrace key;
@@ -6878,12 +6884,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 		ftrace_update_tot_cnt--;
 		if (!pg->index) {
 			*last_pg = pg->next;
-			if (pg->records) {
-				free_pages((unsigned long)pg->records, pg->order);
-				ftrace_number_of_pages -= 1 << pg->order;
-			}
-			ftrace_number_of_groups--;
-			kfree(pg);
+			pg->next = tmp_page;
+			tmp_page = pg;
 			pg = container_of(last_pg, struct ftrace_page, next);
 			if (!(*last_pg))
 				ftrace_pages = pg;
@@ -6900,6 +6902,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 		clear_func_from_hashes(func);
 		kfree(func);
 	}
+	/* Need to synchronize with ftrace_location_range() */
+	if (tmp_page) {
+		synchronize_rcu();
+		ftrace_free_pages(tmp_page);
+	}
 }
 
 void __init ftrace_free_init_mem(void)
On Tue, Sep 24, 2024 at 11:53:22PM -0700, Shivani Agarwal wrote:
> From: Shivani Agarwal <shivania2@vmware.com>
>
> Hi,
>
> To fix CVE-2024-38588, commit e60b613df8b6 is required, but it has a
> dependency on aebfd12521d9. Therefore, both patches have been
> backported to v5.10.
All queued up, thanks.
greg k-h