From: Oleg Nesterov <oleg@redhat.com>
[ Upstream commit c7b4133c48445dde789ed30b19ccb0448c7593f7 ]
1. Clear utask->xol_vaddr unconditionally; even if this addr is not valid, xol_free_insn_slot() should never return with utask->xol_vaddr != NULL.

2. Add a comment to explain why we need to validate slot_addr.

3. Simplify the validation above. We can simply check offset < PAGE_SIZE; the unsigned underflow is fine, so the check also works if slot_addr < area->vaddr (a userspace sketch of this check follows the commit message below).

4. Kill the unnecessary "slot_nr >= UINSNS_PER_PAGE" check; slot_nr must be valid if offset < PAGE_SIZE.
The next patches will clean up this function even more.
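For illustration only, here is a minimal userspace sketch of the range check from point 3 above; it is not part of the patch, and the page size and addresses are made-up values chosen to show that the unsigned subtraction also rejects a slot_addr below area->vaddr:

/* Standalone sketch, not kernel code: the unsigned-underflow range check. */
#include <stdio.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096UL	/* assumed page size for the example */
#endif

static int addr_in_area(unsigned long slot_addr, unsigned long vaddr)
{
	/* Wraps to a huge value when slot_addr < vaddr, so the check fails. */
	unsigned long offset = slot_addr - vaddr;

	return offset < PAGE_SIZE;
}

int main(void)
{
	unsigned long vaddr = 0x7f0000000000UL;	/* hypothetical area->vaddr */

	printf("%d\n", addr_in_area(vaddr + 128, vaddr));		/* 1: inside the page */
	printf("%d\n", addr_in_area(vaddr + PAGE_SIZE, vaddr));	/* 0: one past the end */
	printf("%d\n", addr_in_area(vaddr - 8, vaddr));		/* 0: below the base, offset wraps */
	return 0;
}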
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240929144235.GA9471@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/events/uprobes.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1ea2c1f311261..220d5f4a57e6b 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1634,8 +1634,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 static void xol_free_insn_slot(struct task_struct *tsk)
 {
 	struct xol_area *area;
-	unsigned long vma_end;
 	unsigned long slot_addr;
+	unsigned long offset;
 
 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
 		return;
@@ -1644,24 +1644,21 @@ static void xol_free_insn_slot(struct task_struct *tsk)
 	if (unlikely(!slot_addr))
 		return;
 
+	tsk->utask->xol_vaddr = 0;
 	area = tsk->mm->uprobes_state.xol_area;
-	vma_end = area->vaddr + PAGE_SIZE;
-	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
-		unsigned long offset;
-		int slot_nr;
-
-		offset = slot_addr - area->vaddr;
-		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
-		if (slot_nr >= UINSNS_PER_PAGE)
-			return;
+	offset = slot_addr - area->vaddr;
+	/*
+	 * slot_addr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
+	 * This check can only fail if the "[uprobes]" vma was mremap'ed.
+	 */
+	if (offset < PAGE_SIZE) {
+		int slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
 
 		clear_bit(slot_nr, area->bitmap);
 		atomic_dec(&area->slot_count);
 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
 		if (waitqueue_active(&area->wq))
 			wake_up(&area->wq);
-
-		tsk->utask->xol_vaddr = 0;
 	}
 }
From: Thomas Gleixner <tglx@linutronix.de>
[ Upstream commit c163e40af9b2331b2c629fd4ec8b703ed4d4ae39 ]
clocksource_delta() has two variants: one with a check for negative motion, which is only selected by x86. This is a historic leftover, as this function was previously used in the time getter hot paths.

Since commit 135225a363ae, timekeeping_cycles_to_ns() has unconditional protection against this as a by-product of the protection against 64bit math overflow.
clocksource_delta() is only used in the clocksource watchdog and in timekeeping_advance(). The extra conditional there is not hurting anyone.
Remove the config option and unconditionally prevent negative motion of the readout.
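For illustration, the surviving check can be exercised in a standalone userspace sketch; the helper body below is copied from the hunk further down, while the mask and counter values are made up:

#include <stdio.h>
#include <stdint.h>

/* Same logic as the remaining kernel helper: clamp apparent negative motion to 0. */
static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	uint64_t ret = (now - last) & mask;

	/* Any bit set above mask >> 1 means the counter appears to have gone backwards. */
	return ret & ~(mask >> 1) ? 0 : ret;
}

int main(void)
{
	uint64_t mask = ~0ULL;	/* hypothetical full-width 64-bit counter */

	printf("%llu\n", (unsigned long long)clocksource_delta(1000, 900, mask));	/* 100 */
	printf("%llu\n", (unsigned long long)clocksource_delta(900, 1000, mask));	/* 0: negative motion clamped */
	return 0;
}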
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: John Stultz <jstultz@google.com>
Link: https://lore.kernel.org/all/20241031120328.599430157@linutronix.de
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/Kconfig                   | 1 -
 kernel/time/Kconfig                | 5 -----
 kernel/time/timekeeping_internal.h | 7 -------
 3 files changed, 13 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0c802ade80406..e71101ced756e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -107,7 +107,6 @@ config X86
 	select ARCH_WANTS_THP_SWAP	if X86_64
 	select BUILDTIME_TABLE_SORT
 	select CLKEVT_I8253
-	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
 	select CLOCKSOURCE_WATCHDOG
 	select DCACHE_WORD_ACCESS
 	select EDAC_ATOMIC_SCRUB
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index a09b1d61df6a5..5cbedc0a06efc 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -17,11 +17,6 @@ config ARCH_CLOCKSOURCE_DATA
 config ARCH_CLOCKSOURCE_INIT
 	bool
 
-# Clocksources require validation of the clocksource against the last
-# cycle update - x86/TSC misfeature
-config CLOCKSOURCE_VALIDATE_LAST_CYCLE
-	bool
-
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
 	bool
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index 4ca2787d1642e..1d4854d5c386e 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -15,7 +15,6 @@ extern void tk_debug_account_sleep_time(const struct timespec64 *t);
 #define tk_debug_account_sleep_time(x)
 #endif
 
-#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
 {
 	u64 ret = (now - last) & mask;
@@ -26,12 +25,6 @@ static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
 	 */
 	return ret & ~(mask >> 1) ? 0 : ret;
 }
-#else
-static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
-{
-	return (now - last) & mask;
-}
-#endif
 
 /* Semi public for serialization of non timekeeper VDSO updates. */
 extern raw_spinlock_t timekeeper_lock;
Hi!
> [ Upstream commit c163e40af9b2331b2c629fd4ec8b703ed4d4ae39 ]
>
> clocksource_delta() has two variants: one with a check for negative
> motion, which is only selected by x86. This is a historic leftover,
> as this function was previously used in the time getter hot paths.
>
> Since commit 135225a363ae, timekeeping_cycles_to_ns() has
> unconditional protection against this as a by-product of the
> protection against 64bit math overflow.
>
> clocksource_delta() is only used in the clocksource watchdog and in
> timekeeping_advance(). The extra conditional there is not hurting
> anyone.
We don't have 135225a363ae in 5.10. So we probably should not have this?
Best regards, Pavel