From: Jens Axboe <axboe@kernel.dk>
[ Upstream commit 5c251e9dc0e127bac6fc5b8e6696363d2e35f515 ]
This is in preparation for maintaining signal_pending() as the decider of whether a schedule() loop should be broken or continue sleeping. This is different from the core signal use cases, which really need to know whether an actual signal is pending or not. task_sigpending() returns non-zero if TIF_SIGPENDING is set.
Only core kernel use cases should care about the distinction between the two; make sure those use the task_sigpending() helper.
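For reference, a rough sketch of the kind of wait loop this split is meant to keep working unchanged. This is illustrative only and not taken from this series; my_wait_for_event() and my_event_ready() are hypothetical placeholders:

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>

extern bool my_event_ready(void);	/* hypothetical wake condition */

/* Hypothetical driver-style wait loop; illustrative only. */
static int my_wait_for_event(void)
{
	int ret = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_event_ready())
			break;
		/*
		 * signal_pending() remains the "stop sleeping?" check for
		 * loops like this; core signal code that must know whether
		 * an actual signal is queued uses task_sigpending() instead.
		 */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return ret;
}

Since signal_pending() now simply calls task_sigpending(), such loops behave exactly as before; the split only gives core signal code a way to ask specifically about TIF_SIGPENDING.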
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20201026203230.386348-2-axboe@kernel.dk
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/sched/signal.h | 9 +++++++--
 kernel/events/uprobes.c      | 2 +-
 kernel/signal.c              | 8 ++++----
 3 files changed, 12 insertions(+), 7 deletions(-)
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -354,11 +354,16 @@ static inline int restart_syscall(void)
 	return -ERESTARTNOINTR;
 }
 
-static inline int signal_pending(struct task_struct *p)
+static inline int task_sigpending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
 
+static inline int signal_pending(struct task_struct *p)
+{
+	return task_sigpending(p);
+}
+
 static inline int __fatal_signal_pending(struct task_struct *p)
 {
 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
@@ -366,7 +371,7 @@ static inline int __fatal_signal_pending
 
 static inline int fatal_signal_pending(struct task_struct *p)
 {
-	return signal_pending(p) && __fatal_signal_pending(p);
+	return task_sigpending(p) && __fatal_signal_pending(p);
 }
 
 static inline int signal_pending_state(long state, struct task_struct *p)
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1973,7 +1973,7 @@ bool uprobe_deny_signal(void)
 
 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
 
-	if (signal_pending(t)) {
+	if (task_sigpending(t)) {
 		spin_lock_irq(&t->sighand->siglock);
 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
 		spin_unlock_irq(&t->sighand->siglock);
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -984,7 +984,7 @@ static inline bool wants_signal(int sig,
 	if (task_is_stopped_or_traced(p))
 		return false;
 
-	return task_curr(p) || !signal_pending(p);
+	return task_curr(p) || !task_sigpending(p);
 }
 
 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
@@ -2813,7 +2813,7 @@ static void retarget_shared_pending(stru
 		/* Remove the signals this thread can handle. */
 		sigandsets(&retarget, &retarget, &t->blocked);
 
-		if (!signal_pending(t))
+		if (!task_sigpending(t))
 			signal_wake_up(t, 0);
 
 		if (sigisemptyset(&retarget))
@@ -2847,7 +2847,7 @@ void exit_signals(struct task_struct *ts
 
 	cgroup_threadgroup_change_end(tsk);
 
-	if (!signal_pending(tsk))
+	if (!task_sigpending(tsk))
 		goto out;
 
 	unblocked = tsk->blocked;
@@ -2891,7 +2891,7 @@ long do_no_restart_syscall(struct restar
 
 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
 {
-	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
+	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
 		sigset_t newblocked;
 		/* A set of now blocked but previously unblocked signals. */
 		sigandnsets(&newblocked, newset, &current->blocked);