this_rq_is_locked() is introduced to check whether the current CPU is holding its rq lock. It will be used in bpf/stackmap.c to decide whether it is safe to call up_read(), which may take rq_lock() for the same CPU and thus deadlock.
Fixes: 615755a77b24 ("bpf: extend stackmap to save binary_build_id+offset instead of address")
Cc: stable@vger.kernel.org # v4.17+
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Song Liu <songliubraving@fb.com>
---
 include/linux/sched.h | 1 +
 kernel/sched/core.c   | 8 ++++++++
 2 files changed, 9 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c2e56bd8913..fb0fcbd1b6f6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1995,4 +1995,5 @@ int sched_trace_rq_cpu(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
+bool this_rq_is_locked(void);
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7880f4f64d0e..577cbe7c05fc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -138,6 +138,14 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	}
 }
 
+bool this_rq_is_locked(void)
+{
+	struct rq *rq;
+
+	rq = this_rq();
+	return raw_spin_is_locked(&rq->lock);
+}
+
 /*
  * RQ-clock updating methods:
  */
-- 
2.17.1
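
For reference, a minimal sketch of how bpf/stackmap.c could use this
helper; the wrapper function name and the defer_up_read() fallback are
hypothetical and not part of this patch:

#include <linux/rwsem.h>
#include <linux/sched.h>

/*
 * Illustrative only: call up_read() directly when this CPU does not
 * already hold its rq lock; otherwise defer it, since up_read() may
 * wake a writer and try to take the same rq lock, deadlocking.
 */
static void stack_map_release_sem(struct rw_semaphore *sem)
{
	if (!this_rq_is_locked())
		up_read(sem);
	else
		defer_up_read(sem);	/* hypothetical deferred unlock */
}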