5.10-stable review patch. If anyone has any objections, please let me know.
------------------
From: Rob Clark <robdclark@chromium.org>
[ Upstream commit f630c7c6f10546ebff15c3a856e7949feb7a2372 ]
While migrating some code from wq to kthread_worker, I found that I missed the execute_start/end tracepoints. So add similar tracepoints for kthread_work. And, for completeness, add a queue_work tracepoint (although this one differs slightly from the matching workqueue tracepoint).
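For reviewers who want to see the new events fire, a minimal sketch of a kthread_worker user follows. It is illustration only and not part of this patch; the demo_* names are hypothetical:

/*
 * Hypothetical module that queues one kthread_work, so the new
 * sched_kthread_work_queue_work and execute_start/end events fire.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
	/* Runs between the execute_start and execute_end tracepoints. */
	pr_info("demo kthread_work ran\n");
}

static int __init demo_init(void)
{
	demo_worker = kthread_create_worker(0, "demo_kworker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);

	kthread_init_work(&demo_work, demo_work_fn);
	/* The queue_work tracepoint fires from kthread_insert_work(). */
	kthread_queue_work(demo_worker, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_flush_worker(demo_worker);
	kthread_destroy_worker(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Since the events are defined in trace/events/sched.h, they appear alongside the other sched events under events/sched/ in tracefs and can be enabled like any existing tracepoint.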
Link: https://lkml.kernel.org/r/20201010180323.126634-1-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Cc: Rob Clark <robdclark@chromium.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Phil Auld <pauld@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Thara Gopinath <thara.gopinath@linaro.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Vincent Donnefort <vincent.donnefort@arm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Ilias Stamatis <stamatis.iliass@gmail.com>
Cc: Liang Chen <cl@rock-chips.com>
Cc: Ben Dooks <ben.dooks@codethink.co.uk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "J. Bruce Fields" <bfields@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Stable-dep-of: e16c7b07784f ("kthread: fix task state in kthread worker if being frozen")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/trace/events/sched.h | 84 ++++++++++++++++++++++++++++++++++++
 kernel/kthread.c             |  9 ++++
 2 files changed, 93 insertions(+)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c96a4337afe6c..5039af667645d 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -5,6 +5,7 @@
 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SCHED_H
 
+#include <linux/kthread.h>
 #include <linux/sched/numa_balancing.h>
 #include <linux/tracepoint.h>
 #include <linux/binfmts.h>
@@ -51,6 +52,89 @@ TRACE_EVENT(sched_kthread_stop_ret,
 	TP_printk("ret=%d", __entry->ret)
 );
 
+/**
+ * sched_kthread_work_queue_work - called when a work gets queued
+ * @worker:	pointer to the kthread_worker
+ * @work:	pointer to struct kthread_work
+ *
+ * This event occurs when a work is queued immediately or once a
+ * delayed work is actually queued (ie: once the delay has been
+ * reached).
+ */
+TRACE_EVENT(sched_kthread_work_queue_work,
+
+	TP_PROTO(struct kthread_worker *worker,
+		 struct kthread_work *work),
+
+	TP_ARGS(worker, work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( void *,	worker)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->worker		= worker;
+	),
+
+	TP_printk("work struct=%p function=%ps worker=%p",
+		  __entry->work, __entry->function, __entry->worker)
+);
+
+/**
+ * sched_kthread_work_execute_start - called immediately before the work callback
+ * @work:	pointer to struct kthread_work
+ *
+ * Allows to track kthread work execution.
+ */
+TRACE_EVENT(sched_kthread_work_execute_start,
+
+	TP_PROTO(struct kthread_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+	),
+
+	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
+
+/**
+ * sched_kthread_work_execute_end - called immediately after the work callback
+ * @work:	pointer to struct kthread_work
+ * @function:	pointer to worker function
+ *
+ * Allows to track kthread work execution.
+ */
+TRACE_EVENT(sched_kthread_work_execute_end,
+
+	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
+
+	TP_ARGS(work, function),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= function;
+	),
+
+	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
+
 /*
  * Tracepoint for waking up a task:
  */
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9d6cc9c15a55e..d0cb3e413eff5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -765,8 +765,15 @@ int kthread_worker_fn(void *worker_ptr)
 	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
+		kthread_work_func_t func = work->func;
 		__set_current_state(TASK_RUNNING);
+		trace_sched_kthread_work_execute_start(work);
 		work->func(work);
+		/*
+		 * Avoid dereferencing work after this point. The trace
+		 * event only cares about the address.
+		 */
+		trace_sched_kthread_work_execute_end(work, func);
 	} else if (!freezing(current))
 		schedule();
 
@@ -895,6 +902,8 @@ static void kthread_insert_work(struct kthread_worker *worker,
 {
 	kthread_insert_work_sanity_check(worker, work);
 
+	trace_sched_kthread_work_queue_work(worker, work);
+
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
 	if (!worker->current_work && likely(worker->task))