On 22/08/2019 21:40, Valentin Schneider wrote:
On 22/08/2019 19:48, bsegall@google.com wrote:
Regarding the point that we shouldn't get account_cfs_rq_runtime() called on throttled cfs_rq's: with this debug patch,
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 171eef3f08f9..1acb88024cad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4385,6 +4385,11 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
 }
 
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
+{
+	return cfs_bandwidth_used() && cfs_rq->throttled;
+}
+
 /* returns 0 on failure to allocate runtime */
 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4411,6 +4416,8 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 	cfs_rq->runtime_remaining += amount;
 
+	WARN_ON(cfs_rq_throttled(cfs_rq) && cfs_rq->runtime_remaining > 0);
+
 	return cfs_rq->runtime_remaining > 0;
 }
 
@@ -4436,12 +4443,9 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 		return;
 
-	__account_cfs_rq_runtime(cfs_rq, delta_exec);
-}
+	WARN_ON(cfs_rq_throttled(cfs_rq));
 
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
-{
-	return cfs_bandwidth_used() && cfs_rq->throttled;
+	__account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
 /* check whether cfs_rq, or any parent, is throttled */
---
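For reference, that second WARN_ON sits directly in front of the normal runtime accounting path. A simplified paraphrase of update_curr() from fair.c of that era (trimmed, not the literal source) shows how any entity that is still accruing runtime ends up in account_cfs_rq_runtime():

/* simplified paraphrase of kernel/sched/fair.c:update_curr(); stats and vruntime updates trimmed */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	/* time the current entity has run since exec_start was last set */
	delta_exec = now - curr->exec_start;
	curr->exec_start = now;
	curr->sum_exec_runtime += delta_exec;

	/* vruntime / schedstat updates trimmed */

	/* charge the elapsed time against the group's CFS quota */
	account_cfs_rq_runtime(cfs_rq, delta_exec);
}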
With that patch applied, I get this:
[  204.798643] Call Trace:
[  204.798645]  put_prev_entity+0x8d/0x100
[  204.798647]  put_prev_task_fair+0x22/0x40
[  204.798648]  pick_next_task_idle+0x36/0x50
[  204.798650]  __schedule+0x61d/0x6c0
[  204.798651]  schedule+0x2d/0x90
[  204.798653]  exit_to_usermode_loop+0x61/0x100
[  204.798654]  prepare_exit_to_usermode+0x91/0xa0
[  204.798656]  retint_user+0x8/0x8
(this is a hit on the account_cfs_rq_runtime() WARN_ON)
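Which lines up with the trace: put_prev_entity() calls update_curr() for an entity that is still on_rq before it gets to check_cfs_rq_runtime(), so runtime accounting still runs on a cfs_rq that is already throttled, which is presumably how the WARN_ON above gets hit. Roughly (again a trimmed paraphrase of fair.c, not the literal source):

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If prev is still queued, deactivate_task() was not called, so its
	 * runtime since the last update has not been accounted yet;
	 * update_curr() does that here even if the cfs_rq is already
	 * throttled.
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	/* throttling is only (re)checked after the accounting above */
	check_cfs_rq_runtime(cfs_rq);

	/* stats update and re-enqueue of prev trimmed */

	cfs_rq->curr = NULL;
}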