From: Tejun Heo <tj@kernel.org>
[ Upstream commit c5404d4e6df6faba1007544b5f4e62c7c14416dd ]
wq_adjust_max_active() needs to activate work items after max_active is increased. Previously, it did that by visiting each pwq once, activating everything that could be activated. While this makes sense with per-pwq nr_active, nr_active will be shared across multiple pwqs for unbound wqs, so we'd want to round-robin through pwqs to be fairer.
In preparation, this patch makes wq_adjust_max_active() round-robin pwqs while activating. While the activation ordering changes, this shouldn't cause user-noticeable behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
Stable-dep-of: 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/workqueue.c | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 80733046ee012..1659cd4a36c62 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4703,7 +4703,7 @@ static int init_rescuer(struct workqueue_struct *wq)
  */
 static void wq_adjust_max_active(struct workqueue_struct *wq)
 {
-	struct pool_workqueue *pwq;
+	bool activated;
 
 	lockdep_assert_held(&wq->mutex);
 
@@ -4723,19 +4723,26 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 	 */
 	WRITE_ONCE(wq->max_active, wq->saved_max_active);
 
-	for_each_pwq(pwq, wq) {
-		unsigned long flags;
-
-		/* this function can be called during early boot w/ irq disabled */
-		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-
-		while (pwq_activate_first_inactive(pwq))
-			;
+	/*
+	 * Round-robin through pwq's activating the first inactive work item
+	 * until max_active is filled.
+	 */
+	do {
+		struct pool_workqueue *pwq;
 
-		kick_pool(pwq->pool);
+		activated = false;
+		for_each_pwq(pwq, wq) {
+			unsigned long flags;
 
-		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-	}
+			/* can be called during early boot w/ irq disabled */
+			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+			if (pwq_activate_first_inactive(pwq)) {
+				activated = true;
+				kick_pool(pwq->pool);
+			}
+			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+		}
+	} while (activated);
 }
 
 __printf(1, 4)
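
To make the fairness argument concrete, here is a minimal standalone C sketch of the round-robin activation pattern the patch introduces. This is illustration only, not kernel code: fake_pwq, activate_one(), and the shared total_active/max_active counters are invented stand-ins for pool_workqueue, pwq_activate_first_inactive(), and the shared nr_active accounting of unbound wqs; locking and kick_pool() are omitted.

	/*
	 * Userspace sketch: sweep the pwqs repeatedly, activating at most one
	 * inactive item per pwq per pass, until a full pass activates nothing.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define NPWQ 3

	struct fake_pwq {
		int nr_active;		/* currently active work items */
		int nr_inactive;	/* queued but not yet activated */
	};

	static int max_active = 4;	/* shared budget, as with unbound wqs */
	static int total_active;

	/* Activate at most one item; stand-in for pwq_activate_first_inactive(). */
	static bool activate_one(struct fake_pwq *pwq)
	{
		if (!pwq->nr_inactive || total_active >= max_active)
			return false;
		pwq->nr_inactive--;
		pwq->nr_active++;
		total_active++;
		return true;
	}

	int main(void)
	{
		struct fake_pwq pwqs[NPWQ] = { { 0, 5 }, { 0, 1 }, { 0, 3 } };
		bool activated;
		int i;

		/* Round-robin through pwqs until nothing more can be activated. */
		do {
			activated = false;
			for (i = 0; i < NPWQ; i++)
				if (activate_one(&pwqs[i]))
					activated = true;
		} while (activated);

		for (i = 0; i < NPWQ; i++)
			printf("pwq%d: active=%d inactive=%d\n",
			       i, pwqs[i].nr_active, pwqs[i].nr_inactive);
		return 0;
	}

With a shared budget of 4 and per-pwq backlogs of {5, 1, 3}, the round-robin loop ends with 2/1/1 items active, whereas draining one pwq at a time (the old loop) would have let the first pwq consume the whole budget, 4/0/0 -- the fairness the commit message is after.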