Skip to content

Commit 43a980a

Browse files
htejun
authored and gregkh committed
workqueue: replace pool->manager_arb mutex with a flag
commit 692b482 upstream. Josef reported a HARDIRQ-safe -> HARDIRQ-unsafe lock order detected by lockdep: [ 1270.472259] WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected [ 1270.472783] 4.14.0-rc1-xfstests-12888-g76833e8 hardkernel#110 Not tainted [ 1270.473240] ----------------------------------------------------- [ 1270.473710] kworker/u5:2/5157 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: [ 1270.474239] (&(&lock->wait_lock)->rlock){+.+.}, at: [<ffffffff8da253d2>] __mutex_unlock_slowpath+0xa2/0x280 [ 1270.474994] [ 1270.474994] and this task is already holding: [ 1270.475440] (&pool->lock/1){-.-.}, at: [<ffffffff8d2992f6>] worker_thread+0x366/0x3c0 [ 1270.476046] which would create a new lock dependency: [ 1270.476436] (&pool->lock/1){-.-.} -> (&(&lock->wait_lock)->rlock){+.+.} [ 1270.476949] [ 1270.476949] but this new dependency connects a HARDIRQ-irq-safe lock: [ 1270.477553] (&pool->lock/1){-.-.} ... [ 1270.488900] to a HARDIRQ-irq-unsafe lock: [ 1270.489327] (&(&lock->wait_lock)->rlock){+.+.} ... [ 1270.494735] Possible interrupt unsafe locking scenario: [ 1270.494735] [ 1270.495250] CPU0 CPU1 [ 1270.495600] ---- ---- [ 1270.495947] lock(&(&lock->wait_lock)->rlock); [ 1270.496295] local_irq_disable(); [ 1270.496753] lock(&pool->lock/1); [ 1270.497205] lock(&(&lock->wait_lock)->rlock); [ 1270.497744] <Interrupt> [ 1270.497948] lock(&pool->lock/1); , which will cause an irq inversion deadlock if the above lock scenario happens. The root cause of this safe -> unsafe lock order is the mutex_unlock(pool->manager_arb) in manage_workers() with pool->lock held. Unlocking a mutex while holding an irq spinlock was never safe, and this problem has been around forever, but it never got noticed because the mutex is usually only trylocked while holding the irq spinlock, making actual failures very unlikely, and the lockdep annotation missed the condition until the recent b9c16a0 ("locking/mutex: Fix lockdep_assert_held() fail").
Using a mutex for pool->manager_arb has always been a bit of a stretch. It primarily is a mechanism to arbitrate managership between workers, which can easily be done with a pool flag. The only reason it became a mutex is that the pool destruction path wants to exclude parallel managing operations. This patch replaces the mutex with a new pool flag POOL_MANAGER_ACTIVE and makes the destruction path wait for the current manager on a wait queue. v2: Drop unnecessary flag clearing before pool destruction as suggested by Boqun. Signed-off-by: Tejun Heo <[email protected]> Reported-by: Josef Bacik <[email protected]> Reviewed-by: Lai Jiangshan <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Boqun Feng <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent d785062 commit 43a980a

File tree

1 file changed

+15
-22
lines changed

1 file changed

+15
-22
lines changed

kernel/workqueue.c

Lines changed: 15 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@ enum {
6868
* attach_mutex to avoid changing binding state while
6969
* worker_attach_to_pool() is in progress.
7070
*/
71+
POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
7172
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
7273

7374
/* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
165166
/* L: hash of busy workers */
166167

167168
/* see manage_workers() for details on the two manager mutexes */
168-
struct mutex manager_arb; /* manager arbitration */
169169
struct worker *manager; /* L: purely informational */
170170
struct mutex attach_mutex; /* attach/detach exclusion */
171171
struct list_head workers; /* A: attached workers */
@@ -297,6 +297,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
297297

298298
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
299299
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
300+
static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
300301

301302
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
302303
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -799,7 +800,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
799800
/* Do we have too many workers and should some go away? */
800801
static bool too_many_workers(struct worker_pool *pool)
801802
{
802-
bool managing = mutex_is_locked(&pool->manager_arb);
803+
bool managing = pool->flags & POOL_MANAGER_ACTIVE;
803804
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
804805
int nr_busy = pool->nr_workers - nr_idle;
805806

@@ -1979,24 +1980,17 @@ static bool manage_workers(struct worker *worker)
19791980
{
19801981
struct worker_pool *pool = worker->pool;
19811982

1982-
/*
1983-
* Anyone who successfully grabs manager_arb wins the arbitration
1984-
* and becomes the manager. mutex_trylock() on pool->manager_arb
1985-
* failure while holding pool->lock reliably indicates that someone
1986-
* else is managing the pool and the worker which failed trylock
1987-
* can proceed to executing work items. This means that anyone
1988-
* grabbing manager_arb is responsible for actually performing
1989-
* manager duties. If manager_arb is grabbed and released without
1990-
* actual management, the pool may stall indefinitely.
1991-
*/
1992-
if (!mutex_trylock(&pool->manager_arb))
1983+
if (pool->flags & POOL_MANAGER_ACTIVE)
19931984
return false;
1985+
1986+
pool->flags |= POOL_MANAGER_ACTIVE;
19941987
pool->manager = worker;
19951988

19961989
maybe_create_worker(pool);
19971990

19981991
pool->manager = NULL;
1999-
mutex_unlock(&pool->manager_arb);
1992+
pool->flags &= ~POOL_MANAGER_ACTIVE;
1993+
wake_up(&wq_manager_wait);
20001994
return true;
20011995
}
20021996

@@ -3203,7 +3197,6 @@ static int init_worker_pool(struct worker_pool *pool)
32033197
setup_timer(&pool->mayday_timer, pool_mayday_timeout,
32043198
(unsigned long)pool);
32053199

3206-
mutex_init(&pool->manager_arb);
32073200
mutex_init(&pool->attach_mutex);
32083201
INIT_LIST_HEAD(&pool->workers);
32093202

@@ -3273,13 +3266,15 @@ static void put_unbound_pool(struct worker_pool *pool)
32733266
hash_del(&pool->hash_node);
32743267

32753268
/*
3276-
* Become the manager and destroy all workers. Grabbing
3277-
* manager_arb prevents @pool's workers from blocking on
3278-
* attach_mutex.
3269+
* Become the manager and destroy all workers. This prevents
3270+
* @pool's workers from blocking on attach_mutex. We're the last
3271+
* manager and @pool gets freed with the flag set.
32793272
*/
3280-
mutex_lock(&pool->manager_arb);
3281-
32823273
spin_lock_irq(&pool->lock);
3274+
wait_event_lock_irq(wq_manager_wait,
3275+
!(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3276+
pool->flags |= POOL_MANAGER_ACTIVE;
3277+
32833278
while ((worker = first_idle_worker(pool)))
32843279
destroy_worker(worker);
32853280
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3293,8 +3288,6 @@ static void put_unbound_pool(struct worker_pool *pool)
32933288
if (pool->detach_completion)
32943289
wait_for_completion(pool->detach_completion);
32953290

3296-
mutex_unlock(&pool->manager_arb);
3297-
32983291
/* shut down the timers */
32993292
del_timer_sync(&pool->idle_timer);
33003293
del_timer_sync(&pool->mayday_timer);

0 commit comments

Comments
 (0)