Commit 38dbb7dd authored by Jens Axboe

blk-cgroup: don't quiesce the queue on policy activate/deactivate

There's no potential harm in quiescing the queue, but it also doesn't
buy us anything. And we can't run the queue async for policy
deactivate, since we could be in the path of tearing the queue down.
If we schedule an async run of the queue at that time, we're racing
with queue teardown AFTER we've already torn most of it down.
Reported-by: Omar Sandoval <osandov@fb.com>
Fixes: 4d199c6f ("blk-cgroup: ensure that we clear the stop bit on quiesced queues")
Tested-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6c0ca7ae
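
For context, a minimal sketch of the pattern both functions settle on after this change (simplified from the diff below; the policy bookkeeping done under q->queue_lock is elided). Freezing the queue is enough to drain in-flight I/O for the policy update, and since nothing is quiesced or stopped anymore, the unfreeze side no longer needs blk_mq_start_stopped_hw_queues() and its async queue runs:

	/* sketch: blk-mq path freezes, legacy path enters bypass */
	if (q->mq_ops)
		blk_mq_freeze_queue(q);		/* drain in-flight requests */
	else
		blk_queue_bypass_start(q);

	/* ... activate/deactivate the policy under q->queue_lock ... */

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);	/* no stopped hw queues to restart */
	else
		blk_queue_bypass_end(q);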
@@ -1223,10 +1223,9 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		blk_mq_freeze_queue(q);
-		blk_mq_quiesce_queue(q);
-	} else
+	else
 		blk_queue_bypass_start(q);
 pd_prealloc:
 	if (!pd_prealloc) {
@@ -1265,10 +1264,9 @@ int blkcg_activate_policy(struct request_queue *q,
 	spin_unlock_irq(q->queue_lock);
 out_bypass_end:
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
-		blk_mq_start_stopped_hw_queues(q, true);
-	} else
+	else
 		blk_queue_bypass_end(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
 	return ret;
@@ -1292,10 +1290,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		blk_mq_freeze_queue(q);
-		blk_mq_quiesce_queue(q);
-	} else
+	else
 		blk_queue_bypass_start(q);
 
 	spin_lock_irq(q->queue_lock);
@@ -1318,10 +1315,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
 	spin_unlock_irq(q->queue_lock);
 
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
-		blk_mq_start_stopped_hw_queues(q, true);
-	} else
+	else
 		blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);