From 776687bce42bb22cce48b5da950e48ebbb9a948f Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 1 Jul 2014 10:29:17 -0600
Subject: [PATCH] block, blk-mq: draining can't be skipped even if
 bypass_depth was non-zero

Currently, both blk_queue_bypass_start() and blk_mq_freeze_queue()
skip queue draining if bypass_depth was already above zero.  The
assumption is that the one which bumped the bypass_depth should have
performed draining already; however, there's nothing which prevents a
new instance of bypassing/freezing from starting before the previous
one finishes draining.  The current code may allow the later
bypassing/freezing instances to complete while there still are
in-flight requests which haven't finished draining.

Fix it by draining regardless of bypass_depth.  We still skip
draining from blk_queue_bypass_start() while the queue is
initializing to avoid introducing excessive delays during boot.
INIT_DONE setting is moved above the initial blk_queue_bypass_end()
so that bypassing attempts can't slip in between.

Signed-off-by: Tejun Heo
Cc: Jens Axboe
Cc: Nicholas A. Bellinger
Signed-off-by: Jens Axboe
---
 block/blk-core.c  | 11 +++++++----
 block/blk-mq.c    |  7 ++-----
 block/blk-sysfs.c |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 6f8dba161bfe1f..0d0bdd65b2d7d3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained.  Skip actual draining till init is
+	 * complete.  This avoids lengthy delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9541f5111ba61a..f4bdddd7ed996d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -131,15 +131,12 @@ void blk_mq_drain_queue(struct request_queue *q)
  */
 static void blk_mq_freeze_queue(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain)
-		blk_mq_drain_queue(q);
+	blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 23321fbab29318..4db5abf96b9ec1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,8 @@ int blk_register_queue(struct gendisk *disk)
 	 * Initialization must be complete by now. Finish the initial
 	 * bypass from queue allocation.
 	 */
-	blk_queue_bypass_end(q);
 	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+	blk_queue_bypass_end(q);
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)
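The race the patch closes can be modeled outside the kernel.  The
sketch below is a simplified user-space illustration, not kernel code:
the queue structure, the in_flight counter, and the function names are
hypothetical stand-ins, and draining is reduced to decrementing a
counter.  It only shows why a later bypasser must drain even when
bypass_depth was already non-zero.

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for the request_queue fields involved. */
	struct queue {
		int bypass_depth;
		int in_flight;		/* requests not yet drained */
	};

	/* Old logic: only the caller taking bypass_depth 0 -> 1 drains. */
	static bool old_bypass_start(struct queue *q)
	{
		bool drain = !q->bypass_depth++;
		/* draining is slow; the first caller has only started it */
		return drain;
	}

	/* Fixed logic: every caller drains, regardless of bypass_depth. */
	static void fixed_bypass_start(struct queue *q)
	{
		q->bypass_depth++;
		while (q->in_flight)	/* stands in for __blk_drain_queue() */
			q->in_flight--;
	}

	int main(void)
	{
		struct queue q = { .bypass_depth = 0, .in_flight = 3 };

		/* Instance A starts bypassing; its drain has not finished. */
		bool a_drains = old_bypass_start(&q);

		/* Instance B arrives before A's drain completes. */
		bool b_drains = old_bypass_start(&q);

		/* With the old logic, B skips draining and "completes"
		 * while requests are still in flight. */
		printf("old: A drains=%d B drains=%d in_flight=%d\n",
		       a_drains, b_drains, q.in_flight);

		/* With the fix, B drains unconditionally. */
		struct queue q2 = { .bypass_depth = 1, .in_flight = 3 };
		fixed_bypass_start(&q2);
		printf("fixed: in_flight=%d\n", q2.in_flight);

		return 0;
	}

In the old scheme B returns immediately because A already bumped
bypass_depth, even though the three modeled requests are still in
flight; the fixed path always drains, mirroring the now-unconditional
__blk_drain_queue() / blk_mq_drain_queue() calls in the patch.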