block/blk-mq.c: 21 changes (9 additions, 12 deletions)
@@ -171,7 +171,7 @@ bool __blk_freeze_queue_start(struct request_queue *q,
 		percpu_ref_kill(&q->q_usage_counter);
 		mutex_unlock(&q->mq_freeze_lock);
 		if (queue_is_mq(q))
-			blk_mq_run_hw_queues(q, false);
+			blk_mq_run_hw_queues(q, true);
 	} else {
 		mutex_unlock(&q->mq_freeze_lock);
 	}
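
The freeze path's call now passes async = true. In blk-mq that flag roughly selects between dispatching in the caller's context and deferring the run to the kblockd workqueue via hctx->run_work, so the sketch below models the same split in userspace with an ordinary worker thread. It is only an illustration of the inline-vs-deferred distinction: run_hw_queue(), dispatch_requests() and the worker are invented names, not kernel code.

/* Userspace sketch of what the async flag selects: run the dispatch work in
 * the caller, or hand it to a worker thread (standing in for kblockd).
 * All names are invented for the illustration.
 * Build: gcc -O2 -pthread run_async_sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending_runs;                /* deferred "run the hw queue" work */

static void dispatch_requests(const char *ctx)
{
	printf("dispatching requests (%s context)\n", ctx);
}

/* Stand-in for the kblockd worker servicing deferred runs. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (pending_runs == 0)
		pthread_cond_wait(&cond, &lock);
	pending_runs--;
	pthread_mutex_unlock(&lock);
	dispatch_requests("worker");
	return NULL;
}

/* Model of run_hw_queue(async): async defers, !async runs inline. */
static void run_hw_queue(bool async)
{
	if (!async) {
		dispatch_requests("caller");    /* synchronous, in this context */
		return;
	}
	pthread_mutex_lock(&lock);
	pending_runs++;                         /* hand the run off to the worker */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, worker, NULL);
	run_hw_queue(true);     /* what the freeze path now requests */
	pthread_join(w, NULL);
	run_hw_queue(false);    /* the previous, synchronous behaviour */
	return 0;
}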
@@ -2350,22 +2350,19 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 
 	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
 
+	/*
+	 * First lockless check to avoid unnecessary overhead.
+	 * Memory barrier below synchronizes with blk_mq_unquiesce_queue().
+	 */
 	need_run = blk_mq_hw_queue_need_run(hctx);
 	if (!need_run) {
-		unsigned long flags;
-
-		/*
-		 * Synchronize with blk_mq_unquiesce_queue(), because we check
-		 * if hw queue is quiesced locklessly above, we need the use
-		 * ->queue_lock to make sure we see the up-to-date status to
-		 * not miss rerunning the hw queue.
-		 */
-		spin_lock_irqsave(&hctx->queue->queue_lock, flags);
+		/* Synchronize with blk_mq_unquiesce_queue() */
+		smp_mb();
 		need_run = blk_mq_hw_queue_need_run(hctx);
-		spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
-
 		if (!need_run)
 			return;
+		/* Ensure dispatch list/sw queue updates visible before execution */
+		smp_mb();
 	}
 
 	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
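
The second hunk drops the ->queue_lock round trip and relies on paired full barriers instead: blk_mq_unquiesce_queue() clears the quiesced state and then re-runs the queues, while blk_mq_run_hw_queue() publishes its pending work and then re-checks the quiesced state, with smp_mb() on each side so that at least one of the two paths is guaranteed to observe the other's update and a needed re-run cannot be missed. The userspace sketch below models only that ordering pattern with C11 seq_cst fences; quiesced, pending, run_queue() and the two thread functions are invented for the model, not kernel code.

/* Userspace model of the smp_mb() pairing: one thread clears "quiesced" and
 * then checks for pending work, the other publishes pending work and then
 * checks "quiesced". With both fences, at least one thread sees the other's
 * store in every interleaving, so the queue run is never lost.
 * Build: gcc -O2 -pthread barrier_pairing.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int quiesced = 1;   /* models the queue's quiesced state      */
static atomic_int pending  = 0;   /* models "requests queued on the hctx"   */
static atomic_int runs     = 0;   /* how many times the queue was (re)run   */

static void run_queue(const char *who)
{
	atomic_fetch_add(&runs, 1);
	printf("queue run by %s path\n", who);
}

/* Models the unquiesce side: clear the flag, fence, re-check for work. */
static void *unquiesce_path(void *arg)
{
	(void)arg;
	atomic_store_explicit(&quiesced, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* pairs with the fence below */
	if (atomic_load_explicit(&pending, memory_order_relaxed))
		run_queue("unquiesce");
	return NULL;
}

/* Models the dispatch side: publish the work, fence, re-check the flag. */
static void *dispatch_path(void *arg)
{
	(void)arg;
	atomic_store_explicit(&pending, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* pairs with the fence above */
	if (!atomic_load_explicit(&quiesced, memory_order_relaxed))
		run_queue("dispatch");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, unquiesce_path, NULL);
	pthread_create(&b, NULL, dispatch_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* The forbidden outcome is runs == 0: both sides missing each other. */
	printf("runs = %d\n", atomic_load(&runs));
	return 0;
}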