Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1bcb1ead authored by Ming Lei, committed by Jens Axboe
Browse files

blk-mq: allocate flush_rq in blk_mq_init_flush()



It is reasonable to allocate flush req in blk_mq_init_flush().

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 08e98fc6
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
@@ -472,7 +472,16 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
}
EXPORT_SYMBOL(blkdev_issue_flush);

void blk_mq_init_flush(struct request_queue *q)
/*
 * Initialize the flush machinery for a blk-mq request queue: set up the
 * flush lock and allocate the queue's dedicated flush request.
 *
 * Returns 0 on success, -ENOMEM if the flush request cannot be allocated.
 */
int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	spin_lock_init(&q->mq_flush_lock);

	/*
	 * Zeroed allocation sized to hold the request plus the driver's
	 * per-command payload (set->cmd_size), rounded up to a cache line
	 * so the trailing payload does not share a line with other data.
	 */
	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}
+6 −10
Original line number Diff line number Diff line
@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_flush(q);
	blk_mq_init_cpu_queues(q, set->nr_hw_queues);

	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		goto err_hw;

	if (blk_mq_init_hw_queues(q, set))
		goto err_flush_rq;
		goto err_hw;

	mutex_lock(&all_q_mutex);
	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)

	blk_mq_add_queue_tag_set(set, q);

	if (blk_mq_init_flush(q))
		goto err_hw_queues;

	blk_mq_map_swqueue(q);

	return q;

err_flush_rq:
	kfree(q->flush_rq);
err_hw_queues:
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
err_hw:
	blk_cleanup_queue(q);
err_hctxs:
+1 −1
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ struct blk_mq_ctx {

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
int blk_mq_init_flush(struct request_queue *q);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,