
Commit 5a1efc6e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block IO fixes from Jens Axboe:
 "Normally I'd defer my initial for-linus pull request until after the
  merge window, but a race was uncovered in the virtio-blk conversion to
  blk-mq that could cause hangs.  So here's a small collection of fixes
  for you to pull:

   - The fix for the virtio-blk IO hang reported by Dave Chinner, from
     Shaohua and myself.

   - Add the Insert blktrace event for blk-mq.  This makes 'btt' happy
     when it is doing its state transition analysis.

   - Ensure that blk-mq has disk/partition stats enabled by default,
     instead of making it opt-in.

   - A fix for __bio_add_page() and large sector counts"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: add blktrace insert event trace
  virtio-blk: virtqueue_kick() must be ordered with other virtqueue operations
  blk-mq: ensure that we set REQ_IO_STAT so diskstats work
  bio: fix argument of __bio_add_page() for max_sectors > 0xffff
parents 6d6e352c 01b983c9
block/blk-mq.c +10 −4

@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
-			       unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+			       struct request *rq, unsigned int rw_flags)
 {
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
+
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,7 +200,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 
 		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
-			blk_mq_rq_ctx_init(ctx, rq, rw);
+			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
 		} else if (!(gfp & __GFP_WAIT))
 			break;
@@ -718,6 +721,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	trace_block_rq_insert(hctx->queue, rq);
+
 	list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
@@ -921,7 +926,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(ctx, rq, rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1382,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	q->queue_hw_ctx = hctxs;
 
 	q->mq_ops = reg->ops;
+	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);
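
The last hunk above is what makes the stats default work end to end: blk_mq_init_queue() ORs QUEUE_FLAG_MQ_DEFAULT into queue_flags, so blk_queue_io_stat(q) is true and blk_mq_rq_ctx_init() tags every request with REQ_IO_STAT. The effect is visible from userspace as non-zero counters in /proc/diskstats. A minimal sketch of such a check follows; the device name "vda" is only an example:

/* Minimal /proc/diskstats reader: prints the stat line for one device.
 * Sketch only; "vda" is an example name, and the line layout is the
 * standard diskstats format (major, minor, name, then IO counters). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *dev = "vda";	/* example virtio-blk disk name */
	char line[512];
	FILE *f = fopen("/proc/diskstats", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int major, minor;
		char name[64];

		if (sscanf(line, " %u %u %63s", &major, &minor, name) == 3 &&
		    strcmp(name, dev) == 0)
			fputs(line, stdout);	/* non-zero counters => IO stats accounted */
	}
	fclose(f);
	return 0;
}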
drivers/block/virtio_blk.c +3 −2

@@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 
 	spin_lock_irqsave(&vblk->vq_lock, flags);
 	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+		virtqueue_kick(vblk->vq);
 		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
-		virtqueue_kick(vblk->vq);
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	if (last)
 		virtqueue_kick(vblk->vq);
+
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
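This is the fix for the IO hang Dave Chinner reported: virtqueue_kick() reads ring state that __virtblk_add_req() writes, so issuing the kick after vq_lock is dropped lets it race with a concurrent add from another CPU. The fix keeps every kick inside the critical section, so the reader and writer of the ring index are serialized by the same lock. Below is a userspace model of the fixed ordering, using pthreads and stand-in names rather than the kernel virtio API:

/* Userspace model of the fixed ordering, not the kernel API: the
 * "kick" reads the index that the "add" publishes, so both stay
 * inside the same critical section and are serialized against
 * add/kick pairs running on other threads (CPUs). */
#include <pthread.h>
#include <stdio.h>

struct vq {
	pthread_mutex_t lock;		/* models vblk->vq_lock */
	unsigned int avail_idx;		/* written by add, read by kick */
};

static int vq_add(struct vq *vq)	/* models __virtblk_add_req(): 0 on success */
{
	vq->avail_idx++;		/* publish one more descriptor */
	return 0;
}

static void vq_kick(struct vq *vq)	/* models virtqueue_kick() */
{
	/* Reads avail_idx; only guaranteed consistent with our own
	 * add while the lock is still held. */
	printf("notify device, avail_idx=%u\n", vq->avail_idx);
}

static void *submit(void *arg)
{
	struct vq *vq = arg;

	pthread_mutex_lock(&vq->lock);
	if (vq_add(vq) < 0) {
		vq_kick(vq);			/* flush pending work first... */
		pthread_mutex_unlock(&vq->lock);
		return NULL;			/* ...then report busy */
	}
	vq_kick(vq);				/* kick before unlock, as in the fix */
	pthread_mutex_unlock(&vq->lock);
	return NULL;
}

int main(void)
{
	struct vq vq = { PTHREAD_MUTEX_INITIALIZER, 0 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, submit, &vq);
	pthread_create(&t2, NULL, submit, &vq);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}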
fs/bio.c +1 −1

@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
 
 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
-			  unsigned short max_sectors)
+			  unsigned int max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
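
The bio change is a plain integer-truncation fix: max_sectors comes from queue limits that may exceed 0xffff, and the old unsigned short parameter silently wrapped such values, so 0x10000 became 0 and every subsequent page add failed the size check. A standalone demonstration of the wrap (function names are illustrative):

/* Demonstrates the silent truncation the one-line fix removes:
 * an unsigned short parameter wraps any value above 0xffff. */
#include <stdio.h>

static unsigned int as_short(unsigned short max_sectors)	/* old prototype */
{
	return max_sectors;
}

static unsigned int as_int(unsigned int max_sectors)		/* fixed prototype */
{
	return max_sectors;
}

int main(void)
{
	unsigned int limit = 0x10000;	/* 65536 sectors: a legal queue limit */

	printf("unsigned short sees %u\n", as_short(limit));	/* 0: adds always fail */
	printf("unsigned int   sees %u\n", as_int(limit));	/* 65536 */
	return 0;
}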
include/linux/blkdev.h +3 −0

@@ -505,6 +505,9 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
+#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))
+
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
 	if (q->queue_lock)
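
The new define follows the same flag-mask idiom as the existing default-flags mask above it: bit numbers become a mask that blk_mq_init_queue() ORs into queue_flags, and predicates like blk_queue_io_stat() reduce to a bit test. A self-contained sketch of the idiom, with made-up bit values rather than the kernel's:

/* Sketch of the flag-mask idiom. The bit numbers below are
 * illustrative stand-ins, not the kernel's actual values. */
#include <stdio.h>

#define FLAG_IO_STAT	7	/* illustrative bit number */
#define FLAG_SAME_COMP	8	/* illustrative bit number */

#define FLAG_MQ_DEFAULT	((1UL << FLAG_IO_STAT) |	\
			 (1UL << FLAG_SAME_COMP))

int main(void)
{
	unsigned long queue_flags = 0;

	queue_flags |= FLAG_MQ_DEFAULT;		/* as queue init now does */
	printf("io_stat enabled: %d\n",
	       !!(queue_flags & (1UL << FLAG_IO_STAT)));
	return 0;
}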