
Commit 2a75184d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Small collection of fixes for 3.14-rc. It contains:

   - Three minor updates to blk-mq from Christoph.

   - Reduce number of unaligned (< 4kb) in-flight writes on mtip32xx to
     two.  From Micron.

   - Make the blk-mq CPU notify spinlock raw, since it can't be a
     sleeper spinlock on RT.  From Mike Galbraith.

   - Drop now bogus BUG_ON() for bio iteration with blk integrity.  From
     Nic Bellinger.

   - Properly propagate the SYNC flag on requests. From Shaohua"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: add REQ_SYNC early
  rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
  bio-integrity: Drop bio_integrity_verify BUG_ON in post bip->bip_iter world
  blk-mq: support partial I/O completions
  blk-mq: merge blk_mq_insert_request and blk_mq_run_request
  blk-mq: remove blk_mq_alloc_rq
  mtip32xx: Reduce the number of unaligned writes to 2
parents 8ab47d3e 739c3eea
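
The change that threads through the diffs below is an API consolidation: blk_mq_run_request() is folded into blk_mq_insert_request(), which now takes only the request (the queue is derived from rq->q) plus three flags: at_head, run_queue and async. A minimal sketch of the new calling convention, matching the call sites in the hunks that follow (the wrapper function itself is hypothetical):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical helper, for illustration only. The single call replaces
 * both old forms:
 *   blk_mq_insert_request(q, rq, at_head, run_queue)   (old API)
 *   blk_mq_run_request(rq, run_queue, async)           (removed)
 */
static void example_insert_and_run(struct request *rq)
{
	/* insert at the tail, kick the hw queue, run it synchronously */
	blk_mq_insert_request(rq, false, true, false);
}
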
block/blk-exec.c +1 −1
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}

block/blk-flush.c +2 −2
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
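
Worth noting: both flush-path call sites above now go through blk_mq_insert_request(), which itself hands REQ_FLUSH/REQ_FUA requests to blk_insert_flush(). The reason this does not recurse is the guard added in the blk-mq.c hunk further down, excerpted here for reference:

	/* requests already marked as part of a flush sequence skip
	 * blk_insert_flush(), so mq_flush_run() cannot loop back in */
	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
		blk_insert_flush(rq);
	} else {
		/* plain insert into the per-ctx list */
	}
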
block/blk-mq-cpu.c +7 −7
@@ -11,7 +11,7 @@
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 
 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
 		notify->notify(notify->data, action, cpu);
 
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 	return NOTIFY_OK;
 }
 
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
 	BUG_ON(!notifier->notify);
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_del(&notifier->list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
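
For background on the conversion above: with PREEMPT_RT, a plain spinlock_t is turned into a sleeping lock, which is not permitted here because the notifier chain runs from CPU-hotplug contexts that must not sleep; raw_spinlock_t always stays a busy-waiting spinlock. A minimal sketch of the pattern (the notifier struct and list are hypothetical stand-ins for the blk-mq ones):

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_notifier {
	struct list_head list;
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
	void *data;
};

static LIST_HEAD(example_notify_list);
/* raw spinlock: remains a true spinlock even on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(example_notify_lock);

static void example_notify_all(unsigned long action, unsigned int cpu)
{
	struct example_notifier *n;

	/* never sleeps, safe in non-preemptible hotplug context */
	raw_spin_lock(&example_notify_lock);
	list_for_each_entry(n, &example_notify_list, list)
		n->notify(n->data, action, cpu);
	raw_spin_unlock(&example_notify_lock);
}
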
block/blk-mq.c +19 −83
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 		set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-				       bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
-{
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
+	if (blk_update_request(rq, error, nr_bytes))
+		return true;
 
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+		bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
 
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
+
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
 	}
 
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
-	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
-
 	blk_mq_put_ctx(current_ctx);
 
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+	if (is_sync)
+		rw |= REQ_SYNC;
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
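
Two things happen in this file besides the insert/run merge: the final hunk ORs REQ_SYNC into rw before the request is allocated, so the sync-ness of the bio is visible to everything downstream of allocation, and blk_mq_end_io() is replaced by blk_mq_end_io_partial(), which feeds nr_bytes to blk_update_request() and returns true while part of the request is still outstanding. A full completion can then be written as a thin wrapper; a sketch (the helper name is hypothetical, and the actual in-tree wrapper may differ):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Sketch of a full completion built on the new partial primitive:
 * blk_mq_end_io_partial() returns true while the request still has
 * bytes outstanding, false once it has been fully ended.
 */
static inline void example_end_io(struct request *rq, int error)
{
	bool incomplete = blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));

	WARN_ON(incomplete);	/* blk_rq_bytes() covers the whole request */
}
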
block/blk-mq.h +0 −1
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);