Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd76a786 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes that should go in before -rc1.  The pull
  request contains:

   - A two patch fix for a regression with block enabled tagging caused
     by a commit in the initial pull request.  One patch is from Martin
     and ensures that SCSI doesn't truncate 64-bit block flags, the
     other one is from me and prevents us from double using struct
     request queuelist for both completion and busy tags.  This caused
     anything from a boot crash for some, to crashes under load.

   - A blk-mq fix for a potential soft stall when hot unplugging CPUs
     with busy IO.

   - percpu_counter fix is listed in here, that caused a suspend issue
     with virtio-blk due to percpu counters having an inconsistent state
     during CPU removal.  Andrew sent this in separately a few days ago,
     but it's here.  JFYI.

   - A few fixes for block integrity from Martin.

   - A ratelimit fix for loop from Mike Galbraith, to avoid spewing too
     much in error cases"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix regression with block enabled tagging
  scsi: Make sure cmd_flags are 64-bit
  block: Ensure we only enable integrity metadata for reads and writes
  block: Fix integrity verification
  block: Fix for_each_bvec()
  drivers/block/loop.c: ratelimit error messages
  blk-mq: fix potential stall during CPU unplug with IO pending
  percpu_counter: fix bad counter state during suspend
parents e7990d45 360f92c2
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
		struct request_list *rl = blk_rq_rl(req);
		struct request_list *rl = blk_rq_rl(req);


		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));
		BUG_ON(ELV_ON_HASH(req));


		blk_free_request(rl, req);
		blk_free_request(rl, req);
		freed_request(rl, flags);
		freed_request(rl, flags);
+6 −2
Original line number Original line Diff line number Diff line
@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
			       unsigned int cpu)
			       unsigned int cpu)
{
{
	struct blk_mq_hw_ctx *hctx = data;
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);
	LIST_HEAD(tmp);


@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
	/*
	/*
	 * Move ctx entries to new CPU, if this one is going away.
	 * Move ctx entries to new CPU, if this one is going away.
	 */
	 */
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
	ctx = __blk_mq_get_ctx(q, cpu);


	spin_lock(&ctx->lock);
	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
	if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
	if (list_empty(&tmp))
	if (list_empty(&tmp))
		return;
		return;


	ctx = blk_mq_get_ctx(hctx->queue);
	ctx = blk_mq_get_ctx(q);
	spin_lock(&ctx->lock);
	spin_lock(&ctx->lock);


	while (!list_empty(&tmp)) {
	while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
		list_move_tail(&rq->queuelist, &ctx->rq_list);
		list_move_tail(&rq->queuelist, &ctx->rq_list);
	}
	}


	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_hctx_mark_pending(hctx, ctx);
	blk_mq_hctx_mark_pending(hctx, ctx);


	spin_unlock(&ctx->lock);
	spin_unlock(&ctx->lock);
	blk_mq_put_ctx(ctx);
	blk_mq_put_ctx(ctx);

	blk_mq_run_hw_queue(hctx, true);
}
}


static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+6 −11
Original line number Original line Diff line number Diff line
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
	while (!list_empty(&local_list)) {
	while (!list_empty(&local_list)) {
		struct request *rq;
		struct request *rq;


		rq = list_entry(local_list.next, struct request, queuelist);
		rq = list_entry(local_list.next, struct request, ipi_list);
		list_del_init(&rq->queuelist);
		list_del_init(&rq->ipi_list);
		rq->q->softirq_done_fn(rq);
		rq->q->softirq_done_fn(rq);
	}
	}
}
}
@@ -45,14 +45,9 @@ static void trigger_softirq(void *data)


	local_irq_save(flags);
	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list = this_cpu_ptr(&blk_cpu_done);
	/*
	list_add_tail(&rq->ipi_list, list);
	 * We reuse queuelist for a list of requests to process. Since the
	 * queuelist is used by the block layer only for requests waiting to be
	 * submitted to the device it is unused now.
	 */
	list_add_tail(&rq->queuelist, list);


	if (list->next == &rq->queuelist)
	if (list->next == &rq->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		raise_softirq_irqoff(BLOCK_SOFTIRQ);


	local_irq_restore(flags);
	local_irq_restore(flags);
@@ -141,7 +136,7 @@ void __blk_complete_request(struct request *req)
		struct list_head *list;
		struct list_head *list;
do_local:
do_local:
		list = this_cpu_ptr(&blk_cpu_done);
		list = this_cpu_ptr(&blk_cpu_done);
		list_add_tail(&req->queuelist, list);
		list_add_tail(&req->ipi_list, list);


		/*
		/*
		 * if the list only contains our just added request,
		 * if the list only contains our just added request,
@@ -149,7 +144,7 @@ do_local:
		 * entries there, someone already raised the irq but it
		 * entries there, someone already raised the irq but it
		 * hasn't run yet.
		 * hasn't run yet.
		 */
		 */
		if (list->next == &req->queuelist)
		if (list->next == &req->ipi_list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;
		goto do_local;
+1 −1
Original line number Original line Diff line number Diff line
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
/*
 * Internal elevator interface
 * Internal elevator interface
 */
 */
#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)


void blk_insert_flush(struct request *rq);
void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);
void blk_abort_flushes(struct request_queue *q);
+2 −0
Original line number Original line Diff line number Diff line
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
static inline void __elv_rqhash_del(struct request *rq)
{
{
	hash_del(&rq->hash);
	hash_del(&rq->hash);
	rq->cmd_flags &= ~REQ_HASHED;
}
}


static void elv_rqhash_del(struct request_queue *q, struct request *rq)
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)


	BUG_ON(ELV_ON_HASH(rq));
	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->cmd_flags |= REQ_HASHED;
}
}


static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
Loading