Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2f50037a authored by Linus Torvalds
Browse files

Merge tag 'for-linus-20180504' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into this release. This contains:

   - Set of bcache fixes from Coly, fixing regression in patches that
     went into this series.

   - Set of NVMe fixes by way of Keith.

   - Set of bdi related fixes, one from Jan and two from Tetsuo Handa,
     fixing various issues around device addition/removal.

   - Two block inflight fixes from Omar, fixing issues around the
     transition to using tags for blk-mq inflight accounting that we
     did a few releases ago"

* tag 'for-linus-20180504' of git://git.kernel.dk/linux-block:
  bdi: Fix oops in wb_workfn()
  nvmet: switch loopback target state to connecting when resetting
  nvme/multipath: Fix multipath disabled naming collisions
  nvme/multipath: Disable runtime writable enabling parameter
  nvme: Set integrity flag for user passthrough commands
  nvme: fix potential memory leak in option parsing
  bdi: Fix use after free bug in debugfs_remove()
  bdi: wake up concurrent wb_shutdown() callers.
  bcache: use pr_info() to inform duplicated CACHE_SET_IO_DISABLE set
  bcache: set dc->io_disable to true in conditional_stop_bcache_device()
  bcache: add wait_for_kthread_stop() in bch_allocator_thread()
  bcache: count backing device I/O error for writeback I/O
  bcache: set CACHE_SET_IO_DISABLE in bch_cached_dev_error()
  bcache: store disk name in struct cache and struct cached_dev
  blk-mq: fix sysfs inflight counter
  blk-mq: count allocated but not started requests in iostats inflight
parents 2e171ffc b8b78495
Loading
Loading
Loading
Loading
+28 −12
Original line number Diff line number Diff line
@@ -95,19 +95,16 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
{
	struct mq_inflight *mi = priv;

	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
	/*
		 * index[0] counts the specific partition that was asked
		 * for. index[1] counts the ones that are active on the
		 * whole device, so increment that if mi->part is indeed
		 * a partition, and not a whole device.
	 * index[0] counts the specific partition that was asked for. index[1]
	 * counts the ones that are active on the whole device, so increment
	 * that if mi->part is indeed a partition, and not a whole device.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;
	if (mi->part->partno)
		mi->inflight[1]++;
}
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
@@ -118,6 +115,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

/*
 * Per-request callback for blk_mq_in_flight_rw(): when the request
 * belongs to the partition being queried, bump the counter bucket
 * selected by its data direction (rq_data_dir() indexes inflight[]).
 */
static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *counts = priv;

	if (rq->part != counts->part)
		return;

	counts->inflight[rq_data_dir(rq)]++;
}

/*
 * Fill inflight[] with the number of requests in flight on @part,
 * split per data direction, by iterating the queue's busy tags with
 * blk_mq_check_inflight_rw() as the accumulator callback.
 */
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = {
		.part = part,
		.inflight = inflight,
	};

	inflight[0] = 0;
	inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;
+3 −1
Original line number Diff line number Diff line
@@ -189,6 +189,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
+12 −0
Original line number Diff line number Diff line
@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
	}
}

/*
 * Report per-direction in-flight counts for @part. Legacy queues keep
 * atomic counters directly in the hd_struct; blk-mq queues derive the
 * counts by iterating busy tags via blk_mq_in_flight_rw().
 */
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
		       unsigned int inflight[2])
{
	if (!q->mq_ops) {
		inflight[0] = atomic_read(&part->in_flight[0]);
		inflight[1] = atomic_read(&part->in_flight[1]);
		return;
	}

	blk_mq_in_flight_rw(q, part, inflight);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
{
	struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
+6 −4
Original line number Diff line number Diff line
@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
		jiffies_to_msecs(part_stat_read(p, time_in_queue)));
}

ssize_t part_inflight_show(struct device *dev,
			struct device_attribute *attr, char *buf)
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q = part_to_disk(p)->queue;
	unsigned int inflight[2];

	return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
		atomic_read(&p->in_flight[1]));
	part_in_flight_rw(q, p, inflight);
	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST
+4 −1
Original line number Diff line number Diff line
@@ -290,7 +290,7 @@ do { \
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			return 0;					\
			goto out;					\
		}							\
									\
		schedule();						\
@@ -378,6 +378,9 @@ static int bch_allocator_thread(void *arg)
			bch_prio_write(ca);
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */
Loading