
Commit 7fbc78e3 authored by Linus Torvalds

Merge tag 'for-linus-20190524' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request from Keith, with fixes from a few folks.

 - bio and sbitmap before atomic barrier fixes (Andrea)

 - Hang fix for blk-mq freeze and unfreeze (Bob)

 - Single segment count regression fix (Christoph)

 - AoE now has a new maintainer

 - tools/io_uring/ Makefile fix, and sync with liburing (me)

* tag 'for-linus-20190524' of git://git.kernel.dk/linux-block: (23 commits)
  tools/io_uring: sync with liburing
  tools/io_uring: fix Makefile for pthread library link
  blk-mq: fix hang caused by freeze/unfreeze sequence
  block: remove the bi_seg_{front,back}_size fields in struct bio
  block: remove the segment size check in bio_will_gap
  block: force an unlimited segment size on queues with a virt boundary
  block: don't decrement nr_phys_segments for physically contigous segments
  sbitmap: fix improper use of smp_mb__before_atomic()
  bio: fix improper use of smp_mb__before_atomic()
  aoe: list new maintainer for aoe driver
  nvme-pci: use blk-mq mapping for unmanaged irqs
  nvme: update MAINTAINERS
  nvme: copy MTFA field from identify controller
  nvme: fix memory leak for power latency tolerance
  nvme: release namespace SRCU protection before performing controller ioctls
  nvme: merge nvme_ns_ioctl into nvme_ioctl
  nvme: remove the ifdef around nvme_nvm_ioctl
  nvme: fix srcu locking on error return in nvme_get_ns_from_disk
  nvme: Fix known effects
  nvme-pci: Sync queues on reset
  ...
parents 7f8b40e3 096c7a6d
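
A note on the two barrier fixes in this pull ("sbitmap: fix improper use of smp_mb__before_atomic()" and the matching bio patch): smp_mb__before_atomic() is documented to order only against atomic read-modify-write operations such as atomic_inc() or clear_bit(); placed in front of a plain atomic store or load it guarantees nothing. A minimal sketch of the broken and corrected shapes, using a hypothetical `ready` flag rather than the actual patched call sites:

#include <linux/atomic.h>

static atomic_t ready = ATOMIC_INIT(0);

/* Broken: atomic_set() is a plain store, not a read-modify-write,
 * so smp_mb__before_atomic() pairs with nothing here and earlier
 * accesses may be reordered past the store on some architectures.
 */
static void publish_broken(void)
{
	smp_mb__before_atomic();
	atomic_set(&ready, 1);
}

/* Corrected shape: a full smp_mb() orders all earlier accesses
 * before the store.
 */
static void publish_fixed(void)
{
	smp_mb();
	atomic_set(&ready, 1);
}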
MAINTAINERS +2 −2
@@ -2627,7 +2627,7 @@ F: Documentation/devicetree/bindings/eeprom/at24.txt
 F:	drivers/misc/eeprom/at24.c
 
 ATA OVER ETHERNET (AOE) DRIVER
-M:	"Ed L. Cashin" <ed.cashin@acm.org>
+M:	"Justin Sanders" <justin@coraid.com>
 W:	http://www.openaoe.org/
 S:	Supported
 F:	Documentation/aoe/
@@ -11226,7 +11226,7 @@ F: drivers/video/fbdev/riva/
 F:	drivers/video/fbdev/nvidia/
 
 NVM EXPRESS DRIVER
-M:	Keith Busch <keith.busch@intel.com>
+M:	Keith Busch <kbusch@kernel.org>
 M:	Jens Axboe <axboe@fb.com>
 M:	Christoph Hellwig <hch@lst.de>
 M:	Sagi Grimberg <sagi@grimberg.me>
block/blk-core.c +2 −1
@@ -413,7 +413,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		smp_rmb();
 
 		wait_event(q->mq_freeze_wq,
-			   (atomic_read(&q->mq_freeze_depth) == 0 &&
+			   (!q->mq_freeze_depth &&
 			    (pm || (blk_pm_request_resume(q),
 				    !blk_queue_pm_only(q)))) ||
 			   blk_queue_dying(q));
@@ -503,6 +503,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	spin_lock_init(&q->queue_lock);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
+	mutex_init(&q->mq_freeze_lock);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
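
Why `atomic_read(&q->mq_freeze_depth)` can become a plain `!q->mq_freeze_depth`: with the hang fix (see the block/blk-mq.c hunks below), the depth is only written under q->mq_freeze_lock, and the lockless read in wait_event() is the usual racy predicate check, re-evaluated after every wake_up_all() from the unfreeze path. A reduced user-space sketch of that wait-then-recheck pattern, with hypothetical pthread-based names:

#include <pthread.h>

/* freeze_depth is written only with freeze_lock held; waiters sleep
 * on freeze_wq and re-check the predicate after every wake-up, as
 * wait_event()/wake_up_all() do in the kernel.
 */
static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t freeze_wq = PTHREAD_COND_INITIALIZER;
static int freeze_depth;

static void queue_enter_wait(void)
{
	pthread_mutex_lock(&freeze_lock);
	while (freeze_depth != 0)	/* the re-checked condition */
		pthread_cond_wait(&freeze_wq, &freeze_lock);
	pthread_mutex_unlock(&freeze_lock);
}

static void unfreeze(void)
{
	pthread_mutex_lock(&freeze_lock);
	if (--freeze_depth == 0)
		pthread_cond_broadcast(&freeze_wq);	/* wake_up_all() analog */
	pthread_mutex_unlock(&freeze_lock);
}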
block/blk-merge.c +13 −121
@@ -12,23 +12,6 @@
 
 #include "blk.h"
 
-/*
- * Check if the two bvecs from two bios can be merged to one segment.  If yes,
- * no need to check gap between the two bios since the 1st bio and the 1st bvec
- * in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
-		struct bio *prev, struct bio_vec *prev_last_bv,
-		struct bio_vec *next_first_bv)
-{
-	if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
-		return false;
-	if (prev->bi_seg_back_size + next_first_bv->bv_len >
-			queue_max_segment_size(q))
-		return false;
-	return true;
-}
-
 static inline bool bio_will_gap(struct request_queue *q,
 		struct request *prev_rq, struct bio *prev, struct bio *next)
 {
@@ -60,7 +43,7 @@ static inline bool bio_will_gap(struct request_queue *q,
 	 */
 	bio_get_last_bvec(prev, &pb);
 	bio_get_first_bvec(next, &nb);
-	if (bios_segs_mergeable(q, prev, &pb, &nb))
+	if (biovec_phys_mergeable(q, &pb, &nb))
 		return false;
 	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
 }
@@ -179,8 +162,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
  * variables.
  */
 static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
-		unsigned *nsegs, unsigned *last_seg_size,
-		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
+		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
 {
 	unsigned len = bv->bv_len;
 	unsigned total_len = 0;
@@ -202,27 +184,11 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 			break;
 	}
 
-	if (!new_nsegs)
-		return !!len;
-
-	/* update front segment size */
-	if (!*nsegs) {
-		unsigned first_seg_size;
-
-		if (new_nsegs == 1)
-			first_seg_size = get_max_segment_size(q, bv->bv_offset);
-		else
-			first_seg_size = queue_max_segment_size(q);
-
-		if (*front_seg_size < first_seg_size)
-			*front_seg_size = first_seg_size;
-	}
-
-	/* update other varibles */
-	*last_seg_size = seg_size;
-	*nsegs += new_nsegs;
-	if (sectors)
-		*sectors += total_len >> 9;
+	if (new_nsegs) {
+		*nsegs += new_nsegs;
+		if (sectors)
+			*sectors += total_len >> 9;
+	}
 
 	/* split in the middle of the bvec if len != 0 */
 	return !!len;
@@ -235,8 +201,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
-	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	unsigned front_seg_size = bio->bi_seg_front_size;
+	unsigned nsegs = 0, sectors = 0;
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
@@ -260,8 +225,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 				/* split in the middle of bvec */
 				bv.bv_len = (max_sectors - sectors) << 9;
 				bvec_split_segs(q, &bv, &nsegs,
-						&seg_size,
-						&front_seg_size,
 						&sectors, max_segs);
 			}
 			goto split;
@@ -275,12 +238,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 			nsegs++;
-			seg_size = bv.bv_len;
 			sectors += bv.bv_len >> 9;
-			if (nsegs == 1 && seg_size > front_seg_size)
-				front_seg_size = seg_size;
-		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
-				    &front_seg_size, &sectors, max_segs)) {
+		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+				max_segs)) {
 			goto split;
 		}
 	}
@@ -295,10 +255,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			bio = new;
 	}
 
-	bio->bi_seg_front_size = front_seg_size;
-	if (seg_size > bio->bi_seg_back_size)
-		bio->bi_seg_back_size = seg_size;
-
 	return do_split ? new : NULL;
 }

@@ -353,18 +309,13 @@ EXPORT_SYMBOL(blk_queue_split);
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	struct bio_vec uninitialized_var(bv), bvprv = { NULL };
-	unsigned int seg_size, nr_phys_segs;
-	unsigned front_seg_size;
-	struct bio *fbio, *bbio;
+	unsigned int nr_phys_segs = 0;
 	struct bvec_iter iter;
-	bool new_bio = false;
+	struct bio_vec bv;
 
 	if (!bio)
 		return 0;
 
-	front_seg_size = bio->bi_seg_front_size;
-
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
@@ -374,42 +325,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 1;
 	}
 
-	fbio = bio;
-	seg_size = 0;
-	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_bvec(bv, bio, iter) {
-			if (new_bio) {
-				if (seg_size + bv.bv_len
-				    > queue_max_segment_size(q))
-					goto new_segment;
-				if (!biovec_phys_mergeable(q, &bvprv, &bv))
-					goto new_segment;
-
-				seg_size += bv.bv_len;
-
-				if (nr_phys_segs == 1 && seg_size >
-						front_seg_size)
-					front_seg_size = seg_size;
-
-				continue;
-			}
-new_segment:
-			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
-					&front_seg_size, NULL, UINT_MAX);
-			new_bio = false;
-		}
-		bbio = bio;
-		if (likely(bio->bi_iter.bi_size)) {
-			bvprv = bv;
-			new_bio = true;
-		}
+		bio_for_each_bvec(bv, bio, iter)
+			bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
 	}
 
-	fbio->bi_seg_front_size = front_seg_size;
-	if (seg_size > bbio->bi_seg_back_size)
-		bbio->bi_seg_back_size = seg_size;
-
 	return nr_phys_segs;
 }

@@ -429,24 +349,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	bio_set_flag(bio, BIO_SEG_VALID);
 }
 
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
-				   struct bio *nxt)
-{
-	struct bio_vec end_bv = { NULL }, nxt_bv;
-
-	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    queue_max_segment_size(q))
-		return 0;
-
-	if (!bio_has_data(bio))
-		return 1;
-
-	bio_get_last_bvec(bio, &end_bv);
-	bio_get_first_bvec(nxt, &nxt_bv);
-
-	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
-}
-
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
@@ -706,8 +608,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
-	unsigned int seg_size =
-		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
 	if (req_gap_back_merge(req, next->bio))
 		return 0;
@@ -720,14 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
-		if (req->nr_phys_segments == 1)
-			req->bio->bi_seg_front_size = seg_size;
-		if (next->nr_phys_segments == 1)
-			next->biotail->bi_seg_back_size = seg_size;
-		total_phys_segments--;
-	}
-
 	if (total_phys_segments > queue_max_segments(q))
 		return 0;
block/blk-mq.c +10 −9
@@ -144,13 +144,14 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
-	int freeze_depth;
-
-	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
-	if (freeze_depth == 1) {
+	mutex_lock(&q->mq_freeze_lock);
+	if (++q->mq_freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
+		mutex_unlock(&q->mq_freeze_lock);
 		if (queue_is_mq(q))
 			blk_mq_run_hw_queues(q, false);
+	} else {
+		mutex_unlock(&q->mq_freeze_lock);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -199,14 +200,14 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	int freeze_depth;
-
-	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
-	WARN_ON_ONCE(freeze_depth < 0);
-	if (!freeze_depth) {
+	mutex_lock(&q->mq_freeze_lock);
+	q->mq_freeze_depth--;
+	WARN_ON_ONCE(q->mq_freeze_depth < 0);
+	if (!q->mq_freeze_depth) {
 		percpu_ref_resurrect(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
+	mutex_unlock(&q->mq_freeze_lock);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
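
The hang fixed here came from the atomic counter making the depth transition atomic while leaving its side effect outside it: a percpu_ref_kill() chosen by one task and a percpu_ref_resurrect() chosen by another could execute in the opposite order of the counter transitions that selected them. Holding mq_freeze_lock across both the transition and the kill/resurrect makes each pair indivisible. A hypothetical model of that invariant (ref_alive stands in for q_usage_counter; not kernel code):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct model_queue {
	pthread_mutex_t lock;
	int depth;
	bool ref_alive;
};

/* Because the transition and its side effect share one critical
 * section, kill is only ever issued on a live ref and resurrect on a
 * dead one; with a bare atomic counter the two could be swapped.
 */
static void model_freeze_start(struct model_queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (++q->depth == 1) {
		assert(q->ref_alive);	/* percpu_ref_kill() stand-in */
		q->ref_alive = false;
	}
	pthread_mutex_unlock(&q->lock);
}

static void model_unfreeze(struct model_queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (--q->depth == 0) {
		assert(!q->ref_alive);	/* percpu_ref_resurrect() stand-in */
		q->ref_alive = true;
	}
	pthread_mutex_unlock(&q->lock);
}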

block/blk-settings.c +11 −0
@@ -310,6 +310,9 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 		       __func__, max_size);
 	}
 
+	/* see blk_queue_virt_boundary() for the explanation */
+	WARN_ON_ONCE(q->limits.virt_boundary_mask);
+
 	q->limits.max_segment_size = max_size;
 }
 EXPORT_SYMBOL(blk_queue_max_segment_size);
@@ -742,6 +745,14 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
 {
 	q->limits.virt_boundary_mask = mask;
+
+	/*
+	 * Devices that require a virtual boundary do not support scatter/gather
+	 * I/O natively, but instead require a descriptor list entry for each
+	 * page (which might not be idential to the Linux PAGE_SIZE).  Because
+	 * of that they are not limited by our notion of "segment size".
+	 */
+	q->limits.max_segment_size = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_queue_virt_boundary);
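
The new comment gives the rationale: a device with a virt_boundary_mask (NVMe with its per-page PRP entries is the canonical case) consumes one descriptor per device page, so "segment size" is not a meaningful limit for it; forcing max_segment_size to UINT_MAX leaves the boundary/gap check as the only merging constraint, which is what let the segment-size bookkeeping in blk-merge.c above be deleted. A hypothetical helper modeled on __bvec_gap_to_prev(), showing the rule two adjacent buffers must satisfy to continue one descriptor list:

/* Hypothetical, not kernel code: on a queue with a virt boundary, a
 * following buffer may continue the descriptor list only if the
 * previous buffer ends exactly on the boundary and the next one
 * starts exactly on it.
 */
static bool gap_between(unsigned long prev_offset, unsigned long prev_len,
			unsigned long next_offset, unsigned long mask)
{
	return ((prev_offset + prev_len) & mask) != 0 ||
	       (next_offset & mask) != 0;
}

/* e.g. with a 4 KiB boundary (mask = 0xfff): a 4 KiB buffer at offset 0
 * followed by a buffer at offset 0 merges; anything misaligned gaps.
 */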
