
Commit 133bb595 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "This is a bit bigger than it should be, but I could (did) not want to
  send it off last week due to both wanting extra testing, and expecting
  a fix for the bounce regression as well.  In any case, this contains:

   - Fix for the blk-merge.c compilation warning on gcc 5.x from me.

   - A set of back/front SG gap merge fixes, from me and from Sagi.
     This ensures that we honor SG gapping for integrity payloads as
     well.

   - Two small fixes for null_blk from Matias, fixing a leak and a
     capacity propagation issue.

   - A blkcg fix from Tejun, fixing a NULL dereference.

   - A fast clone optimization from Ming, fixing a performance
     regression since the arbitrarily sized bio's were introduced.

   - Also from Ming, a regression fix for bouncing IOs"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix bounce_end_io
  block: blk-merge: fast-clone bio when splitting rw bios
  block: blkg_destroy_all() should clear q->root_blkg and ->root_rl.blkg
  block: Copy a user iovec if it includes gaps
  block: Refuse adding appending a gapped integrity page to a bio
  block: Refuse request/bio merges with gaps in the integrity payload
  block: Check for gaps on front and back merges
  null_blk: fix wrong capacity when bs is not 512 bytes
  null_blk: fix memory leak on cleanup
  block: fix bogus compiler warnings in blk-merge.c
parents 590dca3a 99451879
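
Most of the fixes in this pull revolve around the same "SG gap" rule: on a queue that sets a virt_boundary mask (NVMe with its PRP lists is the usual example), every data vector after the first must start on the boundary and every vector before the last must end on it, otherwise the hardware cannot describe the pair with a single contiguous entry. The merge and split paths in the hunks below refuse to place two vectors next to each other when that rule would be violated. The following stand-alone sketch shows only the arithmetic; the 4 KiB mask and the helper name virt_gap() are illustrative, the kernel's equivalent is bvec_gap_to_prev() tested against queue_virt_boundary(q):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: a 4 KiB virt boundary, as an NVMe-style queue would set. */
#define VIRT_BOUNDARY_MASK 0xfffUL

/*
 * Two adjacent vectors "gap" if the earlier one does not end exactly on the
 * boundary or the later one does not start exactly on it; such a pair cannot
 * be covered by one SG/PRP entry, so the bio must be split (or the merge
 * refused) instead.
 */
static bool virt_gap(unsigned long prev_offset, unsigned long prev_len,
		     unsigned long next_offset)
{
	return (next_offset & VIRT_BOUNDARY_MASK) ||
	       ((prev_offset + prev_len) & VIRT_BOUNDARY_MASK);
}

int main(void)
{
	/* previous vector fills its page, next one starts page-aligned: no gap */
	printf("%d\n", virt_gap(0, 4096, 0));	/* prints 0 */
	/* previous vector ends mid-boundary: gap, must split */
	printf("%d\n", virt_gap(0, 512, 0));	/* prints 1 */
	/* next vector starts mid-boundary: gap as well */
	printf("%d\n", virt_gap(0, 4096, 512));	/* prints 1 */
	return 0;
}

The new iovec_gap_to_prv() in blk-map.c and the integrity checks in bio-integrity.c and blk-integrity.c below all reduce to this test.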
block/bio-integrity.c: +5 −0
@@ -140,6 +140,11 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,

 	iv = bip->bip_vec + bip->bip_vcnt;
 
+	if (bip->bip_vcnt &&
+	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+		return 0;
+
 	iv->bv_page = page;
 	iv->bv_len = len;
 	iv->bv_offset = offset;
block/blk-cgroup.c: +3 −0
@@ -370,6 +370,9 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
+
+	q->root_blkg = NULL;
+	q->root_rl.blkg = NULL;
 }
 
 /*
block/blk-integrity.c: +3 −0
@@ -204,6 +204,9 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 	    q->limits.max_integrity_segments)
 		return false;
 
+	if (integrity_req_gap_back_merge(req, next->bio))
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(blk_integrity_merge_rq);
block/blk-map.c: +24 −2
@@ -9,6 +9,24 @@

 #include "blk.h"
 
+static bool iovec_gap_to_prv(struct request_queue *q,
+			     struct iovec *prv, struct iovec *cur)
+{
+	unsigned long prev_end;
+
+	if (!queue_virt_boundary(q))
+		return false;
+
+	if (prv->iov_base == NULL && prv->iov_len == 0)
+		/* prv is not set - don't check */
+		return false;
+
+	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+		prev_end & queue_virt_boundary(q));
+}
+
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	struct bio *bio;
 	int unaligned = 0;
 	struct iov_iter i;
-	struct iovec iov;
+	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
 
 	if (!iter || !iter->count)
 		return -EINVAL;
@@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		/*
 		 * Keep going so we check length of all segments
 		 */
-		if (uaddr & queue_dma_alignment(q))
+		if ((uaddr & queue_dma_alignment(q)) ||
+		    iovec_gap_to_prv(q, &prv, &iov))
 			unaligned = 1;
+
+		prv.iov_base = iov.iov_base;
+		prv.iov_len = iov.iov_len;
 	}
 
 	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
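
The hunk above makes blk_rq_map_user_iov() treat a gapped user iovec like a misaligned one: unaligned gets set, and the branch visible in the last context line then takes the copy path (bio_copy_user_iov()) rather than mapping the user pages directly, which is what the shortlog entry "block: Copy a user iovec if it includes gaps" refers to. A user-space mirror of that walk, with the queue plumbing stripped out (the 4 KiB mask is an assumed stand-in for queue_virt_boundary(q)):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

#define VIRT_BOUNDARY_MASK 0xfffUL	/* assumed 4 KiB boundary for illustration */

/* Same test as the iovec_gap_to_prv() added above, minus the queue argument. */
static bool iovec_gap_to_prv(const struct iovec *prv, const struct iovec *cur)
{
	unsigned long prev_end;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		return false;	/* prv not set yet - nothing to compare */

	prev_end = (unsigned long)prv->iov_base + prv->iov_len;

	return ((unsigned long)cur->iov_base & VIRT_BOUNDARY_MASK) ||
	       (prev_end & VIRT_BOUNDARY_MASK);
}

/* True if any adjacent pair of iovecs would create a gap, i.e. the request
 * would have to be copied through a bounce buffer rather than mapped. */
static bool iov_has_gaps(const struct iovec *iov, size_t nr)
{
	struct iovec prv = { .iov_base = NULL, .iov_len = 0 };
	size_t i;

	for (i = 0; i < nr; i++) {
		if (iovec_gap_to_prv(&prv, &iov[i]))
			return true;
		prv = iov[i];
	}
	return false;
}

int main(void)
{
	struct iovec ok[2] = {
		{ .iov_base = (void *)0x10000, .iov_len = 4096 },
		{ .iov_base = (void *)0x20000, .iov_len = 4096 },
	};
	struct iovec gapped[2] = {
		{ .iov_base = (void *)0x10000, .iov_len = 512 },	/* ends mid-boundary */
		{ .iov_base = (void *)0x20000, .iov_len = 4096 },
	};

	printf("ok: %d, gapped: %d\n", iov_has_gaps(ok, 2), iov_has_gaps(gapped, 2));
	return 0;
}

Because prv is carried across iterations, only adjacent pairs are compared, so a single-segment iovec can never be flagged by this check.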
block/blk-merge.c: +23 −36
@@ -66,36 +66,33 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs)
 {
-	struct bio *split;
-	struct bio_vec bv, bvprv;
+	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	int prev = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		sectors += bv.bv_len >> 9;
-
-		if (sectors > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
 			goto split;
 
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;
 
-		if (prev && blk_queue_cluster(q)) {
+		if (bvprvp && blk_queue_cluster(q)) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
-			prev = 1;
+			bvprvp = &bv;
+			sectors += bv.bv_len >> 9;
 			continue;
 		}
 new_segment:
@@ -104,23 +101,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,

 		nsegs++;
 		bvprv = bv;
-		prev = 1;
+		bvprvp = &bv;
 		seg_size = bv.bv_len;
+		sectors += bv.bv_len >> 9;
 	}
 
 	return NULL;
 split:
-	split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
-	split->bi_iter.bi_size -= iter.bi_size;
-	bio->bi_iter = iter;
-
-	if (bio_integrity(bio)) {
-		bio_integrity_advance(bio, split->bi_iter.bi_size);
-		bio_integrity_trim(split, 0, bio_sectors(split));
-	}
-
-	return split;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
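
This is the fast-clone change from the shortlog ("block: blk-merge: fast-clone bio when splitting rw bios"). The old split path cloned the whole bio with bio_clone_bioset(), which duplicates the bio_vec table, and then trimmed the clone by hand; with arbitrarily sized bios that copy sits on the hot path and showed up as the performance regression mentioned in the pull message. The new code counts sectors while scanning and hands the split point to bio_split(), which clones without copying the vector table (the clone shares the parent's bi_io_vec and only keeps its own iterator window). A toy model of the difference, with purely illustrative types rather than the kernel's bio API:

#include <stdlib.h>
#include <string.h>

struct vec { void *page; unsigned int len, offset; };

struct toy_bio {
	struct vec *vecs;			/* vector table, possibly shared with a parent */
	unsigned int nr_vecs;
	unsigned int start_sector, nr_sectors;	/* the window this bio covers */
	int owns_vecs;
};

/* "Slow" clone: O(nr_vecs) copy of the table, like bio_clone_bioset(). */
static struct toy_bio *clone_slow(const struct toy_bio *src)
{
	struct toy_bio *c = malloc(sizeof(*c));

	*c = *src;
	c->vecs = malloc(src->nr_vecs * sizeof(*c->vecs));
	memcpy(c->vecs, src->vecs, src->nr_vecs * sizeof(*c->vecs));
	c->owns_vecs = 1;
	return c;
}

/* "Fast" clone: share the table, only the window is private. */
static struct toy_bio *clone_fast(const struct toy_bio *src)
{
	struct toy_bio *c = malloc(sizeof(*c));

	*c = *src;
	c->owns_vecs = 0;	/* table still belongs to src */
	return c;
}

/* Split off the first "sectors" sectors and advance the original,
 * roughly the shape of the new "return bio_split(bio, sectors, ...)". */
static struct toy_bio *split(struct toy_bio *b, unsigned int sectors)
{
	struct toy_bio *front = clone_fast(b);

	front->nr_sectors = sectors;
	b->start_sector += sectors;
	b->nr_sectors -= sectors;
	return front;
}

int main(void)
{
	struct vec table[2] = { { NULL, 4096, 0 }, { NULL, 4096, 0 } };
	struct toy_bio bio = { table, 2, 0, 16, 0 };

	struct toy_bio *old_way = clone_slow(&bio);	/* copies the table */
	struct toy_bio *front = split(&bio, 8);		/* shares the table */

	free(old_way->vecs);
	free(old_way);
	free(front);
	return 0;
}

The accounting change in the hunks above (adding bv.bv_len >> 9 to sectors only after a vector is accepted) is what lets the split point be expressed as a plain sector count for bio_split().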
@@ -439,6 +427,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
+	if (req_gap_back_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_back_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -457,6 +450,12 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
+
+	if (req_gap_front_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_front_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -483,14 +482,6 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct bio *next)
-{
-	struct bio *prev = req->biotail;
-
-	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-			next->bi_io_vec[0].bv_offset);
-}
-
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -505,7 +496,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (req_gap_to_prev(req, next->bio))
+	if (req_gap_back_merge(req, next->bio))
 		return 0;
 
 	/*
@@ -713,10 +704,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	/* Only check gaps if the bio carries data */
-	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
-		return false;
-
 	return true;
 }
 
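The last few hunks replace the open-coded req_gap_to_prev() with helpers used symmetrically in both merge directions: a back merge compares the last vector of the request's biotail against the first vector of the incoming bio, a front merge compares the last vector of the incoming bio against the first vector of the request's head bio, and the integrity_req_gap_*_merge() variants apply the identical test to the bip_vec integrity vectors (see the bio-integrity.c and blk-integrity.c hunks above). A rough sketch of the shape of those checks, using cut-down stand-in types rather than struct request / struct bio (the real helpers live in the block headers):

#include <stdbool.h>
#include <stdio.h>

#define VIRT_BOUNDARY_MASK 0xfffUL		/* assumed boundary, as in the sketch above */

struct vec { unsigned long addr; unsigned long len; };
struct chain { struct vec *vecs; unsigned int nr; };	/* stand-in for a bio's vector list */

/* same test as before: prev must end on the boundary, next must start on it */
static bool gap_between(const struct vec *prev, const struct vec *next)
{
	return (next->addr & VIRT_BOUNDARY_MASK) ||
	       ((prev->addr + prev->len) & VIRT_BOUNDARY_MASK);
}

/* back merge: the incoming chain is appended, so the request's tail meets its head */
static bool gap_back_merge(const struct chain *req_tail, const struct chain *incoming)
{
	return gap_between(&req_tail->vecs[req_tail->nr - 1], &incoming->vecs[0]);
}

/* front merge: the incoming chain is prepended, so its tail meets the request's head */
static bool gap_front_merge(const struct chain *req_head, const struct chain *incoming)
{
	return gap_between(&incoming->vecs[incoming->nr - 1], &req_head->vecs[0]);
}

int main(void)
{
	struct vec a[1] = { { 0x10000, 4096 } };	/* ends on the boundary */
	struct vec b[1] = { { 0x20200, 512 } };		/* starts and ends mid-boundary */
	struct chain req = { a, 1 }, bio = { b, 1 };

	printf("back: %d front: %d\n", gap_back_merge(&req, &bio),
	       gap_front_merge(&req, &bio));
	return 0;
}

Having one pair of helpers for data and one for integrity keeps ll_back_merge_fn(), ll_front_merge_fn(), ll_merge_requests_fn() and blk_integrity_merge_rq() all applying the same boundary rule, which is the point of the series.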