
Commit aa8b57aa authored by Kent Overstreet

block: Use bio_sectors() more consistently



A bunch of places in the code weren't using it where they could be - this will
reduce the size of the patch that puts bi_sector/bi_size/bi_idx into a
struct bvec_iter.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: "Ed L. Cashin" <ecashin@coraid.com>
CC: Nick Piggin <npiggin@kernel.dk>
CC: Jiri Kosina <jkosina@suse.cz>
CC: Jim Paris <jim@jtan.com>
CC: Geoff Levand <geoff@infradead.org>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Neil Brown <neilb@suse.de>
CC: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ed Cashin <ecashin@coraid.com>
parent f73a1c7d
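Note: bio_sectors() converts a bio's byte count (bi_size, a plain struct bio field in this pre-bvec_iter era) into 512-byte sectors. Below is a minimal userspace sketch of that conversion; the struct is a stand-in rather than the real struct bio, and the macro body reflects the helper's approximate in-tree definition at the time.

#include <assert.h>
#include <stdio.h>

/* Stand-in for the byte-count field of struct bio (pre-bvec_iter);
 * the real definition lives in the kernel headers. */
struct bio {
	unsigned int bi_size;	/* residual I/O size, in bytes */
};

/* Approximate in-tree definition of the helper this patch adopts:
 * a 512-byte sector is 2^9 bytes, so bytes >> 9 yields sectors. */
#define bio_sectors(bio)	((bio)->bi_size >> 9)

int main(void)
{
	struct bio bio = { .bi_size = 64 * 1024 };	/* hypothetical 64 KiB request */

	/* The open-coded form replaced throughout this patch... */
	unsigned int open_coded = bio.bi_size >> 9;

	/* ...is exactly what the helper computes. */
	assert(open_coded == bio_sectors(&bio));
	printf("%u bytes = %u sectors\n", bio.bi_size, bio_sectors(&bio));
	return 0;
}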
drivers/block/pktcdvd.c +1 −1
@@ -2433,7 +2433,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		cloned_bio->bi_bdev = pd->bdev;
 		cloned_bio->bi_private = psd;
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-		pd->stats.secs_r += bio->bi_size >> 9;
+		pd->stats.secs_r += bio_sectors(bio);
 		pkt_queue_bio(pd, cloned_bio);
 		return;
 	}
drivers/md/dm-raid1.c +1 −1
@@ -458,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
 {
 	io->bdev = m->dev->bdev;
 	io->sector = map_sector(m, bio);
-	io->count = bio->bi_size >> 9;
+	io->count = bio_sectors(bio);
 }
 
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
drivers/md/raid0.c +3 −3
@@ -502,11 +502,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 {
 	if (likely(is_power_of_2(chunk_sects))) {
 		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
-					+ (bio->bi_size >> 9));
+					+ bio_sectors(bio));
 	} else{
 		sector_t sector = bio->bi_sector;
 		return chunk_sects >= (sector_div(sector, chunk_sects)
-						+ (bio->bi_size >> 9));
+						+ bio_sectors(bio));
 	}
 }
 
@@ -567,7 +567,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n",
 	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
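A note on the printk() change just above (and its raid10 twin below): the old format argument, bio->bi_size >> 10, printed the request size in KiB. bio_sectors(bio) / 2 is the same value, since two 512-byte sectors make one KiB. A quick standalone check with hypothetical numbers:

#include <assert.h>

int main(void)
{
	/* Hypothetical, sector-aligned request size in bytes. */
	unsigned int bi_size = 96 * 1024;

	unsigned int kib_old = bi_size >> 10;	/* bytes -> KiB (old printk arg) */
	unsigned int sectors = bi_size >> 9;	/* bytes -> 512-byte sectors */
	unsigned int kib_new = sectors / 2;	/* 2 sectors per KiB (new printk arg) */

	assert(kib_old == kib_new);	/* the logged value is unchanged */
	return 0;
}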
drivers/md/raid1.c +8 −9
@@ -267,7 +267,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 			 (unsigned long long) bio->bi_sector,
 			 (unsigned long long) bio->bi_sector +
-			 (bio->bi_size >> 9) - 1);
+			 bio_sectors(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -458,7 +458,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
 					 " %llu-%llu\n",
 					 (unsigned long long) mbio->bi_sector,
 					 (unsigned long long) mbio->bi_sector +
-					 (mbio->bi_size >> 9) - 1);
+					 bio_sectors(mbio) - 1);
 				call_bio_endio(r1_bio);
 			}
 		}
@@ -1049,7 +1049,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 	r1_bio->master_bio = bio;
-	r1_bio->sectors = bio->bi_size >> 9;
+	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
 	r1_bio->sector = bio->bi_sector;
@@ -1127,7 +1127,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 			r1_bio->master_bio = bio;
-			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
 			r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1329,14 +1329,14 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* Mustn't call r1_bio_write_done before this next test,
 	 * as it could result in the bio being freed.
 	 */
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		r1_bio_write_done(r1_bio);
 		/* We need another r1_bio.  It has already been counted
 		 * in bio->bi_phys_segments
 		 */
 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 		r1_bio->master_bio = bio;
-		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
 		r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1947,7 +1947,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		wbio->bi_rw = WRITE;
 		wbio->bi_end_io = end_sync_write;
 		atomic_inc(&r1_bio->remaining);
-		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
 		generic_make_request(wbio);
 	}
@@ -2284,8 +2284,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 			r1_bio->master_bio = mbio;
-			r1_bio->sectors = (mbio->bi_size >> 9)
-					  - sectors_handled;
+			r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
 			r1_bio->state = 0;
 			set_bit(R1BIO_ReadError, &r1_bio->state);
 			r1_bio->mddev = mddev;
drivers/md/raid10.c +11 −13
@@ -1169,7 +1169,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it.  This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
@@ -1209,7 +1209,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	bad_map:
 		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+		       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 		bio_io_error(bio);
 		return;
@@ -1224,7 +1224,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	sectors = bio->bi_size >> 9;
+	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio->bi_sector < conf->reshape_progress &&
 	    bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1326,8 +1326,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 			r10_bio->master_bio = bio;
-			r10_bio->sectors = ((bio->bi_size >> 9)
-					    - sectors_handled);
+			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r10_bio->state = 0;
 			r10_bio->mddev = mddev;
 			r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1569,7 +1568,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 * after checking if we need to go around again.
 	 */
 
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		one_write_done(r10_bio);
 		/* We need another r10_bio.  It has already been counted
 		 * in bio->bi_phys_segments.
@@ -1577,7 +1576,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2103,7 +2102,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2128,7 +2127,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     tbio->bi_size >> 9);
+			     bio_sectors(tbio));
 		generic_make_request(tbio);
 	}
 
@@ -2254,13 +2253,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	wbio2 = r10_bio->devs[1].repl_bio;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
 	if (wbio2 && wbio2->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     wbio2->bi_size >> 9);
+			     bio_sectors(wbio2));
 		generic_make_request(wbio2);
 	}
 }
@@ -2690,8 +2689,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 		r10_bio = mempool_alloc(conf->r10bio_pool,
 					GFP_NOIO);
 		r10_bio->master_bio = mbio;
-		r10_bio->sectors = (mbio->bi_size >> 9)
-			- sectors_handled;
+		r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
 		r10_bio->state = 0;
 		set_bit(R10BIO_ReadError,
 			&r10_bio->state);