
Commit 59d43914 authored by Akinobu Mita, committed by Linus Torvalds

vfs: make guard_bh_eod() more generic



This patchset implements the readpages() operation for block devices by using
mpage_readpages(), which can create multipage BIOs instead of a BIO per page
and so reduces system CPU time consumption.
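
For orientation, a minimal sketch of the kind of readpages() hookup the series
is building toward, where the block device's address_space_operations delegate
to mpage_readpages(). This is an illustration only, not part of this patch;
the names blkdev_readpages and blkdev_get_block are assumed here to follow the
existing block-device helper conventions.

static int blkdev_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	/* Let the mpage helper batch the pages into multipage BIOs. */
	return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}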

This patch (of 3):

guard_bh_eod() is used in submit_bh() to allow us to do IO even on the odd
last sectors of a device, even if the block size is some multiple of the
physical sector size.  This patch makes guard_bh_eod() more generic and
renames it to guard_bio_eod() so that it can be used without a struct
buffer_head argument.
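
A worked example with hypothetical numbers: for a 3584-byte device (seven
512-byte sectors) and a 4 KiB read bio submitted at sector 0, the new code
computes

	maxsector                 = 3584 >> 9       = 7
	bio->bi_iter.bi_size >> 9 = 4096 >> 9       = 8    (8 > 7, so the bio straddles the end of the device)
	truncated_bytes           = 4096 - (7 << 9) = 512

so bi_size is trimmed to 3584 and, for a READ, the final 512 bytes of the last
bio_vec's page are zeroed.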

The reason for this change is that using mpage_readpages() for block devices
requires adding this guard check to the mpage code.
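
A rough sketch of that follow-up step, assumed here rather than taken from
this patch (it would also require making guard_bio_eod() visible outside
fs/buffer.c, for example through fs/internal.h): the mpage submission path
could apply the same guard before issuing the bio.

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	/* Trim a bio that straddles the end of the device, as in submit_bh(). */
	guard_bio_eod(rw, bio);
	submit_bio(rw, bio);
	return NULL;
}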

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 21bb9bd1
+12 −14
@@ -2956,7 +2956,7 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
 
 /*
  * This allows us to do IO even on the odd last sectors
- * of a device, even if the bh block size is some multiple
+ * of a device, even if the block size is some multiple
  * of the physical sector size.
  *
  * We'll just truncate the bio to the size of the device,
@@ -2966,10 +2966,11 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
+static void guard_bio_eod(int rw, struct bio *bio)
 {
 	sector_t maxsector;
-	unsigned bytes;
+	struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+	unsigned truncated_bytes;
 
 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (!maxsector)
@@ -2984,23 +2985,20 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
 		return;
 
 	maxsector -= bio->bi_iter.bi_sector;
-	bytes = bio->bi_iter.bi_size;
-	if (likely((bytes >> 9) <= maxsector))
+	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
 		return;
 
-	/* Uhhuh. We've got a bh that straddles the device size! */
-	bytes = maxsector << 9;
+	/* Uhhuh. We've got a bio that straddles the device size! */
+	truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
 
 	/* Truncate the bio.. */
-	bio->bi_iter.bi_size = bytes;
-	bio->bi_io_vec[0].bv_len = bytes;
+	bio->bi_iter.bi_size -= truncated_bytes;
+	bvec->bv_len -= truncated_bytes;
 
 	/* ..and clear the end of the buffer for reads */
 	if ((rw & RW_MASK) == READ) {
-		void *kaddr = kmap_atomic(bh->b_page);
-		memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
-		kunmap_atomic(kaddr);
-		flush_dcache_page(bh->b_page);
+		zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
+				truncated_bytes);
 	}
 }
 
@@ -3041,7 +3039,7 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 	bio->bi_flags |= bio_flags;
 
 	/* Take care of bh's that straddle the end of the device */
-	guard_bh_eod(rw, bio, bh);
+	guard_bio_eod(rw, bio);
 
 	if (buffer_meta(bh))
 		rw |= REQ_META;