Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d74c6d51 authored by Kent Overstreet
Browse files

block: Add bio_for_each_segment_all()



__bio_for_each_segment() iterates bvecs from the specified index
instead of bio->bv_idx.  Currently, the only usage is to walk all the
bvecs after the bio has been advanced by specifying 0 index.

For immutable bvecs, we need to split these apart;
bio_for_each_segment() is going to have a different implementation.
This will also help document the intent of code that's using it -
bio_for_each_segment_all() is only legal to use for code that owns the
bio.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: Neil Brown <neilb@suse.de>
CC: Boaz Harrosh <bharrosh@panasas.com>
parent 6bc454d1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -952,7 +952,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
	bio_for_each_segment(bv, bio_src, idx) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
+1 −1
Original line number Diff line number Diff line
@@ -1291,7 +1291,7 @@ read_again:
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
			bio_for_each_segment_all(bvec, mbio, j)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
+6 −6
Original line number Diff line number Diff line
@@ -961,7 +961,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
	int iov_idx = 0;
	unsigned int iov_off = 0;

	__bio_for_each_segment(bvec, bio, i, 0) {
	bio_for_each_segment_all(bvec, bio, i) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = iovecs[i].bv_len;

@@ -1143,7 +1143,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
	return bio;
cleanup:
	if (!map_data)
		bio_for_each_segment(bvec, bio, i)
		bio_for_each_segment_all(bvec, bio, i)
			__free_page(bvec->bv_page);

	bio_put(bio);
@@ -1357,7 +1357,7 @@ static void __bio_unmap_user(struct bio *bio)
	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

@@ -1463,7 +1463,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
	int i;
	char *p = bmd->sgvecs[0].iov_base;

	__bio_for_each_segment(bvec, bio, i, 0) {
	bio_for_each_segment_all(bvec, bio, i) {
		char *addr = page_address(bvec->bv_page);
		int len = bmd->iovecs[i].bv_len;

@@ -1503,7 +1503,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
	if (!reading) {
		void *p = data;

		bio_for_each_segment(bvec, bio, i) {
		bio_for_each_segment_all(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			memcpy(addr, p, bvec->bv_len);
@@ -1789,7 +1789,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
	if (index >= bio->bi_idx)
		index = bio->bi_vcnt - 1;

	__bio_for_each_segment(bv, bio, i, 0) {
	bio_for_each_segment_all(bv, bio, i) {
		if (i == index) {
			if (offset > bv->bv_offset)
				sectors += (offset - bv->bv_offset) / sector_sz;
+1 −1
Original line number Diff line number Diff line
@@ -401,7 +401,7 @@ static void _clear_bio(struct bio *bio)
	struct bio_vec *bv;
	unsigned i;

	__bio_for_each_segment(bv, bio, i, 0) {
	bio_for_each_segment_all(bv, bio, i) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
+1 −1
Original line number Diff line number Diff line
@@ -432,7 +432,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
		if (!bio)
			continue;

		__bio_for_each_segment(bv, bio, i, 0) {
		bio_for_each_segment_all(bv, bio, i) {
			struct page *page = bv->bv_page;

			SetPageUptodate(page);
Loading