Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 016c76ac authored by NeilBrown; committed by Shaohua Li
Browse files

md/raid5: use bio_inc_remaining() instead of repurposing bi_phys_segments as a counter



md/raid5 needs to keep track of how many stripe_heads are processing a
bio so that it can delay calling bio_endio() until all stripe_heads
have completed.  It currently uses 16 bits of ->bi_phys_segments for
this purpose.

16 bits is only enough for 256M requests, and it is possible for a
single bio to be larger than this, which causes problems.  Also, the
bio struct contains a larger counter, __bi_remaining, which has a
purpose very similar to the purpose of our counter.  So stop using
->bi_phys_segments, and instead use __bi_remaining.

This means we don't need to initialize the counter, as our caller
initializes it to '1'.  It also means we can call bio_endio() directly
as it tests this counter internally.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent bd83d0a2
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -318,7 +318,6 @@ r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
	       dev->sector + STRIPE_SECTORS) {
		wbi2 = r5_next_bio(wbi, dev->sector);
		md_write_end(conf->mddev);
		if (!raid5_dec_bi_active_stripes(wbi))
		bio_endio(wbi);
		wbi = wbi2;
	}
+13 −44
Original line number Diff line number Diff line
@@ -1322,7 +1322,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi))
				bio_endio(rbi);
				rbi = rbi2;
			}
@@ -3196,14 +3195,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
		(unsigned long long)bi->bi_iter.bi_sector,
		(unsigned long long)sh->sector);

	/*
	 * If several bio share a stripe. The bio bi_phys_segments acts as a
	 * reference count to avoid race. The reference count should already be
	 * increased before this function is called (for example, in
	 * raid5_make_request()), so other bio sharing this stripe will not free the
	 * stripe. If a stripe is owned by one stripe, the stripe lock will
	 * protect it.
	 */
	spin_lock_irq(&sh->stripe_lock);
	/* Don't allow new IO added to stripes in batch list */
	if (sh->batch_head)
@@ -3259,7 +3250,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	raid5_inc_bi_active_stripes(bi);
	bio_inc_remaining(bi);
	md_write_inc(conf->mddev, bi);

	if (forwrite) {
@@ -3384,7 +3375,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,

			bi->bi_error = -EIO;
			md_write_end(conf->mddev);
			if (!raid5_dec_bi_active_stripes(bi))
			bio_endio(bi);
			bi = nextbi;
		}
@@ -3407,7 +3397,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,

			bi->bi_error = -EIO;
			md_write_end(conf->mddev);
			if (!raid5_dec_bi_active_stripes(bi))
			bio_endio(bi);
			bi = bi2;
		}
@@ -3433,7 +3422,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
					r5_next_bio(bi, sh->dev[i].sector);

				bi->bi_error = -EIO;
				if (!raid5_dec_bi_active_stripes(bi))
				bio_endio(bi);
				bi = nextbi;
			}
@@ -3766,7 +3754,6 @@ static void handle_stripe_clean_event(struct r5conf *conf,
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					md_write_end(conf->mddev);
					if (!raid5_dec_bi_active_stripes(wbi))
					bio_endio(wbi);
					wbi = wbi2;
				}
@@ -5112,7 +5099,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
		 * this sets the active strip count to 1 and the processed
		 * strip count to zero (upper 8 bits)
		 */
		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
		raid5_set_bi_processed_stripes(bi, 0);
	}

	return bi;
@@ -5449,7 +5436,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
	struct r5conf *conf = mddev->private;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	int remaining;
	int stripe_sectors;

	if (mddev->reshape_position != MaxSector)
@@ -5460,7 +5446,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);

	bi->bi_next = NULL;
	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
	md_write_start(mddev, bi);

	stripe_sectors = conf->chunk_sectors *
@@ -5507,7 +5492,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
				continue;
			sh->dev[d].towrite = bi;
			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
			raid5_inc_bi_active_stripes(bi);
			bio_inc_remaining(bi);
			md_write_inc(mddev, bi);
			sh->overwrite_disks++;
		}
@@ -5532,11 +5517,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
	}

	md_write_end(mddev);
	remaining = raid5_dec_bi_active_stripes(bi);
	if (remaining == 0) {
	bio_endio(bi);
}
}

static void raid5_make_request(struct mddev *mddev, struct bio * bi)
{
@@ -5546,7 +5528,6 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int remaining;
	DEFINE_WAIT(w);
	bool do_prepare;
	bool do_flush = false;
@@ -5588,7 +5569,6 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bio_end_sector(bi);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
	md_write_start(mddev, bi);

	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
@@ -5726,15 +5706,8 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)

	if (rw == WRITE)
		md_write_end(mddev);
	remaining = raid5_dec_bi_active_stripes(bi);
	if (remaining == 0) {


		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
	bio_endio(bi);
}
}

static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);

@@ -6098,7 +6071,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_iter.bi_sector &
@@ -6137,12 +6109,9 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
		raid5_release_stripe(sh);
		handled++;
	}
	remaining = raid5_dec_bi_active_stripes(raid_bio);
	if (remaining == 0) {
		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
					 raid_bio, 0);

	bio_endio(raid_bio);
	}

	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_quiescent);
	return handled;
+1 −16
Original line number Diff line number Diff line
@@ -488,8 +488,7 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 * We maintain a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
@@ -498,20 +497,6 @@ static inline int raid5_bi_processed_stripes(struct bio *bio)
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	/*
	 * The low 16 bits of the repurposed ->bi_phys_segments field hold
	 * an active-stripe reference count: drop one reference and report
	 * how many remain in that field.
	 */
	return atomic_sub_return(1, (atomic_t *)&bio->bi_phys_segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	/*
	 * Take one more active-stripe reference in the counter kept in
	 * the repurposed ->bi_phys_segments field.
	 */
	atomic_inc((atomic_t *)&bio->bi_phys_segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{