
Commit e7836bd6 authored by Shaohua Li, committed by NeilBrown

raid5: lockless access to raid5-overridden bi_phys_segments



Raid5 overrides bio->bi_phys_segments and currently accesses it with device_lock held, which is unnecessary. We can make the access lockless.

Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
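
The patch funnels every access to bi_phys_segments through the raid5_* helpers and converts those helpers from plain arithmetic under conf->device_lock to atomic read-modify-write operations, which keeps the counter semantics while letting every caller drop the lock. A minimal userspace sketch of the same pattern, using C11 stdatomic in place of the kernel's atomic_t and with illustrative helper names, could look like this:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for bio->bi_phys_segments: the low 16 bits hold a biased count of
 * active stripes, the high 16 bits count processed stripes; every update is
 * an atomic RMW, so no spinlock is needed around it. */
static _Atomic unsigned int segments;

static void inc_active_stripes(void)			/* like raid5_inc_bi_active_stripes() */
{
	atomic_fetch_add(&segments, 1);
}

static unsigned int dec_active_stripes(void)		/* like raid5_dec_bi_active_stripes() */
{
	/* fetch_sub returns the old value; subtracting 1 gives the new one,
	 * matching atomic_sub_return(1, segments) & 0xffff in the patch. */
	return (atomic_fetch_sub(&segments, 1) - 1) & 0xffff;
}

static void set_processed_stripes(unsigned int cnt)	/* like raid5_set_bi_processed_stripes() */
{
	unsigned int old = atomic_load(&segments);
	unsigned int new;

	/* Replace only the high half; retry if the active count changed underneath us. */
	do {
		new = (old & 0xffff) | (cnt << 16);
	} while (!atomic_compare_exchange_weak(&segments, &old, new));
}

int main(void)
{
	atomic_store(&segments, 1);	/* biased initial count, as in remove_bio_from_retry() */
	inc_active_stripes();		/* a stripe takes a reference on the bio */
	set_processed_stripes(3);	/* does not disturb the active count */

	dec_active_stripes();		/* stripe finished */
	if (dec_active_stripes() == 0)	/* final decrement drops the bias */
		printf("all stripes done, the bio would be completed here\n");
	return 0;
}

In the driver the same effect is obtained by casting &bio->bi_phys_segments to atomic_t *, which is why the spin_lock_irq(&conf->device_lock)/spin_unlock_irq() pairs around the decrement in make_request() and retry_aligned_read() can simply be removed (see the last two hunks below).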
parent 4eb788df
+32 −30
@@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
  * We maintain a biased count of active stripes in the bottom 16 bits of
  * bi_phys_segments, and a count of processed stripes in the upper 16 bits
  */
-static inline int raid5_bi_phys_segments(struct bio *bio)
+static inline int raid5_bi_processed_stripes(struct bio *bio)
 {
-	return bio->bi_phys_segments & 0xffff;
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	return (atomic_read(segments) >> 16) & 0xffff;
 }

-static inline int raid5_bi_hw_segments(struct bio *bio)
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
 {
-	return (bio->bi_phys_segments >> 16) & 0xffff;
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	return atomic_sub_return(1, segments) & 0xffff;
 }

-static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
 {
-	--bio->bi_phys_segments;
-	return raid5_bi_phys_segments(bio);
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	atomic_inc(segments);
 }

-static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+	unsigned int cnt)
 {
-	unsigned short val = raid5_bi_hw_segments(bio);
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	int old, new;

-	--val;
-	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
-	return val;
+	do {
+		old = atomic_read(segments);
+		new = (old & 0xffff) | (cnt << 16);
+	} while (atomic_cmpxchg(segments, old, new) != old);
 }

-static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
 {
-	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	atomic_set(segments, cnt);
 }

 /* Find first data disk in a raid6 stripe */
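
To make the packed layout used by these helpers concrete, a raw counter value of (3 << 16) | 2 decodes as 3 processed stripes and an active-stripe count of 2; the decode below is a standalone illustration with an arbitrary example value, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int segments = (3u << 16) | 2u;		/* example value only */

	unsigned int active    = segments & 0xffff;		/* the half raid5_dec_bi_active_stripes() counts down */
	unsigned int processed = (segments >> 16) & 0xffff;	/* the half raid5_bi_processed_stripes() reports */

	printf("active=%u processed=%u\n", active, processed);	/* prints: active=2 processed=3 */
	return 0;
}

This is also why raid5_set_bi_processed_stripes() needs the cmpxchg loop (it must preserve the live active count in the low half while rewriting the high half), whereas raid5_set_bi_stripes() can use a plain atomic_set() to initialise the whole word.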
@@ -781,7 +787,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			while (rbi && rbi->bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
-				if (!raid5_dec_bi_phys_segments(rbi)) {
+				if (!raid5_dec_bi_active_stripes(rbi)) {
 					rbi->bi_next = return_bi;
 					return_bi = rbi;
 				}
@@ -2367,7 +2373,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	if (*bip)
 		bi->bi_next = *bip;
 	*bip = bi;
-	bi->bi_phys_segments++;
+	raid5_inc_bi_active_stripes(bi);

 	if (forwrite) {
 		/* check if page is covered */
@@ -2464,7 +2470,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
-			if (!raid5_dec_bi_phys_segments(bi)) {
+			if (!raid5_dec_bi_active_stripes(bi)) {
 				md_write_end(conf->mddev);
 				bi->bi_next = *return_bi;
 				*return_bi = bi;
@@ -2479,7 +2485,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
-			if (!raid5_dec_bi_phys_segments(bi)) {
+			if (!raid5_dec_bi_active_stripes(bi)) {
 				md_write_end(conf->mddev);
 				bi->bi_next = *return_bi;
 				*return_bi = bi;
@@ -2503,7 +2509,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
-				if (!raid5_dec_bi_phys_segments(bi)) {
+				if (!raid5_dec_bi_active_stripes(bi)) {
 					bi->bi_next = *return_bi;
 					*return_bi = bi;
 				}
@@ -2722,7 +2728,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 				while (wbi && wbi->bi_sector <
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
-					if (!raid5_dec_bi_phys_segments(wbi)) {
+					if (!raid5_dec_bi_active_stripes(wbi)) {
 						md_write_end(conf->mddev);
 						wbi->bi_next = *return_bi;
 						*return_bi = wbi;
@@ -3798,7 +3804,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
 		 * this sets the active strip count to 1 and the processed
 		 * strip count to zero (upper 8 bits)
 		 */
-		bi->bi_phys_segments = 1; /* biased count of active stripes */
+		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
 	}

 	return bi;
@@ -4133,9 +4139,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		}
 	}

-	spin_lock_irq(&conf->device_lock);
-	remaining = raid5_dec_bi_phys_segments(bi);
-	spin_unlock_irq(&conf->device_lock);
+	remaining = raid5_dec_bi_active_stripes(bi);
 	if (remaining == 0) {

 		if ( rw == WRITE )
@@ -4491,7 +4495,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 		     sector += STRIPE_SECTORS,
 		     scnt++) {

-		if (scnt < raid5_bi_hw_segments(raid_bio))
+		if (scnt < raid5_bi_processed_stripes(raid_bio))
 			/* already done this stripe */
 			continue;

@@ -4499,14 +4503,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)

 		if (!sh) {
 			/* failed to get a stripe - must wait */
-			raid5_set_bi_hw_segments(raid_bio, scnt);
+			raid5_set_bi_processed_stripes(raid_bio, scnt);
 			conf->retry_read_aligned = raid_bio;
 			return handled;
 		}

 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
 			release_stripe(sh);
-			raid5_set_bi_hw_segments(raid_bio, scnt);
+			raid5_set_bi_processed_stripes(raid_bio, scnt);
 			conf->retry_read_aligned = raid_bio;
 			return handled;
 		}
@@ -4515,9 +4519,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 		release_stripe(sh);
 		handled++;
 	}
-	spin_lock_irq(&conf->device_lock);
-	remaining = raid5_dec_bi_phys_segments(raid_bio);
-	spin_unlock_irq(&conf->device_lock);
+	remaining = raid5_dec_bi_active_stripes(raid_bio);
 	if (remaining == 0)
 		bio_endio(raid_bio, 0);
 	if (atomic_dec_and_test(&conf->active_aligned_reads))