Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d094d686 authored by NeilBrown, committed by Shaohua Li
Browse files

md/raid10: add rcu protection to rdev access during reshape.



mirrors[].rdev can become NULL at any point unless:
   - a counted reference is held
   - ->reconfig_mutex is held, or
   - rcu_read_lock() is held

Reshape isn't always suitably careful as in the past rdev couldn't be
removed during reshape.  It can now, so add protection.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent f90145f3
Loading
Loading
Loading
Loading
+22 −8
Original line number Diff line number Diff line
@@ -4361,15 +4361,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
	blist = read_bio;
	read_bio->bi_next = NULL;

	rcu_read_lock();
	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev2;
		if (s&1) {
			rdev2 = conf->mirrors[d].replacement;
			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev2 = conf->mirrors[d].rdev;
			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
@@ -4414,6 +4415,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
		nr_sectors += len >> 9;
	}
bio_full:
	rcu_read_unlock();
	r10_bio->sectors = nr_sectors;

	/* Now submit the read */
@@ -4465,16 +4467,20 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev;
		rcu_read_lock();
		if (s&1) {
			rdev = conf->mirrors[d].replacement;
			rdev = rcu_dereference(conf->mirrors[d].replacement);
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev = conf->mirrors[d].rdev;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev || test_bit(Faulty, &rdev->flags))
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			rcu_read_unlock();
			continue;
		}
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		md_sync_acct(b->bi_bdev, r10_bio->sectors);
		atomic_inc(&r10_bio->remaining);
		b->bi_next = NULL;
@@ -4535,9 +4541,10 @@ static int handle_reshape_read_error(struct mddev *mddev,
		if (s > (PAGE_SIZE >> 9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
		while (!success) {
			int d = r10b->devs[slot].devnum;
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
			sector_t addr;
			if (rdev == NULL ||
			    test_bit(Faulty, &rdev->flags) ||
@@ -4545,11 +4552,15 @@ static int handle_reshape_read_error(struct mddev *mddev,
				goto failed;

			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			success = sync_page_io(rdev,
					       addr,
					       s << 9,
					       bvec[idx].bv_page,
					       READ, false);
			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
			if (success)
				break;
		failed:
@@ -4559,6 +4570,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
			if (slot == first_slot)
				break;
		}
		rcu_read_unlock();
		if (!success) {
			/* couldn't read this block, must give up */
			set_bit(MD_RECOVERY_INTR,
@@ -4628,16 +4640,18 @@ static void raid10_finish_reshape(struct mddev *mddev)
		}
	} else {
		int d;
		rcu_read_lock();
		for (d = conf->geo.raid_disks ;
		     d < conf->geo.raid_disks - mddev->delta_disks;
		     d++) {
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
			rdev = conf->mirrors[d].replacement;
			rdev = rcu_dereference(conf->mirrors[d].replacement);
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
		}
		rcu_read_unlock();
	}
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;