
Commit 4b8b8a4a authored by Linus Torvalds

Merge tag 'md-3.11-fixes' of git://neil.brown.name/md

Pull md bug fixes from NeilBrown:
 "Sorry boss, back at work now boss.  Here's them nice shiny patches ya
  wanted.  All nicely tagged and justified for -stable and everyfing:

  Three bug fixes for md in 3.10

  3.10 wasn't a good release for md.  The bio changes left a couple of
  bugs, and an md "fix" created another one.

  These three patches appear to fix the issues and have been tagged for
  -stable"

* tag 'md-3.11-fixes' of git://neil.brown.name/md:
  md/raid1: fix bio handling problems in process_checks()
  md: Remove recent change which allows devices to skip recovery.
  md/raid10: fix two problems with RAID10 resync.
parents 0a693ab6 30bc9b53
drivers/md/md.c +0 −14
@@ -7716,20 +7716,6 @@ static int remove_and_add_spares(struct mddev *mddev,
 			continue;
 
 		rdev->recovery_offset = 0;
-		if (rdev->saved_raid_disk >= 0 && mddev->in_sync) {
-			spin_lock_irq(&mddev->write_lock);
-			if (mddev->in_sync)
-				/* OK, this device, which is in_sync,
-				 * will definitely be noticed before
-				 * the next write, so recovery isn't
-				 * needed.
-				 */
-				rdev->recovery_offset = mddev->recovery_cp;
-			spin_unlock_irq(&mddev->write_lock);
-		}
-		if (mddev->ro && rdev->recovery_offset != MaxSector)
-			/* not safe to add this disk now */
-			continue;
 		if (mddev->pers->
 		    hot_add_disk(mddev, rdev) == 0) {
 			if (sysfs_link_rdev(mddev, rdev))
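
For reference, the block removed above has a check / lock / re-check shape: mddev->in_sync is tested cheaply outside the spinlock, then tested again under write_lock before recovery_offset is set. A minimal userspace sketch of that idiom, assuming pthreads and purely illustrative names (this is not md driver code):

/* Sketch of the check / lock / re-check pattern used by the removed block.
 * The cheap test runs without the lock and is repeated under the lock
 * before acting, since the state may have changed in between. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_sync = 1;          /* stands in for mddev->in_sync */
static long recovery_offset;     /* stands in for rdev->recovery_offset */

static void maybe_skip_recovery(long recovery_cp)
{
	recovery_offset = 0;
	if (in_sync) {                          /* unlocked fast check */
		pthread_mutex_lock(&state_lock);
		if (in_sync)                    /* re-check under the lock */
			recovery_offset = recovery_cp;
		pthread_mutex_unlock(&state_lock);
	}
}

int main(void)
{
	maybe_skip_recovery(1234);
	printf("recovery_offset = %ld\n", recovery_offset);
	return 0;
}

The patch drops the shortcut entirely rather than keeping the idiom, so a rejoining device always goes through recovery.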
drivers/md/raid1.c +30 −23
@@ -1849,6 +1849,36 @@ static int process_checks(struct r1bio *r1_bio)
 	int i;
 	int vcnt;
 
+	/* Fix variable parts of all bios */
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
+	for (i = 0; i < conf->raid_disks * 2; i++) {
+		int j;
+		int size;
+		struct bio *b = r1_bio->bios[i];
+		if (b->bi_end_io != end_sync_read)
+			continue;
+		/* fixup the bio for reuse */
+		bio_reset(b);
+		b->bi_vcnt = vcnt;
+		b->bi_size = r1_bio->sectors << 9;
+		b->bi_sector = r1_bio->sector +
+			conf->mirrors[i].rdev->data_offset;
+		b->bi_bdev = conf->mirrors[i].rdev->bdev;
+		b->bi_end_io = end_sync_read;
+		b->bi_private = r1_bio;
+
+		size = b->bi_size;
+		for (j = 0; j < vcnt ; j++) {
+			struct bio_vec *bi;
+			bi = &b->bi_io_vec[j];
+			bi->bv_offset = 0;
+			if (size > PAGE_SIZE)
+				bi->bv_len = PAGE_SIZE;
+			else
+				bi->bv_len = size;
+			size -= PAGE_SIZE;
+		}
+	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
@@ -1857,12 +1887,10 @@ static int process_checks(struct r1bio *r1_bio)
 			break;
 		}
 	r1_bio->read_disk = primary;
-	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
-		int size;
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
@@ -1888,27 +1916,6 @@ static int process_checks(struct r1bio *r1_bio)
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
 			continue;
 		}
-		/* fixup the bio for reuse */
-		bio_reset(sbio);
-		sbio->bi_vcnt = vcnt;
-		sbio->bi_size = r1_bio->sectors << 9;
-		sbio->bi_sector = r1_bio->sector +
-			conf->mirrors[i].rdev->data_offset;
-		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
-		sbio->bi_end_io = end_sync_read;
-		sbio->bi_private = r1_bio;
-
-		size = sbio->bi_size;
-		for (j = 0; j < vcnt ; j++) {
-			struct bio_vec *bi;
-			bi = &sbio->bi_io_vec[j];
-			bi->bv_offset = 0;
-			if (size > PAGE_SIZE)
-				bi->bv_len = PAGE_SIZE;
-			else
-				bi->bv_len = size;
-			size -= PAGE_SIZE;
-		}
-
 		bio_copy_data(sbio, pbio);
 	}
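
The block added at the top of process_checks() re-initialises every bio that was used for the sync read before any of them are compared, instead of fixing up only the non-primary bios afterwards. A standalone sketch of its sizing loop (plain userspace C, PAGE_SIZE assumed to be 4096 and the kernel's shift written as a division) shows how each bio_vec ends up PAGE_SIZE bytes long except for a short final entry:

/* Standalone sketch (userspace, not kernel code) of the bio_vec sizing
 * loop: every entry covers a full page except possibly the last, which
 * gets whatever bytes remain of the request. */
#include <stdio.h>

#define PAGE_SIZE 4096
#define SECTOR_SIZE 512

int main(void)
{
	int sectors = 17;                       /* e.g. r1_bio->sectors */
	int size = sectors * SECTOR_SIZE;       /* total bytes, like bi_size */
	int vcnt = (sectors + PAGE_SIZE / SECTOR_SIZE - 1) /
		   (PAGE_SIZE / SECTOR_SIZE);   /* pages needed */
	int j;

	for (j = 0; j < vcnt; j++) {
		int len = size > PAGE_SIZE ? PAGE_SIZE : size;

		printf("vec[%d]: offset 0, len %d\n", j, len);
		size -= PAGE_SIZE;
	}
	return 0;
}

With 17 sectors this prints two 4096-byte entries followed by one 512-byte entry.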
drivers/md/raid10.c +9 −2
@@ -2097,11 +2097,17 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 			 * both 'first' and 'i', so we just compare them.
 			 * All vec entries are PAGE_SIZE;
 			 */
-			for (j = 0; j < vcnt; j++)
+			int sectors = r10_bio->sectors;
+			for (j = 0; j < vcnt; j++) {
+				int len = PAGE_SIZE;
+				if (sectors < (len / 512))
+					len = sectors * 512;
 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
 					   page_address(tbio->bi_io_vec[j].bv_page),
-					   fbio->bi_io_vec[j].bv_len))
+					   len))
 					break;
+				sectors -= len/512;
+			}
 			if (j == vcnt)
 				continue;
 			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
@@ -3407,6 +3413,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 
 		if (bio->bi_end_io == end_sync_read) {
 			md_sync_acct(bio->bi_bdev, nr_sectors);
+			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			generic_make_request(bio);
 		}
 	}
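
In the first raid10.c hunk the resync comparison stops passing bv_len to memcmp and instead clips each step to the sectors still covered by the request, so only the bytes that belong to the request are compared. A standalone userspace sketch of that clipping, assuming a 4096-byte PAGE_SIZE and an illustrative helper name pages_match() (not part of the driver):

/* Standalone sketch (userspace, not kernel code): compare data page by
 * page, but clip the final step to the sectors that are actually valid,
 * so stale bytes in the tail of the last page cannot produce a false
 * mismatch. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define SECTOR_SIZE 512

/* Compare 'sectors' worth of data spread across 'vcnt' pages. */
static int pages_match(char **fpages, char **tpages, int vcnt, int sectors)
{
	int j;

	for (j = 0; j < vcnt; j++) {
		int len = PAGE_SIZE;

		if (sectors < len / SECTOR_SIZE)
			len = sectors * SECTOR_SIZE;    /* clip the last page */
		if (memcmp(fpages[j], tpages[j], len))
			return 0;                       /* real mismatch */
		sectors -= len / SECTOR_SIZE;
	}
	return 1;
}

int main(void)
{
	static char a[2][PAGE_SIZE], b[2][PAGE_SIZE];
	char *fp[2] = { a[0], a[1] }, *tp[2] = { b[0], b[1] };

	/* 9 sectors = one full page plus 512 valid bytes of the second page. */
	b[1][1024] = 0x5a;      /* garbage beyond the valid 512 bytes */
	printf("match: %d\n", pages_match(fp, tp, 2, 9));
	return 0;
}

Here the second page holds garbage past the 512 valid bytes, and the comparison still reports a match because only the clipped length is checked.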