Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7399c31b authored by NeilBrown
Browse files

md/raid10: avoid reading from known bad blocks - part 2



When redirecting a read error to a different device, we must
again avoid bad blocks and possibly split the request.

Spin_lock typo fixed thanks to Dan Carpenter <error27@gmail.com>

Signed-off-by: NeilBrown <neilb@suse.de>
parent 856e08e2
Loading
Loading
Loading
Loading
+40 −5
Original line number Diff line number Diff line
@@ -1746,14 +1746,15 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
	rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);

	bio = r10_bio->devs[slot].bio;
	bdevname(bio->bi_bdev, b);
	r10_bio->devs[slot].bio =
		mddev->ro ? IO_BLOCKED : NULL;
read_more:
	mirror = read_balance(conf, r10_bio, &max_sectors);
	if (mirror == -1 || max_sectors < r10_bio->sectors) {
	if (mirror == -1) {
		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev),
		       bdevname(bio->bi_bdev, b),
		       mdname(mddev), b,
		       (unsigned long long)r10_bio->sector);
		raid_end_bio_io(r10_bio);
		bio_put(bio);
@@ -1761,6 +1762,7 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
	}

	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
	if (bio)
		bio_put(bio);
	slot = r10_bio->read_slot;
	rdev = conf->mirrors[mirror].rdev;
@@ -1773,6 +1775,9 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
		(unsigned long long)r10_bio->sector);
	bio = bio_clone_mddev(r10_bio->master_bio,
			      GFP_NOIO, mddev);
	md_trim_bio(bio,
		    r10_bio->sector - bio->bi_sector,
		    max_sectors);
	r10_bio->devs[slot].bio = bio;
	bio->bi_sector = r10_bio->devs[slot].addr
		+ rdev->data_offset;
@@ -1780,6 +1785,36 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
	bio->bi_rw = READ | do_sync;
	bio->bi_private = r10_bio;
	bio->bi_end_io = raid10_end_read_request;
	if (max_sectors < r10_bio->sectors) {
		/* Drat - have to split this up more */
		struct bio *mbio = r10_bio->master_bio;
		int sectors_handled =
			r10_bio->sector + max_sectors
			- mbio->bi_sector;
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (mbio->bi_phys_segments == 0)
			mbio->bi_phys_segments = 2;
		else
			mbio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
		generic_make_request(bio);
		bio = NULL;

		r10_bio = mempool_alloc(conf->r10bio_pool,
					GFP_NOIO);
		r10_bio->master_bio = mbio;
		r10_bio->sectors = (mbio->bi_size >> 9)
			- sectors_handled;
		r10_bio->state = 0;
		set_bit(R10BIO_ReadError,
			&r10_bio->state);
		r10_bio->mddev = mddev;
		r10_bio->sector = mbio->bi_sector
			+ sectors_handled;

		goto read_more;
	} else
		generic_make_request(bio);
}