
Commit 62096bce authored by NeilBrown

md/raid1: factor several functions out of raid1d()



raid1d is too big with several deep branches.
So separate them out into their own functions.
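
For illustration only (this is not kernel code): a minimal C sketch of the pattern this patch applies. A daemon loop that used to contain deep inline branches becomes a flat dispatcher that classifies each request and calls one small named handler per state. The request struct, state enum, and handler names below are invented for the example.

#include <stdio.h>

/* Hypothetical stand-ins for r1bio_t and its state bits. */
enum req_state { STATE_SYNC_WRITE, STATE_WRITE, STATE_READ_ERROR };

struct request {
	enum req_state state;
	int sector;
};

/* One small handler per state, analogous to the extracted
 * handle_sync_write_finished()/handle_write_finished()/
 * handle_read_error() helpers. */
static void handle_sync_write_finished(struct request *r)
{
	printf("sync write finished at sector %d\n", r->sector);
}

static void handle_write_finished(struct request *r)
{
	printf("write finished at sector %d\n", r->sector);
}

static void handle_read_error(struct request *r)
{
	printf("retrying read at sector %d\n", r->sector);
}

/* The daemon loop stays flat: classify, then dispatch. */
static void daemon_loop(struct request *reqs, int n)
{
	for (int i = 0; i < n; i++) {
		switch (reqs[i].state) {
		case STATE_SYNC_WRITE:
			handle_sync_write_finished(&reqs[i]);
			break;
		case STATE_WRITE:
			handle_write_finished(&reqs[i]);
			break;
		case STATE_READ_ERROR:
			handle_read_error(&reqs[i]);
			break;
		}
	}
}

int main(void)
{
	struct request reqs[] = {
		{ STATE_SYNC_WRITE, 8 },
		{ STATE_READ_ERROR, 64 },
	};
	daemon_loop(reqs, 2);
	return 0;
}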

Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Namhyung Kim <namhyung@gmail.com>
parent 3a9f28a5
+151 −159
@@ -1861,105 +1861,66 @@ static int narrow_write_error(r1bio_t *r1_bio, int i)
	return ok;
}

static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks ; m++) {
		mdk_rdev_t *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s);
		}
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
{
	int m;
	for (m = 0; m < conf->raid_disks ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			mdk_rdev_t *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		close_write(r1_bio);
	raid_end_bio_io(r1_bio);
}

static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
{
	int disk;
	int max_sectors;
	mddev_t *mddev = conf->mddev;
	struct bio *bio;
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
@@ -1973,12 +1934,10 @@ static void raid1d(mddev_t *mddev)
	if (mddev->ro == 0) {
		freeze_array(conf);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);

	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
@@ -1987,26 +1946,22 @@ read_more:
	if (disk == -1) {
		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
		raid_end_bio_io(r1_bio);
	} else {
		const unsigned long do_sync
			= r1_bio->master_bio->bi_rw & REQ_SYNC;
		if (bio) {
			r1_bio->bios[r1_bio->read_disk] =
				mddev->ro ? IO_BLOCKED : NULL;
			bio_put(bio);
		}
		r1_bio->read_disk = disk;
		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
		r1_bio->bios[r1_bio->read_disk] = bio;
		rdev = conf->mirrors[disk].rdev;
		printk_ratelimited(KERN_ERR
				   "md/raid1:%s: redirecting sector %llu"
				   " to other mirror: %s\n",
				   mdname(mddev),
@@ -2020,9 +1975,8 @@ read_more:
		if (max_sectors < r1_bio->sectors) {
			/* Drat - have to split this up more */
			struct bio *mbio = r1_bio->master_bio;
			int sectors_handled = (r1_bio->sector + max_sectors
					       - mbio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (mbio->bi_phys_segments == 0)
@@ -2033,29 +1987,67 @@ read_more:
			generic_make_request(bio);
			bio = NULL;

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = mbio;
			r1_bio->sectors = (mbio->bi_size >> 9)
					  - sectors_handled;
			r1_bio->state = 0;
			set_bit(R1BIO_ReadError, &r1_bio->state);
			r1_bio->mddev = mddev;
			r1_bio->sector = mbio->bi_sector + sectors_handled;

			goto read_more;
		} else
			generic_make_request(bio);
	}
}

static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		if (atomic_read(&mddev->plug_cnt) == 0)
			flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from separate
			 * context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);