Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 560f8e55 authored by NeilBrown
Browse files

md/raid10: Split handle_read_error out from raid10d.



raid10d() is too big and is about to get bigger, so split
handle_read_error() out as a separate function.

Signed-off-by: NeilBrown <neilb@suse.de>
parent 1294b9c9
Loading
Loading
Loading
Loading
+66 −57
Original line number Diff line number Diff line
@@ -1618,43 +1618,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
	}
}

static void raid10d(mddev_t *mddev)
static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
{
	r10bio_t *r10_bio;
	int slot = r10_bio->read_slot;
	int mirror = r10_bio->devs[slot].devnum;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	mdk_rdev_t *rdev;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {
	char b[BDEVNAME_SIZE];
	unsigned long do_sync;

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else {
			int slot = r10_bio->read_slot;
			int mirror = r10_bio->devs[slot].devnum;
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
@@ -1682,8 +1655,10 @@ static void raid10d(mddev_t *mddev)
		       (unsigned long long)r10_bio->sector);
		raid_end_bio_io(r10_bio);
		bio_put(bio);
			} else {
				const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
		return;
	}

	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
	bio_put(bio);
	slot = r10_bio->read_slot;
	rdev = conf->mirrors[mirror].rdev;
@@ -1705,7 +1680,41 @@ static void raid10d(mddev_t *mddev)
	bio->bi_end_io = raid10_end_read_request;
	generic_make_request(bio);
}

static void raid10d(mddev_t *mddev)
{
	r10bio_t *r10_bio;
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else
			handle_read_error(mddev, r10_bio);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);