
Commit fcff06c4 authored by Linus Torvalds

Merge branch 'for-next' of git://neil.brown.name/md

Pull md updates from NeilBrown.

* 'for-next' of git://neil.brown.name/md:
  DM RAID: Add support for MD RAID10
  md/RAID1: Add missing case for attempting to repair known bad blocks.
  md/raid5: For odirect-write performance, do not set STRIPE_PREREAD_ACTIVE.
  md/raid1: don't abort a resync on the first badblock.
  md: remove duplicated test on ->openers when calling do_md_stop()
  raid5: Add R5_ReadNoMerge flag which prevent bio from merging at block layer
  md/raid1: prevent merging too large request
  md/raid1: read balance chooses idlest disk for SSD
  md/raid1: make sequential read detection per disk based
  MD RAID10: Export md_raid10_congested
  MD: Move macros from raid1*.h to raid1*.c
  MD RAID1: rename mirror_info structure
  MD RAID10: rename mirror_info structure
  MD RAID10: Fix compiler warning.
  raid5: add a per-stripe lock
  raid5: remove unnecessary bitmap write optimization
  raid5: lockless access raid5 overrided bi_phys_segments
  raid5: reduce chance release_stripe() taking device_lock
parents 068535f1 63f33b8d
Documentation/device-mapper/dm-raid.txt: +26 −0
@@ -27,6 +27,10 @@ The target is named "raid" and it accepts the following parameters:
 		- rotating parity N (right-to-left) with data restart
   raid6_nc	RAID6 N continue
 		- rotating parity N (right-to-left) with data continuation
+  raid10	Various RAID10 inspired algorithms chosen by additional params
+		- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
+		- RAID1E: Integrated Adjacent Stripe Mirroring
+		- and other similar RAID10 variants
 
   Reference: Chapter 4 of
   http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf
@@ -59,6 +63,28 @@ The target is named "raid" and it accepts the following parameters:
 		logical size of the array.  The bitmap records the device
 		synchronisation state for each region.
 
+	[raid10_copies   <# copies>]
+	[raid10_format   near]
+		These two options are used to alter the default layout of
+		a RAID10 configuration.  The number of copies can be
+		specified, but the default is 2.  There are other variations
+		to how the copies are laid down - the default and only current
+		option is "near".  Near copies are what most people think of
+		with respect to mirroring.  If these options are left
+		unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
+		are given, then the layouts for 2, 3 and 4 devices are:
+		2 drives         3 drives          4 drives
+		--------         ----------        --------------
+		A1  A1           A1  A1  A2        A1  A1  A2  A2
+		A2  A2           A2  A3  A3        A3  A3  A4  A4
+		A3  A3           A4  A4  A5        A5  A5  A6  A6
+		A4  A4           A5  A6  A6        A7  A7  A8  A8
+		..  ..           ..  ..  ..        ..  ..  ..  ..
+		The 2-device layout is equivalent to 2-way RAID1.  The
+		4-device layout is what a traditional RAID10 would look
+		like.  The 3-device layout is what might be called a
+		'RAID1E - Integrated Adjacent Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
 	Each device consists of two entries.  The first is the device
 	containing the metadata (if any); the second is the one containing the
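
For illustration, a table line using the new options might look like the following sketch (device names and sector count are invented; '-' means no metadata device, and the leading '5' is the <#raid_params> count covering the chunk size plus the two option pairs):

	dmsetup create my_raid10 --table \
	  '0 4194304 raid raid10 5 128 raid10_copies 2 raid10_format near \
	   4 - /dev/sda1 - /dev/sdb1 - /dev/sdc1 - /dev/sdd1'
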
drivers/md/dm-raid.c: +90 −5
@@ -11,6 +11,7 @@
#include "md.h"
#include "md.h"
#include "raid1.h"
#include "raid1.h"
#include "raid5.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"
#include "bitmap.h"


#include <linux/device-mapper.h>
#include <linux/device-mapper.h>
@@ -52,7 +53,10 @@ struct raid_dev {
 #define DMPF_MAX_RECOVERY_RATE 0x20
 #define DMPF_MAX_WRITE_BEHIND  0x40
 #define DMPF_STRIPE_CACHE      0x80
-#define DMPF_REGION_SIZE       0X100
+#define DMPF_REGION_SIZE       0x100
+#define DMPF_RAID10_COPIES     0x200
+#define DMPF_RAID10_FORMAT     0x400
 
 struct raid_set {
 	struct dm_target *ti;
@@ -76,6 +80,7 @@ static struct raid_type {
	const unsigned algorithm;	/* RAID algorithm. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
	{"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",        0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -86,6 +91,17 @@ static struct raid_type {
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
};


static unsigned raid10_md_layout_to_copies(int layout)
{
	return layout & 0xFF;
}

static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	/* 1 "far" copy, and 'copies' "near" copies */
	return (1 << 8) | (copies & 0xFF);
}

static struct raid_type *get_raid_type(char *name)
static struct raid_type *get_raid_type(char *name)
{
{
	int i;
	int i;
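
These two helpers are the whole RAID10 layout encoding used by this target: the low byte of the md layout word holds the "near" copy count, and the next byte holds the "far" copy count, pinned to 1 here (no far copies). A round-trip check as a user-space sketch, not kernel code:

	#include <assert.h>

	static unsigned layout_to_copies(int layout)
	{
		return layout & 0xFF;			/* bits 0-7: "near" copies */
	}

	static int format_to_layout(unsigned copies)
	{
		return (1 << 8) | (copies & 0xFF);	/* bits 8-15: one "far" copy */
	}

	int main(void)
	{
		assert(format_to_layout(2) == 0x102);
		assert(layout_to_copies(format_to_layout(3)) == 3);
		return 0;
	}
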
@@ -339,10 +355,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
  *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
  *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
  *    [region_size <sectors>]           Defines granularity of bitmap
+ *
+ * RAID10-only options:
+ *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
+ *    [raid10_format <near>]            Layout algorithm.  (Default: near)
  */
 static int parse_raid_params(struct raid_set *rs, char **argv,
 			     unsigned num_raid_params)
 {
+	char *raid10_format = "near";
+	unsigned raid10_copies = 2;
 	unsigned i, rebuild_cnt = 0;
 	unsigned long value, region_size = 0;
 	sector_t sectors_per_dev = rs->ti->len;
@@ -416,11 +438,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 		}
 
 		key = argv[i++];
 
+		/* Parameters that take a string value are checked here. */
+		if (!strcasecmp(key, "raid10_format")) {
+			if (rs->raid_type->level != 10) {
+				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
+				return -EINVAL;
+			}
+			if (strcmp("near", argv[i])) {
+				rs->ti->error = "Invalid 'raid10_format' value given";
+				return -EINVAL;
+			}
+			raid10_format = argv[i];
+			rs->print_flags |= DMPF_RAID10_FORMAT;
+			continue;
+		}
+
 		if (strict_strtoul(argv[i], 10, &value) < 0) {
 			rs->ti->error = "Bad numerical argument given in raid params";
 			return -EINVAL;
 		}
 
+		/* Parameters that take a numeric value are checked here */
 		if (!strcasecmp(key, "rebuild")) {
 			rebuild_cnt++;
 
@@ -439,6 +478,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 					return -EINVAL;
 				}
 				break;
+			case 10:
 			default:
 				DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
 				rs->ti->error = "Rebuild not supported for this RAID type";
@@ -495,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 			 */
 			value /= 2;
 
-			if (rs->raid_type->level < 5) {
+			if ((rs->raid_type->level != 5) &&
+			    (rs->raid_type->level != 6)) {
 				rs->ti->error = "Inappropriate argument: stripe_cache";
 				return -EINVAL;
 			}
@@ -520,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 		} else if (!strcasecmp(key, "region_size")) {
 			rs->print_flags |= DMPF_REGION_SIZE;
 			region_size = value;
+		} else if (!strcasecmp(key, "raid10_copies") &&
+			   (rs->raid_type->level == 10)) {
+			if ((value < 2) || (value > 0xFF)) {
+				rs->ti->error = "Bad value for 'raid10_copies'";
+				return -EINVAL;
+			}
+			rs->print_flags |= DMPF_RAID10_COPIES;
+			raid10_copies = value;
 		} else {
 			DMERR("Unable to parse RAID parameter: %s", key);
 			rs->ti->error = "Unable to parse RAID parameters";
@@ -538,8 +587,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	if (dm_set_target_max_io_len(rs->ti, max_io_len))
 		return -EINVAL;
 
-	if ((rs->raid_type->level > 1) &&
-	    sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) {
+	if (rs->raid_type->level == 10) {
+		if (raid10_copies > rs->md.raid_disks) {
+			rs->ti->error = "Not enough devices to satisfy specification";
+			return -EINVAL;
+		}
+
+		/* (Len * #mirrors) / #devices */
+		sectors_per_dev = rs->ti->len * raid10_copies;
+		sector_div(sectors_per_dev, rs->md.raid_disks);
+
+		rs->md.layout = raid10_format_to_md_layout(raid10_format,
+							   raid10_copies);
+		rs->md.new_layout = rs->md.layout;
+	} else if ((rs->raid_type->level > 1) &&
+		   sector_div(sectors_per_dev,
+			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
 		rs->ti->error = "Target length not divisible by number of data devices";
 		return -EINVAL;
 	}
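
The RAID10 branch above sizes each member as (target length * copies) / devices: for example, with ti->len = 4194304 sectors, raid10_copies = 2 and four raid disks, sector_div() leaves sectors_per_dev = (4194304 * 2) / 4 = 2097152. sector_div() is used instead of a plain '/' because sectors_per_dev is a sector_t, which may be 64-bit on 32-bit hosts where open-coded 64-bit division is not available.
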
@@ -566,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
 	if (rs->raid_type->level == 1)
 		return md_raid1_congested(&rs->md, bits);
 
+	if (rs->raid_type->level == 10)
+		return md_raid10_congested(&rs->md, bits);
+
 	return md_raid5_congested(&rs->md, bits);
 }
 
@@ -884,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	case 6:
 		redundancy = rs->raid_type->parity_devs;
 		break;
+	case 10:
+		redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
+		break;
 	default:
 		ti->error = "Unknown RAID type";
 		return -EINVAL;
@@ -1049,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad;
 	}
 
+	if (ti->len != rs->md.array_sectors) {
+		ti->error = "Array size does not match requested target length";
+		ret = -EINVAL;
+		goto size_mismatch;
+	}
 	rs->callbacks.congested_fn = raid_is_congested;
 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
 	mddev_suspend(&rs->md);
 	return 0;
 
+size_mismatch:
+	md_stop(&rs->md);
 bad:
 	context_free(rs);
 
@@ -1203,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 			DMEMIT(" region_size %lu",
 			       rs->md.bitmap_info.chunksize >> 9);
 
+		if (rs->print_flags & DMPF_RAID10_COPIES)
+			DMEMIT(" raid10_copies %u",
+			       raid10_md_layout_to_copies(rs->md.layout));
+
+		if (rs->print_flags & DMPF_RAID10_FORMAT)
+			DMEMIT(" raid10_format near");
+
 		DMEMIT(" %d", rs->md.raid_disks);
 		for (i = 0; i < rs->md.raid_disks; i++) {
 			if (rs->dev[i].meta_dev)
@@ -1277,7 +1360,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
@@ -1304,6 +1387,8 @@ module_init(dm_raid_init);
 module_exit(dm_raid_exit);
 
 MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_ALIAS("dm-raid1");
+MODULE_ALIAS("dm-raid10");
 MODULE_ALIAS("dm-raid4");
 MODULE_ALIAS("dm-raid5");
 MODULE_ALIAS("dm-raid6");
drivers/md/md.c: +2 −6
@@ -3942,17 +3942,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 		break;
 	case clear:
 		/* stopping an active array */
-		if (atomic_read(&mddev->openers) > 0)
-			return -EBUSY;
 		err = do_md_stop(mddev, 0, NULL);
 		break;
 	case inactive:
 		/* stopping an active array */
-		if (mddev->pers) {
-			if (atomic_read(&mddev->openers) > 0)
-				return -EBUSY;
+		if (mddev->pers)
 			err = do_md_stop(mddev, 2, NULL);
-		} else
+		else
 			err = 0; /* already inactive */
 		break;
 	case suspended:
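
Both deleted ->openers tests were redundant rather than protective: per the "remove duplicated test on ->openers" entry in the merge shortlog above, do_md_stop() performs the same busy check itself and returns -EBUSY, so callers reaching it through these paths lose nothing.
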
drivers/md/raid1.c: +121 −43
@@ -46,6 +46,20 @@
  */
 #define	NR_RAID1_BIOS 256
 
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context.  So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
 /* When there are this many requests queue to be written by
  * the raid1 thread, we become 'congested' to provide back-pressure
  * for writeback.
@@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	const sector_t this_sector = r1_bio->sector;
 	int sectors;
 	int best_good_sectors;
-	int start_disk;
-	int best_disk;
-	int i;
+	int best_disk, best_dist_disk, best_pending_disk;
+	int has_nonrot_disk;
+	int disk;
 	sector_t best_dist;
+	unsigned int min_pending;
 	struct md_rdev *rdev;
 	int choose_first;
+	int choose_next_idle;
 
 	rcu_read_lock();
 	/*
	/*
@@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 retry:
 retry:
	sectors = r1_bio->sectors;
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;


	if (conf->mddev->recovery_cp < MaxSector &&
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
	    (this_sector + sectors >= conf->next_resync))
		choose_first = 1;
		choose_first = 1;
		start_disk = 0;
	else
	} else {
		choose_first = 0;
		choose_first = 0;
		start_disk = conf->last_used;
	}


	for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t dist;
		sector_t first_bad;
		sector_t first_bad;
		int bad_sectors;
		int bad_sectors;

		unsigned int pending;
		int disk = start_disk + i;
		bool nonrot;
		if (disk >= conf->raid_disks * 2)
			disk -= conf->raid_disks * 2;


		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		if (r1_bio->bios[disk] == IO_BLOCKED
@@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		} else
 			best_good_sectors = sectors;
 
+		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+		has_nonrot_disk |= nonrot;
+		pending = atomic_read(&rdev->nr_pending);
 		dist = abs(this_sector - conf->mirrors[disk].head_position);
-		if (choose_first
+		if (choose_first) {
+			best_disk = disk;
+			break;
+		}
 		/* Don't change to another disk for sequential reads */
-		    || conf->next_seq_sect == this_sector
-		    || dist == 0
+		if (conf->mirrors[disk].next_seq_sect == this_sector
+		    || dist == 0) {
+			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
+			struct raid1_info *mirror = &conf->mirrors[disk];
+
+			best_disk = disk;
+			/*
+			 * If buffered sequential IO size exceeds optimal
+			 * iosize, check if there is idle disk. If yes, choose
+			 * the idle disk. read_balance could already choose an
+			 * idle disk before noticing it's a sequential IO in
+			 * this disk. This doesn't matter because this disk
+			 * will idle, next time it will be utilized after the
+			 * first disk has IO size exceeds optimal iosize. In
+			 * this way, iosize of the first disk will be optimal
+			 * iosize at least. iosize of the second disk might be
+			 * small, but not a big deal since when the second disk
+			 * starts IO, the first disk is likely still busy.
+			 */
+			if (nonrot && opt_iosize > 0 &&
+			    mirror->seq_start != MaxSector &&
+			    mirror->next_seq_sect > opt_iosize &&
+			    mirror->next_seq_sect - opt_iosize >=
+			    mirror->seq_start) {
+				choose_next_idle = 1;
+				continue;
+			}
+			break;
+		}
 		/* If device is idle, use it */
-		    || atomic_read(&rdev->nr_pending) == 0) {
+		if (pending == 0) {
 			best_disk = disk;
 			break;
 		}
+
+		if (choose_next_idle)
+			continue;
+
+		if (min_pending > pending) {
+			min_pending = pending;
+			best_pending_disk = disk;
+		}
+
 		if (dist < best_dist) {
 			best_dist = dist;
-			best_disk = disk;
+			best_dist_disk = disk;
 		}
 	}
 
+	/*
+	 * If all disks are rotational, choose the closest disk. If any disk is
+	 * non-rotational, choose the disk with fewer pending requests even if
+	 * the disk is rotational, which might/might not be optimal for raids
+	 * with mixed rotational/non-rotational disks depending on workload.
+	 */
+	if (best_disk == -1) {
+		if (has_nonrot_disk)
+			best_disk = best_pending_disk;
+		else
+			best_disk = best_dist_disk;
+	}
+
 	if (best_disk >= 0) {
 		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
 		if (!rdev)
@@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 			goto retry;
 		}
 		sectors = best_good_sectors;
-		conf->next_seq_sect = this_sector + sectors;
-		conf->last_used = best_disk;
+
+		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
+			conf->mirrors[best_disk].seq_start = this_sector;
+
+		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
 	}
 	rcu_read_unlock();
 	*max_sectors = sectors;
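
Taken together, the read_balance() changes amount to: an idle disk wins immediately; sequential reads stay on the same disk unless an SSD mirror has already streamed more than its optimal I/O size; otherwise fall back to the least-loaded disk when any SSD is present, or the shortest seek distance when all disks are rotational. A stand-alone sketch of that fallback policy (simplified: sequential-read handling omitted, types and values invented for illustration):

	#include <limits.h>
	#include <stdio.h>

	struct mirror {			/* stand-in for raid1_info + rdev state */
		long dist;		/* distance from last head position */
		unsigned pending;	/* in-flight requests on this member */
		int nonrot;		/* 1 if the queue is flagged non-rotational */
	};

	static int pick_disk(const struct mirror *m, int n)
	{
		int best_dist_disk = -1, best_pending_disk = -1, has_nonrot = 0, i;
		long best_dist = LONG_MAX;
		unsigned min_pending = UINT_MAX;

		for (i = 0; i < n; i++) {
			has_nonrot |= m[i].nonrot;
			if (m[i].pending == 0)
				return i;	/* an idle disk is always taken */
			if (m[i].pending < min_pending) {
				min_pending = m[i].pending;
				best_pending_disk = i;
			}
			if (m[i].dist < best_dist) {
				best_dist = m[i].dist;
				best_dist_disk = i;
			}
		}
		/* all busy: prefer load when SSDs are involved, distance otherwise */
		return has_nonrot ? best_pending_disk : best_dist_disk;
	}

	int main(void)
	{
		struct mirror ssds[2] = { { 10, 3, 1 }, { 500, 1, 1 } };
		struct mirror hdds[2] = { { 10, 3, 0 }, { 500, 1, 0 } };

		printf("ssd pick: %d\n", pick_disk(ssds, 2));	/* 1: fewer pending */
		printf("hdd pick: %d\n", pick_disk(hdds, 2));	/* 0: shorter seek */
		return 0;
	}
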
@@ -873,7 +947,7 @@ static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r1conf *conf = mddev->private;
-	struct mirror_info *mirror;
+	struct raid1_info *mirror;
 	struct r1bio *r1_bio;
 	struct bio *read_bio;
 	int i, disks;
@@ -1364,7 +1438,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	struct r1conf *conf = mddev->private;
 	int err = -EEXIST;
 	int mirror = 0;
-	struct mirror_info *p;
+	struct raid1_info *p;
 	int first = 0;
 	int last = conf->raid_disks - 1;
 	struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -1433,7 +1507,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 	struct r1conf *conf = mddev->private;
 	int err = 0;
 	int number = rdev->raid_disk;
-	struct mirror_info *p = conf->mirrors+ number;
+	struct raid1_info *p = conf->mirrors + number;
 
 	if (rdev != p->rdev)
 		p = conf->mirrors + conf->raid_disks + number;
@@ -2371,6 +2445,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 				bio->bi_rw = READ;
 				bio->bi_end_io = end_sync_read;
 				read_targets++;
+			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
+				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+				/*
+				 * The device is suitable for reading (InSync),
+				 * but has bad block(s) here. Let's try to correct them,
+				 * if we are doing resync or repair. Otherwise, leave
+				 * this device alone for this sync request.
+				 */
+				bio->bi_rw = WRITE;
+				bio->bi_end_io = end_sync_write;
+				write_targets++;
 			}
 		}
 		if (bio->bi_end_io) {
@@ -2428,7 +2514,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 		/* There is nowhere to write, so all non-sync
 		 * drives must be failed - so we are finished
 		 */
-		sector_t rv = max_sector - sector_nr;
+		sector_t rv;
+		if (min_bad > 0)
+			max_sector = sector_nr + min_bad;
+		rv = max_sector - sector_nr;
 		*skipped = 1;
 		put_buf(r1_bio);
 		return rv;
@@ -2521,7 +2610,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 {
 	struct r1conf *conf;
 	int i;
-	struct mirror_info *disk;
+	struct raid1_info *disk;
 	struct md_rdev *rdev;
 	int err = -ENOMEM;
 
@@ -2529,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf)
 		goto abort;
 
-	conf->mirrors = kzalloc(sizeof(struct mirror_info)
+	conf->mirrors = kzalloc(sizeof(struct raid1_info)
 				* mddev->raid_disks * 2,
 				 GFP_KERNEL);
 	if (!conf->mirrors)
@@ -2572,6 +2661,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 			mddev->merge_check_needed = 1;
 
 		disk->head_position = 0;
+		disk->seq_start = MaxSector;
 	}
 	conf->raid_disks = mddev->raid_disks;
 	conf->mddev = mddev;
@@ -2585,7 +2675,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	conf->recovery_disabled = mddev->recovery_disabled - 1;
 
 	err = -EIO;
-	conf->last_used = -1;
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 
 		disk = conf->mirrors + i;
@@ -2611,19 +2700,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 			if (disk->rdev &&
 			    (disk->rdev->saved_raid_disk < 0))
 				conf->fullsync = 1;
-		} else if (conf->last_used < 0)
-			/*
-			 * The first working device is used as a
-			 * starting point to read balancing.
-			 */
-			conf->last_used = i;
+		}
 	}
 
-	if (conf->last_used < 0) {
-		printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
-		       mdname(mddev));
-		goto abort;
-	}
 	err = -ENOMEM;
 	conf->thread = md_register_thread(raid1d, mddev, "raid1");
 	if (!conf->thread) {
@@ -2798,7 +2877,7 @@ static int raid1_reshape(struct mddev *mddev)
 	 */
 	mempool_t *newpool, *oldpool;
 	struct pool_info *newpoolinfo;
-	struct mirror_info *newmirrors;
+	struct raid1_info *newmirrors;
 	struct r1conf *conf = mddev->private;
 	int cnt, raid_disks;
 	unsigned long flags;
@@ -2841,7 +2920,7 @@ static int raid1_reshape(struct mddev *mddev)
 		kfree(newpoolinfo);
 		return -ENOMEM;
 	}
-	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
 			     GFP_KERNEL);
 	if (!newmirrors) {
 		kfree(newpoolinfo);
@@ -2880,7 +2959,6 @@ static int raid1_reshape(struct mddev *mddev)
 	conf->raid_disks = mddev->raid_disks = raid_disks;
 	mddev->delta_disks = 0;
 
-	conf->last_used = 0; /* just make sure it is in-range */
 	lower_barrier(conf);
 
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
drivers/md/raid1.h: +8 −22
 #ifndef _RAID1_H
 #define _RAID1_H
 
-struct mirror_info {
+struct raid1_info {
 	struct md_rdev	*rdev;
 	sector_t	head_position;
+
+	/* When choosing the best device for a read (read_balance())
+	 * we try to keep sequential reads on the same device
+	 */
+	sector_t	next_seq_sect;
+	sector_t	seq_start;
 };
 
 /*
@@ -24,17 +30,11 @@ struct pool_info {
 
 struct r1conf {
 	struct mddev		*mddev;
-	struct mirror_info	*mirrors;	/* twice 'raid_disks' to
+	struct raid1_info	*mirrors;	/* twice 'raid_disks' to
 						 * allow for replacements.
 						 */
 	int			raid_disks;
 
-	/* When choose the best device for a read (read_balance())
-	 * we try to keep sequential reads one the same device
-	 * using 'last_used' and 'next_seq_sect'
-	 */
-	int			last_used;
-	sector_t		next_seq_sect;
 	/* During resync, read_balancing is only allowed on the part
 	 * of the array that has been resynced.  'next_resync' tells us
 	 * where that is.
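
With 'last_used' and 'next_seq_sect' gone from r1conf, sequential-read detection is now per mirror: a read is sequential exactly when it starts where the previous read on that member ended, and 'seq_start' marks where the run began so its length can be compared against the device's optimal I/O size. In miniature (stand-in types, not kernel code):

	typedef unsigned long long sector_t;	/* stand-in for the kernel type */

	struct raid1_info_sketch {
		sector_t next_seq_sect;	/* where the previous read ended */
		sector_t seq_start;	/* where the current sequential run began */
	};

	/* a read continues the run iff it starts where the last one ended;
	 * read_balance() resets seq_start to the new offset otherwise, then
	 * advances next_seq_sect past the sectors just issued
	 */
	static int is_sequential(struct raid1_info_sketch *m, sector_t sector)
	{
		return m->next_seq_sect == sector;
	}
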
@@ -135,20 +135,6 @@ struct r1bio {
 	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
 };
 
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error.  To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio *)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context.  So we record
- * the success by setting bios[n] to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
 /* bits for r1bio.state */
 #define	R1BIO_Uptodate	0
 #define	R1BIO_IsSync	1