
Commit c039c332 authored by Jonathan E Brassow, committed by Alasdair G Kergon

dm raid: move sectors_per_dev calculation



In preparation for RAID10 support in dm-raid, move the sectors_per_dev
calculation later in the device-creation process.  With RAID10 we won't
know up front how many of the devices are stripes and how many are
mirrors, and that ratio changes the calculation.
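
For readers unfamiliar with the calculation being moved: the kernel's
sector_div() divides its 64-bit dividend in place and returns the
remainder, so a non-zero return means the target length does not split
evenly across the data devices.  Below is a minimal userspace sketch of
the same arithmetic; the helper names and the test harness are
hypothetical, only the divisibility logic mirrors the patch.

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t sector_t;

	/*
	 * Stand-in for the kernel's sector_div(): divides the dividend in
	 * place and returns the remainder.  (The real kernel version is a
	 * macro operating on the variable directly, not through a pointer.)
	 */
	static uint32_t sector_div(sector_t *dividend, uint32_t divisor)
	{
		uint32_t rem = (uint32_t)(*dividend % divisor);

		*dividend /= divisor;
		return rem;
	}

	/*
	 * The check this patch relocates: for striped RAID levels (> 1),
	 * the target length must divide evenly across the data devices.
	 */
	static int compute_sectors_per_dev(sector_t ti_len, unsigned raid_devs,
					   unsigned parity_devs, int level,
					   sector_t *result)
	{
		sector_t sectors_per_dev = ti_len;

		if (level > 1 &&
		    sector_div(&sectors_per_dev, raid_devs - parity_devs)) {
			fprintf(stderr, "Target length not divisible by number of data devices\n");
			return -1;
		}
		*result = sectors_per_dev;
		return 0;
	}

	int main(void)
	{
		sector_t spd;

		/* e.g. RAID5 over 4 devices (1 parity): 3000 sectors / 3 data devices */
		if (compute_sectors_per_dev(3000, 4, 1, 5, &spd) == 0)
			printf("sectors_per_dev = %llu\n", (unsigned long long)spd);
		return 0;
	}

After this patch the check runs in parse_raid_params() rather than
context_alloc(), i.e. after the table arguments have been parsed, which
is the point at which a future RAID10 implementation knows its
stripe/mirror split.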

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent f999e8fe
drivers/md/dm-raid.c  +9 −9
@@ -101,20 +101,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 {
 	unsigned i;
 	struct raid_set *rs;
-	sector_t sectors_per_dev;
 
 	if (raid_devs <= raid_type->parity_devs) {
 		ti->error = "Insufficient number of devices";
 		return ERR_PTR(-EINVAL);
 	}
 
-	sectors_per_dev = ti->len;
-	if ((raid_type->level > 1) &&
-	    sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
-		ti->error = "Target length not divisible by number of data devices";
-		return ERR_PTR(-EINVAL);
-	}
-
 	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
 	if (!rs) {
 		ti->error = "Cannot allocate raid context";
@@ -128,7 +120,6 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	rs->md.raid_disks = raid_devs;
 	rs->md.level = raid_type->level;
 	rs->md.new_level = rs->md.level;
-	rs->md.dev_sectors = sectors_per_dev;
 	rs->md.layout = raid_type->algorithm;
 	rs->md.new_layout = rs->md.layout;
 	rs->md.delta_disks = 0;
@@ -143,6 +134,7 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	 *  rs->md.external
 	 *  rs->md.chunk_sectors
 	 *  rs->md.new_chunk_sectors
+	 *  rs->md.dev_sectors
 	 */
 
 	return rs;
@@ -353,6 +345,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
 	unsigned i, rebuild_cnt = 0;
 	unsigned long value, region_size = 0;
+	sector_t sectors_per_dev = rs->ti->len;
 	sector_t max_io_len;
 	char *key;
 
@@ -545,6 +538,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	if (dm_set_target_max_io_len(rs->ti, max_io_len))
 		return -EINVAL;
 
+	if ((rs->raid_type->level > 1) &&
+	    sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) {
+		rs->ti->error = "Target length not divisible by number of data devices";
+		return -EINVAL;
+	}
+	rs->md.dev_sectors = sectors_per_dev;
+
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
 	rs->md.external = 1;