
Commit fb3f2f2f authored by Christoph Hellwig, committed by Greg Kroah-Hartman

md: update the optimal I/O size on reshape



[ Upstream commit 16ef510139315a2147ee7525796f8dbd4e4b7864 ]

The raid5 and raid10 drivers currently update the read-ahead size,
but not the optimal I/O size on reshape.  To prepare for deriving the
read-ahead size from the optimal I/O size, make sure it is updated
as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: f0ddb83da3cb ("md/raid10: fix memleak of md thread")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent b6460f68
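
Background for the change above: the optimal I/O size exported to the block layer is the full-stripe width in bytes, i.e. the chunk size (mddev->chunk_sectors is in 512-byte sectors, hence the << 9) multiplied by the number of data-bearing disks. The following standalone sketch shows that arithmetic only; the helper names and example geometries are made up for illustration and are not kernel code, but the formulas mirror raid5_set_io_opt() and raid10_set_io_opt() from the diff below.

/* Illustrative sketch, not kernel code: io_opt = chunk bytes * data disks. */
#include <stdio.h>

/* RAID4/5/6: data disks = raid_disks minus parity disks (max_degraded). */
static unsigned long raid5_io_opt_bytes(unsigned int chunk_sectors,
					unsigned int raid_disks,
					unsigned int max_degraded)
{
	return (unsigned long)(chunk_sectors << 9) * (raid_disks - max_degraded);
}

/*
 * RAID10: with a near layout a full stripe covers raid_disks / near_copies
 * distinct chunks; when raid_disks is not evenly divisible by near_copies
 * every disk is counted, matching raid10_set_io_opt() in the diff.
 */
static unsigned long raid10_io_opt_bytes(unsigned int chunk_sectors,
					 unsigned int raid_disks,
					 unsigned int near_copies)
{
	unsigned int data_disks = raid_disks;

	if (!(raid_disks % near_copies))
		data_disks /= near_copies;
	return (unsigned long)(chunk_sectors << 9) * data_disks;
}

int main(void)
{
	/* 512 KiB chunks (1024 sectors), 6-disk RAID6 (2 parity disks): 2 MiB. */
	printf("raid6 io_opt:  %lu\n", raid5_io_opt_bytes(1024, 6, 2));
	/* 512 KiB chunks, 4-disk RAID10 with 2 near copies: 1 MiB. */
	printf("raid10 io_opt: %lu\n", raid10_io_opt_bytes(1024, 4, 2));
	return 0;
}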
drivers/md/raid10.c  +14 −8
@@ -3735,10 +3735,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return ERR_PTR(err);
 }
 
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+	int raid_disks = conf->geo.raid_disks;
+
+	if (!(conf->geo.raid_disks % conf->geo.near_copies))
+		raid_disks /= conf->geo.near_copies;
+	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+			 raid_disks);
+}
+
 static int raid10_run(struct mddev *mddev)
 {
 	struct r10conf *conf;
-	int i, disk_idx, chunk_size;
+	int i, disk_idx;
 	struct raid10_info *disk;
 	struct md_rdev *rdev;
 	sector_t size;
@@ -3774,18 +3784,13 @@ static int raid10_run(struct mddev *mddev)
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
-	chunk_size = mddev->chunk_sectors << 9;
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
 		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-		blk_queue_io_min(mddev->queue, chunk_size);
-		if (conf->geo.raid_disks % conf->geo.near_copies)
-			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-		else
-			blk_queue_io_opt(mddev->queue, chunk_size *
-					 (conf->geo.raid_disks / conf->geo.near_copies));
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		raid10_set_io_opt(conf);
 	}
 
 	rdev_for_each(rdev, mddev) {
@@ -4748,6 +4753,7 @@ static void end_reshape(struct r10conf *conf)
 		stripe /= conf->geo.near_copies;
 		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
 			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+		raid10_set_io_opt(conf);
 	}
 	conf->fullsync = 0;
 }
drivers/md/raid5.c  +8 −2
@@ -7159,6 +7159,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
 	return 0;
 }
 
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+	blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+			 (conf->raid_disks - conf->max_degraded));
+}
+
 static int raid5_run(struct mddev *mddev)
 {
 	struct r5conf *conf;
@@ -7448,8 +7454,7 @@ static int raid5_run(struct mddev *mddev)
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
-		blk_queue_io_opt(mddev->queue, chunk_size *
-				 (conf->raid_disks - conf->max_degraded));
+		raid5_set_io_opt(conf);
 		mddev->queue->limits.raid_partial_stripes_expensive = 1;
 		/*
 		 * We can only discard a whole stripe. It doesn't make sense to
@@ -8043,6 +8048,7 @@ static void end_reshape(struct r5conf *conf)
 						   / PAGE_SIZE);
 			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
 				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+			raid5_set_io_opt(conf);
 		}
 	}
 }
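
The end_reshape() hunks above also show why the commit message talks about deriving the read-ahead size from the optimal I/O size: the existing read-ahead target (2 * stripe pages) and the new io_opt value describe the same full-stripe quantity, just in pages versus bytes. A rough standalone sketch in raid5 terms, assuming 4 KiB pages; the helper names are made up for illustration:

/* Illustrative only: the ra_pages target equals 2 * io_opt / PAGE_SIZE. */
#include <assert.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

static unsigned long io_opt_bytes(unsigned int chunk_sectors,
				  unsigned int data_disks)
{
	return (unsigned long)(chunk_sectors << 9) * data_disks;
}

static unsigned long ra_pages_target(unsigned int chunk_sectors,
				     unsigned int data_disks)
{
	/* Mirrors end_reshape(): one stripe in pages, read ahead two stripes. */
	unsigned long stripe = data_disks * ((chunk_sectors << 9) / PAGE_SIZE);

	return 2 * stripe;
}

int main(void)
{
	/* e.g. 512 KiB chunks and 4 data disks: both sides come to 1024 pages. */
	assert(ra_pages_target(1024, 4) == 2 * io_opt_bytes(1024, 4) / PAGE_SIZE);
	return 0;
}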