
Commit cc4d1efd authored by Jonathan Brassow, committed by NeilBrown

MD RAID10: Export md_raid10_congested



md/raid10: Export is_congested test.

In similar fashion to commits
	11d8a6e3
	1ed7242e
we export the RAID10 congestion checking function so that dm-raid.c can
make use of it when it drives the RAID10 personality.  The 'queue' and
'gendisk' structures will not be available to the MD code when
device-mapper sets up the device, so access to those fields is made
conditional as well.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
parent 473e87ce
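
For context, this export lets a dm-raid style consumer answer the block
layer's congestion query by calling straight into the RAID10 personality.
The sketch below is illustrative rather than the actual dm-raid.c change:
struct raid_set is cut down to just the fields needed here, and the level
test is a stand-in for however the target records its RAID level.
md_raid1_congested is the earlier export referenced in the commit message
(1ed7242e); md_raid10_congested is the function this commit adds.

#include <linux/kernel.h>
#include <linux/device-mapper.h>

#include "md.h"
#include "raid1.h"
#include "raid10.h"

/* Cut-down stand-in for dm-raid's raid_set; illustrative only. */
struct raid_set {
	struct dm_target_callbacks callbacks;
	struct mddev md;
	int level;
};

/*
 * Congestion callback handed to device-mapper.  The mddev embedded in
 * the raid_set has no request queue or gendisk of its own, so the query
 * is routed to the personality's per-device test instead.
 */
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	if (rs->level == 1)
		return md_raid1_congested(&rs->md, bits);
	if (rs->level == 10)
		return md_raid10_congested(&rs->md, bits);	/* new export */
	return 0;
}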
drivers/md/raid10.c  +34 −22
@@ -853,9 +853,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 	return rdev;
 }
 
-static int raid10_congested(void *data, int bits)
+int md_raid10_congested(struct mddev *mddev, int bits)
 {
-	struct mddev *mddev = data;
 	struct r10conf *conf = mddev->private;
 	int i, ret = 0;
 
@@ -863,8 +862,6 @@ static int raid10_congested(void *data, int bits)
 	    conf->pending_count >= max_queued_requests)
 		return 1;
 
-	if (mddev_congested(mddev, bits))
-		return 1;
 	rcu_read_lock();
 	for (i = 0;
 	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
@@ -880,6 +877,15 @@ static int raid10_congested(void *data, int bits)
 	rcu_read_unlock();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(md_raid10_congested);
+
+static int raid10_congested(void *data, int bits)
+{
+	struct mddev *mddev = data;
+
+	return mddev_congested(mddev, bits) ||
+		md_raid10_congested(mddev, bits);
+}
 
 static void flush_pending_writes(struct r10conf *conf)
 {
@@ -3486,12 +3492,14 @@ static int run(struct mddev *mddev)
 	conf->thread = NULL;
 
 	chunk_size = mddev->chunk_sectors << 9;
+	if (mddev->queue) {
 		blk_queue_io_min(mddev->queue, chunk_size);
 		if (conf->geo.raid_disks % conf->geo.near_copies)
 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
 		else
 			blk_queue_io_opt(mddev->queue, chunk_size *
 					 (conf->geo.raid_disks / conf->geo.near_copies));
+	}
 
 	rdev_for_each(rdev, mddev) {
 		long long diff;
@@ -3525,6 +3533,7 @@ static int run(struct mddev *mddev)
 		if (first || diff < min_offset_diff)
 			min_offset_diff = diff;
 
+		if (mddev->gendisk)
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 
@@ -3589,6 +3598,9 @@ static int run(struct mddev *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
 
+	if (mddev->queue) {
+		int stripe = conf->geo.raid_disks *
+			((mddev->chunk_sectors << 9) / PAGE_SIZE);
 		mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 		mddev->queue->backing_dev_info.congested_data = mddev;
 
@@ -3596,15 +3608,12 @@ static int run(struct mddev *mddev)
 		 * We need to readahead at least twice a whole stripe....
 		 * maybe...
 		 */
-	{
-		int stripe = conf->geo.raid_disks *
-			((mddev->chunk_sectors << 9) / PAGE_SIZE);
 		stripe /= conf->geo.near_copies;
 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 	}
 
-	blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
-
 	if (md_integrity_register(mddev))
 		goto out_free_conf;
@@ -3655,7 +3664,10 @@ static int stop(struct mddev *mddev)
 	lower_barrier(conf);
 
 	md_unregister_thread(&mddev->thread);
-	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+	if (mddev->queue)
+		/* the unplug fn references 'conf'*/
+		blk_sync_queue(mddev->queue);
 
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
 	kfree(conf->mirrors);
drivers/md/raid10.h  +3 −0
@@ -145,4 +145,7 @@ enum r10bio_state {
  */
 	R10BIO_Previous,
 };
+
+extern int md_raid10_congested(struct mddev *mddev, int bits);
+
 #endif
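
On the device-mapper side, a callback like raid_is_congested above would be
registered with the target's table at construction time.  struct
dm_target_callbacks and dm_table_add_target_callbacks() are the stock dm
hooks of this era; raid_ctr below and its allocation details are assumed
purely for the sake of the sketch and build on the raid_set stand-in shown
earlier.

#include <linux/slab.h>
#include <linux/device-mapper.h>

/* Hypothetical skeleton of a dm-raid style constructor. */
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct raid_set *rs;

	rs = kzalloc(sizeof(*rs), GFP_KERNEL);
	if (!rs)
		return -ENOMEM;
	ti->private = rs;

	/* ... argument parsing and starting the MD personality elided ... */

	/* Let the block layer query congestion through the personality. */
	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	return 0;
}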