Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64590f45 authored by NeilBrown
Browse files

md: make merge_bvec_fn more robust in face of personality changes.



There is no locking around calls to merge_bvec_fn(), so
it is possible that calls which coincide with a level (or personality)
change could go wrong.

So create a central dispatch point for these functions and use
rcu_read_lock().
If the array is suspended, reject any merge that can be rejected.
If not, we know it is safe to call the function.

Signed-off-by: NeilBrown <neilb@suse.de>
parent 5c675f83
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -60,11 +60,10 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 *
 *	Return amount of bytes we can take at this offset
 */
static int linear_mergeable_bvec(struct request_queue *q,
static int linear_mergeable_bvec(struct mddev *mddev,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct dev_info *dev0;
	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
@@ -213,8 +212,6 @@ static int linear_run (struct mddev *mddev)
	mddev->private = conf;
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);

	ret =  md_integrity_register(mddev);
	if (ret) {
		kfree(conf);
@@ -361,6 +358,7 @@ static struct md_personality linear_personality =
	.hot_add_disk	= linear_add,
	.size		= linear_size,
	.congested	= linear_congested,
	.mergeable_bvec	= linear_mergeable_bvec,
};

static int __init linear_init (void)
+24 −0
Original line number Diff line number Diff line
@@ -339,6 +339,29 @@ static int md_congested(void *data, int bits)
	return mddev_congested(mddev, bits);
}

/*
 * Central dispatch point for the block layer's merge_bvec_fn.
 *
 * There is no locking around merge_bvec_fn() calls, so a call could race
 * with a level (personality) change.  Take rcu_read_lock() so the
 * personality read below is safe against such a change; while the array
 * is suspended, reject any merge that can be rejected rather than calling
 * into the (possibly changing) personality.
 *
 * Returns the number of bytes that can be accepted at this offset
 * (merge_bvec_fn contract: biovec->bv_len to accept the vec, 0 to reject).
 */
static int md_mergeable_bvec(struct request_queue *q,
			     struct bvec_merge_data *bvm,
			     struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	int ret;
	rcu_read_lock();
	if (mddev->suspended) {
		/* Must always allow one vec */
		if (bvm->bi_size == 0)
			ret = biovec->bv_len;
		else
			ret = 0;
	} else {
		/* Not suspended, so it is safe to call into the
		 * personality; fall back to accepting the vec when the
		 * personality provides no mergeable_bvec method. */
		struct md_personality *pers = mddev->pers;
		if (pers && pers->mergeable_bvec)
			ret = pers->mergeable_bvec(mddev, bvm, biovec);
		else
			ret = biovec->bv_len;
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Generic flush handling for md
 */
@@ -4925,6 +4948,7 @@ int md_run(struct mddev *mddev)
	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = md_congested;
		blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
	}
	if (mddev->pers->sync_request) {
		if (mddev->kobj.sd &&
+4 −0
Original line number Diff line number Diff line
@@ -499,6 +499,10 @@ struct md_personality
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* mergeable_bvec is use to implement ->merge_bvec_fn */
	int (*mergeable_bvec)(struct mddev *mddev,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec);
};

struct md_sysfs_entry {
+3 −4
Original line number Diff line number Diff line
@@ -350,17 +350,16 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@mddev: the md device
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
static int raid0_mergeable_bvec(struct mddev *mddev,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
@@ -465,7 +464,6 @@ static int raid0_run(struct mddev *mddev)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);

	ret = md_integrity_register(mddev);
@@ -724,6 +722,7 @@ static struct md_personality raid0_personality=
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
	.mergeable_bvec	= raid0_mergeable_bvec,
};

static int __init raid0_init (void)
+2 −4
Original line number Diff line number Diff line
@@ -701,11 +701,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
	return best_disk;
}

static int raid1_mergeable_bvec(struct request_queue *q,
static int raid1_mergeable_bvec(struct mddev *mddev,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;
@@ -2946,8 +2945,6 @@ static int run(struct mddev *mddev)
	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);

		if (discard_supported)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
@@ -3183,6 +3180,7 @@ static struct md_personality raid1_personality =
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
	.congested	= raid1_congested,
	.mergeable_bvec	= raid1_mergeable_bvec,
};

static int __init raid_init(void)
Loading