Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c8dc9c65 authored by Joe Lawrence, committed by NeilBrown
Browse files

md: raid1,10: Handle REQ_WRITE_SAME flag in write bios



Set mddev queue's max_write_same_sectors to its chunk_sector value (before
disk_stack_limits merges the underlying disk limits.)  With that in place,
be sure to handle writes coming down from the block layer that have the
REQ_WRITE_SAME flag set.  That flag needs to be copied into any newly cloned
write bio.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: "Martin K. Petersen" <martin.petersen@oracle.com>
Signed-off-by: NeilBrown <neilb@suse.de>
parent bbfa57c0
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -1000,6 +1000,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1302,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
				   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
		mbio->bi_rw =
			WRITE | do_flush_fua | do_sync | do_discard | do_same;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2820,9 @@ static int run(struct mddev *mddev)
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue)
		blk_queue_max_write_same_sectors(mddev->queue,
						 mddev->chunk_sectors);
	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
+7 −2
Original line number Diff line number Diff line
@@ -1105,6 +1105,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
@@ -1460,7 +1461,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
							      rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io	= raid10_end_write_request;
			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
			mbio->bi_rw =
				WRITE | do_sync | do_fua | do_discard | do_same;
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1504,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
						   r10_bio, rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io	= raid10_end_write_request;
			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
			mbio->bi_rw =
				WRITE | do_sync | do_fua | do_discard | do_same;
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);
@@ -3569,6 +3572,8 @@ static int run(struct mddev *mddev)
	if (mddev->queue) {
		blk_queue_max_discard_sectors(mddev->queue,
					      mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue,
						 mddev->chunk_sectors);
		blk_queue_io_min(mddev->queue, chunk_size);
		if (conf->geo.raid_disks % conf->geo.near_copies)
			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);