Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a12c768d authored by David Jeffery's avatar David Jeffery Committed by Greg Kroah-Hartman
Browse files

md: improve handling of bio with REQ_PREFLUSH in md_flush_request()



commit 775d78319f1ceb32be8eb3b1202ccdc60e9cb7f1 upstream.

If pers->make_request fails in md_flush_request(), the bio is lost. To
fix this, pass back a bool to indicate whether the original make_request
call should continue to handle the I/O, instead of assuming the flush
logic will push it to completion.

Convert md_flush_request to return a bool and no longer call the raid
driver's make_request function.  If the return is true, then the md flush
logic has completed or will complete the bio, and the md make_request call
is done.  If false, then the md make_request function needs to keep
processing it like a normal bio. Let the original call to
md_handle_request handle any need to retry sending the bio to the raid
driver's make_request function should it be needed.

Also mark md_flush_request and the make_request function pointer as
__must_check to issue warnings should these critical return values be
ignored.

Fixes: 2bc13b83e629 ("md: batch flush requests.")
Cc: stable@vger.kernel.org # v4.19+
Cc: NeilBrown <neilb@suse.com>
Signed-off-by: default avatarDavid Jeffery <djeffery@redhat.com>
Reviewed-by: default avatarXiao Ni <xni@redhat.com>
Signed-off-by: default avatarSong Liu <songliubraving@fb.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d88d9321
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -252,10 +252,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
	sector_t start_sector, end_sector, data_offset;
	sector_t bio_sector = bio->bi_iter.bi_sector;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;
	}

	tmp_dev = which_dev(mddev, bio_sector);
	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+2 −3
Original line number Diff line number Diff line
@@ -112,10 +112,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;
	}

	mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);

+9 −2
Original line number Diff line number Diff line
@@ -487,7 +487,13 @@ static void md_submit_flush_data(struct work_struct *ws)
	}
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
 * being finished in another context.  Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
@@ -512,9 +518,10 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			mddev->pers->make_request(mddev, bio);
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);

+2 −2
Original line number Diff line number Diff line
@@ -532,7 +532,7 @@ struct md_personality
	int level;
	struct list_head list;
	struct module *owner;
	bool (*make_request)(struct mddev *mddev, struct bio *bio);
	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up works that do NOT require md_thread. tasks that
	 * requires md_thread should go into start()
@@ -684,7 +684,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
+2 −3
Original line number Diff line number Diff line
@@ -580,10 +580,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;
	}

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
Loading