
Commit 7b6620d7 authored by Jens Axboe

block: remove REQ_NOWAIT_INLINE



We had a few issues with this code, and there's still a problem around
how we deal with error handling for chained/split bios. For now, just
revert the code and we'll try again with a thorough solution. This
reverts commits:

e15c2ffa ("block: fix O_DIRECT error handling for bio fragments")
0eb6ddfb ("block: Fix __blkdev_direct_IO() for bio fragments")
6a43074e ("block: properly handle IOCB_NOWAIT for async O_DIRECT IO")
893a1c97 ("blk-mq: allow REQ_NOWAIT to return an error inline")

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 99c79f66
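
The user-visible contract at stake here is unchanged by the revert: O_DIRECT I/O submitted with IOCB_NOWAIT (RWF_NOWAIT from userspace) must fail with EAGAIN when it cannot proceed without blocking, rather than waiting. A minimal userspace sketch of that contract, assuming an O_DIRECT-capable block device (the device path below is hypothetical):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		struct iovec iov;
		void *buf;
		ssize_t ret;
		int fd;

		fd = open("/dev/sdX", O_RDONLY | O_DIRECT);	/* hypothetical device */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* O_DIRECT needs logical-block-size alignment; 4096 covers common cases */
		if (posix_memalign(&buf, 4096, 4096))
			return 1;
		iov.iov_base = buf;
		iov.iov_len = 4096;

		/* RWF_NOWAIT: fail with EAGAIN instead of blocking on resources */
		ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
		if (ret < 0 && errno == EAGAIN)
			fprintf(stderr, "would block, retry later\n");
		else if (ret < 0)
			perror("preadv2");
		else
			printf("read %zd bytes\n", ret);

		free(buf);
		close(fd);
		return 0;
	}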
block/blk-mq.c +2 −6
@@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
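
With the inline cookie gone, a would-block failure from request allocation is reported only through normal bio completion. For reference, bio_wouldblock_error() lives in include/linux/bio.h (not part of this diff) and is roughly:

	static inline void bio_wouldblock_error(struct bio *bio)
	{
		bio->bi_status = BLK_STS_AGAIN;
		bio_endio(bio);
	}

so the submitter sees the EAGAIN via its ->bi_end_io() callback, the same path as any other I/O error.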
fs/block_dev.c +5 −44
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct bio *bio;
 	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
-	bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
-	gfp_t gfp;
-	int ret;
+	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	if (nowait)
-		gfp = GFP_NOWAIT;
-	else
-		gfp = GFP_KERNEL;
-
-	bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
-	if (!bio)
-		return -EAGAIN;
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	if (!is_poll)
 		blk_start_plug(&plug);
 
-	ret = 0;
 	for (;;) {
 		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
 
-		/*
-		 * Tell underlying layer to not block for resource shortage.
-		 * And if we would have blocked, return error inline instead
-		 * of through the bio->bi_end_io() callback.
-		 */
-		if (nowait)
-			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;
 
 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				polled = true;
 			}
 
-			dio->size += bio->bi_iter.bi_size;
 			qc = submit_bio(bio);
-			if (qc == BLK_QC_T_EAGAIN) {
-				dio->size -= bio->bi_iter.bi_size;
-				ret = -EAGAIN;
-				goto error;
-			}
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}
 
-		dio->size += bio->bi_iter.bi_size;
-		qc = submit_bio(bio);
-		if (qc == BLK_QC_T_EAGAIN) {
-			dio->size -= bio->bi_iter.bi_size;
-			ret = -EAGAIN;
-			goto error;
-		}
-
-		bio = bio_alloc(gfp, nr_pages);
-		if (!bio) {
-			ret = -EAGAIN;
-			goto error;
-		}
+		submit_bio(bio);
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
 	if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	__set_current_state(TASK_RUNNING);
 
-out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
@@ -495,10 +460,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
 	bio_put(&dio->bio);
 	return ret;
-error:
-	if (!is_poll)
-		blk_finish_plug(&plug);
-	goto out;
 }
 
 static ssize_t
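
After the revert, error handling for a multi-bio direct I/O is again centralized in the completion handler: the first fragment to fail records its status on the parent dio, the last completion finishes the request, and the sleeping submitter converts the status once via blk_status_to_errno(). An abridged, lightly paraphrased sketch of blkdev_bio_end_io() from the same file (not part of this diff; the async-completion and page-dirtying details are elided):

	static void blkdev_bio_end_io(struct bio *bio)
	{
		struct blkdev_dio *dio = bio->bi_private;

		/* first fragment error wins; later completions cannot clobber it */
		if (bio->bi_status && !dio->bio.bi_status)
			dio->bio.bi_status = bio->bi_status;

		/* only the last completing fragment finishes the dio */
		if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
			if (dio->is_sync) {
				struct task_struct *waiter = dio->waiter;

				/* wake the task waiting in __blkdev_direct_IO() */
				WRITE_ONCE(dio->waiter, NULL);
				blk_wake_io_task(waiter);
			}
			/* async case: ki_complete() with dio->size or the errno (elided) */
		}
	}

This structure is what made the inline return awkward: for a chained dio, earlier fragments may already be in flight when a later submission fails inline, and the two error paths then have to agree on dio->size and dio->ref accounting, which is the problem area the commit message alludes to.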
include/linux/blk_types.h +1 −4
@@ -311,7 +311,6 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,           /* Don't wait if request will block */
-	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -346,7 +345,6 @@ enum req_flag_bits {
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
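
For context on the cookie values: a valid cookie packs the hardware queue number above BLK_QC_T_SHIFT and the tag below it, with BLK_QC_T_INTERNAL marking tags allocated from the scheduler's internal tag set, so all-ones (BLK_QC_T_NONE, -1U) never collides with a real cookie. The decode helpers that follow in this header look roughly like:

	static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
	{
		/* low BLK_QC_T_SHIFT bits carry the tag */
		return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
	}

	static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
	{
		/* top bit: tag came from the internal (scheduler) tag set */
		return (cookie & BLK_QC_T_INTERNAL) != 0;
	}

The removed BLK_QC_T_EAGAIN (-2U) worked by reserving a second special cookie value; dropping it returns blk_qc_t_valid() to a single comparison.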