Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a492f075 authored by Joe Lawrence, committed by Jens Axboe
Browse files

block,scsi: fixup blk_get_request dead queue scenarios



The blk_get_request function may fail in low-memory conditions or during
device removal (even if __GFP_WAIT is set). To distinguish between these
errors, modify the blk_get_request call stack to return the appropriate
ERR_PTR. Verify that all callers check the return status using
IS_ERR instead of a simple NULL pointer check.

For consistency, make a similar change to the blk_mq_alloc_request leg
of blk_get_request.  It may fail if the queue is dead, or the caller was
unwilling to wait.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: Jiri Kosina <jkosina@suse.cz> [for pktdvd]
Acked-by: Boaz Harrosh <bharrosh@panasas.com> [for osd]
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent eb571eea
Loading
Loading
Loading
Loading
+17 −17
Original line number Diff line number Diff line
@@ -933,9 +933,9 @@ static struct io_context *rq_ioc(struct bio *bio)
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be callled with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +949,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return NULL;
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +974,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return NULL;
					return ERR_PTR(-ENOMEM);
				}
			}
		}
@@ -992,7 +992,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return NULL;
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
@@ -1097,7 +1097,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return NULL;
	return ERR_PTR(-ENOMEM);
}

/**
@@ -1110,9 +1110,9 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be callled with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1125,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (rq)
	if (!IS_ERR(rq))
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return NULL;
		return rq;
	}

	/* wait on @rl and retry */
@@ -1167,7 +1167,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
	if (IS_ERR(rq))
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

@@ -1219,8 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(rq))
		return rq;

	blk_rq_set_block_pc(rq);

@@ -1615,8 +1615,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, rw_flags, bio, GFP_NOIO);
	if (unlikely(!req)) {
		bio_endio(bio, -ENODEV);	/* @q is dead */
	if (IS_ERR(req)) {
		bio_endio(bio, PTR_ERR(req));	/* @q is dead */
		goto out_unlock;
	}

+6 −2
Original line number Diff line number Diff line
@@ -218,9 +218,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	if (blk_mq_queue_enter(q))
		return NULL;
	ret = blk_mq_queue_enter(q);
	if (ret)
		return ERR_PTR(ret);

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -240,6 +242,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
		ctx = alloc_data.ctx;
	}
	blk_mq_put_ctx(ctx);
	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
+4 −4
Original line number Diff line number Diff line
@@ -270,8 +270,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(rq))
		return rq;
	blk_rq_set_block_pc(rq);

	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
@@ -285,8 +285,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}
		rq->next_rq = next_rq;
+6 −6
Original line number Diff line number Diff line
@@ -318,8 +318,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		at_head = 1;

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
@@ -448,8 +448,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
	}

	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
	if (!rq) {
		err = -ENODEV;
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto error_free_buffer;
	}

@@ -539,8 +539,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
	int err;

	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq)
		return -ENODEV;
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	rq->cmd[0] = cmd;
+2 −2
Original line number Diff line number Diff line
@@ -722,8 +722,8 @@ static int pd_special_command(struct pd_unit *disk,
	int err = 0;

	rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
	if (!rq)
		return -ENODEV;
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->special = func;
Loading