
Unverified Commit 93092367 authored by Michael Bestas

Merge tag 'LA.UM.9.12.r1-17600.04-SMxx50.QSSI14.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.19 into android13-4.19-kona

"LA.UM.9.12.r1-17600.04-SMxx50.QSSI14.0"

* tag 'LA.UM.9.12.r1-17600.04-SMxx50.QSSI14.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.19:
  BACKPORT: blk-mq: fix is_flush_rq
  BACKPORT: blk-mq: clear stale request in tags->rq[] before freeing one request pool
  BACKPORT: blk-mq: clearing flush request reference in tags->rqs[]
  BACKPORT: blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter

 Conflicts:
	block/blk-mq-tag.c
	block/blk-mq-tag.h
	block/blk-mq.c

Change-Id: I7d9036406b384ced9b64346fc4107b23f8893f34
parents 33ef1027 fe3f71bc
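
The common thread in these four backports is a use-after-free race around tags->rqs[]: blk_mq_tagset_busy_iter() and blk_mq_queue_tag_busy_iter() walk that array while requests can be freed concurrently (for example when a request pool is torn down or the flush request is recycled), so an iterator could invoke ->fn on memory that is already gone. The fixes make every iterator take a reference before touching a request and drop it afterwards through the new blk_mq_put_rq_ref(). A minimal userspace sketch of that put side, with illustrative types and free() standing in for __blk_mq_free_request() (this is not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_int ref;                        /* kernel: refcount_t rq->ref */
	void (*end_io)(struct request *, int); /* completion callback */
};

static void flush_end_io(struct request *rq, int error)
{
	(void)rq; (void)error; /* flush requests are recycled, not freed here */
}

/* Same split as blk_mq_put_rq_ref(): a flush request is completed via its
 * end_io callback, everything else is freed once the last reference is
 * dropped (kernel: refcount_dec_and_test()). */
static void put_rq_ref(struct request *rq)
{
	if (rq->end_io == flush_end_io)
		rq->end_io(rq, 0);
	else if (atomic_fetch_sub(&rq->ref, 1) == 1)
		free(rq);
}

int main(void)
{
	struct request *rq = malloc(sizeof(*rq));

	atomic_init(&rq->ref, 1);
	rq->end_io = NULL;
	put_rq_ref(rq); /* last reference: rq is freed here */
	puts("ok");
	return 0;
}

Keeping the flush request out of the refcount-free path matters because its memory belongs to the flush queue and is reused for every flush, which is why the later patches in the series also scrub it out of tags->rqs[] before the queue is freed.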
block/blk-flush.c +5 −0
@@ -291,6 +291,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+	return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
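
The hunk above replaces the old inline is_flush_rq(), which compared rq against hctx->fq->flush_rq and therefore needed a valid hctx; the tag-set-wide iterators that now call blk_mq_put_rq_ref() cannot always provide one. Using the completion callback itself as the identity test needs nothing but the request. A standalone sketch of the idea, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

struct request {
	void (*end_io)(struct request *, int);
};

static void flush_end_io(struct request *rq, int error) { (void)rq; (void)error; }
static void data_end_io(struct request *rq, int error)  { (void)rq; (void)error; }

/* Same shape as the new is_flush_rq(): the completion callback doubles
 * as a type tag, so no hctx or flush-queue pointer is required. */
static bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

int main(void)
{
	struct request flush = { flush_end_io }, data = { data_end_io };

	printf("%d %d\n", is_flush_rq(&flush), is_flush_rq(&data)); /* 1 0 */
	return 0;
}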
block/blk-mq-tag.c +29 −7
@@ -218,6 +218,20 @@ struct bt_iter_data {
 	bool reserved;
 };
 
+static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+		unsigned int bitnr)
+{
+	struct request *rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tags->lock, flags);
+	rq = tags->rqs[bitnr];
+	if (!rq || !refcount_inc_not_zero(&rq->ref))
+		rq = NULL;
+	spin_unlock_irqrestore(&tags->lock, flags);
+	return rq;
+}
+
 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 {
 	struct bt_iter_data *iter_data = data;
@@ -225,18 +239,23 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = hctx->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
-	rq = tags->rqs[bitnr];
 
 	/*
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	if (rq && rq->q == hctx->queue)
-		iter_data->fn(hctx, rq, iter_data->data, reserved);
-	return true;
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
+
+	if (rq->q == hctx->queue)
+		iter_data->fn(hctx, rq, iter_data->data, reserved);
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
@@ -265,6 +284,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = iter_data->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
@@ -273,11 +293,13 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	rq = tags->rqs[bitnr];
-	if (rq && blk_mq_request_started(rq))
-		iter_data->fn(rq, iter_data->data, reserved);
-
-	return true;
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
+	if (blk_mq_request_started(rq))
+		iter_data->fn(rq, iter_data->data, reserved);
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
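
blk_mq_find_and_get_req() above is the get side of the scheme: the tags->rqs[] read and the reference grab sit inside one tags->lock critical section, so they cannot interleave with the final free, and refcount_inc_not_zero() refuses a request whose count has already reached zero. A userspace sketch under the same illustrative assumptions as before (a pthread mutex for tags->lock, an atomic int for rq->ref):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct request { atomic_int ref; };

struct tags {
	pthread_mutex_t lock;   /* kernel: tags->lock */
	struct request *rqs[4]; /* kernel: tags->rqs[] */
};

/* Kernel refcount_inc_not_zero(): take a reference unless the count
 * already hit zero, i.e. unless the request is on its way to being freed. */
static bool ref_inc_not_zero(atomic_int *r)
{
	int old = atomic_load(r);

	while (old != 0)
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;
	return false;
}

/* Mirrors blk_mq_find_and_get_req(): NULL means "slot empty or request
 * already dying", and callers such as bt_iter() simply skip the tag. */
static struct request *find_and_get_req(struct tags *tags, unsigned int bitnr)
{
	struct request *rq;

	pthread_mutex_lock(&tags->lock);
	rq = tags->rqs[bitnr];
	if (rq && !ref_inc_not_zero(&rq->ref))
		rq = NULL;
	pthread_mutex_unlock(&tags->lock);
	return rq;
}

int main(void)
{
	struct request live, dying;
	struct tags tags = { PTHREAD_MUTEX_INITIALIZER, { &live, &dying } };

	atomic_init(&live.ref, 1);
	atomic_init(&dying.ref, 0);
	printf("%d %d %d\n", find_and_get_req(&tags, 0) != NULL,
	       find_and_get_req(&tags, 1) != NULL,
	       find_and_get_req(&tags, 2) != NULL); /* 1 0 0 */
	return 0;
}

The lock is what the teardown side leans on: blk_mq_clear_flush_rq_mapping() in blk-mq.c below takes the same tags->lock once after clearing the slots, which drains any lookup still in flight here.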
block/blk-mq.c +44 −5
@@ -812,6 +812,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	return false;
 }
 
+void blk_mq_put_rq_ref(struct request *rq)
+{
+	if (is_flush_rq(rq))
+		rq->end_io(rq, 0);
+	else if (refcount_dec_and_test(&rq->ref))
+		__blk_mq_free_request(rq);
+}
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
@@ -844,11 +852,9 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	 */
 	if (blk_mq_req_expired(rq, next))
 		blk_mq_rq_timed_out(rq, reserved);
+	blk_mq_put_rq_ref(rq);
+	return;
 
-	if (is_flush_rq(rq, hctx))
-		rq->end_io(rq, 0);
-	else if (refcount_dec_and_test(&rq->ref))
-		__blk_mq_free_request(rq);
 }
 
 static void blk_mq_timeout_work(struct work_struct *work)
@@ -2198,18 +2204,51 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 					    &hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing hw queue, clearing the flush request reference in
+ * tags->rqs[] for avoiding potential UAF.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+		unsigned int queue_depth, struct request *flush_rq)
+{
+	int i;
+	unsigned long flags;
+
+	/* The hw queue may not be mapped yet */
+	if (!tags)
+		return;
+
+	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+	for (i = 0; i < queue_depth; i++)
+		cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&tags->lock, flags);
+	spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct request *flush_rq = hctx->fq->flush_rq;
+
 	blk_mq_debugfs_unregister_hctx(hctx);
 
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
+	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+			set->queue_depth, flush_rq);
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+		set->ops->exit_request(set, flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
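
The empty spin_lock_irqsave()/spin_unlock_irqrestore() pair at the end of blk_mq_clear_flush_rq_mapping() is a drain barrier rather than a data lock: once the cmpxchg() loop has removed flush_rq from tags->rqs[], acquiring tags->lock once guarantees that every iterator still inside blk_mq_find_and_get_req(), which holds that lock across the slot read and reference grab, has finished before the flush request is torn down. A userspace sketch of the idiom, with a pthread mutex standing in for tags->lock (compile with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tags_lock = PTHREAD_MUTEX_INITIALIZER;
static int *_Atomic slot; /* stands in for one tags->rqs[] entry */

/* Readers touch the slot only inside the lock, as
 * blk_mq_find_and_get_req() does. */
static void *reader(void *arg)
{
	int *rq;

	(void)arg;
	pthread_mutex_lock(&tags_lock);
	rq = atomic_load(&slot);
	if (rq)
		printf("reader saw %d\n", *rq);
	pthread_mutex_unlock(&tags_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int *rq = malloc(sizeof(*rq));

	*rq = 42;
	atomic_store(&slot, rq);
	pthread_create(&t, NULL, reader, NULL);

	atomic_store(&slot, NULL);        /* clear the mapping (kernel: cmpxchg()) */
	pthread_mutex_lock(&tags_lock);   /* empty critical section: owning the */
	pthread_mutex_unlock(&tags_lock); /* lock means no reader is mid-lookup */
	free(rq);                         /* safe: no reader can still hold rq */

	pthread_join(t, NULL);
	return 0;
}

Either the reader wins the lock first and finishes with rq before the free, or it loses and then reads a NULL slot; in both cases the use-after-free window is closed.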
block/blk-mq.h +1 −0
@@ -39,6 +39,7 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
+void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map
block/blk.h +1 −5
@@ -124,11 +124,7 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-	return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);