
Commit ba77929c authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "BACKPORT: blk-mq: fix is_flush_rq"

parents f610453d 49724e13
block/blk-flush.c  +5 −0
@@ -289,6 +289,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+	return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
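
The helper added above identifies a flush request by its completion callback alone, so callers no longer need the hw-queue context that the old inline is_flush_rq(req, hctx) required (that version is removed in the block/blk.h hunk at the end of this diff). A minimal, compilable userspace sketch of the identify-by-callback idiom follows; every name in it is illustrative, not kernel API:

/*
 * Sketch: classify an object by comparing its callback against a known
 * handler, so no container (hctx) lookup is needed. Illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
	void (*end_io)(struct request *rq, int error);
};

static void flush_end_io(struct request *rq, int error) { (void)rq; (void)error; }
static void normal_end_io(struct request *rq, int error) { (void)rq; (void)error; }

static bool is_flush_rq(const struct request *rq)
{
	return rq->end_io == flush_end_io;
}

int main(void)
{
	struct request flush = { .end_io = flush_end_io };
	struct request normal = { .end_io = normal_end_io };

	printf("%d %d\n", is_flush_rq(&flush), is_flush_rq(&normal)); /* prints: 1 0 */
	return 0;
}

The comparison is exact as long as flush_end_io is installed only on flush requests, which is what the kernel change relies on.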
block/blk-mq-tag.c  +7 −2
@@ -221,10 +221,14 @@ struct bt_iter_data {
 static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 		unsigned int bitnr)
 {
-	struct request *rq = tags->rqs[bitnr];
+	struct request *rq;
+	unsigned long flags;
 
+	spin_lock_irqsave(&tags->lock, flags);
+	rq = tags->rqs[bitnr];
 	if (!rq || !refcount_inc_not_zero(&rq->ref))
-		return NULL;
+		rq = NULL;
+	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
 }
 
@@ -407,6 +411,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
+	spin_lock_init(&tags->lock);
 
 	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
 }
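
Taking the new tags->lock across both the rqs[bitnr] load and the refcount_inc_not_zero() call means an iterator can no longer pick up a request pointer that a concurrent request-pool free is in the middle of invalidating (see blk_mq_clear_rq_mapping() in the block/blk-mq.c hunk below). A compilable userspace sketch of the same lookup-and-try-get shape, under stated assumptions: a pthread mutex stands in for the spinlock, a C11 CAS loop for refcount_inc_not_zero(), and all types are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct request {
	atomic_int ref;
};

struct tags {
	pthread_mutex_t lock;	/* also taken while clearing rqs[] */
	struct request *rqs[64];
};

/* Take a reference only if the count has not already dropped to zero. */
static int ref_inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return 1;
	return 0;
}

static struct request *find_and_get_req(struct tags *tags, unsigned int bitnr)
{
	struct request *rq;

	pthread_mutex_lock(&tags->lock);
	rq = tags->rqs[bitnr];		/* slot may be cleared concurrently */
	if (rq && !ref_inc_not_zero(&rq->ref))
		rq = NULL;		/* request is already being freed */
	pthread_mutex_unlock(&tags->lock);
	return rq;
}

The kernel takes the spin_lock_irqsave() variant because the lookup may run in atomic context; the mutex here models only the mutual exclusion.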
block/blk-mq-tag.h  +6 −1
@@ -19,8 +19,13 @@ struct blk_mq_tags {
 	struct request **rqs;
 	struct request **static_rqs;
 	struct list_head page_list;
-};
 
+	/*
+	 * used to clear request reference in rqs[] before freeing one
+	 * request pool
+	 */
+	spinlock_t lock;
+};
 
 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
block/blk-mq.c  +76 −10
@@ -814,10 +814,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
-	if (is_flush_rq(rq, hctx))
+	if (is_flush_rq(rq))
 		rq->end_io(rq, 0);
 	else if (refcount_dec_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
@@ -1836,6 +1833,45 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
+static size_t order_to_size(unsigned int order)
+{
+	return (size_t)PAGE_SIZE << order;
+}
+
+/* called before freeing request pool in @tags */
+static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
+		struct blk_mq_tags *tags, unsigned int hctx_idx)
+{
+	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
+	struct page *page;
+	unsigned long flags;
+
+	list_for_each_entry(page, &tags->page_list, lru) {
+		unsigned long start = (unsigned long)page_address(page);
+		unsigned long end = start + order_to_size(page->private);
+		int i;
+
+		for (i = 0; i < set->queue_depth; i++) {
+			struct request *rq = drv_tags->rqs[i];
+			unsigned long rq_addr = (unsigned long)rq;
+
+			if (rq_addr >= start && rq_addr < end) {
+				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+				cmpxchg(&drv_tags->rqs[i], rq, NULL);
+			}
+		}
+	}
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&drv_tags->lock, flags);
+	spin_unlock_irqrestore(&drv_tags->lock, flags);
+}
+
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -1968,6 +2004,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		}
 	}
 
+	blk_mq_clear_rq_mapping(set, tags, hctx_idx);
+
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);
 		list_del_init(&page->lru);
@@ -2027,11 +2065,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	return tags;
 }
 
-static size_t order_to_size(unsigned int order)
-{
-	return (size_t)PAGE_SIZE << order;
-}
-
 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 			       unsigned int hctx_idx, int node)
 {
@@ -2162,18 +2195,51 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 					    &hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing hw queue, clearing the flush request reference in
+ * tags->rqs[] for avoiding potential UAF.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+		unsigned int queue_depth, struct request *flush_rq)
+{
+	int i;
+	unsigned long flags;
+
+	/* The hw queue may not be mapped yet */
+	if (!tags)
+		return;
+
+	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+	for (i = 0; i < queue_depth; i++)
+		cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&tags->lock, flags);
+	spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct request *flush_rq = hctx->fq->flush_rq;
+
 	blk_mq_debugfs_unregister_hctx(hctx);
 
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
+	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+			set->queue_depth, flush_rq);
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+		set->ops->exit_request(set, flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
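
Both new helpers finish with the same idiom: after NULL-ing stale rqs[] entries with cmpxchg(), they take and immediately release tags->lock. Because blk_mq_find_and_get_req() loads rqs[] and takes its reference under that same lock, the empty critical section acts as a drain: once it returns, every reader that could have observed the old pointers has left the locked region. A userspace sketch of this clear-then-drain pattern, with illustrative names and a GCC/Clang atomic builtin standing in for cmpxchg():

#include <pthread.h>
#include <stddef.h>

struct request { int dummy; };

struct tags {
	pthread_mutex_t lock;	/* held by readers of rqs[], as above */
	struct request *rqs[64];
};

static void clear_rq_mapping(struct tags *tags, struct request *victim,
			     unsigned int depth)
{
	unsigned int i;

	for (i = 0; i < depth; i++) {
		struct request *expected = victim;

		/* cmpxchg: only NULL out slots that still point at victim */
		__atomic_compare_exchange_n(&tags->rqs[i], &expected, NULL,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST);
	}

	/*
	 * Empty critical section: returns only after every reader that
	 * might have loaded the old pointer has released the lock.
	 */
	pthread_mutex_lock(&tags->lock);
	pthread_mutex_unlock(&tags->lock);
}

The drain costs one uncontended lock round-trip at teardown and adds nothing to the I/O hot path, which is presumably why it was preferred over locking every tag free.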
block/blk.h  +1 −5
@@ -124,11 +124,7 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-	return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);