Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4a23ba6f authored by Greg Kroah-Hartman
Browse files

Merge branch 'android11-5.4' into 'android11-5.4-lts'



Sync up with android11-5.4 for the following commits:

ec1b6ab9 BACKPORT: blk-mq: fix is_flush_rq
c9a3b51b BACKPORT: blk-mq: clearing flush request reference in tags->rqs[]
bb96e7f4 BACKPORT: blk-mq: clear stale request in tags->rq[] before freeing one request pool
a5d38e7c BACKPORT: blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter
734c36bc ANDROID: gki_defconfig: set DEFAULT_MMAP_MIN_ADDR=32768

Change-Id: Id6944de3e605f6a96ce03dddaa1f075cf68f24e6
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parents 40630092 ec1b6ab9
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -99,6 +99,7 @@ CONFIG_GKI_OPT_FEATURES=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_AREAS=16
+1 −0
Original line number Diff line number Diff line
@@ -77,6 +77,7 @@ CONFIG_GKI_HACKS_TO_FIX=y
CONFIG_GKI_OPT_FEATURES=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_AREAS=16
+5 −0
Original line number Diff line number Diff line
@@ -252,6 +252,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/*
 * Test whether @rq is a flush request.  Flush requests complete through
 * flush_end_io() (see above), so comparing the ->end_io handler is a
 * reliable identity check.
 */
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
+41 −11
Original line number Diff line number Diff line
@@ -212,6 +212,22 @@ struct bt_iter_data {
	bool reserved;
};

/*
 * Look up the request currently occupying tags->rqs[bitnr] and grab a
 * reference on it.
 *
 * Returns the request with its ->ref elevated, or NULL when the slot is
 * empty or the request's refcount has already dropped to zero (i.e. it
 * is on its way to being freed).  Both the lookup and the refcount bump
 * happen under the ext_blk_mq_tags spinlock, so they cannot race with
 * the clearing of stale ->rqs[] entries done before a request pool is
 * freed (see the lock's comment in struct ext_blk_mq_tags).
 */
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;
	struct ext_blk_mq_tags *etags;

	/* tags is embedded in ext_blk_mq_tags; recover the wrapper to reach the lock */
	etags = container_of(tags, struct ext_blk_mq_tags, tags);
	spin_lock_irqsave(&etags->lock, flags);
	rq = tags->rqs[bitnr];
	/* refcount_inc_not_zero() fails once the request's refcount hit zero */
	if (!rq || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&etags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
@@ -219,18 +235,23 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
@@ -273,6 +294,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
@@ -281,11 +303,13 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_request_started(rq))
		return iter_data->fn(rq, iter_data->data, reserved);

	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;
	if (blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
@@ -342,6 +366,9 @@ static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
@@ -465,18 +492,21 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;
	struct ext_blk_mq_tags *etags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
	etags = kzalloc_node(sizeof(*etags), GFP_KERNEL, node);
	if (!etags)
		return NULL;

	tags = &etags->tags;
	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&etags->lock);

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
+15 −0
Original line number Diff line number Diff line
@@ -21,6 +21,21 @@ struct blk_mq_tags {
	struct list_head page_list;
};

/*
 * Extended tag address space map. This was needed
 * to add a spinlock to blk_mq_tags in a KMI compliant
 * way (no changes could be made to struct blk_mq_tags).
 *
 * blk_mq_tags is the first member, so code holding a
 * struct blk_mq_tags * recovers this wrapper with
 * container_of(tags, struct ext_blk_mq_tags, tags).
 */
struct ext_blk_mq_tags {
	struct blk_mq_tags tags;

	 /*
	  * used to clear request reference in rqs[] before freeing one
	  * request pool; lookups that take a request reference from
	  * rqs[] hold this lock too, so the two cannot race.
	  */
	spinlock_t lock;
};


extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
Loading