Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bee66b74 authored by Sahitya Tummala's avatar Sahitya Tummala
Browse files

mmc: block: Add cache barrier support



The eMMC cache barrier provides a way to perform delayed,
in-order flushing of cached data. The host can use this
to avoid flushing delays while still maintaining the order
between the data requests in the cache.

If the device receives more barrier requests than it
supports, a barrier request is treated as a normal flush
request by the device.

If the eMMC cache flushing policy is set to 1, then the device
inherently flushes all the cached requests in FIFO order. For such
devices, as per spec, it is redundant to send any barrier requests
to the device. This may add unnecessary overhead to both host and
the device. So make sure to not send barrier requests in such cases.

Change-Id: Ia7af316800a6895942d3cabcd64600d56fab25a6
Signed-off-by: default avatarSahitya Tummala <stummala@codeaurora.org>
parent 7b2f8372
Loading
Loading
Loading
Loading
+26 −2
Original line number Diff line number Diff line
@@ -1684,7 +1684,30 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	if (!req)
		return 0;

	if (req->cmd_flags & REQ_BARRIER) {
		/*
		 * If eMMC cache flush policy is set to 1, then the device
		 * shall flush the requests in First-In-First-Out (FIFO) order.
		 * In this case, as per spec, the host must not send any cache
		 * barrier requests as they are redundant and add unnecessary
		 * overhead to both device and host.
		 */
		if (card->ext_csd.cache_flush_policy & 1)
			goto end_req;

		/*
		 * In case barrier is not supported or enabled in the device,
		 * use flush as a fallback option.
		 */
		ret = mmc_cache_barrier(card);
		if (ret)
			ret = mmc_flush_cache(card);
	 } else if (req->cmd_flags & REQ_FLUSH) {
		ret = mmc_flush_cache(card);
	 }
	if (ret == -ENODEV) {
		pr_err("%s: %s: restart mmc card",
				req->rq_disk->disk_name, __func__);
@@ -1701,6 +1724,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
		ret = -EIO;
	}

end_req:
	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
@@ -3310,7 +3334,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
	} else if (cmd_flags & (REQ_FLUSH | REQ_BARRIER)) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
+34 −0
Original line number Diff line number Diff line
@@ -3534,6 +3534,40 @@ int mmc_power_restore_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * mmc_cache_barrier() - queue a cache barrier request to the card.
 *
 * Issues an EXT_CSD FLUSH_CACHE switch with value 0x2 (barrier) so the
 * device flushes previously cached requests in order, without forcing
 * an immediate full flush.
 *
 * Returns 0 on success or when a barrier is a harmless no-op (cache
 * control disabled, cache quirk set, or not an eMMC card).
 * Returns -ENOTSUPP when barriers were not enabled on the device, so
 * the caller can fall back to a regular cache flush.
 */
int mmc_cache_barrier(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	/* No cache in use (or cache is quirked off): nothing to order. */
	if (!card->ext_csd.cache_ctrl ||
	     (card->quirks & MMC_QUIRK_CACHE_DISABLE))
		goto out;

	/* Cache barrier is an eMMC-only feature. */
	if (!mmc_card_mmc(card))
		goto out;

	/* Barrier not enabled during init; caller should flush instead. */
	if (!card->ext_csd.barrier_en)
		return -ENOTSUPP;

	/*
	 * If a device receives the maximum supported number of barrier
	 * requests, a barrier command is treated as a flush command.
	 * Hence, it is better to use the flush timeout instead of a
	 * generic CMD6 timeout (timeout_ms = 0 below).
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_FLUSH_CACHE, 0x2, 0);
	if (err)
		pr_err("%s: cache barrier error %d\n",
				mmc_hostname(host), err);
out:
	return err;
}
EXPORT_SYMBOL(mmc_cache_barrier);

/*
 * Flush the cache to the non-volatile storage.
 */
+31 −0
Original line number Diff line number Diff line
@@ -660,9 +660,19 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
				mmc_hostname(card->host),
				card->ext_csd.cmdq_depth);
		}
		card->ext_csd.barrier_support =
			ext_csd[EXT_CSD_BARRIER_SUPPORT];
		card->ext_csd.cache_flush_policy =
			ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
		pr_info("%s: cache barrier support %d flush policy %d\n",
				mmc_hostname(card->host),
				card->ext_csd.barrier_support,
				card->ext_csd.cache_flush_policy);
	} else {
		card->ext_csd.cmdq_support = 0;
		card->ext_csd.cmdq_depth = 0;
		card->ext_csd.barrier_support = 0;
		card->ext_csd.cache_flush_policy = 0;
	}

out:
@@ -1845,6 +1855,27 @@ reinit:
			} else {
				card->ext_csd.cache_ctrl = 1;
			}
			/* enable cache barrier if supported by the device */
			if (card->ext_csd.cache_ctrl &&
					card->ext_csd.barrier_support) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BARRIER_CTRL, 1,
					card->ext_csd.generic_cmd6_time);
				if (err && err != -EBADMSG) {
					pr_err("%s: %s: mmc_switch() for BARRIER_CTRL fails %d\n",
						mmc_hostname(host), __func__,
						err);
					goto free_card;
				}
				if (err) {
					pr_warn("%s: Barrier is supported but failed to turn on (%d)\n",
						mmc_hostname(card->host), err);
					card->ext_csd.barrier_en = 0;
					err = 0;
				} else {
					card->ext_csd.barrier_en = 1;
				}
			}
		} else {
			/*
			 * mmc standard doesn't say what is the card default
+3 −0
Original line number Diff line number Diff line
@@ -116,10 +116,13 @@ struct mmc_ext_csd {
	u8			raw_pwr_cl_ddr_52_195;	/* 238 */
	u8			raw_pwr_cl_ddr_52_360;	/* 239 */
	u8			raw_pwr_cl_ddr_200_360;	/* 253 */
	u8			cache_flush_policy;	/* 240 */
	u8			raw_bkops_status;	/* 246 */
	u8			raw_sectors[4];		/* 212 - 4 bytes */
	u8			cmdq_depth;		/* 307 */
	u8			cmdq_support;		/* 308 */
	u8			barrier_support;	/* 486 */
	u8			barrier_en;

	unsigned int            feature_support;
#define MMC_DISCARD_FEATURE	BIT(0)                  /* CMD38 feature */
+1 −0
Original line number Diff line number Diff line
@@ -186,6 +186,7 @@ extern void mmc_put_card(struct mmc_card *card);

extern void mmc_set_ios(struct mmc_host *host);
extern int mmc_flush_cache(struct mmc_card *);
extern int mmc_cache_barrier(struct mmc_card *);

extern int mmc_detect_card_removed(struct mmc_host *host);

Loading