
Commit f4c5522b authored by Andrei Warkentin, committed by Chris Ball

mmc: Reliable write support.



Allows reliable writes to be used for MMC writes. Reliable writes are used
to service write REQ_FUA/REQ_META requests. Handles both the legacy and
the enhanced reliable write support in MMC cards.

Signed-off-by: Andrei Warkentin <andreiw@motorola.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 766a6bf6
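
For orientation before the diff: the sketch below condenses the decision this patch adds, as a plain user-space C program. The struct fields mirror the new ext_csd members introduced in the hunks that follow, but the REQ_* flag values and struct layouts are simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Minimal sketch of the "should this be a reliable write?" decision.
 * Simplified stand-in types; flag bits are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_FUA                 (1u << 0)  /* stand-in flag bits */
#define REQ_META                (1u << 1)
#define EXT_CSD_WR_REL_PARAM_EN (1 << 2)

enum data_dir { DIR_READ, DIR_WRITE };

struct ext_csd { unsigned char rel_param, rel_sectors; };
struct card    { bool is_mmc; struct ext_csd ext_csd; };

/* Mirrors REL_WRITES_SUPPORTED(): enhanced mode via the EN bit in
 * WR_REL_PARAM, or legacy mode via a non-zero REL_WR_SEC_C count. */
static bool rel_writes_supported(const struct card *c)
{
	return c->is_mmc &&
	       ((c->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	        c->ext_csd.rel_sectors);
}

/* Mirrors the do_rel_wr predicate added to mmc_blk_issue_rw_rq(). */
static bool do_rel_wr(unsigned cmd_flags, enum data_dir dir,
		      const struct card *c)
{
	return (cmd_flags & (REQ_FUA | REQ_META)) &&
	       dir == DIR_WRITE && rel_writes_supported(c);
}

int main(void)
{
	struct card legacy = { .is_mmc = true,
			       .ext_csd = { .rel_sectors = 8 } };

	printf("FUA write on legacy card: %d\n",
	       do_rel_wr(REQ_FUA, DIR_WRITE, &legacy));  /* prints 1 */
	printf("plain read on legacy card: %d\n",
	       do_rel_wr(0, DIR_READ, &legacy));         /* prints 0 */
	return 0;
}
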
drivers/mmc/card/block.c  +77 −4
@@ -48,6 +48,10 @@ MODULE_ALIAS("mmc:block");
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
 
+#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
+    (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
+     ((card)->ext_csd.rel_sectors)))
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -331,6 +335,57 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+
+	/*
+	 * No-op, only service this because we need REQ_FUA for reliable
+	 * writes.
+	 */
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, 0);
+	spin_unlock_irq(&md->lock);
+
+	return 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				   struct mmc_card *card,
+				   struct request *req)
+{
+	int err;
+	struct mmc_command set_count;
+
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+
+	memset(&set_count, 0, sizeof(struct mmc_command));
+	set_count.opcode = MMC_SET_BLOCK_COUNT;
+	set_count.arg = brq->data.blocks | (1 << 31);
+	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &set_count, 0);
+	if (err)
+		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
+		       req->rq_disk->disk_name, err);
+	return err;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -338,6 +393,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_request brq;
 	int ret = 1, disable_multi = 0;
 
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+		(rq_data_dir(req) == WRITE) &&
+		REL_WRITES_SUPPORTED(card);
+
 	mmc_claim_host(card->host);
 
 	do {
@@ -374,12 +438,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		if (disable_multi && brq.data.blocks > 1)
 			brq.data.blocks = 1;
 
-		if (brq.data.blocks > 1) {
+		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
+			 * token, not a STOP_TRANSMISSION request. Reliable
+			 * writes use SET_BLOCK_COUNT and do not use a
+			 * STOP_TRANSMISSION request either.
 			 */
-			if (!mmc_host_is_spi(card->host)
-					|| rq_data_dir(req) == READ)
+			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
+			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +462,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
+		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
+			goto cmd_err;
+
 		mmc_set_data_timeout(&brq.data, card);
 
 		brq.data.sg = mq->sg;
@@ -565,6 +634,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			return mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			return mmc_blk_issue_discard_rq(mq, req);
+	} else if (req->cmd_flags & REQ_FLUSH) {
+		return mmc_blk_issue_flush(mq, req);
 	} else {
 		return mmc_blk_issue_rw_rq(mq, req);
 	}
@@ -622,6 +693,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = &card->dev;
 	set_disk_ro(md->disk, md->read_only);
+	if (REL_WRITES_SUPPORTED(card))
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
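
The legacy-mode clamping in mmc_apply_rel_rw() above is easiest to see with concrete numbers. The stand-alone sketch below restates those rules (IS_ALIGNED is simplified to a modulo test; the kernel macro requires a power-of-two alignment): a legacy card only guarantees reliability for a transfer of exactly rel_sectors blocks starting at a rel_sectors-aligned address, so any other shape is cut down and the request finishes over several partial completions.

/*
 * Condensed, user-space restatement of the legacy clamping rules
 * from mmc_apply_rel_rw(). Simplified for illustration.
 */
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) % (a)) == 0)  /* simplified stand-in */

static unsigned clamp_legacy(unsigned start, unsigned blocks,
			     unsigned rel_sectors)
{
	if (!IS_ALIGNED(start, rel_sectors))
		return 1;                     /* misaligned: one block */
	if (blocks > rel_sectors)
		return rel_sectors;           /* cap at rel_sectors */
	if (blocks < rel_sectors)
		return 1;                     /* partial unit: one block */
	return blocks;                        /* exact aligned unit */
}

int main(void)
{
	/* With rel_sectors = 8: */
	printf("%u\n", clamp_legacy(16, 20, 8)); /* aligned, 20 > 8 -> 8 */
	printf("%u\n", clamp_legacy(16,  5, 8)); /* aligned,  5 < 8 -> 1 */
	printf("%u\n", clamp_legacy(13,  8, 8)); /* misaligned      -> 1 */
	return 0;
}

Note also that the last hunk above is what makes any of this reachable: blk_queue_flush(..., REQ_FLUSH | REQ_FUA) advertises flush/FUA support to the block layer, so FUA-flagged writes arrive at mmc_blk_issue_rw_rq(), while empty flushes are completed as no-ops by mmc_blk_issue_flush().
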
drivers/mmc/core/mmc.c  +5 −0
@@ -300,6 +300,8 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
 		card->ext_csd.hc_erase_size =
 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
+
+		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
 	}
 
 	if (card->ext_csd.rev >= 4) {
@@ -351,6 +353,9 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 			ext_csd[EXT_CSD_TRIM_MULT];
 	}
 
+	if (card->ext_csd.rev >= 5)
+		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+
 	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
 		card->erased_byte = 0xFF;
 	else
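
The two hunks above pull the new fields out of the raw 512-byte EXT_CSD register: REL_WR_SEC_C (byte 222) alongside the other erase parameters, and WR_REL_PARAM (byte 166) only on cards reporting EXT_CSD revision 5 or later, since the field did not exist before that revision. A user-space sketch of the same extraction, using a fabricated buffer (EXT_CSD_REV at byte 192 matches the kernel's define; the revision gating here is simplified relative to the full function):

/* Sketch of the rel_sectors / rel_param extraction added above. */
#include <stdio.h>

#define EXT_CSD_REV           192
#define EXT_CSD_WR_REL_PARAM  166  /* offsets from the hunks above */
#define EXT_CSD_REL_WR_SEC_C  222

struct ext_csd_fields { unsigned char rev, rel_param, rel_sectors; };

static void parse_rel(const unsigned char ext_csd[512],
		      struct ext_csd_fields *out)
{
	out->rev = ext_csd[EXT_CSD_REV];
	out->rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
	if (out->rev >= 5)  /* WR_REL_PARAM defined from rev 5 on */
		out->rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
}

int main(void)
{
	unsigned char raw[512] = {0};
	struct ext_csd_fields f = {0};

	raw[EXT_CSD_REV] = 5;                /* a rev-5 card */
	raw[EXT_CSD_WR_REL_PARAM] = 1 << 2;  /* EN bit: enhanced mode */
	raw[EXT_CSD_REL_WR_SEC_C] = 1;

	parse_rel(raw, &f);
	printf("rev=%u rel_param=%#x rel_sectors=%u\n",
	       f.rev, (unsigned)f.rel_param, f.rel_sectors);
	return 0;
}
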
include/linux/mmc/card.h  +2 −0
@@ -45,6 +45,8 @@ struct mmc_ext_csd {
 	u8			rev;
 	u8			erase_group_def;
 	u8			sec_feature_support;
+	u8			rel_sectors;
+	u8			rel_param;
 	u8			bootconfig;
 	unsigned int		sa_timeout;		/* Units: 100ns */
 	unsigned int		hs_max_dtr;
include/linux/mmc/mmc.h  +4 −0
@@ -255,6 +255,7 @@ struct _mmc_csd {
 
 #define EXT_CSD_PARTITION_ATTRIBUTE	156	/* R/W */
 #define EXT_CSD_PARTITION_SUPPORT	160	/* RO */
+#define EXT_CSD_WR_REL_PARAM		166	/* RO */
 #define EXT_CSD_ERASE_GROUP_DEF		175	/* R/W */
 #define EXT_CSD_BOOT_CONFIG		179	/* R/W */
 #define EXT_CSD_ERASED_MEM_CONT		181	/* RO */
@@ -265,6 +266,7 @@ struct _mmc_csd {
 #define EXT_CSD_CARD_TYPE		196	/* RO */
 #define EXT_CSD_SEC_CNT			212	/* RO, 4 bytes */
 #define EXT_CSD_S_A_TIMEOUT		217	/* RO */
+#define EXT_CSD_REL_WR_SEC_C		222	/* RO */
 #define EXT_CSD_HC_WP_GRP_SIZE		221	/* RO */
 #define EXT_CSD_ERASE_TIMEOUT_MULT	223	/* RO */
 #define EXT_CSD_HC_ERASE_GRP_SIZE	224	/* RO */
@@ -277,6 +279,8 @@ struct _mmc_csd {
  * EXT_CSD field definitions
  */
 
+#define EXT_CSD_WR_REL_PARAM_EN		(1<<2)
+
 #define EXT_CSD_CMD_SET_NORMAL		(1<<0)
 #define EXT_CSD_CMD_SET_SECURE		(1<<1)
 #define EXT_CSD_CMD_SET_CPSECURE	(1<<2)