Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bd788c96, authored by Adrian Hunter, committed by Linus Torvalds
Browse files

mmc_block: add discard support



Enable MMC to service discard requests.  In the case of SD and MMC cards
that do not support trim, discards become erases.  In the case of cards
(MMC) that only allow erases in multiples of erase group size, round to
the nearest completely discarded erase group.

Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Cc: Kyungmin Park <kmpark@infradead.org>
Cc: Madhusudhan Chikkature <madhu.cr@ti.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ben Gardiner <bengardiner@nanometrics.ca>
Cc: <linux-mmc@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dfe86cba
Loading
Loading
Loading
Loading
+41 −1
Original line number Original line Diff line number Diff line
@@ -247,7 +247,40 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
	return cmd.resp[0];
	return cmd.resp[0];
}
}


static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
/*
 * Service a block-layer discard request by erasing the covered sectors
 * on the card.
 *
 * Claims the host for exclusive access, then issues a TRIM erase when
 * the card supports it, falling back to a plain erase otherwise.  If the
 * card cannot erase at all the request fails with -EOPNOTSUPP.  The
 * request is always completed here (under md->lock, as the block layer
 * requires for __blk_end_request).
 *
 * Returns 1 on success and 0 on failure, matching the convention of the
 * other mmc_blk_issue_*_rq handlers.
 */
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	mmc_claim_host(card->host);

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Request extent in 512-byte sectors. */
	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* Prefer TRIM (fine-grained) over a full erase when available. */
	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
out:
	/* Complete the whole request, successful or not. */
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	mmc_release_host(card->host);

	return err ? 0 : 1;
}

static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_card *card = md->queue.card;
@@ -475,6 +508,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
	return 0;
	return 0;
}
}


/*
 * Top-level request dispatcher: discard requests go to the discard
 * handler, everything else down the normal read/write path.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	if (!(req->cmd_flags & REQ_DISCARD))
		return mmc_blk_issue_rw_rq(mq, req);

	return mmc_blk_issue_discard_rq(mq, req);
}


static inline int mmc_blk_readonly(struct mmc_card *card)
static inline int mmc_blk_readonly(struct mmc_card *card)
{
{
+14 −2
Original line number Original line Diff line number Diff line
@@ -30,9 +30,9 @@
static int mmc_prep_request(struct request_queue *q, struct request *req)
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
{
	/*
	/*
	 * We only like normal block requests.
	 * We only like normal block requests and discards.
	 */
	 */
	if (req->cmd_type != REQ_TYPE_FS) {
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
		return BLKPREP_KILL;
	}
	}
@@ -130,6 +130,18 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
							card->erase_size << 9;
			mq->queue->limits.discard_alignment =
							card->erase_size << 9;
		}
	}


#ifdef CONFIG_MMC_BLOCK_BOUNCE
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
	if (host->max_hw_segs == 1) {