Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 91276162 authored by Matias Bjørling, committed by Jens Axboe
Browse files

lightnvm: refactor end_io functions for sync



To implement sync I/O support within the LightNVM core, the end_io
functions are refactored to take an end_io function pointer instead of
testing for initialized media manager, followed by calling its end_io
function.

Sync I/O can then be implemented using a callback that signals I/O
completion. This is similar to the logic found in blk_to_execute_io().
By implementing it this way, the underlying device's I/O submission logic
is abstracted away from the core, targets, and media managers.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent abd805ec
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb)
/*
 * Request completion for the null_blk LightNVM path.
 *
 * NOTE(review): this span is a diff rendering whose +/- markers were lost
 * (the hunk header reports "+1 -2").  The `dev` local and the
 * dev->mt->end_io() call are presumably the removed lines, and the
 * nvm_end_io() call the added replacement -- confirm against the original
 * patch before treating this as compilable source.
 */
static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);
	nvm_end_io(rqd, error);

	/* release the block-layer request now that completion is done */
	blk_put_request(rq);
}
+16 −0
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

static LIST_HEAD(nvm_targets);
@@ -288,6 +289,21 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
}
EXPORT_SYMBOL(nvm_erase_ppa);

/*
 * Complete an I/O request by invoking the completion callback that was
 * attached to it at submission time (rqd->end_io).  This lets device
 * drivers finish requests without knowing which media manager or target
 * issued them.
 */
void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->end_io(rqd, error);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * end_io callback used for synchronous I/O: wakes the submitter blocked
 * on the completion stored in rqd->wait.  The @errors argument is ignored
 * here -- NOTE(review): presumably the submitter inspects the error status
 * itself after being woken; confirm against the sync submission path
 * (not visible in this view).
 */
static void nvm_end_io_sync(struct nvm_rq *rqd, int errors)
{
	struct completion *waiting = rqd->wait;

	/* drop the reference before signalling the waiter */
	rqd->wait = NULL;

	complete(waiting);
}

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
+14 −20
Original line number Diff line number Diff line
@@ -317,18 +317,6 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
	spin_unlock(&vlun->lock);
}

/*
 * Pre-patch version of gennvm_submit_io (this hunk is being removed).
 * Translates the request's addresses to device format and hands it to the
 * low-level driver.  Returns -ENODEV when the driver provides no
 * submit_io hook, otherwise the driver's own return value.
 */
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	return dev->ops->submit_io(dev, rqd);
}

static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
@@ -375,25 +363,32 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}

/*
 * Media-manager completion hook: marks the block bad on a failed write,
 * then forwards completion to the owning target's end_io.
 *
 * NOTE(review): this span interleaves the pre- and post-patch versions of
 * gennvm_end_io with the diff +/- markers stripped (hunk header "+14 -20").
 * The `int` signature, the `ret` accounting and the `ret += ...` line
 * belong to the removed version; the `void` signature and the plain
 * ins->tt->end_io() call to the added one -- confirm against the original
 * patch before treating this as compilable source.
 */
static int gennvm_end_io(struct nvm_rq *rqd, int error)
static void gennvm_end_io(struct nvm_rq *rqd, int error)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	case NVM_RSP_SUCCESS:
		break;
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
		/* fallthrough: a failed write also counts as an error below */
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);
	ins->tt->end_io(rqd, error);
}

	return ret;
/*
 * Post-patch version of gennvm_submit_io (this hunk is being added).
 * Translates the request's addresses to device format, installs
 * gennvm_end_io as the completion callback (so nvm_end_io() routes
 * completions back here), and hands the request to the low-level driver.
 * Returns -ENODEV when the driver provides no submit_io hook, otherwise
 * the driver's own return value.
 */
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;
	return dev->ops->submit_io(dev, rqd);
}

static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
@@ -442,7 +437,6 @@ static struct nvmm_type gennvm = {
	.put_blk	= gennvm_put_blk,

	.submit_io	= gennvm_submit_io,
	.end_io		= gennvm_end_io,
	.erase_blk	= gennvm_erase_blk,

	.get_lun	= gennvm_get_lun,
+2 −4
Original line number Diff line number Diff line
@@ -642,7 +642,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
	}
}

static int rrpc_end_io(struct nvm_rq *rqd, int error)
static void rrpc_end_io(struct nvm_rq *rqd, int error)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
@@ -655,7 +655,7 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return 0;
		return;

	rrpc_unlock_rq(rrpc, rqd);

@@ -665,8 +665,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);

	return 0;
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
+1 −4
Original line number Diff line number Diff line
@@ -453,11 +453,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	if (dev->mt && dev->mt->end_io(rqd, error))
		pr_err("nvme: err status: %x result: %lx\n",
				rq->errors, (unsigned long)rq->special);
	nvm_end_io(rqd, error);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
Loading