
Commit 0a13daed authored by Linus Torvalds

Merge branch 'for-4.5/lightnvm' of git://git.kernel.dk/linux-block

Pull lightnvm fixes and updates from Jens Axboe:
 "This should have been part of the drivers branch, but it arrived a bit
  late and wasn't based on the official core block driver branch.  So
  they got a small scolding, but got a pass since it's still new.  Hence
  it's in a separate branch.

  This is mostly pure fixes, contained to lightnvm/, and minor feature
  additions"

* 'for-4.5/lightnvm' of git://git.kernel.dk/linux-block: (26 commits)
  lightnvm: ensure that nvm_dev_ops can be used without CONFIG_NVM
  lightnvm: introduce factory reset
  lightnvm: use system block for mm initialization
  lightnvm: introduce ioctl to initialize device
  lightnvm: core on-disk initialization
  lightnvm: introduce mlc lower page table mappings
  lightnvm: add mccap support
  lightnvm: manage open and closed blocks separately
  lightnvm: fix missing grown bad block type
  lightnvm: reference rrpc lun in rrpc block
  lightnvm: introduce nvm_submit_ppa
  lightnvm: move rq->error to nvm_rq->error
  lightnvm: support multiple ppas in nvm_erase_ppa
  lightnvm: move the pages per block check out of the loop
  lightnvm: sectors first in ppa list
  lightnvm: fix locking and mempool in rrpc_lun_gc
  lightnvm: put block back to gc list on its reclaim fail
  lightnvm: check bi_error in gc
  lightnvm: return the get_bb_tbl return value
  lightnvm: refactor end_io functions for sync
  ...
parents 64120354 a7fd9a4f
drivers/block/null_blk.c  +1 −2
@@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb)
static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);
	nvm_end_io(rqd, error);

	blk_put_request(rq);
}
drivers/lightnvm/Makefile  +1 −1
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#

obj-$(CONFIG_NVM)		:= core.o
obj-$(CONFIG_NVM)		:= core.o sysblk.o
obj-$(CONFIG_NVM_GENNVM) 	+= gennvm.o
obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
drivers/lightnvm/core.c  +338 −2
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

static LIST_HEAD(nvm_targets);
@@ -105,6 +106,9 @@ struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
@@ -166,6 +170,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
	return NULL;
}

struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk_unlocked(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk_unlocked);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk_unlocked(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk_unlocked);

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
@@ -192,6 +210,206 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
					struct ppa_addr *ppas, int nr_ppas)
{
	int i, plane_cnt, pl_idx;

	if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_pages = 1;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	plane_cnt = (1 << dev->plane_mode);
	rqd->nr_pages = plane_cnt * nr_ppas;

	if (dev->ops->max_phys_sect < rqd->nr_pages)
		return -EINVAL;

	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

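	/*
	 * The list is laid out plane by plane: entries [pl * nr_ppas ..
	 * pl * nr_ppas + nr_ppas - 1] hold every sector for plane pl, so
	 * two planes and sectors s0, s1 give {s0/pl0, s1/pl0, s0/pl1,
	 * s1/pl1}.
	 */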
	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
		for (i = 0; i < nr_ppas; i++) {
			ppas[i].g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct nvm_rq rqd;
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	rqd.opcode = opcode;
	rqd.bio = bio;
	rqd.wait = &wait;
	rqd.dev = dev;
	rqd.end_io = nvm_end_io_sync;
	rqd.flags = flags;
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->submit_io(dev, &rqd);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	nvm_free_rqd_ppalist(dev, &rqd);

	return rqd.error;
}
EXPORT_SYMBOL(nvm_submit_ppa);
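
/*
 * Illustrative sketch, not part of the patch: with the synchronous
 * completion above, a caller can read a single flash page through
 * nvm_submit_ppa(). NVM_OP_PREAD and NVM_IO_SLC_MODE come from
 * <linux/lightnvm.h>; the helper name and its use here are hypothetical.
 */
static int example_read_page(struct nvm_dev *dev, struct ppa_addr ppa,
			     void *buf)
{
	/* buf must hold one full flash page and be bio_map_kern()-able */
	return nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
			      buf, dev->sec_per_pg * dev->sec_size);
}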

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte has a lower and an upper half. The first half-byte holds
	 * the initial value and every half-byte after it is an increment
	 * added to the previous value. */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
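
/*
 * Worked example with hypothetical pairs bytes (not from the patch): for
 * mlc->pairs = {0x10, 0x21, 0x22} and num_pairs = 6 (one entry per lower
 * page), the decode above produces lptbl = {0, 1, 2, 4, 6, 8}:
 *
 *   lptbl[0] = 0x10 & 0xF       = 0
 *   lptbl[1] = 0 + (0x10 >> 4)  = 1   (upper nibble of pairs[0])
 *   lptbl[2] = 1 + (0x21 & 0xF) = 2   (lower nibble of pairs[1])
 *   lptbl[3] = 2 + (0x21 >> 4)  = 4
 *   lptbl[4] = 4 + (0x22 & 0xF) = 6
 *   lptbl[5] = 6 + (0x22 >> 4)  = 8
 */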

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
@@ -206,6 +424,7 @@ static int nvm_core_init(struct nvm_dev *dev)
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
@@ -216,11 +435,23 @@ static int nvm_core_init(struct nvm_dev *dev)
		return -EINVAL;
	}

	if (grp->fmtype != 0 && grp->fmtype != 1) {
	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp))
			return -ENOMEM;
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp))
			return -ENOMEM;
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		return -EINVAL;
	}

	if (!dev->lps_per_blk)
		pr_info("nvm: lower page programming table missing\n");

	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
@@ -238,6 +469,7 @@ static int nvm_core_init(struct nvm_dev *dev)
				dev->nr_chnls;
	dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
	INIT_LIST_HEAD(&dev->online_targets);
	mutex_init(&dev->mlock);

	return 0;
}
@@ -249,6 +481,8 @@ static void nvm_free(struct nvm_dev *dev)

	if (dev->mt)
		dev->mt->unregister_mgr(dev);

	kfree(dev->lptbl);
}

static int nvm_init(struct nvm_dev *dev)
@@ -338,8 +572,15 @@ int nvm_register(struct request_queue *q, char *disk_name,
		}
	}

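	/*
	 * nvm_get_sysblock() is expected to return 1 when a system block is
	 * found, 0 when the device has not been initialized, and a negative
	 * errno on failure; only a positive result lets a media manager be
	 * attached below.
	 */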
	ret = nvm_get_sysblock(dev, &dev->sb);
	if (!ret)
		pr_err("nvm: device not initialized.\n");
	else if (ret < 0)
		pr_err("nvm: err (%d) on device initialization\n", ret);

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);
@@ -788,6 +1029,97 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
	return __nvm_configure_remove(&remove);
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	ret = nvm_init_sysblock(dev, &info);
	if (ret)
		return ret;

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

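	/* reject any flag bits outside the defined NVM_FACTORY_* range */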
	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (dev->mt) {
		dev->mt->unregister_mgr(dev);
		dev->mt = NULL;
	}

	return nvm_dev_factory(dev, fact.flags);
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
@@ -801,6 +1133,10 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
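
For context, a minimal userspace sketch of driving the new NVM_DEV_INIT ioctl through the LightNVM control node. The device name nvme0n1 and the gennvm media manager are assumptions for illustration; the struct and ioctl definitions come from uapi/linux/lightnvm.h, and error handling is kept minimal:

/* init_dev.c - hypothetical example */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/lightnvm.h>

int main(void)
{
	struct nvm_ioctl_dev_init init;
	int fd = open("/dev/lightnvm/control", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&init, 0, sizeof(init));
	strncpy(init.dev, "nvme0n1", DISK_NAME_LEN - 1);	/* assumed name */
	strncpy(init.mmtype, "gennvm", NVM_MMTYPE_LEN - 1);
	init.flags = 0;						/* must be 0 */

	if (ioctl(fd, NVM_DEV_INIT, &init) < 0)
		perror("NVM_DEV_INIT");

	return 0;
}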
drivers/lightnvm/gennvm.c  +79 −119
@@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_inuse_blocks = 0;
		lun->vlun.nr_open_blocks = 0;
		lun->vlun.nr_closed_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
@@ -89,6 +90,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,

		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_free_blocks--;
	}

	return 0;
@@ -133,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->type) {
		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->type = 1;
			blk->state = NVM_BLK_ST_OPEN;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_inuse_blocks++;
			lun->vlun.nr_open_blocks++;
		}
	}

@@ -255,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev)
	module_put(THIS_MODULE);
}

static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);
	assert_spin_locked(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free pages available",
@@ -275,83 +277,64 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->type = 1;
	blk->state = NVM_BLK_ST_OPEN;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_inuse_blocks++;
	lun->vlun.nr_open_blocks++;

out:
	return blk;
}

static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct nvm_block *blk;

	spin_lock(&vlun->lock);
	blk = gennvm_get_blk_unlocked(dev, vlun, flags);
	spin_unlock(&vlun->lock);
	return blk;
}

static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);
	assert_spin_locked(&vlun->lock);

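	/*
	 * Open and closed blocks go back on the free list; bad blocks (and
	 * any unrecognized state, after a warning) are parked on the bad
	 * block list instead.
	 */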
	switch (blk->type) {
	case 1:
	if (blk->state & NVM_BLK_ST_OPEN) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_open_blocks--;
		lun->vlun.nr_free_blocks++;
		lun->vlun.nr_inuse_blocks--;
		blk->type = 0;
		break;
	case 2:
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_CLOSED) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_closed_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
		break;
	default:
		blk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->type);
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
	}

	spin_unlock(&vlun->lock);
}

static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}

static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
		blk->state = NVM_BLK_ST_BAD;
	}
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	if (!dev->ops->submit_io)
		return 0;

	/* Convert address space */
	gennvm_generic_to_addr_mode(dev, rqd);
	struct nvm_lun *vlun = blk->lun;

	rqd->dev = dev;
	return dev->ops->submit_io(dev, rqd);
	spin_lock(&vlun->lock);
	gennvm_put_blk_unlocked(dev, blk);
	spin_unlock(&vlun->lock);
}

static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -376,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->type = type;
	blk->state = type;
}

/* mark block bad. It is expected the target recover from the error. */
@@ -390,77 +373,51 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
	if (dev->ops->set_bb_tbl(dev, rqd, 1))
		return;

	gennvm_addr_to_generic_mode(dev, rqd);
	nvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
			gennvm_blk_set_type(dev, &rqd->ppa_list[i],
						NVM_BLK_ST_BAD);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
		gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
}

static int gennvm_end_io(struct nvm_rq *rqd, int error)
static void gennvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	switch (rqd->error) {
	case NVM_RSP_SUCCESS:
		break;
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);

	return ret;
	ins->tt->end_io(rqd);
}

static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int plane_cnt = 0, pl_idx, ret;
	struct ppa_addr addr;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	addr = block_to_ppa(dev, blk);

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = addr;
	} else {
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;
	if (!dev->ops->submit_io)
		return -ENODEV;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("gennvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}
	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			addr.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = addr;
		}
	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;
	return dev->ops->submit_io(dev, rqd);
}

	gennvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	struct ppa_addr addr = block_to_ppa(dev, blk);

	return ret;
	return nvm_erase_ppa(dev, &addr, 1);
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
@@ -480,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
		pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_inuse_blocks,
				lun->vlun.nr_open_blocks,
				lun->vlun.nr_closed_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
@@ -497,11 +455,13 @@ static struct nvmm_type gennvm = {
	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk_unlocked	= gennvm_get_blk_unlocked,
	.put_blk_unlocked	= gennvm_put_blk_unlocked,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.end_io		= gennvm_end_io,
	.erase_blk		= gennvm_erase_blk,

	.get_lun		= gennvm_get_lun,
drivers/lightnvm/rrpc.c  +78 −26
@@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
	if (!blk)
	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = &rlun->blocks[blk->id];
	blk->priv = rblk;
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
@@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	nvm_put_blk(rrpc->dev, rblk->parent);
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
@@ -287,6 +300,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page)
		return -ENOMEM;

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -328,6 +343,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);
@@ -350,6 +369,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}
@@ -373,16 +394,26 @@ static void rrpc_block_gc(struct work_struct *work)
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto done;
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	nvm_erase_blk(dev, rblk->parent);
	rrpc_put_blk(rrpc, rblk);
done:
	mempool_free(gcb, rrpc->gcb_pool);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with highest number of invalid pages, will be in the beginning
@@ -427,7 +458,7 @@ static void rrpc_lun_gc(struct work_struct *work)
	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&lun->lock);
	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +467,16 @@ static void rrpc_lun_gc(struct work_struct *work)
		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +485,7 @@ static void rrpc_lun_gc(struct work_struct *work)

		nr_blocks_need--;
	}
	spin_unlock(&lun->lock);
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}
@@ -635,12 +666,24 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
			struct nvm_block *blk = rblk->parent;
			struct rrpc_lun *rlun = rblk->rlun;

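			/*
			 * The last page of the block has been committed:
			 * move it from open to closed accounting so GC can
			 * pick it up.
			 */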
			spin_lock(&lun->lock);
			lun->nr_open_blocks--;
			lun->nr_closed_blocks++;
			blk->state &= ~NVM_BLK_ST_OPEN;
			blk->state |= NVM_BLK_ST_CLOSED;
			list_move_tail(&rblk->list, &rlun->closed_list);
			spin_unlock(&lun->lock);

			rrpc_run_gc(rrpc, rblk);
		}
	}
}

static int rrpc_end_io(struct nvm_rq *rqd, int error)
static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
@@ -650,11 +693,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return 0;
		return;

	rrpc_unlock_rq(rrpc, rqd);
	bio_put(rqd->bio);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -662,8 +706,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);

	return 0;
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
@@ -841,6 +883,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_pages > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

@@ -1090,6 +1139,11 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
	struct rrpc_lun *rlun;
	int i, j;

	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
@@ -1101,16 +1155,13 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
	for (i = 0; i < rrpc->nr_luns; i++) {
		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);

		if (dev->pgs_per_blk >
				MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
			pr_err("rrpc: number of pages per block too high.");
			goto err;
		}

		rlun = &rrpc->luns[i];
		rlun->rrpc = rrpc;
		rlun->parent = lun;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

@@ -1127,6 +1178,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}