Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e46f4e48 authored by Javier González, committed by Jens Axboe
Browse files

lightnvm: simplify geometry structure



Currently, the device geometry is stored redundantly in the nvm_id and
nvm_geo structures at a device level. Moreover, when instantiating
targets on a specific number of LUNs, these structures are replicated
and manually modified to fit the instance channel and LUN partitioning.

Instead, create a generic geometry around nvm_geo, which can be used by
(i) the underlying device to describe the geometry of the whole device,
and (ii) instances to describe their geometry independently.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 43d47127
Loading
Loading
Loading
Loading
+20 −50
Original line number Diff line number Diff line
@@ -155,7 +155,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
	int blun = lun_begin % dev->geo.nr_luns;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int sec_per_lun, prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
@@ -215,18 +215,23 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.all_luns = nr_luns;
	tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->geo.all_luns = nr_luns;
	tgt_dev->geo.all_chunks = nr_luns * dev->geo.nr_chks;

	tgt_dev->geo.op = op;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;

	sec_per_lun = dev->geo.clba * dev->geo.nr_chks;
	tgt_dev->geo.total_secs = nr_luns * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
@@ -296,8 +301,6 @@ static int __nvm_config_simple(struct nvm_dev *dev,
static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	struct nvm_geo *geo = &dev->geo;

	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
@@ -311,7 +314,7 @@ static int __nvm_config_extended(struct nvm_dev *dev,
		return -EINVAL;
	}

	return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
@@ -406,7 +409,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue,
			(dev->geo.sec_size >> 9) * NVM_MAX_VLBA);
			(dev->geo.csecs >> 9) * NVM_MAX_VLBA);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);
@@ -841,40 +844,9 @@ EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	if (id->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* Whole device values */
	geo->nr_chnls = id->num_ch;
	geo->nr_luns = id->num_lun;

	/* Generic device geometry values */
	geo->ws_min = id->ws_min;
	geo->ws_opt = id->ws_opt;
	geo->ws_seq = id->ws_seq;
	geo->ws_per_chk = id->ws_per_chk;
	geo->nr_chks = id->num_chk;
	geo->mccap = id->mccap;

	geo->sec_per_chk = id->clba;
	geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
	geo->all_luns = geo->nr_luns * geo->nr_chnls;

	/* 1.2 spec device geometry values */
	geo->plane_mode = 1 << geo->ws_seq;
	geo->nr_planes = geo->ws_opt / geo->ws_min;
	geo->sec_per_pg = geo->ws_min;
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;

	dev->total_secs = geo->all_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
@@ -913,16 +885,14 @@ static int nvm_init(struct nvm_dev *dev)
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
	if (dev->ops->identity(dev)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) {
		pr_err("nvm: device ver_id %d not supported by kernel.\n",
				dev->identity.ver_id);
		goto err;
	}
	pr_debug("nvm: ver:%u nvm_vendor:%x\n",
				geo->ver_id,
				geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
@@ -930,10 +900,10 @@ static int nvm_init(struct nvm_dev *dev)
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->ws_per_chk, geo->nr_chks,
			geo->all_luns, geo->nr_chnls);
	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, geo->ws_min, geo->ws_opt,
			geo->nr_chks, geo->all_luns,
			geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
+8 −8
Original line number Diff line number Diff line
@@ -613,7 +613,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
@@ -722,7 +722,7 @@ u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
	return bit * geo->ws_opt;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
@@ -1034,17 +1034,17 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_chk;
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
@@ -1063,10 +1063,10 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->sec_per_pl;
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			emeta_secs -= geo->sec_per_pl;
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

+1 −1
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ static void pblk_gc_line_ws(struct work_struct *work)

	up(&gc->gc_sem);

	gc_rq->data = vmalloc(gc_rq->nr_secs * geo->sec_size);
	gc_rq->data = vmalloc(gc_rq->nr_secs * geo->csecs);
	if (!gc_rq->data) {
		pr_err("pblk: could not GC line:%d (%d/%d)\n",
					line->id, *line->vsc, gc_rq->nr_secs);
+62 −55
Original line number Diff line number Diff line
@@ -179,7 +179,7 @@ static int pblk_rwb_init(struct pblk *pblk)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);
	power_seg_sz = get_count_order(geo->csecs);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}
@@ -187,18 +187,10 @@ static int pblk_rwb_init(struct pblk *pblk)
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int mod, power_len;

	div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
@@ -206,34 +198,50 @@ static int pblk_set_ppaf(struct pblk *pblk)
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;
	dst->ch_len = power_len;

	power_len = get_count_order(geo->nr_luns);
	if (1 << power_len != geo->nr_luns) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;
	dst->lun_len = power_len;

	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sect_len = src->sect_len;

	dst->sect_offset = 0;
	dst->pln_offset = dst->sect_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}

static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int mod;

	div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	pblk->ppaf_bitsize = pblk_set_addrf_12(geo, (void *)&pblk->ppaf);

	return 0;
}
@@ -303,10 +311,9 @@ static int pblk_core_init(struct pblk *pblk)
	atomic64_set(&pblk->nr_flush, 0);
	pblk->nr_flush_rst = 0;

	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->all_luns;
	pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -583,18 +590,18 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len +
			sizeof(struct wa_counters), geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;
			sizeof(struct wa_counters), geo->csecs);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;
			geo->csecs);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;
			geo->csecs);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

@@ -625,13 +632,13 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
	pblk->rl.nr_secs = nr_free_blks * geo->clba;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
	pblk->capacity = (provisioned - blk_meta) * geo->clba;

	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
	atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
@@ -783,7 +790,7 @@ static int pblk_line_meta_init(struct pblk *pblk)
	unsigned int smeta_len, emeta_len;
	int i;

	lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
	lm->sec_per_line = geo->clba * geo->all_luns;
	lm->blk_per_line = geo->all_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
@@ -797,8 +804,8 @@ static int pblk_line_meta_init(struct pblk *pblk)
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;
	lm->smeta_sec = i * geo->ws_opt;
	lm->smeta_len = lm->smeta_sec * geo->csecs;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
@@ -811,8 +818,8 @@ static int pblk_line_meta_init(struct pblk *pblk)
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;
	lm->emeta_sec[0] = i * geo->ws_opt;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
@@ -825,7 +832,7 @@ static int pblk_line_meta_init(struct pblk *pblk)
	lm->min_blk_line = 1;
	if (geo->all_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->sec_per_chk);
					lm->emeta_sec[0], geo->clba);

	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -1009,9 +1016,9 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
	if (dev->geo.dom & NVM_RSP_L2P) {
		pr_err("pblk: host-side L2P table not supported. (%x)\n",
							dev->identity.dom);
							dev->geo.dom);
		return ERR_PTR(-EINVAL);
	}

@@ -1093,7 +1100,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
	tqueue->limits.discard_granularity = geo->clba * geo->csecs;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
+1 −1
Original line number Diff line number Diff line
@@ -563,7 +563,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->sec_size;
	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
Loading