Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24828d05 authored by Igor Konopko's avatar Igor Konopko Committed by Jens Axboe
Browse files

lightnvm: dynamic DMA pool entry size



Currently lightnvm and pblk use a single DMA pool, for which the entry
size is always equal to PAGE_SIZE. The contents of each entry allocated
from the DMA pool consist of a PPA list (8 bytes * 64), leaving
56 bytes * 64 of space for metadata. Since the metadata field can be bigger,
such as 128 bytes, the static size does not cover this use-case.

This patch adds support for I/O metadata above 56 bytes by changing DMA
pool size based on device meta size and allows pblk to use OOB metadata
>=16B.

Reviewed-by: default avatarJavier González <javier@cnexlabs.com>
Signed-off-by: default avatarIgor Konopko <igor.j.konopko@intel.com>
Signed-off-by: default avatarMatias Bjørling <mb@lightnvm.io>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent faa79f27
Loading
Loading
Loading
Loading
+7 −2
Original line number Diff line number Diff line
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops)
		return -EINVAL;
@@ -1149,7 +1149,12 @@ int nvm_register(struct nvm_dev *dev)
	if (ret)
		return ret;

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("nvm: could not create dma pool\n");
		nvm_free(dev);
+4 −4
Original line number Diff line number Diff line
@@ -250,8 +250,8 @@ int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
	if (rqd->nr_ppas == 1)
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

	return 0;
}
@@ -846,8 +846,8 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
	ppa_list = meta_list + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));
+1 −1
Original line number Diff line number Diff line
@@ -406,7 +406,7 @@ static int pblk_core_init(struct pblk *pblk)
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	pblk->oob_meta_size = geo->sos;
	if (pblk->oob_meta_size != sizeof(struct pblk_sec_meta)) {
	if (pblk->oob_meta_size < sizeof(struct pblk_sec_meta)) {
		pblk_err(pblk, "Unsupported metadata size\n");
		return -EINVAL;
	}
+2 −2
Original line number Diff line number Diff line
@@ -481,8 +481,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
	if (!meta_list)
		return -ENOMEM;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
	ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

	data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
	if (!data) {
+5 −1
Original line number Diff line number Diff line
@@ -104,7 +104,6 @@ enum {
	PBLK_RL_LOW = 4
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)

/* write buffer completion context */
@@ -1388,4 +1387,9 @@ static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
{
	return meta + pblk->oob_meta_size * index;
}

/* Total DMA buffer size (bytes) needed for OOB metadata in one request:
 * per-sector OOB metadata size scaled by the maximum LBAs per command.
 */
static inline int pblk_dma_meta_size(struct pblk *pblk)
{
	int meta_per_sector = pblk->oob_meta_size;

	return NVM_MAX_VLBA * meta_per_sector;
}
#endif /* PBLK_H_ */
Loading