Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e8d51348 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Dan Williams
Browse files

memremap: change devm_memremap_pages interface to use struct dev_pagemap



This new interface is similar to how struct device (and many others)
work. The caller initializes a 'struct dev_pagemap' as required
and calls 'devm_memremap_pages'. This allows the pagemap structure to
be embedded in another structure and thus container_of can be used. In
this way application specific members can be stored in a containing
struct.

This will be used by the P2P infrastructure and HMM could probably
be cleaned up to use it as well (instead of having its own, similar
'hmm_devmem_pages_create' function).

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent e7744aa2
Loading
Loading
Loading
Loading
+11 −9
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
struct dax_pmem {
	struct device *dev;
	struct percpu_ref ref;
	struct dev_pagemap pgmap;
	struct completion cmp;
};

@@ -69,20 +70,23 @@ static int dax_pmem_probe(struct device *dev)
	struct nd_namespace_common *ndns;
	struct nd_dax *nd_dax = to_nd_dax(dev);
	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
	struct vmem_altmap __altmap, *altmap = NULL;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);
	nsio = to_nd_namespace_io(&ndns->dev);

	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
	if (!dax_pmem)
		return -ENOMEM;

	/* parse the 'pfn' info block via ->rw_bytes */
	rc = devm_nsio_enable(dev, nsio);
	if (rc)
		return rc;
	altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap);
	rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);
	if (rc)
		return rc;
	devm_nsio_disable(dev, nsio);

	pfn_sb = nd_pfn->pfn_sb;
@@ -94,10 +98,6 @@ static int dax_pmem_probe(struct device *dev)
		return -EBUSY;
	}

	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
	if (!dax_pmem)
		return -ENOMEM;

	dax_pmem->dev = dev;
	init_completion(&dax_pmem->cmp);
	rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
@@ -110,7 +110,8 @@ static int dax_pmem_probe(struct device *dev)
	if (rc)
		return rc;

	addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
	dax_pmem->pgmap.ref = &dax_pmem->ref;
	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

@@ -120,6 +121,7 @@ static int dax_pmem_probe(struct device *dev)
		return rc;

	/* adjust the dax_region resource to the start of data */
	memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
	res.start += le64_to_cpu(pfn_sb->dataoff);

	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+4 −5
Original line number Diff line number Diff line
@@ -368,15 +368,14 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap);
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
				   struct dev_pagemap *pgmap)
{
	return ERR_PTR(-ENXIO);
	return -ENXIO;
}
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio)
+15 −12
Original line number Diff line number Diff line
@@ -542,9 +542,10 @@ static unsigned long init_altmap_reserve(resource_size_t base)
	return reserve;
}

static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
@@ -561,11 +562,13 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
	res->start += start_pad;
	res->end -= end_trunc;

	pgmap->type = MEMORY_DEVICE_HOST;

	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < SZ_8K)
			return ERR_PTR(-EINVAL);
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
		pgmap->altmap_valid = false;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
					- offset) / PAGE_SIZE);
@@ -577,10 +580,11 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - SZ_8K);
		altmap->alloc = 0;
		pgmap->altmap_valid = true;
	} else
		return ERR_PTR(-ENXIO);
		return -ENXIO;

	return altmap;
	return 0;
}

static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
@@ -708,19 +712,18 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return ERR_PTR(-ENODEV);
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return ERR_PTR(rc);
		return rc;

	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
	/* we need a valid pfn_sb before we can init a dev_pagemap */
	return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
+20 −17
Original line number Diff line number Diff line
@@ -298,34 +298,34 @@ static int pmem_attach_disk(struct device *dev,
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
@@ -350,19 +350,22 @@ static int pmem_attach_disk(struct device *dev,
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
@@ -401,7 +404,7 @@ static int pmem_attach_disk(struct device *dev,
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
+1 −0
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@ struct pmem_device {
	struct badblocks	bb;
	struct dax_device	*dax_dev;
	struct gendisk		*disk;
	struct dev_pagemap	pgmap;
};

long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
Loading