Commit 468ded03 authored by Dan Williams, committed by Linus Torvalds

libnvdimm, pmem: move request_queue allocation earlier in probe

Before the dynamically allocated struct pages from devm_memremap_pages()
can be put to use outside the driver, we need a mechanism to track
whether they are still in use at teardown.  Towards that goal, reorder
the initialization sequence so that the request_queue's
'q_usage_counter' can be used by the devm_memremap_pages()
implementation (in subsequent patches).
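
To make the intent concrete, here is a minimal sketch (not the driver code itself) of the ordering this patch establishes in pmem_alloc(). The percpu_ref-taking devm_memremap_pages() call shown in the comment is an assumption about the subsequent patches referenced above; in this commit the function still takes only the device and the resource.

/* Sketch only, assuming the 4.4-era block and memremap APIs. */
static void *probe_order_sketch(struct device *dev, struct resource *res)
{
	struct request_queue *q;
	void *addr;

	/*
	 * Allocate the request_queue first so that q->q_usage_counter
	 * (a percpu_ref) already exists by the time the pages are mapped.
	 */
	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	/*
	 * Subsequent patches are expected to tie page lifetime to the
	 * queue, roughly:
	 *
	 *	addr = devm_memremap_pages(dev, res, &q->q_usage_counter, ...);
	 *
	 * (hypothetical signature; this commit still uses the
	 * two-argument form)
	 */
	addr = devm_memremap_pages(dev, res);
	if (IS_ERR(addr)) {
		/* every failure past the allocation must unwind the queue */
		blk_cleanup_queue(q);
		return addr;
	}

	return addr;	/* the caller stashes q in pmem->pmem_queue */
}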

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f25748e3
drivers/nvdimm/pmem.c: +20 −13
@@ -159,6 +159,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 		struct resource *res, int id)
 {
 	struct pmem_device *pmem;
+	struct request_queue *q;
 
 	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 	if (!pmem)
@@ -176,6 +177,10 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 		return ERR_PTR(-EBUSY);
 	}
 
+	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+	if (!q)
+		return ERR_PTR(-ENOMEM);
+
 	pmem->pfn_flags = PFN_DEV;
 	if (pmem_should_map_pages(dev)) {
 		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
@@ -186,9 +191,12 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 				pmem->phys_addr, pmem->size,
 				ARCH_MEMREMAP_PMEM);
 
-	if (IS_ERR(pmem->virt_addr))
+	if (IS_ERR(pmem->virt_addr)) {
+		blk_cleanup_queue(q);
 		return (void __force *) pmem->virt_addr;
+	}
 
+	pmem->pmem_queue = q;
 	return pmem;
 }
 
@@ -208,10 +216,6 @@ static int pmem_attach_disk(struct device *dev,
 	int nid = dev_to_node(dev);
 	struct gendisk *disk;
 
-	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
-	if (!pmem->pmem_queue)
-		return -ENOMEM;
-
 	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
 	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
 	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
@@ -446,19 +450,22 @@ static int nd_pmem_probe(struct device *dev)
 		return -ENOMEM;
 	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
 
-	if (is_nd_btt(dev))
+	if (is_nd_btt(dev)) {
+		/* btt allocates its own request_queue */
+		blk_cleanup_queue(pmem->pmem_queue);
+		pmem->pmem_queue = NULL;
 		return nvdimm_namespace_attach_btt(ndns);
+	}
 
 	if (is_nd_pfn(dev))
 		return nvdimm_namespace_attach_pfn(ndns);
 
-	if (nd_btt_probe(ndns, pmem) == 0) {
-		/* we'll come back as btt-pmem */
-		return -ENXIO;
-	}
-
-	if (nd_pfn_probe(ndns, pmem) == 0) {
-		/* we'll come back as pfn-pmem */
+	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
+		/*
+		 * We'll come back as either btt-pmem, or pfn-pmem, so
+		 * drop the queue allocation for now.
+		 */
+		blk_cleanup_queue(pmem->pmem_queue);
 		return -ENXIO;
 	}
 
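
Why the request_queue is a convenient anchor for this tracking: 'q_usage_counter' is a struct percpu_ref, and blk_cleanup_queue() already kills that reference and waits for it to drain. A hedged sketch of the resulting pattern follows; the two function names are illustrative placeholders, not the API the later patches actually add.

/* Pin the queue (and, by extension, its memremap'd pages) while in use. */
static bool pages_busy_get(struct request_queue *q)
{
	/* fails once blk_cleanup_queue() has begun tearing the queue down */
	return percpu_ref_tryget_live(&q->q_usage_counter);
}

/* Drop the pin once the pages are no longer referenced. */
static void pages_busy_put(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}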