Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c215c66c authored by Dan Williams, committed by Greg Kroah-Hartman
Browse files

mm, devm_memremap_pages: add MEMORY_DEVICE_PRIVATE support

commit 69324b8f48339de2f90fdf2f774687fc6c47629a upstream.

In preparation for consolidating all ZONE_DEVICE enabling via
devm_memremap_pages(), teach it how to handle the constraints of
MEMORY_DEVICE_PRIVATE ranges.

[jglisse@redhat.com: call move_pfn_range_to_zone for MEMORY_DEVICE_PRIVATE]
Link: http://lkml.kernel.org/r/154275559036.76910.12434636179931292607.stgit@dwillia2-desk3.amr.corp.intel.com


Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ec5471c9
Loading
Loading
Loading
Loading
+41 −12
Original line number Diff line number Diff line
@@ -132,9 +132,15 @@ static void devm_memremap_pages_release(void *data)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -232,17 +238,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. More-
	 * over the device memory is un-accessible thus we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
		error = arch_add_memory(nid, align_start, align_size, altmap,
				false);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;