Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24b6d416 authored by Christoph Hellwig, committed by Dan Williams
Browse files

mm: pass the vmem_altmap to vmemmap_free



We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent da024512
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -696,7 +696,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
+2 −1
Original line number Diff line number Diff line
@@ -760,7 +760,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
	return vmemmap_populate_basepages(start, end, node);
}

void vmemmap_free(unsigned long start, unsigned long end)
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif
+2 −3
Original line number Diff line number Diff line
@@ -254,7 +254,8 @@ static unsigned long vmemmap_list_free(unsigned long start)
	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
@@ -265,7 +266,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct vmem_altmap *altmap;
		struct page *section_base;
		struct page *page;

@@ -285,7 +285,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;

		altmap = to_vmem_altmap((unsigned long) section_base);
		if (altmap) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
+2 −1
Original line number Diff line number Diff line
@@ -297,7 +297,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}

+2 −1
Original line number Diff line number Diff line
@@ -2671,7 +2671,8 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end)
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
Loading