Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24e6d5a5 authored by Christoph Hellwig, committed by Dan Williams
Browse files

mm: pass the vmem_altmap to arch_add_memory and __add_pages



We can just pass this on instead of having to do a radix tree lookup
without proper locking 2 levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 55ce6e23
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -647,13 +647,14 @@ mem_init (void)
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);
+3 −2
Original line number Diff line number Diff line
@@ -127,7 +127,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
	return -ENODEV;
}

int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -144,7 +145,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
+3 −2
Original line number Diff line number Diff line
@@ -222,7 +222,8 @@ device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
@@ -232,7 +233,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, want_memblock);
	rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
+3 −2
Original line number Diff line number Diff line
@@ -485,14 +485,15 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

+3 −2
Original line number Diff line number Diff line
@@ -829,12 +829,13 @@ void __init mem_init(void)
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
Loading