
Commit 892365ab authored by Gerald Schaefer, committed by Martin Schwidefsky

[S390] memory hotplug: prevent memory zone interleave

This fixes a kernel oops with CONFIG_DEBUG_VM triggered by a
VM_BUG_ON(bad_range()): kernel BUG at mm/page_alloc.c:748.

With memory hotplug on System z, it is possible that the memory
online/offline state is preserved over a system restart, e.g. there
may be offline memory blocks in ZONE_DMA or ZONE_NORMAL. So far,
the offline memory range has always been added to ZONE_MOVABLE during
system start, so that it was possible to have ZONE_MOVABLE interleave
with ZONE_DMA or ZONE_NORMAL. This patch fixes that by checking for
zone overlap before adding memory.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d60331ac
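
To make the interleave concrete, with hypothetical numbers: suppose ZONE_NORMAL spans pfns 0x20000-0x40000 and the memory block covering pfns 0x30000-0x38000 comes back offline after a restart. Before this patch, onlining that block added it to ZONE_MOVABLE, leaving ZONE_MOVABLE with pages in the middle of ZONE_NORMAL's span. bad_range() in mm/page_alloc.c verifies that a page's pfn lies within its zone's spanned range and that the page actually belongs to that zone; interleaved spans break that invariant, and the VM_BUG_ON fires.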
arch/s390/mm/init.c
+26 −4
@@ -223,16 +223,38 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
-	struct pglist_data *pgdat;
+	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+	unsigned long start_pfn = PFN_DOWN(start);
+	unsigned long size_pages = PFN_DOWN(size);
 	struct zone *zone;
 	int rc;
 
-	pgdat = NODE_DATA(nid);
-	zone = pgdat->node_zones + ZONE_MOVABLE;
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
-	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
+	for_each_zone(zone) {
+		if (zone_idx(zone) != ZONE_MOVABLE) {
+			/* Add range within existing zone limits */
+			zone_start_pfn = zone->zone_start_pfn;
+			zone_end_pfn = zone->zone_start_pfn +
+				       zone->spanned_pages;
+		} else {
+			/* Add remaining range to ZONE_MOVABLE */
+			zone_start_pfn = start_pfn;
+			zone_end_pfn = start_pfn + size_pages;
+		}
+		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+			continue;
+		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+			   zone_end_pfn - start_pfn : size_pages;
+		rc = __add_pages(nid, zone, start_pfn, nr_pages);
+		if (rc)
+			break;
+		start_pfn += nr_pages;
+		size_pages -= nr_pages;
+		if (!size_pages)
+			break;
+	}
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;
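
To trace the new loop outside the kernel, here is a minimal userspace sketch of the same splitting logic. The two-zone layout, the pfn bounds, and split_range() are invented for illustration; in the kernel the chunks are handed to __add_pages() rather than printed.

/*
 * Userspace sketch of the splitting loop added by this patch.
 * The zone layout below is hypothetical; on a real machine the
 * bounds come from the kernel's zone descriptors. ZONE_MOVABLE is
 * modeled as open ended: it absorbs whatever is left of the range.
 */
#include <stdio.h>

struct zone {
	const char *name;
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	int is_movable;
};

/* Hypothetical layout: two fixed zones followed by ZONE_MOVABLE. */
static struct zone zones[] = {
	{ "ZONE_DMA",     0x00000, 0x20000, 0 },
	{ "ZONE_NORMAL",  0x20000, 0x20000, 0 },
	{ "ZONE_MOVABLE", 0,       0,       1 },
};

static void split_range(unsigned long start_pfn, unsigned long size_pages)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned int i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		struct zone *zone = &zones[i];

		if (!zone->is_movable) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		/* In the kernel this is __add_pages(nid, zone, ...) */
		printf("%s: pfn 0x%lx, %lu pages\n",
		       zone->name, start_pfn, nr_pages);
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
}

int main(void)
{
	/* A block that starts inside ZONE_NORMAL and runs past its end. */
	split_range(0x3c000, 0x8000);
	return 0;
}

With this layout, split_range(0x3c000, 0x8000) reports 0x4000 pages for ZONE_NORMAL (up to its end at pfn 0x40000) and the remaining 0x4000 pages for ZONE_MOVABLE, mirroring how the patch keeps each chunk inside existing zone limits and sends only the remainder to ZONE_MOVABLE.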