Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a2f3aa02 authored by Dave Hansen's avatar Dave Hansen Committed by Linus Torvalds
Browse files

[PATCH] Fix sparsemem on Cell



Fix an oops experienced on the Cell architecture when init-time functions,
early_*(), are called at runtime.  It alters the call paths to make sure
that the callers explicitly say whether the call is being made on behalf of
a hotplug event, or happening at boot-time.

It has been compile tested on ppc64, ia64, s390, i386 and x86_64.

Acked-by: default avatarArnd Bergmann <arndb@de.ibm.com>
Signed-off-by: default avatarDave Hansen <haveblue@us.ibm.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Acked-by: default avatarAndy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: default avatarHeiko Carstens <heiko.carstens@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 47a4d5be
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -543,7 +543,8 @@ virtual_memmap_init (u64 start, u64 end, void *arg)

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

@@ -552,7 +553,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn);
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;
+2 −1
Original line number Diff line number Diff line
@@ -61,7 +61,8 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start));
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}

+2 −1
Original line number Diff line number Diff line
@@ -978,7 +978,8 @@ extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_pages_min(void);
extern void mem_init(void);
extern void show_mem(void);
+6 −2
Original line number Diff line number Diff line
@@ -450,9 +450,13 @@ void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);

enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
+4 −2
Original line number Diff line number Diff line
@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

Loading