Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 72d7c3b3 authored by Yinghai Lu's avatar Yinghai Lu Committed by H. Peter Anvin
Browse files

x86: Use memblock to replace early_res



1. replace find_e820_area with memblock_find_in_range
2. replace reserve_early with memblock_x86_reserve_range
3. replace free_early with memblock_x86_free_range.
4. NO_BOOTMEM will switch to use memblock too.
5. use _e820, _early wrap in the patch, in following patch, will
   replace them all
6. because memblock_x86_free_range support partial free, we can remove some special care
7. Need to make sure that memblock_find_in_range() is called after memblock_x86_fill()
   so adjust some calling later in setup.c::setup_arch()
   -- corruption_check and mptable_update

-v2: Move reserve_brk() early
    Before fill_memblock_area, to avoid overlap between brk and memblock_find_in_range()
    that could happen when we have more than 128 RAM entries in the E820 table, and
    memblock_x86_fill() could use memblock_find_in_range() to find a new place for
    memblock.memory.region array.
    and we don't need to use extend_brk() after fill_memblock_area(),
    So move reserve_brk() early before fill_memblock_area().
-v3: Move find_smp_config early
    To make sure memblock_find_in_range() does not find the wrong place if the BIOS
    doesn't put the mptable in the right place.
-v4: Treat RESERVED_KERN as RAM in memblock.memory; those ranges are already
    present in memblock.reserved.
    use __NOT_KEEP_MEMBLOCK to make sure memblock related code could be freed later.
-v5: The generic __memblock_find_in_range() searches from high to low, and on 32bit
    the active_region does include high pages, so the limit needs to be replaced
    with memblock.default_alloc_limit, aka get_max_mapped()
-v6: Use current_limit instead
-v7: check with MEMBLOCK_ERROR instead of -1ULL or -1L
-v8: Set memblock_can_resize early to handle EFI with more RAM entries
-v9: update after kmemleak changes in mainline

Suggested-by: default avatarDavid S. Miller <davem@davemloft.net>
Suggested-by: default avatarBenjamin Herrenschmidt <benh@kernel.crashing.org>
Suggested-by: default avatarThomas Gleixner <tglx@linutronix.de>
Signed-off-by: default avatarYinghai Lu <yinghai@kernel.org>
Signed-off-by: default avatarH. Peter Anvin <hpa@zytor.com>
parent 301ff3e8
Loading
Loading
Loading
Loading
+3 −6
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@ config X86
	select HAVE_PERF_EVENTS if (!M386 && !M486)
	select HAVE_IOREMAP_PROT
	select HAVE_KPROBES
	select HAVE_MEMBLOCK
	select ARCH_WANT_OPTIONAL_GPIOLIB
	select ARCH_WANT_FRAME_POINTERS
	select HAVE_DMA_ATTRS
@@ -195,9 +196,6 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
	def_bool y

config HAVE_EARLY_RES
	def_bool y

config HAVE_INTEL_TXT
	def_bool y
	depends on EXPERIMENTAL && DMAR && ACPI
@@ -590,14 +588,13 @@ config NO_BOOTMEM
	default y
	bool "Disable Bootmem code"
	---help---
	  Use early_res directly instead of bootmem before slab is ready.
	  Use memblock directly instead of bootmem before slab is ready.
		- allocator (buddy) [generic]
		- early allocator (bootmem) [generic]
		- very early allocator (reserve_early*()) [x86]
		- very early allocator (memblock) [some generic]
		- very very early allocator (early brk model) [x86]
	  So reduce one layer between early allocator to final allocator


config MEMTEST
	bool "Memtest"
	---help---
+8 −6
Original line number Diff line number Diff line
@@ -117,24 +117,26 @@ extern unsigned long end_user_pfn;
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
#include <linux/early_res.h>

extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern int e820_find_active_region(const struct e820entry *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn);
extern void e820_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long end_pfn);
extern u64 e820_hole_size(u64 start, u64 end);

extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);

void memblock_x86_fill(void);

extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);

void reserve_early(u64 start, u64 end, char *name);
void free_early(u64 start, u64 end);

/*
 * Returns true iff the specified range [s,e) is completely contained inside
 * the ISA region.
+9 −7
Original line number Diff line number Diff line
@@ -2,7 +2,8 @@
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <asm/e820.h>
#include <linux/memblock.h>

#include <asm/proto.h>

/*
@@ -18,10 +19,12 @@ static int __read_mostly memory_corruption_check = -1;
static unsigned __read_mostly corruption_check_size = 64*1024;
static unsigned __read_mostly corruption_check_period = 60; /* seconds */

static struct e820entry scan_areas[MAX_SCAN_AREAS];
static struct scan_area {
	u64 addr;
	u64 size;
} scan_areas[MAX_SCAN_AREAS];
static int num_scan_areas;


static __init int set_corruption_check(char *arg)
{
	char *end;
@@ -81,9 +84,9 @@ void __init setup_bios_corruption_check(void)

	while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
		u64 size;
		addr = find_e820_area_size(addr, &size, PAGE_SIZE);
		addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);

		if (!(addr + 1))
		if (addr == MEMBLOCK_ERROR)
			break;

		if (addr >= corruption_check_size)
@@ -92,7 +95,7 @@ void __init setup_bios_corruption_check(void)
		if ((addr + size) > corruption_check_size)
			size = corruption_check_size - addr;

		e820_update_range(addr, size, E820_RAM, E820_RESERVED);
		memblock_x86_reserve_range(addr, addr + size, "SCAN RAM");
		scan_areas[num_scan_areas].addr = addr;
		scan_areas[num_scan_areas].size = size;
		num_scan_areas++;
@@ -105,7 +108,6 @@ void __init setup_bios_corruption_check(void)

	printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
	       num_scan_areas);
	update_e820();
}


+53 −106
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
#include <linux/memblock.h>

#include <asm/e820.h>
#include <asm/proto.h>
@@ -742,69 +743,29 @@ core_initcall(e820_mark_nvs_memory);
 */
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr;
		u64 ei_start, ei_last;
	u64 mem = memblock_find_in_range(start, end, size, align);

		if (ei->type != E820_RAM)
			continue;

		ei_last = ei->addr + ei->size;
		ei_start = ei->addr;
		addr = find_early_area(ei_start, ei_last, start, end,
					 size, align);

		if (addr != -1ULL)
			return addr;
	}
	if (mem == MEMBLOCK_ERROR)
		return -1ULL;
}

u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
{
	return find_e820_area(start, end, size, align);
	return mem;
}

u64 __init get_max_mapped(void)
{
	u64 end = max_pfn_mapped;

	end <<= PAGE_SHIFT;

	return end;
}
/*
 * Find next free range after *start
 */
u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
{
	int i;
	u64 mem = memblock_x86_find_in_range_size(start, sizep, align);

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr;
		u64 ei_start, ei_last;

		if (ei->type != E820_RAM)
			continue;

		ei_last = ei->addr + ei->size;
		ei_start = ei->addr;
		addr = find_early_area_size(ei_start, ei_last, start,
					 sizep, align);

		if (addr != -1ULL)
			return addr;
	}
	if (mem == MEMBLOCK_ERROR)
		return -1ULL;

	return -1ULL;
	return mem;
}

/*
 * pre allocated 4k and reserved it in e820
 * pre allocated 4k and reserved it in memblock and e820_saved
 */
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
@@ -813,8 +774,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
	u64 start;

	for (start = startt; ; start += size) {
		start = find_e820_area_size(start, &size, align);
		if (!(start + 1))
		start = memblock_x86_find_in_range_size(start, &size, align);
		if (start == MEMBLOCK_ERROR)
			return 0;
		if (size >= sizet)
			break;
@@ -830,10 +791,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
	addr = round_down(start + size - sizet, align);
	if (addr < start)
		return 0;
	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
	memblock_x86_reserve_range(addr, addr + sizet, "new next");
	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
	printk(KERN_INFO "update e820 for early_reserve_e820\n");
	update_e820();
	printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
	update_e820_saved();

	return addr;
@@ -895,52 +855,12 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
{
	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 */
int __init e820_find_active_region(const struct e820entry *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
				    *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the e820 map and register active regions within a node */
void __init e820_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
	memblock_x86_register_active_regions(nid, start_pfn, last_pfn);
}

/*
@@ -950,18 +870,16 @@ void __init e820_register_active_regions(int nid, unsigned long start_pfn,
 */
u64 __init e820_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	int i;
	return memblock_x86_hole_size(start, end);
}

	for (i = 0; i < e820.nr_map; i++) {
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;
/*
 * Legacy wrapper kept for existing callers: reserve the range
 * [start, end) via memblock, tagging it with @name for debug dumps.
 */
void reserve_early(u64 start, u64 end, char *name)
{
	memblock_x86_reserve_range(start, end, name);
}
	return end - start - ((u64)ram << PAGE_SHIFT);
/*
 * Legacy wrapper kept for existing callers: free the range [start, end)
 * via memblock.  memblock_x86_free_range() supports partial frees, so no
 * special-casing of sub-range overlap is needed here.
 */
void free_early(u64 start, u64 end)
{
	memblock_x86_free_range(start, end);
}

static void early_panic(char *msg)
@@ -1210,3 +1128,32 @@ void __init setup_memory_map(void)
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}

/*
 * Populate memblock.memory from the e820 map: every E820_RAM and
 * E820_RESERVED_KERN entry is added, then the map is analyzed and
 * dumped for debugging.  Must run before any memblock_find_in_range()
 * caller that depends on the firmware memory map being filled in.
 */
void __init memblock_x86_fill(void)
{
	int i;
	u64 end;

	/*
	 * EFI may have more than 128 entries.
	 * We are safe to enable resizing, because memblock_x86_fill()
	 * runs rather late on x86, so a resized region array can be
	 * placed in already-registered memory.
	 */
	memblock_can_resize = 1;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		/* Skip entries whose end does not fit in resource_size_t. */
		end = ei->addr + ei->size;
		if (end != (resource_size_t)end)
			continue;

		/*
		 * RESERVED_KERN ranges are treated as RAM here; they are
		 * already present in memblock.reserved.
		 */
		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			continue;

		memblock_add(ei->addr, ei->size);
	}

	memblock_analyze();
	memblock_dump_all();
}
+2 −1
Original line number Diff line number Diff line
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/bios_ebda.h>
@@ -51,5 +52,5 @@ void __init reserve_ebda_region(void)
		lowmem = 0x9f000;

	/* reserve all memory between lowmem and the 1MB mark */
	reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
	memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
}
Loading