
Commit 02ac81a8 authored by Ingo Molnar

Merge branch 'x86/bootmem' into x86/mm



Merge reason: the topic is ready - consolidate it into the more generic x86/mm tree
              and prevent conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents 9a6d44b9 d2137d5a
arch/x86/include/asm/page_types.h  +8 −0

@@ -2,6 +2,7 @@
 #define _ASM_X86_PAGE_DEFS_H
 
 #include <linux/const.h>
+#include <linux/types.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
@@ -45,9 +46,16 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+static inline phys_addr_t get_max_mapped(void)
+{
+	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
+void init_memory_mapping_high(void);
+
 extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 				int acpi, int k8);
 extern void free_initmem(void);
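
The get_max_mapped() helper added above is nothing more than the pfn-to-physical-address shift. As a standalone illustration of the same arithmetic (a minimal sketch: PAGE_SHIFT matches the header, but the sample pfn value is made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KB pages, as in the header above */

/* Same conversion as the new inline: highest mapped pfn ->
 * exclusive physical-address limit of the mapped range. */
static uint64_t get_max_mapped(uint64_t max_pfn_mapped)
{
        return max_pfn_mapped << PAGE_SHIFT;
}

int main(void)
{
        uint64_t pfn = 0x100000;        /* hypothetical: 1M pages = 4 GB mapped */

        printf("current_limit = %#llx\n",
               (unsigned long long)get_max_mapped(pfn));
        return 0;
}

setup_arch() uses the result to cap early allocations, via memblock.current_limit = get_max_mapped() in the setup.c hunk further down.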

arch/x86/kernel/aperture_64.c  +16 −17

@@ -13,7 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
 static u32 __init allocate_aperture(void)
 {
 	u32 aper_size;
-	void *p;
+	unsigned long addr;
 
 	/* aper_size should <= 1G */
 	if (fallback_aper_order > 5)
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void)
 	 * so don't use 512M below as gart iommu, leave the space for kernel
 	 * code for safe
 	 */
-	p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
+	addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
+	if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
+		printk(KERN_ERR
+			"Cannot allocate aperture memory hole (%lx,%uK)\n",
+				addr, aper_size>>10);
+		return 0;
+	}
+	memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
 	/*
 	 * Kmemleak should not scan this block as it may not be mapped via the
 	 * kernel direct mapping.
 	 */
-	kmemleak_ignore(p);
-	if (!p || __pa(p)+aper_size > 0xffffffff) {
-		printk(KERN_ERR
-			"Cannot allocate aperture memory hole (%p,%uK)\n",
-				p, aper_size>>10);
-		if (p)
-			free_bootmem(__pa(p), aper_size);
-		return 0;
-	}
+	kmemleak_ignore(phys_to_virt(addr));
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
-			aper_size >> 10, __pa(p));
-	insert_aperture_resource((u32)__pa(p), aper_size);
-	register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
-				(u32)__pa(p+aper_size) >> PAGE_SHIFT);
+			aper_size >> 10, addr);
+	insert_aperture_resource((u32)addr, aper_size);
+	register_nosave_region(addr >> PAGE_SHIFT,
+			       (addr+aper_size) >> PAGE_SHIFT);
 
-	return (u32)__pa(p);
+	return (u32)addr;
 }


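The conversion above swaps bootmem's allocate-then-check for memblock's find-then-reserve idiom: probe for a free range first, reserve it only once the checks pass, so the error path no longer needs free_bootmem(). A toy userspace model of the find step, assuming a single free region and a bottom-up scan (the real memblock walks its own region list and its search policy may differ):

#include <stdint.h>
#include <stdio.h>

#define TOY_ERROR 0ULL                  /* stand-in for MEMBLOCK_ERROR */

struct range { uint64_t start, end; };  /* one free region, [start, end) */

/* Find 'size' bytes aligned to 'align' inside [lo, hi), the same
 * contract as memblock_find_in_range() against the real region list. */
static uint64_t toy_find_in_range(const struct range *free, int n,
                                  uint64_t lo, uint64_t hi,
                                  uint64_t size, uint64_t align)
{
        int i;

        for (i = 0; i < n; i++) {
                uint64_t s = free[i].start > lo ? free[i].start : lo;
                uint64_t e = free[i].end < hi ? free[i].end : hi;

                s = (s + align - 1) & ~(align - 1);     /* align up */
                if (s < e && size <= e - s)
                        return s;
        }
        return TOY_ERROR;
}

int main(void)
{
        struct range free[] = { { 16ULL << 20, 3ULL << 30 } };
        uint64_t aper_size = 64ULL << 20;       /* 64 MB hole */

        /* mirror the call above: search [0, 4 GB), 512 MB alignment */
        uint64_t addr = toy_find_in_range(free, 1, 0, 1ULL << 32,
                                          aper_size, 512ULL << 20);
        if (addr == TOY_ERROR)
                printf("no aperture hole found\n");
        else
                printf("aperture hole at %#llx\n", (unsigned long long)addr);
        return 0;
}

Note the same two-step shape in the kernel code: memblock_find_in_range() only locates the range; memblock_x86_reserve_range() claims it afterwards.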

arch/x86/kernel/setup.c  +24 −17

@@ -293,10 +293,32 @@ static void __init init_gbpages(void)
 	else
 		direct_gbpages = 0;
 }
+
+static void __init cleanup_highmap_brk_end(void)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+
+	mmu_cr4_features = read_cr4();
+
+	/*
+	 * _brk_end cannot change anymore, but it and _end may be
+	 * located on different 2M pages. cleanup_highmap(), however,
+	 * can only consider _end when it runs, so destroy any
+	 * mappings beyond _brk_end here.
+	 */
+	pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+	pmd = pmd_offset(pud, _brk_end - 1);
+	while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+		pmd_clear(pmd);
+}
 #else
 static inline void init_gbpages(void)
 {
 }
+static inline void cleanup_highmap_brk_end(void)
+{
+}
 #endif
 
 static void __init reserve_brk(void)
@@ -307,6 +329,8 @@ static void __init reserve_brk(void)
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
 	_brk_start = 0;
+
+	cleanup_highmap_brk_end();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -680,15 +704,6 @@ static int __init parse_reservelow(char *p)
 
 early_param("reservelow", parse_reservelow);
 
-static u64 __init get_max_mapped(void)
-{
-	u64 end = max_pfn_mapped;
-
-	end <<= PAGE_SHIFT;
-
-	return end;
-}
-
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -950,14 +965,6 @@ void __init setup_arch(char **cmdline_p)
 	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
 
-#ifdef CONFIG_X86_64
-	if (max_pfn > max_low_pfn) {
-		max_pfn_mapped = init_memory_mapping(1UL<<32,
-						     max_pfn<<PAGE_SHIFT);
-		/* can we preseve max_low_pfn ?*/
-		max_low_pfn = max_pfn;
-	}
-#endif
 	memblock.current_limit = get_max_mapped();
 
 	/*
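
The new cleanup_highmap_brk_end() clears whole 2 MB kernel-mapping entries between _brk_end and _end, since cleanup_highmap() only knows about _end. A standalone sketch of the loop-bound arithmetic (flat pmd indices instead of real page-table walks; names are adapted and both addresses are made-up examples):

#include <stdint.h>
#include <stdio.h>

#define PMD_SHIFT 21    /* one pmd entry maps 2 MB on x86-64 */

/* Flat index of the 2 MB slot covering a virtual address (the kernel
 * walks real pud/pmd tables; indices are enough to show the bounds). */
static uint64_t pmd_index(uint64_t addr)
{
        return addr >> PMD_SHIFT;
}

int main(void)
{
        /* made-up layout: brk ends mid-slot, _end two slots later */
        uint64_t brk_end = 0xffffffff81a03000ULL;
        uint64_t end     = 0xffffffff81e00000ULL;

        /* mirror the loop bounds above: clear every slot after the one
         * holding brk_end - 1, up to and including the one holding end - 1 */
        uint64_t first = pmd_index(brk_end - 1) + 1;
        uint64_t last  = pmd_index(end - 1);

        for (uint64_t i = first; i <= last; i++)
                printf("would clear 2 MB mapping at %#llx\n",
                       (unsigned long long)(i << PMD_SHIFT));
        return 0;
}

reserve_brk() now invokes this once the brk area is finalized, replacing the copy that previously lived inside init_memory_mapping() (removed in the arch/x86/mm/init.c hunks below).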

arch/x86/mm/amdtopology_64.c  +5 −3

@@ -278,12 +278,14 @@ int __init amd_scan_nodes(void)
 		apicid_base = boot_cpu_physical_apicid;
 	}
 
-	for_each_node_mask(i, node_possible_map) {
-		int j;
-
+	for_each_node_mask(i, node_possible_map)
 		memblock_x86_register_active_regions(i,
 				nodes[i].start >> PAGE_SHIFT,
 				nodes[i].end >> PAGE_SHIFT);
+	init_memory_mapping_high();
+	for_each_node_mask(i, node_possible_map) {
+		int j;
+
 		for (j = apicid_base; j < cores + apicid_base; j++)
 			apicid_to_node[(i << bits) + j] = i;
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
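
Splitting the loop in amd_scan_nodes() reorders boot: every node's memory is registered with memblock first, then init_memory_mapping_high() (declared in the page_types.h hunk above) builds the mappings, and only then does per-node setup run. The reordering suggests init_memory_mapping_high() needs the complete region list; a minimal sketch of that two-phase shape (all function bodies here are hypothetical stand-ins, not the kernel's):

#include <stdio.h>

#define NR_NODES 2

static void register_active_regions(int nid)
{
        printf("node %d: regions registered\n", nid);
}

static void init_memory_mapping_high(void)
{
        /* apparently needs every node registered -- hence the split loop */
        printf("high memory mapped across all registered nodes\n");
}

static void setup_node(int nid)
{
        printf("node %d: bootmem set up\n", nid);
}

int main(void)
{
        int i;

        for (i = 0; i < NR_NODES; i++)  /* phase 1: register everything */
                register_active_regions(i);
        init_memory_mapping_high();     /* sees the complete node map */
        for (i = 0; i < NR_NODES; i++)  /* phase 2: per-node setup */
                setup_node(i);
        return 0;
}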

arch/x86/mm/init.c  +4 −32

@@ -33,7 +33,7 @@ int direct_gbpages
 static void __init find_early_table_space(unsigned long end, int use_pse,
 					  int use_gbpages)
 {
-	unsigned long puds, pmds, ptes, tables, start;
+	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -65,20 +65,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 	/* for fixmap */
 	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
 
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-#ifdef CONFIG_X86_32
-	start = 0x7000;
-#else
-	start = 0x8000;
+	good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
-	base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
-					tables, PAGE_SIZE);
 
+	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
+
 	if (base == MEMBLOCK_ERROR)
 		panic("Cannot find space for the kernel page tables");
@@ -279,25 +270,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	load_cr3(swapper_pg_dir);
 #endif
 
-#ifdef CONFIG_X86_64
-	if (!after_bootmem && !start) {
-		pud_t *pud;
-		pmd_t *pmd;
-
-		mmu_cr4_features = read_cr4();
-
-		/*
-		 * _brk_end cannot change anymore, but it and _end may be
-		 * located on different 2M pages. cleanup_highmap(), however,
-		 * can only consider _end when it runs, so destroy any
-		 * mappings beyond _brk_end here.
-		 */
-		pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-		pmd = pmd_offset(pud, _brk_end - 1);
-		while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-			pmd_clear(pmd);
-	}
-#endif
 	__flush_tlb_all();
 
 	if (!after_bootmem && e820_table_end > e820_table_start)
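
In find_early_table_space(), the new start = 0 / good_end = end defaults widen the 64-bit search window to the whole range about to be mapped, so the early page tables can be placed high within it instead of low in already-mapped memory; 32-bit keeps the old ceiling via good_end = max_pfn_mapped << PAGE_SHIFT. For context, a simplified userspace model of the sizing arithmetic the function feeds into memblock_find_in_range() (4 KB pages only; the real function also handles the PSE and gbpages paths):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PMD_SIZE  (1ULL << 21)  /* 2 MB covered per pmd entry */
#define PUD_SIZE  (1ULL << 30)  /* 1 GB covered per pud entry */
#define PTRS_PER_TABLE 512ULL   /* 8-byte entries in one 4 KB table */

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

/* Worst-case bytes of page tables needed to map [0, end) with 4 KB pages. */
static uint64_t table_space(uint64_t end)
{
        uint64_t puds = div_round_up(end, PUD_SIZE);
        uint64_t pmds = div_round_up(end, PMD_SIZE);
        uint64_t ptes = div_round_up(end, PAGE_SIZE);

        /* each level: one 4 KB table page per 512 entries, rounded up */
        return (div_round_up(puds, PTRS_PER_TABLE) +
                div_round_up(pmds, PTRS_PER_TABLE) +
                div_round_up(ptes, PTRS_PER_TABLE)) * PAGE_SIZE;
}

int main(void)
{
        uint64_t end = 4ULL << 30;      /* map the first 4 GB */

        printf("tables need %llu KB\n",
               (unsigned long long)(table_space(end) >> 10));
        return 0;
}

With that figure in hand, the kernel asks memblock for a single slot of that size inside [start, good_end), and panics if none exists.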