
Commit f98eeb4e authored by Kumar Gala

[POWERPC] Fix handling of memreserve if the range lands in highmem



There were several issues if a memreserve range existed and happened
to be in highmem:

* The bootmem allocator is only aware of lowmem, so calling
  reserve_bootmem with a highmem address would trigger a BUG_ON
  (see the clamping sketch after this list)
* All highmem pages were handed to the buddy allocator, even those
  covered by a memreserve range
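
A condensed sketch of the clamping applied in do_init_bootmem() (illustrative
only; every identifier comes from the hunk below, with local base/end added
here purely for readability):

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long base = lmb.reserved.region[i].base;
		unsigned long end = base + lmb_size_bytes(&lmb.reserved, i) - 1;

		if (end < total_lowmem)
			/* entirely in lowmem: reserve the whole range */
			reserve_bootmem(base, lmb_size_bytes(&lmb.reserved, i));
		else if (base < total_lowmem)
			/* straddles the boundary: clamp to the lowmem part */
			reserve_bootmem(base, total_lowmem - base);
		/* ranges entirely in highmem are never passed to bootmem */
	}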

Added an lmb_is_reserved() API that we now use to determine whether a
highmem page should remain PageReserved or be handed to the buddy
allocator.

Also, we incorrectly reported the number of reserved pages, since all
highmem pages are initially marked reserved and we clear the
PageReserved flag as we "free" up the highmem pages.
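
Taken together, the highmem loop in mem_init() ends up looking roughly like
the commented paraphrase below (a sketch of the hunk further down, not new
code): pages inside a reserved range are skipped and keep PageReserved, and
every page that is actually freed also corrects the reserved-page count.

	for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* pages inside a memreserve range stay PageReserved */
		if (lmb_is_reserved(pfn << PAGE_SHIFT))
			continue;
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);	/* hand the page to the buddy allocator */
		totalhigh_pages++;
		reservedpages--;	/* no longer counted as reserved */
	}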

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
parent 52920df4
+13 −0
@@ -342,3 +342,16 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
		}
	}
}

int __init lmb_is_reserved(unsigned long addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long upper = lmb.reserved.region[i].base +
				      lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}
+19 −2
@@ -213,15 +213,30 @@ void __init do_init_bootmem(void)
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < total_lowmem)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i));
		else if (lmb.reserved.region[i].base < total_lowmem) {
			unsigned long adjusted_size = total_lowmem -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);
#endif

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

@@ -334,11 +349,13 @@ void __init mem_init(void)
		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
+1 −0
@@ -51,6 +51,7 @@ extern unsigned long __init __lmb_alloc_base(unsigned long size,
extern unsigned long __init lmb_phys_mem_size(void);
extern unsigned long __init lmb_end_of_DRAM(void);
extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
extern int __init lmb_is_reserved(unsigned long addr);

extern void lmb_dump_all(void);