
Commit 73d5a867 authored by Linus Torvalds


Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  xen: update mask_rw_pte after kernel page tables init changes
  xen: set max_pfn_mapped to the last pfn mapped
  x86: Cleanup highmap after brk is concluded

Fix up trivial conflict (added header file includes) in
arch/x86/mm/init_64.c
parents e77277df d8aa5ec3
arch/x86/kernel/head64.c  +0 −3
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
	/* Make NULL pointers segfault */
	zap_identity_mappings();

-	/* Cleanup the over mapped high alias */
-	cleanup_highmap();
-
	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
arch/x86/kernel/setup.c  +3 −22
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
	else
		direct_gbpages = 0;
}
-
-static void __init cleanup_highmap_brk_end(void)
-{
-	pud_t *pud;
-	pmd_t *pmd;
-
-	mmu_cr4_features = read_cr4();
-
-	/*
-	 * _brk_end cannot change anymore, but it and _end may be
-	 * located on different 2M pages. cleanup_highmap(), however,
-	 * can only consider _end when it runs, so destroy any
-	 * mappings beyond _brk_end here.
-	 */
-	pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-	pmd = pmd_offset(pud, _brk_end - 1);
-	while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-		pmd_clear(pmd);
-}
#else
static inline void init_gbpages(void)
{
}
-static inline void cleanup_highmap_brk_end(void)
+static void __init cleanup_highmap(void)
{
}
#endif
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
-
-	cleanup_highmap_brk_end();
}

#ifdef CONFIG_BLK_DEV_INITRD
@@ -950,6 +929,8 @@ void __init setup_arch(char **cmdline_p)
	 */
	reserve_brk();

+	cleanup_highmap();
+
	memblock.current_limit = get_max_mapped();
	memblock_x86_fill();
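
Taken together, the setup.c hunks are an ordering change: cleanup_highmap() is now called from setup_arch(), after reserve_brk() has concluded brk allocations and fixed _brk_end, so the separate cleanup_highmap_brk_end() pass is no longer needed. A minimal user-space sketch of that ordering, with stub functions and a made-up _brk_end value standing in for the kernel symbols:

#include <stdio.h>

/* Stand-ins for the kernel symbols; values and bodies are made up. */
static unsigned long _brk_end;

static void reserve_brk(void)
{
	/* brk allocations are concluded here, so _brk_end is final. */
	_brk_end = 0xffffffff81c00000UL;
}

static void cleanup_highmap(void)
{
	/* With _brk_end final, a single highmap cleanup pass suffices. */
	printf("cleanup highmap up to _brk_end=%#lx\n", _brk_end);
}

int main(void)
{
	reserve_brk();
	cleanup_highmap();	/* the new call site in setup_arch() above */
	return 0;
}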

arch/x86/mm/init_64.c  +6 −5
@@ -52,6 +52,7 @@
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
+#include <asm/setup.h>

static int __init parse_direct_gbpages_off(char *arg)
{
@@ -294,18 +295,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
- * We limit the mappings to the region from _text to _end.  _end is
- * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * We limit the mappings to the region from _text to _brk_end.  _brk_end
+ * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
-	unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
+	unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
-	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

-	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
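
To make the new bounds concrete: the loop now stops at vaddr_end, which is derived from max_pfn_mapped, and clears every 2MB slot that lies below _text or above the 2MB-rounded _brk_end. The following is a hypothetical user-space model of just that arithmetic; KERNEL_MAP, text, brk_end and max_pfn_mapped are made-up stand-ins for the kernel symbols, and the pmd_none() check is omitted:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)			/* 2MB pmd mappings */
#define KERNEL_MAP	0xffffffff80000000UL		/* stand-in for __START_KERNEL_map */

int main(void)
{
	uint64_t text = KERNEL_MAP + 0x1000000;		/* made-up _text */
	uint64_t brk_end = text + 0xe42000;		/* made-up _brk_end */
	uint64_t max_pfn_mapped = 0x8000;		/* made up: 128MB mapped */

	uint64_t vaddr_end = KERNEL_MAP + (max_pfn_mapped << PAGE_SHIFT);
	uint64_t end = ((brk_end + PMD_SIZE - 1) & ~(PMD_SIZE - 1)) - 1;

	/* Same walk as the new loop: stop at vaddr_end, clear anything
	 * below _text or above the rounded-up _brk_end. */
	for (uint64_t vaddr = KERNEL_MAP; vaddr + PMD_SIZE - 1 < vaddr_end;
	     vaddr += PMD_SIZE) {
		if (vaddr < text || vaddr > end)
			printf("would clear pmd at %#llx\n",
			       (unsigned long long)vaddr);
	}
	return 0;
}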
arch/x86/xen/mmu.c  +12 −9
@@ -1487,10 +1487,12 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
	/*
	 * If the new pfn is within the range of the newly allocated
	 * kernel pagetable, and it isn't being mapped into an
- * early_ioremap fixmap slot, make sure it is RO.
+ * early_ioremap fixmap slot as a freshly allocated page, make sure
+ * it is RO.
	 */
-	if (!is_early_ioremap_ptep(ptep) &&
-	    pfn >= pgt_buf_start && pfn < pgt_buf_end)
+	if (((!is_early_ioremap_ptep(ptep) &&
+			pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
		pte = pte_wrprotect(pte);

	return pte;
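
Restated outside the pte-manipulation context, the new RO rule is: pages inside the pagetable allocator range stay read-only when mapped normally, and an early_ioremap fixmap mapping is now also forced read-only unless it refers to the page that was just allocated. A hypothetical standalone predicate for illustration, where is_fixmap_slot, pfn, pgt_buf_start and pgt_buf_end are plain parameters rather than the kernel/Xen helpers:

#include <stdbool.h>
#include <stdio.h>

/* True when the pte should be write-protected under the new rule. */
static bool should_wrprotect(bool is_fixmap_slot, unsigned long pfn,
			     unsigned long pgt_buf_start,
			     unsigned long pgt_buf_end)
{
	return (!is_fixmap_slot && pfn >= pgt_buf_start && pfn < pgt_buf_end) ||
	       (is_fixmap_slot && pfn != pgt_buf_end - 1);
}

int main(void)
{
	/* Made-up allocator range [0x100, 0x110). */
	printf("%d\n", should_wrprotect(true, 0x10f, 0x100, 0x110));	/* 0: freshly allocated page stays writable */
	printf("%d\n", should_wrprotect(true, 0x105, 0x100, 0x110));	/* 1: other fixmap mappings become RO */
	return 0;
}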
@@ -1700,9 +1702,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

-			if (pfn > max_pfn_mapped)
-				max_pfn_mapped = pfn;
-
			if (!pte_none(pte_page[pteidx]))
				continue;

@@ -1760,6 +1759,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
	pud_t *l3;
	pmd_t *l2;

+	/* max_pfn_mapped is the last pfn mapped in the initial memory
+	 * mappings. Considering that on Xen after the kernel mappings we
+	 * have the mappings of some pages that don't exist in pfn space, we
+	 * set max_pfn_mapped to the last real pfn mapped. */
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

@@ -1864,9 +1869,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
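
As a quick sanity check on the value used in both xen_setup_kernel_pagetable() paths above: PFN_DOWN() is just a right shift by PAGE_SHIFT, so max_pfn_mapped now becomes the page frame number of wherever the domain builder placed mfn_list, rather than the old pt_base + nr_pt_frames + 512k estimate. A tiny example with a made-up physical address standing in for __pa(xen_start_info->mfn_list):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long mfn_list_phys = 0x01a3f000UL;	/* made-up __pa(mfn_list) */

	/* 0x01a3f000 >> 12 == 0x1a3f: the last real pfn covered by the
	 * initial mappings. */
	printf("max_pfn_mapped = %#lx\n", PFN_DOWN(mfn_list_phys));
	return 0;
}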