Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 945fd17a authored by Thomas Gleixner's avatar Thomas Gleixner
Browse files

x86/cpu_entry_area: Sync cpu_entry_area to initial_page_table



The separation of the cpu_entry_area from the fixmap missed the fact that
on 32bit non-PAE kernels the cpu_entry_area mapping might not be covered in
initial_page_table by the previous synchronizations.

This results in suspend/resume failures because 32bit utilizes the initial page
table for resume. The absence of the cpu_entry_area mapping results in a
triple fault, a.k.a. an instant reboot.

With PAE enabled this works by chance because the PGD entry which covers
the fixmap and other parts incidentally provides the cpu_entry_area
mapping as well.

Synchronize the initial page table after setting up the cpu entry
area. Instead of adding yet another copy of the same code, move it to a
function and invoke it from the various places.

It needs to be investigated if the existing calls in setup_arch() and
setup_per_cpu_areas() can be replaced by the later invocation from
setup_cpu_entry_areas(), but that's beyond the scope of this fix.

Fixes: 92a0f81d ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: default avatarWoody Suwalski <terraluna977@gmail.com>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
Tested-by: default avatarWoody Suwalski <terraluna977@gmail.com>
Cc: William Grant <william.grant@canonical.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1802282137290.1392@nanos.tec.linutronix.de
parent 1402fd8e
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
static inline void pgtable_cache_init(void) { }
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
void paging_init(void);
void sync_initial_page_table(void);


/*
/*
 * Define this if things work differently on an i386 and an i486:
 * Define this if things work differently on an i386 and an i486:
+1 −0
Original line number Original line Diff line number Diff line
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt
#define swapper_pg_dir init_top_pgt


extern void paging_init(void);
extern void paging_init(void);
static inline void sync_initial_page_table(void) { }


#define pte_ERROR(e)					\
#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
+5 −12
Original line number Original line Diff line number Diff line
@@ -1204,20 +1204,13 @@ void __init setup_arch(char **cmdline_p)


	kasan_init();
	kasan_init();


#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	/*
	 * sync back low identity map too.  It is used for example
	 * Sync back kernel address range.
	 * in the 32-bit EFI stub.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	 */
	clone_pgd_range(initial_page_table,
	sync_initial_page_table();
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif


	tboot_probe();
	tboot_probe();


+4 −13
Original line number Original line Diff line number Diff line
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
	/* Setup cpu initialized, callin, callout masks */
	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
	setup_cpu_local_masks();


#ifdef CONFIG_X86_32
	/*
	/*
	 * Sync back kernel address range again.  We already did this in
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 * percpu data.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	 */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
	sync_initial_page_table();
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * sync back low identity map too.  It is used for example
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif
}
}
+6 −0
Original line number Original line Diff line number Diff line
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)


	for_each_possible_cpu(cpu)
	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}
}
Loading