Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92a0f81d authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

x86/cpu_entry_area: Move it out of the fixmap



Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big
and 0-day already hit a case where the fixmap PTEs were cleared by
cleanup_highmap().

Aside of that the fixmap API is a pain as it's all backwards.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ed1bbc40
Loading
Loading
Loading
Loading
+2 −0
@@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
@@ -35,6 +36,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+17 −1
@@ -43,10 +43,26 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_PAGES	(CPU_ENTRY_AREA_SIZE / PAGE_SIZE)
+#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 
 extern void setup_cpu_entry_areas(void);
+extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
+#define	CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
+#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
+
+#define CPU_ENTRY_AREA_MAP_SIZE			\
+	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+
+extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+static inline struct entry_stack *cpu_entry_stack(int cpu)
+{
+	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
+}
 
 #endif
+1 −0
@@ -7,6 +7,7 @@
 #include <asm/mmu.h>
 #include <asm/fixmap.h>
 #include <asm/irq_vectors.h>
+#include <asm/cpu_entry_area.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
+1 −31
@@ -25,7 +25,6 @@
 #else
 #include <uapi/asm/vsyscall.h>
 #endif
-#include <asm/cpu_entry_area.h>
 
 /*
  * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
@@ -84,7 +83,6 @@ enum fixed_addresses {
 	FIX_IO_APIC_BASE_0,
 	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-	FIX_RO_IDT,	/* Virtual mapping for read-only IDT */
 #ifdef CONFIG_X86_32
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -100,9 +98,6 @@ enum fixed_addresses {
 #ifdef	CONFIG_X86_INTEL_MID
 	FIX_LNW_VRTC,
 #endif
-	/* Fixmap entries to remap the GDTs, one per processor. */
-	FIX_CPU_ENTRY_AREA_TOP,
-	FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
 	/* Used for GHES mapping from assorted contexts */
@@ -191,30 +186,5 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
 void __early_set_fixmap(enum fixed_addresses idx,
 			phys_addr_t phys, pgprot_t flags);
 
-static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
-{
-	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
-
-	return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page;
-}
-
-#define __get_cpu_entry_area_offset_index(cpu, offset) ({		\
-	BUILD_BUG_ON(offset % PAGE_SIZE != 0);				\
-	__get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);	\
-	})
-
-#define get_cpu_entry_area_index(cpu, field)				\
-	__get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
-
-static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
-{
-	return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
-}
-
-static inline struct entry_stack *cpu_entry_stack(int cpu)
-{
-	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
-}
-
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
+12 −3
@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
-		    & PMD_MASK)
+/*
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 40)
+
+#define CPU_ENTRY_AREA_BASE				\
+	((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
+
+#define PKMAP_BASE		\
+	((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
+# define VMALLOC_END	(CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
 #endif
 
 #define MODULES_VADDR	VMALLOC_START
Loading