Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2573b66a authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "FROMLIST: arm64: kpti: Fix the interaction between ASID switching and software PAN"

parents 792606e8 d19bcc64
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -801,6 +801,18 @@ config FORCE_MAX_ZONEORDER
	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
	default "11"

# Kernel page-table isolation (historically "KAISER"): the kernel mappings
# are removed from the user page tables and restored on exception entry via
# a trampoline, so speculation from userspace cannot probe kernel data.
config UNMAP_KERNEL_AT_EL0
	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
	default y
	help
	  Speculation attacks against some high-performance processors can
	  be used to bypass MMU permission checks and leak kernel data to
	  userspace. This can be defended against by unmapping the kernel
	  when running in userspace, mapping it back in on exception entry
	  via a trampoline page in the vector table.

	  If unsure, say Y.

menuconfig ARMV8_DEPRECATED
	bool "Emulate deprecated/obsolete ARMv8 instructions"
	depends on COMPAT
+2 −1
Original line number Diff line number Diff line
@@ -29,8 +29,9 @@
#define ARM64_HAS_PAN				4
#define ARM64_HAS_UAO				5
#define ARM64_ALT_PAN_NOT_UAO			6
#define ARM64_UNMAP_KERNEL_AT_EL0		23

#define ARM64_NCAPS				7
#define ARM64_NCAPS				24

#ifndef __ASSEMBLY__

+5 −0
Original line number Diff line number Diff line
@@ -33,6 +33,11 @@
enum fixed_addresses {
	FIX_HOLE,
	FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	FIX_ENTRY_TRAMP_DATA,
	FIX_ENTRY_TRAMP_TEXT,
#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	__end_of_permanent_fixed_addresses,

	/*
+14 −0
Original line number Diff line number Diff line
@@ -16,6 +16,13 @@
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#define USER_ASID_FLAG	(UL(1) << 48)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

#ifndef __ASSEMBLY__

#include <asm/cpufeature.h>

typedef struct {
	atomic64_t	id;
	void		*vdso;
@@ -28,6 +35,12 @@ typedef struct {
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)

static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

extern void paging_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
@@ -36,4 +49,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot);

#endif	/* !__ASSEMBLY__ */
#endif
+10 −1
Original line number Diff line number Diff line
@@ -63,6 +63,13 @@ static inline void cpu_set_reserved_ttbr0(void)
	: "r" (ttbr));
}

/*
 * Install the page tables of @mm (rooted at @pgd) into TTBR0.
 *
 * The kernel's own tables (swapper_pg_dir) must never be installed via
 * this path, hence the BUG_ON. TTBR0 is first parked on the reserved
 * (empty) tables before the real switch — presumably so no translations
 * through a stale pgd/ASID pairing can be speculated while the registers
 * are being updated; confirm against cpu_do_switch_mm/ASID allocator.
 */
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd),mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
@@ -123,7 +130,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	task_thread_info(tsk)->ttbr0 = ttbr;
	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -177,4 +184,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, current)

void post_ttbr_update_workaround(void);

#endif
Loading