arch/arm64/Kconfig +17 −0

@@ -775,6 +775,23 @@ config FORCE_MAX_ZONEORDER
 	  However for 4K, we choose a higher default value, 11 as opposed
 	  to 10, giving us 4M allocations matching the default size used
 	  by generic code.
 
+config HARDEN_BRANCH_PREDICTOR
+	bool "Harden the branch predictor against aliasing attacks" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors rely on
+	  being able to manipulate the branch predictor for a victim context by
+	  executing aliasing branches in the attacker context. Such attacks
+	  can be partially mitigated against by clearing internal branch
+	  predictor state and limiting the prediction logic in some situations.
+
+	  This config option will take CPU-specific actions to harden the
+	  branch predictor against aliasing attacks and may rely on specific
+	  instruction sequences or control bits being set by the system
+	  firmware.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
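The help text is deliberately vague about the mechanism, because the invalidation sequence is CPU-specific. As a rough illustration of the kind of action this option enables (not part of this diff), later patches in this family install a per-CPU callback that trips into firmware to flush predictor state; the psci_ops.get_version hook is an assumption here, exposed by a separate patch in the mainline series:

#include <linux/psci.h>

/* Illustrative only: a CPU-specific hardening callback of the kind
 * HARDEN_BRANCH_PREDICTOR enables. The SMC into firmware is what
 * invalidates the branch predictor on affected parts; the call itself
 * is otherwise a harmless version query. */
static void call_psci_bp_hardening(void)
{
	if (psci_ops.get_version)
		psci_ops.get_version();
}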
arch/arm64/include/asm/assembler.h +0 −13

@@ -399,17 +399,4 @@ alternative_endif
 	mrs	\rd, sp_el0
 	.endm
 
-/*
- * Errata workaround post TTBR0_EL1 update.
- */
-	.macro	post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-	ic	iallu
-	dsb	nsh
-	isb
-alternative_else_nop_endif
-#endif
-	.endm
-
 #endif	/* __ASM_ASSEMBLER_H */
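Removing the macro from the header only makes sense if the workaround is applied elsewhere; mainline moved it out of assembler macros and onto the TTBR-switch path in C, where the branch predictor hardening hook can live next to it. A sketch of that C form, assuming the ALTERNATIVE() asm macro from <asm/alternative.h> — this is roughly how arch/arm64/mm/context.c ended up looking upstream, not something shown in this diff:

#include <linux/linkage.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* Same three-instruction I-cache workaround for Cavium erratum 27456,
 * now patched in at runtime by the alternatives framework instead of
 * being assembled conditionally into every caller. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}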
arch/arm64/include/asm/cpufeature.h +4 −1

@@ -36,7 +36,8 @@
 #define ARM64_WORKAROUND_CAVIUM_27456		11
 #define ARM64_HAS_VIRT_HOST_EXTN		12
+#define ARM64_HARDEN_BRANCH_PREDICTOR		13
 
-#define ARM64_NCAPS				13
+#define ARM64_NCAPS				14
 
 #ifndef __ASSEMBLY__

@@ -169,7 +170,9 @@
 void __init setup_cpu_features(void);
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
 void check_local_cpu_errata(void);
+void __init enable_errata_workarounds(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
 void verify_local_cpu_capabilities(void);
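The two new declarations split capability handling into a detect phase (run as each CPU boots) and an enable phase (run once all CPUs are up, so that ->enable() hooks fire everywhere). A minimal sketch of how they are expected to pair up, assuming an arm64_errata table in cpu_errata.c as in mainline:

/* cpu_errata.c (sketch, assumed shape): check_local_cpu_errata()
 * matches each erratum entry against the booting CPU;
 * enable_errata_workarounds() later runs the ->enable() hook of every
 * matched entry. */
void check_local_cpu_errata(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}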
arch/arm64/include/asm/cputype.h +10 −0

@@ -79,10 +79,14 @@
 #define ARM_CPU_PART_AEM_V8		0xD0F
 #define ARM_CPU_PART_FOUNDATION		0xD00
 #define ARM_CPU_PART_CORTEX_A57		0xD07
 #define ARM_CPU_PART_CORTEX_A72		0xD08
 #define ARM_CPU_PART_CORTEX_A53		0xD03
+#define ARM_CPU_PART_CORTEX_A73		0xD09
+#define ARM_CPU_PART_CORTEX_A75		0xD0A
+#define ARM_CPU_PART_KRYO2XX_GOLD	0x800
+#define ARM_CPU_PART_KRYO2XX_SILVER	0x801
 
 #define QCOM_CPU_PART_KRYO		0x200
 
 #define APM_CPU_PART_POTENZA		0x000

@@ -90,9 +94,15 @@
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+#define MIDR_KRYO2XX_GOLD \
+	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_GOLD)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 
 #ifndef __ASSEMBLY__
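These MIDR model values are what the hardening code keys off when deciding which cores need mitigation. A hypothetical helper showing the usual matching idiom; MIDR_CPU_MODEL_MASK is assumed to be defined next to MIDR_CPU_MODEL, as it is in mainline, and the exact set of affected cores is decided by later patches, not here:

#include <asm/cputype.h>

/* Hypothetical: true if this CPU's MIDR names one of the cores that the
 * rest of the series wires up for branch predictor hardening. */
static bool cpu_midr_needs_bp_hardening(void)
{
	u32 model = read_cpuid_id() & MIDR_CPU_MODEL_MASK;

	return model == MIDR_CORTEX_A73 || model == MIDR_CORTEX_A75 ||
	       model == MIDR_KRYO2XX_GOLD;
}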
arch/arm64/include/asm/mmu.h +42 −0

@@ -16,6 +16,11 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <linux/smp.h>
+
+#include <asm/cpufeature.h>
+#include <asm/percpu.h>
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;

@@ -28,6 +33,43 @@
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+	int			hyp_vectors_slot;
+	bp_hardening_cb_t	fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+	struct bp_hardening_data *d;
+
+	if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+		return;
+
+	d = arm64_get_bp_hardening_data();
+	if (d->fn)
+		d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void)	{ }
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 extern void paging_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
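arm64_apply_bp_hardening() is the hook the rest of the series calls on paths where user space may have primed the predictor: the capability check keeps it a no-op on unaffected systems, and the per-CPU fn pointer lets each core install its own mitigation. For context (not in this diff), mainline's call site in arch/arm64/mm/fault.c looks roughly like this — a user-mode instruction abort on a kernel address can only be a predictor-training attempt, so the callback fires before the abort is handled:

/* Sketch of the expected call site, assuming the usual fault.c helpers. */
asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
						   unsigned int esr,
						   struct pt_regs *regs)
{
	/* User branches to kernel addresses never fetch legitimately;
	 * treat them as attempts to train the branch predictor. */
	if (addr > TASK_SIZE)
		arm64_apply_bp_hardening();

	local_irq_enable();
	do_mem_abort(addr, esr, regs);
}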