Loading arch/arm/include/asm/kvm_host.h +7 −0 Original line number Diff line number Diff line Loading @@ -26,6 +26,7 @@ #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> #include <asm/fpstate.h> #include <asm/spectre.h> #include <kvm/arm_arch_timer.h> #define __KVM_HAVE_ARCH_INTC_INITIALIZED Loading Loading @@ -366,4 +367,10 @@ void kvm_arch_free_vm(struct kvm *kvm); #define kvm_arm_vcpu_loaded(vcpu) (false) static inline int kvm_arm_get_spectre_bhb_state(void) { /* 32bit guests don't need firmware for this */ return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */ } #endif /* __ARM_KVM_HOST_H__ */ arch/arm64/Kconfig +9 −0 Original line number Diff line number Diff line Loading @@ -1152,6 +1152,15 @@ config COMPAT_VDSO You must have a 32-bit build of glibc 2.22 or later for programs to seamlessly take advantage of this. config MITIGATE_SPECTRE_BRANCH_HISTORY bool "Mitigate Spectre style attacks against branch history" if EXPERT default y help Speculation attacks against some high-performance processors can make use of branch history to influence future speculation. When taking an exception from user-space, a sequence of branches or a firmware call overwrites the branch history. menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT Loading arch/arm64/include/asm/assembler.h +27 −0 Original line number Diff line number Diff line Loading @@ -732,4 +732,31 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .Lyield_out_\@ : .endm .macro __mitigate_spectre_bhb_loop tmp #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY alternative_cb spectre_bhb_patch_loop_iter mov \tmp, #32 // Patched to correct the immediate alternative_cb_end .Lspectre_bhb_loop\@: b . + 4 subs \tmp, \tmp, #1 b.ne .Lspectre_bhb_loop\@ dsb nsh isb #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ .endm /* Save/restores x0-x3 to the stack */ .macro __mitigate_spectre_bhb_fw #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY stp x0, x1, [sp, #-16]! 
stp x2, x3, [sp, #-16]! mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 alternative_cb arm64_update_smccc_conduit nop // Patched to SMC/HVC #0 alternative_cb_end ldp x2, x3, [sp], #16 ldp x0, x1, [sp], #16 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ .endm #endif /* __ASM_ASSEMBLER_H */ arch/arm64/include/asm/cpucaps.h +1 −0 Original line number Diff line number Diff line Loading @@ -56,6 +56,7 @@ #define ARM64_WORKAROUND_1188873 35 #define ARM64_WORKAROUND_1542418 36 #define ARM64_WORKAROUND_1542419 37 #define ARM64_SPECTRE_BHB 38 /* kabi: reserve 38 - 62 for future cpu capabilities */ #define ARM64_NCAPS 62 Loading arch/arm64/include/asm/cpufeature.h +27 −0 Original line number Diff line number Diff line Loading @@ -482,6 +482,21 @@ static inline bool cpu_supports_mixed_endian_el0(void) return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); } static inline bool supports_csv2p3(int scope) { u64 pfr0; u8 csv2_val; if (scope == SCOPE_LOCAL_CPU) pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); else pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); csv2_val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT); return csv2_val == 3; } static inline bool system_supports_32bit_el0(void) { return cpus_have_const_cap(ARM64_HAS_32BIT_EL0); Loading Loading @@ -532,6 +547,18 @@ static inline int arm64_get_ssbd_state(void) void arm64_set_ssbd_mitigation(bool state); /* Watch out, ordering is important here. */ enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE, }; enum mitigation_state arm64_get_spectre_bhb_state(void); bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); u8 spectre_bhb_loop_affected(int scope); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); #endif /* __ASSEMBLY__ */ #endif Loading
/*
 * arch/arm/include/asm/kvm_host.h — Spectre-BHB additions (32-bit ARM KVM).
 * NOTE(review): reconstructed from a garbled diff view; confirm layout
 * against the original patch before applying.
 */
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <asm/spectre.h>		/* added: SPECTRE_* mitigation state values */
#include <kvm/arm_arch_timer.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define kvm_arm_vcpu_loaded(vcpu)	(false)

/*
 * Report the Spectre-BHB firmware-mitigation state to advertise to guests.
 * On 32-bit hosts no firmware interface is needed for this, so always
 * report SPECTRE_VULNERABLE, whose value doubles as the SMCCC
 * "not supported" answer seen by the guest.
 */
static inline int kvm_arm_get_spectre_bhb_state(void)
{
	/* 32bit guests don't need firmware for this */
	return SPECTRE_VULNERABLE;	/* aka SMCCC_RET_NOT_SUPPORTED */
}

#endif	/* __ARM_KVM_HOST_H__ */
# arch/arm64/Kconfig — Spectre-BHB mitigation switch.
# Default-on; only hidden behind EXPERT so ordinary configs always get it.
config MITIGATE_SPECTRE_BRANCH_HISTORY
	bool "Mitigate Spectre style attacks against branch history" if EXPERT
	default y
	help
	  Speculation attacks against some high-performance processors can
	  make use of branch history to influence future speculation.
	  When taking an exception from user-space, a sequence of branches
	  or a firmware call overwrites the branch history.
	/*
	 * arch/arm64/include/asm/assembler.h — Spectre-BHB helper macros.
	 *
	 * Overwrite the branch-history buffer by executing a loop of taken
	 * branches.  The iteration count is CPU-specific: the "mov \tmp, #32"
	 * immediate is rewritten at boot by the spectre_bhb_patch_loop_iter
	 * alternative callback.  Clobbers \tmp.
	 */
	.macro __mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_loop_iter
	mov	\tmp, #32			// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4				// taken branch to pollute the BHB
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	dsb	nsh				/* order the loop against later speculation */
	isb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/*
	 * Invoke the ARM_SMCCC_ARCH_WORKAROUND_3 firmware call to clear the
	 * branch history.  The conduit instruction (SMC or HVC #0) is patched
	 * in at boot by arm64_update_smccc_conduit; until patched it is a NOP.
	 * Saves/restores x0-x3 on the stack around the call.
	 */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

#endif	/* __ASM_ASSEMBLER_H */
/*
 * arch/arm64/include/asm/cpucaps.h — allocate a cpucap for Spectre-BHB.
 * The new capability takes slot 38 out of the kABI-reserved range;
 * ARM64_NCAPS stays at 62 so the capability bitmap size is unchanged.
 */
#define ARM64_WORKAROUND_1188873		35
#define ARM64_WORKAROUND_1542418		36
#define ARM64_WORKAROUND_1542419		37
#define ARM64_SPECTRE_BHB			38

/* kabi: reserve 39 - 62 for future cpu capabilities */
#define ARM64_NCAPS				62
/*
 * arch/arm64/include/asm/cpufeature.h — Spectre-BHB additions.
 */

/*
 * Return true if ID_AA64PFR0_EL1.CSV2 == 3 (CSV2_3), i.e. the CPU reports
 * itself immune to branch-history-based Spectre attacks.
 *
 * @scope: SCOPE_LOCAL_CPU reads this CPU's register directly; any other
 *         scope uses the system-wide sanitised view of the register.
 */
static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_CSV2_SHIFT);
	return csv2_val == 3;
}

/*
 * Spectre mitigation state, reported per vulnerability.
 * Watch out, ordering is important here: the values are compared
 * elsewhere, so MITIGATED must sort below VULNERABLE.
 */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

/* Current system-wide Spectre-BHB mitigation state. */
enum mitigation_state arm64_get_spectre_bhb_state(void);

/* cpucaps matcher / enable hooks for the ARM64_SPECTRE_BHB capability. */
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);