arch/arm64/include/asm/processor.h  +12 −2

@@ -150,6 +150,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 	regs->pc = pc;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= COMPAT_PSR_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -157,7 +167,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -176,7 +186,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= COMPAT_PSR_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
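For context, the two new helpers simply OR the SSBS bit into the saved pstate, replacing the open-coded updates in start_thread() and compat_start_thread(). A minimal sketch of what those bits are, assuming the usual arm64 definitions (PSTATE.SSBS is bit 12 for native AArch64 tasks, SPSR.SSBS is bit 23 for AArch32/compat tasks); the values below are quoted from my reading of the headers, so treat them as assumptions and check arch/arm64/include/uapi/asm/ptrace.h for the authoritative definitions:

/*
 * Assumed values, for illustration only; the real definitions live in
 * arch/arm64/include/(uapi/)asm/ptrace.h.
 */
#define PSR_SSBS_BIT		0x00001000	/* PSTATE.SSBS, AArch64 tasks */
#define COMPAT_PSR_SSBS_BIT	0x00800000	/* SPSR.SSBS, AArch32 tasks   */

/*
 * Setting the relevant bit in task_pt_regs(task)->pstate means the ERET back
 * to userspace sets SSBS, i.e. speculative store bypass is permitted (the
 * mitigation is relaxed) for that task; leaving it clear keeps the
 * mitigation in effect.
 */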
arch/arm64/kernel/process.c  +28 −1

@@ -367,7 +367,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
@@ -408,6 +408,32 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	struct pt_regs *regs = task_pt_regs(next);
+
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -436,6 +462,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	contextidr_thread_switch(next);
 	entry_task_switch(next);
 	uao_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
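To see the policy in isolation: on every switch to a user task, the saved SSBS bit is re-derived rather than trusted, because a CPU that treats SSBS as RES0 may have silently dropped it. Below is a small standalone userspace model of that decision, a sketch only: the enum, function and names (want_ssbs_set, SSBD_*) are invented for illustration and are not the kernel's ARM64_SSBD_* symbols.

/* Standalone model of the ssbs_thread_switch() decision (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

enum ssbd_state { SSBD_FORCE_DISABLE, SSBD_KERNEL, SSBD_FORCE_ENABLE };

static bool want_ssbs_set(enum ssbd_state global, bool task_tif_ssbd)
{
	/*
	 * Mitigation forced on globally, or requested by this task via prctl
	 * (TIF_SSBD): leave SSBS clear so speculative store bypass stays
	 * disabled when the task runs in userspace.
	 */
	if (global == SSBD_FORCE_ENABLE || task_tif_ssbd)
		return false;

	/*
	 * Otherwise restore SSBS in the saved pstate, undoing any loss caused
	 * by migration from a CPU that treats the bit as RES0.
	 */
	return true;
}

int main(void)
{
	printf("forced-on, no prctl : SSBS %s\n",
	       want_ssbs_set(SSBD_FORCE_ENABLE, false) ? "set" : "clear");
	printf("default,   prctl set: SSBS %s\n",
	       want_ssbs_set(SSBD_KERNEL, true) ? "set" : "clear");
	printf("default,   no prctl : SSBS %s\n",
	       want_ssbs_set(SSBD_KERNEL, false) ? "set" : "clear");
	return 0;
}

The kernel-thread early return in the real ssbs_thread_switch() has no analogue here: kernel threads never return to EL0, and their pt_regs may be uninitialised (e.g. the idle task), so the function bails before touching them.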