
Commit f2574030 authored by Michael Ellerman

powerpc: Revert the initial stack protector support



Unfortunately the stack protector support we merged recently only works
on some toolchains. If the toolchain is built without glibc support
everything works fine, but if glibc is built then it leads to a panic
at boot.

The solution is not rc5 material, so revert the support for now. This
reverts commits:

6533b7c1 ("powerpc: Initial stack protector (-fstack-protector) support")
902e06eb ("powerpc/32: Change the stack protector canary value per task")

Fixes: 6533b7c1 ("powerpc: Initial stack protector (-fstack-protector) support")
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent f05fea5b
arch/powerpc/Kconfig +0 −1
@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
arch/powerpc/include/asm/stackprotector.h (deleted file) +0 −40
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */
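
The header comment above describes the mechanism in prose. As a rough, hand-written C sketch of what GCC's -fstack-protector conceptually inserts into a protected function (the real store and check are emitted by the compiler; the demo value and the _demo suffix are made up for illustration):

#include <string.h>

/* Global guard the instrumentation compares against; on PPC, GCC
 * expects this exact name (see the header comment above). */
unsigned long __stack_chk_guard = 0x9e3779b9UL;	/* demo value */

/* Called when the check fails; the kernel's version panics. */
static void __stack_chk_fail_demo(void)
{
}

void copy_name(char *dst, const char *src)
{
	/* Prologue: the guard is stashed in a stack slot placed between
	 * the saved return address and any local buffers. */
	volatile unsigned long canary = __stack_chk_guard;
	char buf[64];

	strncpy(buf, src, sizeof(buf) - 1);	/* overflow target */
	buf[sizeof(buf) - 1] = '\0';
	strcpy(dst, buf);

	/* Epilogue: an overflow writing upward hits the slot before the
	 * saved return address, so a mismatch means the frame was smashed
	 * and the function must not return normally. */
	if (canary != __stack_chk_guard)
		__stack_chk_fail_demo();
}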
arch/powerpc/kernel/Makefile +0 −4
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
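
The comment removed above notes that prom_init runs too early for the instrumentation to be safe. To see concretely what the flag adds, a small stand-alone file (names here are arbitrary) can be compiled both ways and the resulting assembly compared:

/* canary_demo.c - compare the generated assembly:
 *   gcc -S -O2 -fno-stack-protector canary_demo.c -o plain.s
 *   gcc -S -O2 -fstack-protector    canary_demo.c -o guarded.s
 * guarded.s gains a canary store in fill()'s prologue and a check plus
 * a __stack_chk_fail call before the return.  Where the canary is read
 * from (the global __stack_chk_guard or a TLS slot) depends on how the
 * toolchain was built -- the very variation behind this revert. */
#include <string.h>

void fill(char *dst, const char *src)
{
	char buf[32];		/* local char array triggers protection */

	strcpy(buf, src);
	memcpy(dst, buf, sizeof(buf));
}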
arch/powerpc/kernel/asm-offsets.c +0 −3
@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
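
For context: asm-offsets.c is compiled to assembly, and markers emitted by DEFINE() are scraped into a generated header, so assembly such as the entry_32.S hunk below can name C struct offsets like TSK_STACK_CANARY. A minimal stand-alone sketch of the pattern (demo struct and symbol names are invented; the DEFINE shape matches include/linux/kbuild.h of this era):

/* offsets_demo.c - build with "gcc -S offsets_demo.c" and then
 * grep '^->' offsets_demo.s to see the emitted offset markers. */
#include <stddef.h>

struct task_struct_demo {		/* reduced stand-in */
	long state;
	unsigned long stack_canary;
};

/* Emits "->SYM value" into the compiler's asm output, where the
 * build scripts can turn it into a #define. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(TSK_STACK_CANARY_DEMO,
	       offsetof(struct task_struct_demo, stack_canary));
	return 0;
}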
arch/powerpc/kernel/entry_32.S +1 −5
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */
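
The three removed instructions were the 32-bit, non-SMP half of the per-task canary: on each context switch, copy the incoming task's stack_canary into the single global guard (r2 holds the incoming task_struct here). A reduced C equivalent, with the struct cut down to the one relevant field:

unsigned long __stack_chk_guard;

struct task_struct_demo {		/* reduced stand-in */
	unsigned long stack_canary;	/* at offset TSK_STACK_CANARY */
};

static inline void switch_canary(struct task_struct_demo *next)
{
	/* UP only: one global guard can track one task at a time, which
	 * is why the block was guarded by !CONFIG_SMP -- as the deleted
	 * header notes, SMP cannot have per-task canaries with a single
	 * global __stack_chk_guard. */
	__stack_chk_guard = next->stack_canary;
}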