Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee400b63 authored by Stephen Rothwell
Browse files

powerpc: more cleanup of powerpc/kernel



Update head_64.S from arch/ppc64
Remove arch/ppc/kernel/fpu.S

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
parent dcff1b17
Loading
Loading
Loading
Loading
+6 −3
Original line number Original line Diff line number Diff line
@@ -1253,7 +1253,7 @@ unrecov_slb:
 *
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is give to the hv
 * we get control (with relocate on).  The address is give to the hv
 * as a page number (see xLparMap in LparData.c), so this must be at a
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 * PAGE_SHIFT).
 */
 */
@@ -1364,6 +1364,7 @@ _STATIC(__start_initialization_iSeries)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000


	bl	.iSeries_early_setup
	bl	.iSeries_early_setup
	bl	.early_setup


	/* relocation is on at this point */
	/* relocation is on at this point */


@@ -1970,20 +1971,22 @@ _GLOBAL(hmt_start_secondary)
	blr
	blr
#endif
#endif


#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
_GLOBAL(smp_release_cpus)
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * to spin on their individual paca spinloops.
	 * For non SMP kernels, the secondary cpus never
	 * For non SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 * get out of the common spinloop.
	 * XXX This does nothing useful on iSeries, secondaries are
	 * already waiting on their paca.
	 */
	 */
	li	r3,1
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	std	r3,0(r5)
	sync
	sync
	blr
	blr
#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
#endif /* CONFIG_SMP */




/*
/*
+1 −0
Original line number Original line Diff line number Diff line
@@ -39,6 +39,7 @@ endif
# These are here while we do the architecture merge
# These are here while we do the architecture merge
vecemu-y			+= ../../powerpc/kernel/vecemu.o
vecemu-y			+= ../../powerpc/kernel/vecemu.o
vector-y			+= ../../powerpc/kernel/vector.o
vector-y			+= ../../powerpc/kernel/vector.o
fpu-y				+= ../../powerpc/kernel/fpu.o


else
else
obj-y				:= entry.o irq.o idle.o time.o misc.o \
obj-y				:= entry.o irq.o idle.o time.o misc.o \

arch/ppc/kernel/fpu.S

deleted 100644 → 0
+0 −133
Original line number Original line Diff line number Diff line
/*
 *  FPU support code, moved here from head.S so that it can be used
 *  by chips which use other head-whatever.S files.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/*
 * load_up_fpu -- handle an "FP unavailable" exception by giving the
 * current task the FPU.
 *
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 *
 * NOTE(review): this path appears to run in real (untranslated) mode --
 * it uses tophys() and SPRG3 is described below as a physical address.
 * r9 is assumed to already hold the MSR image that
 * fast_exception_return will restore; confirm against the exception
 * prolog in head.S.  Clobbers r3-r6, r10, fr0.
 */
	.globl	load_up_fpu
load_up_fpu:
	mfmsr	r5			/* r5 = current MSR */
	ori	r5,r5,MSR_FP		/* set FP-available bit */
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync				/* FP usable from this point on */
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* get __pa constant */
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)	/* r4 = previous FPU owner (virt), or 0 */
	cmpwi	0,r4,0
	beq	1f			/* no previous owner -> nothing to save */
	add	r4,r4,r6		/* convert task pointer to physical */
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)		/* dump fr0-fr31 into its thread_struct */
	mffs	fr0			/* read FPSCR into fr0 */
	stfd	fr0,THREAD_FPSCR-4(r4)	/* stfd is 8 bytes; -4 so the FPSCR word
					 * lands at THREAD_FPSCR -- TODO confirm
					 * against thread_struct layout */
	lwz	r5,PT_REGS(r4)		/* r5 = previous owner's pt_regs (virt) */
	add	r5,r5,r6		/* ...to physical */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)	/* task's chosen FE0/FE1 bits */
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4		/* merge in FP exception mode */
	lfd	fr0,THREAD_FPSCR-4(r5)	/* reload saved FPSCR (same -4 layout) */
	mtfsf	0xff,fr0		/* write all FPSCR fields */
	REST_32FPRS(0, r5)		/* reload fr0-fr31 from thread_struct */
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD		/* back from THREAD to task pointer (phys) */
	sub	r4,r4,r6		/* ...to virtual */
	stw	r4,last_task_used_math@l(r3)	/* current task now owns the FPU */
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * KernelFP -- FP unavailable trap taken from kernel mode.
 *
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 *
 * In: r1 = exception stack frame, r2 = current task
 * (NOTE(review): r2-as-current is inferred from the "current" comment
 * below -- confirm against this kernel's register conventions.)
 */
 	.globl	KernelFP
KernelFP:
	lwz	r3,_MSR(r1)		/* saved MSR from the exception frame */
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h		/* r3 = printk format string (arg 1) */
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)		/* faulting PC, for the message */
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4,0

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 *
 * In: r3 = task pointer, or 0 (then nothing to save).
 * Runs with translation on (no tophys here, unlike load_up_fpu).
 * Clobbers r3-r5, fr0.  The SYNC_601/ISYNC_601 pairs are the
 * PowerPC 601 mtmsr errata workaround (no-ops elsewhere).
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP		/* we need the FPU to read its state */
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD	        /* want THREAD of task */
	lwz	r5,PT_REGS(r3)		/* r5 = task's pt_regs, may be NULL */
	cmpwi	0,r5,0			/* (branch consumed after the saves below) */
	SAVE_32FPRS(0, r3)		/* dump fr0-fr31 into thread_struct */
	mffs	fr0			/* read FPSCR */
	stfd	fr0,THREAD_FPSCR-4(r3)	/* 8-byte store; -4 puts the FPSCR word
					 * at THREAD_FPSCR -- TODO confirm layout */
	beq	1f			/* no pt_regs -> skip MSR fixup */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)	/* no task owns the FPU now */
#endif /* CONFIG_SMP */
	blr