Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 228adef1 authored by Russell King, committed by Russell King
Browse files

[ARM] vfp: make fpexc bit names less verbose



Use the fpexc abbreviated names instead of long verbose names
for fpexc bits.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 21d1ca04
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -74,14 +74,14 @@ vfp_support_entry:

	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
	DBGSTR1	"fpexc %08x", r1
	tst	r1, #FPEXC_ENABLE
	tst	r1, #FPEXC_EN
	bne	look_for_VFP_exceptions	@ VFP is already enabled

	DBGSTR1 "enable %x", r10
	ldr	r3, last_VFP_context_address
	orr	r1, r1, #FPEXC_ENABLE	@ user FPEXC has the enable bit set
	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
	bic	r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
	cmp	r4, r10
	beq	check_for_exception	@ we are returning to the same
					@ process, so the registers are
@@ -124,7 +124,7 @@ no_old_VFP_process:
	VFPFMXR	FPSCR, r5		@ restore status

check_for_exception:
	tst	r1, #FPEXC_EXCEPTION
	tst	r1, #FPEXC_EX
	bne	process_exception	@ might as well handle the pending
					@ exception before retrying branch
					@ out before setting an FPEXC that
@@ -136,10 +136,10 @@ check_for_exception:


look_for_VFP_exceptions:
	tst	r1, #FPEXC_EXCEPTION
	tst	r1, #FPEXC_EX
	bne	process_exception
	VFPFMRX	r5, FPSCR
	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EXCEPTION !
	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EX !
	bne	process_exception

	@ Fall into hand on to next handler - appropriate coproc instr
+6 −6
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
@@ -70,7 +70,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

@@ -81,13 +81,13 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_ENABLE;
		vfp->hard.fpexc = FPEXC_EN;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* flush and release case: Per-thread VFP cleanup. */
@@ -229,7 +229,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
	/*
	 * Enable access to the VFP so we can handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));

	orig_fpscr = fpscr = fmrx(FPSCR);

@@ -248,7 +248,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
	/*
	 * Modify fpscr to indicate the number of iterations remaining
	 */
	if (fpexc & FPEXC_EXCEPTION) {
	if (fpexc & FPEXC_EX) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);
+2 −2
Original line number Diff line number Diff line
@@ -26,8 +26,8 @@
#define FPSID_REV_MASK		(0xF  << FPSID_REV_BIT)

/* FPEXC bits */
#define FPEXC_EXCEPTION		(1<<31)
#define FPEXC_ENABLE		(1<<30)
#define FPEXC_EX		(1 << 31)
#define FPEXC_EN		(1 << 30)

/* FPSCR bits */
#define FPSCR_DEFAULT_NAN	(1<<25)