
Commit 5a0015d6 authored by Chris Zankel, committed by Linus Torvalds

[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 3



The attached patch provides part 3 of an architecture implementation for the
Tensilica Xtensa CPU series.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4bedea94
arch/xtensa/kernel/Makefile  +18 −0
#
# Makefile for the Linux/Xtensa kernel.
#

extra-y := head.o vmlinux.lds


obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o  \
	 setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o  \
	 pci-dma.o

## windowspill.o

obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
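
# Note: obj-$(CONFIG_FOO) is the standard kbuild idiom; it expands to obj-y
# when CONFIG_FOO=y, so e.g. pci.o is built only for PCI-enabled
# configurations and is left out of the kernel otherwise.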

arch/xtensa/kernel/align.S  +459 −0
/*
 * arch/xtensa/kernel/align.S
 *
 * Handle unalignment exceptions in kernel space.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica, Inc.
 *
 * Rewritten by Chris Zankel <chris@zankel.net>
 *
 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * and Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION

/*  First-level exception handler for unaligned exceptions.
 *
 *  Note: This handler works only for kernel exceptions.  Unaligned user
 *        access should get a seg fault.
 */

/* Big and little endian 16-bit values are located in
 * different halves of a register.  HWORD_START helps to
 * abstract the notion of extracting a 16-bit value from a
 * register.
 * We also have to define new shifting instructions because
 * lsb and msb are on 'opposite' ends in a register for
 * different endian machines.
 *
 * Assume a memory region in ascending address:
 *   	0 1 2 3|4 5 6 7
 *
 * When loading one word into a register, the content of that register is:
 *  LE	3 2 1 0, 7 6 5 4
 *  BE  0 1 2 3, 4 5 6 7
 *
 * Masking the bits of the higher/lower address means:
 *  LE  X X 0 0, 0 0 X X
 *  BE	0 0 X X, X X 0 0
 *
 * Shifting to higher/lower addresses, means:
 *  LE  shift left / shift right
 *  BE  shift right / shift left
 *
 * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
 *  LE  mask 0 0 X X / shift left
 *  BE  shift left / mask 0 0 X X
 */
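
/* Example with concrete (made-up) byte values: if addresses 0..3 hold
 * 0x00 0x11 0x22 0x33, an aligned 32-bit load yields 0x33221100 on a
 * little endian core but 0x00112233 on a big endian one, which is why
 * the shift/extract helpers below must be defined per endianness.
 */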

#define UNALIGNED_USER_EXCEPTION

#if XCHAL_HAVE_BE

#define HWORD_START	16
#define	INSN_OP0	28
#define	INSN_T		24
#define	INSN_OP1	16

.macro __src_b	r, w0, w1;	src	\r, \w0, \w1;	.endm
.macro __ssa8	r;		ssa8b	\r;		.endm
.macro __ssa8r	r;		ssa8l	\r;		.endm
.macro __sh	r, s;		srl	\r, \s;		.endm
.macro __sl	r, s;		sll	\r, \s;		.endm
.macro __exth	r, s;		extui	\r, \s, 0, 16;	.endm
.macro __extl	r, s;		slli	\r, \s, 16;	.endm

#else

#define HWORD_START	0
#define	INSN_OP0	0
#define	INSN_T		4
#define	INSN_OP1	12

.macro __src_b	r, w0, w1;	src	\r, \w1, \w0;	.endm
.macro __ssa8	r;		ssa8l	\r;		.endm
.macro __ssa8r	r;		ssa8b	\r;		.endm
.macro __sh	r, s;		sll	\r, \s;		.endm
.macro __sl	r, s;		srl	\r, \s;		.endm
.macro __exth	r, s;		slli	\r, \s, 16;	.endm
.macro __extl	r, s;		extui	\r, \s, 0, 16;	.endm

#endif

/*
 *	xxxx xxxx = imm8 field
 *	     yyyy = imm4 field
 *	     ssss = s field
 *	     tttt = t field
 *
 *	       		 16		    0
 *		          -------------------
 *	L32I.N		  yyyy ssss tttt 1000
 *	S32I.N	          yyyy ssss tttt 1001
 *
 *	       23			    0
 *		-----------------------------
 *	res	          0000           0010
 *	L16UI	xxxx xxxx 0001 ssss tttt 0010
 *	L32I	xxxx xxxx 0010 ssss tttt 0010
 *	XXX	          0011 ssss tttt 0010
 *	XXX	          0100 ssss tttt 0010
 *	S16I	xxxx xxxx 0101 ssss tttt 0010
 *	S32I	xxxx xxxx 0110 ssss tttt 0010
 *	XXX	          0111 ssss tttt 0010
 *	XXX	          1000 ssss tttt 0010
 *	L16SI	xxxx xxxx 1001 ssss tttt 0010
 *	XXX	          1010           0010
 *      **L32AI	xxxx xxxx 1011 ssss tttt 0010 unsupported
 *	XXX	          1100           0010
 *	XXX	          1101           0010
 *	XXX	          1110           0010
 *	**S32RI	xxxx xxxx 1111 ssss tttt 0010 unsupported
 *		-----------------------------
 *                           ^         ^    ^
 *    sub-opcode (NIBBLE_R) -+         |    |
 *       t field (NIBBLE_T) -----------+    |
 *  major opcode (NIBBLE_OP0) --------------+
 */
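
/* Decoding example, per the table above: "l32i a4, a3, 0" assembles to
 * the 24-bit word 0x002342 (imm8 = 0x00, sub-opcode = 0010/L32I,
 * s = 0011/a3, t = 0100/a4, major opcode = 0010).
 */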

#define OP0_L32I_N	0x8		/* load immediate narrow */
#define OP0_S32I_N	0x9		/* store immediate narrow */
#define OP1_SI_MASK	0x4		/* OP1 bit set for stores */
#define OP1_SI_BIT	2		/* OP1 bit number for stores */

#define OP1_L32I	0x2
#define OP1_L16UI	0x1
#define OP1_L16SI	0x9
#define OP1_L32AI	0xb

#define OP1_S32I	0x6
#define OP1_S16I	0x5
#define OP1_S32RI	0xf

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */


ENTRY(fast_unaligned)

	/* Note: We don't expect the address to be aligned on a word
	 *       boundary; the processor raised this exception precisely
	 *       because it isn't, so an aligned address here would
	 *       indicate a hardware fault.
	 */

	/* Save some working registers */

	s32i	a4, a2, PT_AREG4
	s32i	a5, a2, PT_AREG5
	s32i	a6, a2, PT_AREG6
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8

	rsr	a0, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3

	/* Keep value of SAR in a0 */

	rsr	a0, SAR
	rsr	a8, EXCVADDR		# load unaligned memory address

	/* Now, identify one of the following load/store instructions.
	 *
	 * The only possible danger of a double exception on the
	 * following l32i instructions is kernel code in vmalloc
	 * memory. The processor was just executing at the EPC_1
	 * address, and indeed, already fetched the instruction.  That
	 * guarantees a TLB mapping, which hasn't been replaced by
	 * this unaligned exception handler that uses only static TLB
	 * mappings. However, high-level interrupt handlers might
	 * modify TLB entries, so for the generic case, we register a
	 * TABLE_FIXUP handler here, too.
	 */

	/* a2...a8 saved on stack, a2 = SP */

	/* Extract the instruction that caused the unaligned access. */

	rsr	a7, EPC_1	# load exception address
	movi	a3, ~3
	and	a3, a3, a7	# mask lower bits

	l32i	a4, a3, 0	# load 2 words
	l32i	a5, a3, 4

	__ssa8	a7
	__src_b	a4, a4, a5	# a4 has the instruction

	/* Analyze the instruction (load or store?). */

	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble

#if XCHAL_HAVE_NARROW
	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
	addi	a6, a5, -OP0_S32I_N
	_beqz	a6, .Lstore		# S32I.N, do a store
#endif
	/* 'store indicator bit' not set, jump */
	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload

	/* Store: Jump to table entry to get the value in the source register.*/

.Lstore:movi	a5, .Lstore_table	# table
	extui	a6, a4, INSN_T, 4	# get source register
	addx8	a5, a6, a5
	jx	a5			# jump into table

	/* Invalid instruction, CRITICAL! */
.Linvalid_instruction_load:
	j	.Linvalid_instruction

	/* Load: Load memory address. */

.Lload: movi	a3, ~3
	and	a3, a3, a8		# align memory address

	__ssa8	a8
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a3, a3, 8
	l32e	a5, a3, -8
	l32e	a6, a3, -4
#else
	l32i	a5, a3, 0
	l32i	a6, a3, 4
#endif
	__src_b	a3, a5, a6		# a3 has the data word

#if XCHAL_HAVE_NARROW
	addi	a7, a7, 2		# increment PC (assume 16-bit insn)

	extui	a5, a4, INSN_OP0, 4
	_beqi	a5, OP0_L32I_N, 1f	# l32i.n: jump

	addi	a7, a7, 1
#else
	addi	a7, a7, 3
#endif

	extui	a5, a4, INSN_OP1, 4
	_beqi	a5, OP1_L32I, 1f	# l32i: jump

	extui	a3, a3, 0, 16		# extract lower 16 bits
	_beqi	a5, OP1_L16UI, 1f
	addi	a5, a5, -OP1_L16SI
	_bnez	a5, .Linvalid_instruction_load

	/* sign extend value */

	slli	a3, a3, 16
	srai	a3, a3, 16

	/* Set target register. */

1:

#if XCHAL_HAVE_LOOP
	rsr	a3, LEND		# check if we reached LEND
	bne	a7, a3, 1f
	rsr	a3, LCOUNT		# and LCOUNT != 0
	beqz	a3, 1f
	addi	a3, a3, -1		# decrement LCOUNT and set
	rsr	a7, LBEG		# set PC to LBEGIN
	wsr	a3, LCOUNT
#endif
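
	/* If the emulated load was the last instruction of a zero-overhead
	 * loop (PC == LEND with LCOUNT != 0), hardware would have branched
	 * back to LBEG instead of falling through; since EPC_1 is rewritten
	 * by hand below, the XCHAL_HAVE_LOOP block above replays that
	 * back-branch (the store path repeats this further down).
	 */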

1:	wsr	a7, EPC_1		# skip load instruction
	extui	a4, a4, INSN_T, 4	# extract target register
	movi	a5, .Lload_table
	addx8	a4, a4, a5
	jx	a4			# jump to entry for target register
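
	/* Each .Lload_table/.Lstore_table entry below is padded to 8 bytes
	 * (.align 8), so addx8 (reg = reg * 8 + table base) maps the 4-bit
	 * register number straight onto the address of its per-register stub.
	 */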

	.align	8
.Lload_table:
	s32i	a3, a2, PT_AREG0;	_j .Lexit;	.align 8
	mov	a1, a3;			_j .Lexit;	.align 8 # fishy??
	s32i	a3, a2, PT_AREG2;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG3;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG4;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG5;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG6;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG7;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG8;	_j .Lexit;	.align 8
	mov	a9, a3		;	_j .Lexit;	.align 8
	mov	a10, a3		;	_j .Lexit;	.align 8
	mov	a11, a3		;	_j .Lexit;	.align 8
	mov	a12, a3		;	_j .Lexit;	.align 8
	mov	a13, a3		;	_j .Lexit;	.align 8
	mov	a14, a3		;	_j .Lexit;	.align 8
	mov	a15, a3		;	_j .Lexit;	.align 8

.Lstore_table:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8	# fishy??
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG5;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG6;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG7;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG8;	_j 1f;	.align 8
	mov	a3, a9		;	_j 1f;	.align 8
	mov	a3, a10		;	_j 1f;	.align 8
	mov	a3, a11		;	_j 1f;	.align 8
	mov	a3, a12		;	_j 1f;	.align 8
	mov	a3, a13		;	_j 1f;	.align 8
	mov	a3, a14		;	_j 1f;	.align 8
	mov	a3, a15		;	_j 1f;	.align 8

1: 	# a7: instruction pointer, a4: instruction, a3: value

	movi	a6, 0			# mask: ffffffff:00000000

#if XCHAL_HAVE_NARROW
	addi	a7, a7, 2		# increment PC, assume 16-bit instruction

	extui	a5, a4, INSN_OP0, 4	# extract OP0
	addi	a5, a5, -OP0_S32I_N
	_beqz	a5, 1f			# s32i.n: jump

	addi	a7, a7, 1		# increment PC, 32-bit instruction
#else
	addi	a7, a7, 3		# increment PC, 32-bit instruction
#endif

	extui	a5, a4, INSN_OP1, 4	# extract OP1
	_beqi	a5, OP1_S32I, 1f	# jump if 32 bit store
	_bnei	a5, OP1_S16I, .Linvalid_instruction_store

	movi	a5, -1
	__extl	a3, a3			# get 16-bit value
	__exth	a6, a5			# get 16-bit mask ffffffff:ffff0000

	/* Get memory address */

1:
#if XCHAL_HAVE_LOOP
	rsr	a3, LEND		# check if we reached LEND
	bne	a7, a3, 1f
	rsr	a3, LCOUNT		# and LCOUNT != 0
	beqz	a3, 1f
	addi	a3, a3, -1		# decrement LCOUNT and set
	rsr	a7, LBEG		# set PC to LBEGIN
	wsr	a3, LCOUNT
#endif

1:	wsr	a7, EPC_1		# skip store instruction
	movi	a4, ~3
	and	a4, a4, a8		# align memory address

	/* Insert value into memory */

	movi	a5, -1			# mask: ffffffff:XXXX0000
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a4, a4, 8
#endif

	__ssa8r a8
	__src_b	a7, a5, a6		# lo-mask  F..F0..0 (BE) 0..0F..F (LE)
	__src_b	a6, a6, a5		# hi-mask  0..0F..F (BE) F..F0..0 (LE)
#ifdef UNALIGNED_USER_EXCEPTION
	l32e	a5, a4, -8
#else
	l32i	a5, a4, 0		# load lower address word
#endif
	and	a5, a5, a7		# mask
	__sh	a7, a3 			# shift value
	or	a5, a5, a7		# or with original value
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a5, a4, -8
	l32e	a7, a4, -4
#else
	s32i	a5, a4, 0		# store
	l32i	a7, a4, 4		# same for upper address word
#endif
	__sl	a5, a3
	and	a6, a7, a6
	or	a6, a6, a5
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a6, a4, -4
#else
	s32i	a6, a4, 4
#endif

	/* Done. restore stack and return */

.Lexit:
	movi	a4, 0
	rsr	a3, EXCSAVE_1
	s32i	a4, a3, EXC_TABLE_FIXUP

	/* Restore working registers */

	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3

	/* restore SAR and return */

	wsr	a0, SAR
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* We cannot handle this exception. */

	.extern _kernel_exception
.Linvalid_instruction_store:
.Linvalid_instruction:

	/* Restore a4...a8 and SAR, set SP, and jump to default exception. */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	wsr	a0, SAR
	mov	a1, a2

	rsr	a0, PS
	bbsi.l	a0, PS_UM_SHIFT, 1f	# jump if user mode

	movi	a0, _kernel_exception
	jx	a0

1:	movi	a0, _user_exception
	jx	a0


#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
arch/xtensa/kernel/asm-offsets.c  +94 −0
/*
 * arch/xtensa/kernel/asm-offsets.c
 *
 * Generates definitions from c-type structures used by assembly sources.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <asm/processor.h>

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
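
/*
 * Each DEFINE() forces the compiler to emit a marker line of the form
 * "->PT_PC <value> offsetof (struct pt_regs, pc)" into its assembly
 * output; at build time, kbuild post-processes these "->" markers into
 * #define lines in the <asm/offsets.h> header that the assembly sources
 * in this directory include.
 */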

int main(void)
{
	/* struct pt_regs */
	DEFINE(PT_PC, offsetof (struct pt_regs, pc));
	DEFINE(PT_PS, offsetof (struct pt_regs, ps));
	DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
	DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
	DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
	DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
	DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
	DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
	DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
	DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
	DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
	DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
	DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
	DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
	DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
	DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
	DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
	DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
	DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
	DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
	DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
	DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
	DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
	DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
	DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
	DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
	DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
	BLANK();

	/* struct task_struct */
	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
	DEFINE(TASK_MM, offsetof (struct task_struct, mm));
	DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
	DEFINE(TASK_PID, offsetof (struct task_struct, pid));
	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
	BLANK();

	/* struct thread_info (offsets are from the start of task_struct) */
	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
	DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
	BLANK();

	/* struct mm_struct */
	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
	DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
	DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
	BLANK();
	DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
	return 0;
}

arch/xtensa/kernel/coprocessor.S  +201 −0
/*
 * arch/xtensa/kernel/coprocessor.S
 *
 * Xtensa processor configuration-specific table of coprocessor and
 * other custom register layout information.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 *
 * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

/*
 * This module contains a table that describes the layout of the various
 * custom registers and states associated with each coprocessor, as well
 * as those not associated with any coprocessor ("extra state").
 * This table is included with core dumps and is available via the ptrace
 * interface, allowing the layout of such register/state information to
 * be modified in the kernel without affecting the debugger.  Each
 * register or state is identified using a 32-bit "libdb target number"
 * assigned when the Xtensa processor is generated.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/processor.h>

#if XCHAL_HAVE_CP

#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)

ENTRY(release_coprocessors)

	entry	a1, 16
						# a2: task
	movi	a3, 1 << XCHAL_CP_MAX 		# a3: coprocessor-bit
	movi	a4, coprocessor_info+CP_LAST	# a4: owner-table
						# a5: tmp
	movi	a6, 0				# a6: 0
	rsil	a7, LOCKLEVEL			# a7: PS

1:	/* Check if task is coprocessor owner of coprocessor[i]. */

	l32i	a5, a4, COPROCESSOR_INFO_OWNER
	srli	a3, a3, 1
	beqz	a3, 1f
	addi	a4, a4, -8
	beq	a2, a5, 1b

	/* Found an entry: Clear entry CPENABLE bit to disable CP. */

	rsr	a5, CPENABLE
	s32i	a6, a4, COPROCESSOR_INFO_OWNER
	xor	a5, a3, a5
	wsr	a5, CPENABLE

	bnez	a3, 1b

1:	wsr	a7, PS
	rsync
	retw


ENTRY(disable_coprocessor)
	entry	sp, 16
	rsil	a7, LOCKLEVEL
	rsr	a3, CPENABLE
	movi	a4, 1
	ssl	a2
	sll	a4, a4
	and	a4, a3, a4
	xor	a3, a3, a4
	wsr	a3, CPENABLE
	wsr	a7, PS
	rsync
	retw
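
	/* In disable_coprocessor above and enable_coprocessor below,
	 * "ssl a2" sets SAR up for a left shift by a2, so "sll a4, a4"
	 * turns the coprocessor index passed in a2 into the single-bit
	 * mask 1 << a2 that is cleared from, or OR-ed into, CPENABLE.
	 */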

ENTRY(enable_coprocessor)
	entry	sp, 16
	rsil	a7, LOCKLEVEL
	rsr	a3, CPENABLE
	movi	a4, 1
	ssl	a2
	sll	a4, a4
	or	a3, a3, a4
	wsr	a3, CPENABLE
	wsr	a7, PS
	rsync
	retw

#endif

ENTRY(save_coprocessor_extra)
	entry	sp, 16
	xchal_extra_store_funcbody
	retw

ENTRY(restore_coprocessor_extra)
	entry	sp, 16
	xchal_extra_load_funcbody
	retw

ENTRY(save_coprocessor_registers)
	entry	sp, 16
	xchal_cpi_store_funcbody
	retw

ENTRY(restore_coprocessor_registers)
	entry	sp, 16
	xchal_cpi_load_funcbody
	retw


/*
 *  The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
 *  describe the contents of coprocessor & extra save areas in terms of
 *  undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros.  We define these
 *  latter macros here; they expand into a table of the format we want.
 *  The general format is:
 *
 *	CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
 *			    bitmask, rsv2, rsv3)
 *	CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
 *			    bitmask, rsv2, rsv3)
 *	CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
 *			    numentries, contentsize, regname_base,
 *			    regfile_name, rsv2, rsv3)
 *
 *  For this table, we only care about the <libdbnum>, <offset> and <size>
 *  fields.
 */

/*  Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below:  */

#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum,     \
			    bitmask, rsv2, rsv3)			      \
		reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum,     \
			    bitmask, rsv2, rsv3)			      \
		reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
			    numentries, contentsize, regname_base,	      \
			    regfile_name, rsv2, rsv3)			      \
		reg_entry libdbnum, offset, size ;

/* A single table entry: */
	.macro	reg_entry	libdbnum, offset, size
	 .ifne	(__last_offset-(__last_group_offset+\offset))
	  /* padding entry */
	  .word	(0xFC000000+__last_offset-(__last_group_offset+\offset))
	 .endif
	 .word	\libdbnum				/* actual entry */
	 .set	__last_offset, __last_group_offset+\offset+\size
	.endm	/* reg_entry */
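
/* Illustration with hypothetical numbers: a CONTENTS_LIBDB_SREG entry with
 * libdbnum 0x0200, offset 0 and size 4 expands to "reg_entry 0x0200, 0, 4",
 * emitting the single word 0x00000200; if the running offset does not match,
 * a padding word based on 0xFC000000 encodes the difference first.
 */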


/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
	.macro	reg_group	cpnum, num_entries, align
	 .set	__last_group_offset, (__last_offset + \align - 1) & -\align
	 .ifne	\num_entries
	  .word	0xFD000000+(\cpnum<<16)+\num_entries
	 .endif
	.endm	/* reg_group */

/*
 * Register info tables.
 */

	.section .rodata, "a"
	.globl	_xtensa_reginfo_tables
	.globl	_xtensa_reginfo_table_size
	.align	4
_xtensa_reginfo_table_size:
	.word	_xtensa_reginfo_table_end - _xtensa_reginfo_tables

_xtensa_reginfo_tables:
	.set	__last_offset, 0
	reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
	XCHAL_EXTRA_SA_CONTENTS_LIBDB
	reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
	XCHAL_CP0_SA_CONTENTS_LIBDB
	reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
	XCHAL_CP1_SA_CONTENTS_LIBDB
	reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
	XCHAL_CP2_SA_CONTENTS_LIBDB
	reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
	XCHAL_CP3_SA_CONTENTS_LIBDB
	reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
	XCHAL_CP4_SA_CONTENTS_LIBDB
	reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
	XCHAL_CP5_SA_CONTENTS_LIBDB
	reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
	XCHAL_CP6_SA_CONTENTS_LIBDB
	reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
	XCHAL_CP7_SA_CONTENTS_LIBDB
	.word	0xFC000000	/* invalid register number, marks end of table */
_xtensa_reginfo_table_end:
+1996 −0  (new file added; preview size limit exceeded, changes collapsed)