
Commit e9564df7 authored by Guo Ren

csky: Process management and Signal



This patch adds the files related to task switching, sigcontext,
signal handling, and FPU context switching.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Eric W. Biederman <ebiederm@xmission.com>
parent 013de2d6

arch/csky/abiv2/fpu.c (new file, mode 100644, +275 −0)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <abi/reg_ops.h>

#define MTCR_MASK	0xFC00FFE0
#define MFCR_MASK	0xFC00FFE0
#define MTCR_DIST	0xC0006420
#define MFCR_DIST	0xC0006020
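
/*
 * MTCR_DIST/MFCR_DIST are the opcode templates of the 32-bit mtcr/mfcr
 * forms that access the cr<x, 2> bank, and the MASKs keep only the
 * opcode bits while clearing the two 5-bit register fields (bits
 * [20:16] and [4:0]) that fpu_libc_helper() extracts below. This
 * reading is inferred from the decode logic, not from an ISA manual.
 */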

void __init init_fpu(void)
{
	mtcr("cr<1, 2>", 0);
}

/*
 * fpu_libc_helper() helps libc execute:
 *  - mfcr %a, cr<1, 2>
 *  - mfcr %a, cr<2, 2>
 *  - mtcr %a, cr<1, 2>
 *  - mtcr %a, cr<2, 2>
 *
 * The instruction at the faulting pc is decoded and emulated below,
 * and the pc is advanced past it on success.
 */
int fpu_libc_helper(struct pt_regs *regs)
{
	int fault;
	unsigned long instrptr, regx = 0;
	unsigned long index = 0, tmp = 0;
	unsigned long tinstr = 0;
	u16 instr_hi, instr_low;

	instrptr = instruction_pointer(regs);
	if (instrptr & 1)
		return 0;

	fault = __get_user(instr_low, (u16 *)instrptr);
	if (fault)
		return 0;

	fault = __get_user(instr_hi, (u16 *)(instrptr + 2));
	if (fault)
		return 0;

	tinstr = instr_hi | ((unsigned long)instr_low << 16);

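	/*
	 * Bits [25:21] appear to select the control-register bank;
	 * only bank 2 (the cr<x, 2> FPU control registers) is
	 * emulated here.
	 */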
	if (((tinstr >> 21) & 0x1F) != 2)
		return 0;

	if ((tinstr & MTCR_MASK) == MTCR_DIST) {
		index = (tinstr >> 16) & 0x1F;
		if (index > 13)
			return 0;

		tmp = tinstr & 0x1F;
		if (tmp > 2)
			return 0;

		regx =  *(&regs->a0 + index);

		if (tmp == 1)
			mtcr("cr<1, 2>", regx);
		else if (tmp == 2)
			mtcr("cr<2, 2>", regx);
		else
			return 0;

		regs->pc += 4;
		return 1;
	}

	if ((tinstr & MFCR_MASK) == MFCR_DIST) {
		index = tinstr & 0x1F;
		if (index > 13)
			return 0;

		tmp = ((tinstr >> 16) & 0x1F);
		if (tmp > 2)
			return 0;

		if (tmp == 1)
			regx = mfcr("cr<1, 2>");
		else if (tmp == 2)
			regx = mfcr("cr<2, 2>");
		else
			return 0;

		*(&regs->a0 + index) = regx;

		regs->pc += 4;
		return 1;
	}

	return 0;
}

void fpu_fpe(struct pt_regs *regs)
{
	int sig, code;
	unsigned int fesr;

	fesr = mfcr("cr<2, 2>");

	sig = SIGFPE;
	code = FPE_FLTUNK;

	if (fesr & FPE_ILLE) {
		sig = SIGILL;
		code = ILL_ILLOPC;
	} else if (fesr & FPE_IDC) {
		sig = SIGILL;
		code = ILL_ILLOPN;
	} else if (fesr & FPE_FEC) {
		sig = SIGFPE;
		if (fesr & FPE_IOC)
			code = FPE_FLTINV;
		else if (fesr & FPE_DZC)
			code = FPE_FLTDIV;
		else if (fesr & FPE_UFC)
			code = FPE_FLTUND;
		else if (fesr & FPE_OFC)
			code = FPE_FLTOVF;
		else if (fesr & FPE_IXC)
			code = FPE_FLTRES;
	}

	force_sig_fault(sig, code, (void __user *)regs->pc, current);
}

#define FMFVR_FPU_REGS(vrx, vry)	\
	"fmfvrl %0, "#vrx"\n"		\
	"fmfvrh %1, "#vrx"\n"		\
	"fmfvrl %2, "#vry"\n"		\
	"fmfvrh %3, "#vry"\n"

#define FMTVR_FPU_REGS(vrx, vry)	\
	"fmtvrl "#vrx", %0\n"		\
	"fmtvrh "#vrx", %1\n"		\
	"fmtvrl "#vry", %2\n"		\
	"fmtvrh "#vry", %3\n"

#define STW_FPU_REGS(a, b, c, d)	\
	"stw    %0, (%4, "#a")\n"	\
	"stw    %1, (%4, "#b")\n"	\
	"stw    %2, (%4, "#c")\n"	\
	"stw    %3, (%4, "#d")\n"

#define LDW_FPU_REGS(a, b, c, d)	\
	"ldw    %0, (%4, "#a")\n"	\
	"ldw    %1, (%4, "#b")\n"	\
	"ldw    %2, (%4, "#c")\n"	\
	"ldw    %3, (%4, "#d")\n"

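/*
 * The FMFVR/FMTVR and STW/LDW helpers above move two vr registers at a
 * time between the FPU and user_fp->vr[]. Judging by the byte offsets
 * used below (0, 4, 16, 20, ...), each vr register occupies a 16-byte
 * slot in vr[], with the low word at +0 and the high word at +4, so
 * the FPUV1 path matches the wider FPUV2/VDSP register layout. This is
 * inferred from the offsets, not from the struct user_fp definition.
 */
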
void save_to_user_fp(struct user_fp *user_fp)
{
	unsigned long flg;
	unsigned long tmp1, tmp2;
	unsigned long *fpregs;

	local_irq_save(flg);

	tmp1 = mfcr("cr<1, 2>");
	tmp2 = mfcr("cr<2, 2>");

	user_fp->fcr = tmp1;
	user_fp->fesr = tmp2;

	fpregs = &user_fp->vr[0];
#ifdef CONFIG_CPU_HAS_FPUV2
#ifdef CONFIG_CPU_HAS_VDSP
	asm volatile(
		"vstmu.32    vr0-vr3,   (%0)\n"
		"vstmu.32    vr4-vr7,   (%0)\n"
		"vstmu.32    vr8-vr11,  (%0)\n"
		"vstmu.32    vr12-vr15, (%0)\n"
		"fstmu.64    vr16-vr31, (%0)\n"
		: "+a"(fpregs)
		::"memory");
#else
	asm volatile(
		"fstmu.64    vr0-vr31,  (%0)\n"
		: "+a"(fpregs)
		::"memory");
#endif
#else
	{
	unsigned long tmp3, tmp4;

	asm volatile(
		FMFVR_FPU_REGS(vr0, vr1)
		STW_FPU_REGS(0, 4, 16, 20)
		FMFVR_FPU_REGS(vr2, vr3)
		STW_FPU_REGS(32, 36, 48, 52)
		FMFVR_FPU_REGS(vr4, vr5)
		STW_FPU_REGS(64, 68, 80, 84)
		FMFVR_FPU_REGS(vr6, vr7)
		STW_FPU_REGS(96, 100, 112, 116)
		"addi	%4, 128\n"
		FMFVR_FPU_REGS(vr8, vr9)
		STW_FPU_REGS(0, 4, 16, 20)
		FMFVR_FPU_REGS(vr10, vr11)
		STW_FPU_REGS(32, 36, 48, 52)
		FMFVR_FPU_REGS(vr12, vr13)
		STW_FPU_REGS(64, 68, 80, 84)
		FMFVR_FPU_REGS(vr14, vr15)
		STW_FPU_REGS(96, 100, 112, 116)
		: "=a"(tmp1), "=a"(tmp2), "=a"(tmp3),
		  "=a"(tmp4), "+a"(fpregs)
		::"memory");
	}
#endif

	local_irq_restore(flg);
}

void restore_from_user_fp(struct user_fp *user_fp)
{
	unsigned long flg;
	unsigned long tmp1, tmp2;
	unsigned long *fpregs;

	local_irq_save(flg);

	tmp1 = user_fp->fcr;
	tmp2 = user_fp->fesr;

	mtcr("cr<1, 2>", tmp1);
	mtcr("cr<2, 2>", tmp2);

	fpregs = &user_fp->vr[0];
#ifdef CONFIG_CPU_HAS_FPUV2
#ifdef CONFIG_CPU_HAS_VDSP
	asm volatile(
		"vldmu.32    vr0-vr3,   (%0)\n"
		"vldmu.32    vr4-vr7,   (%0)\n"
		"vldmu.32    vr8-vr11,  (%0)\n"
		"vldmu.32    vr12-vr15, (%0)\n"
		"fldmu.64    vr16-vr31, (%0)\n"
		: "+a"(fpregs)
		::"memory");
#else
	asm volatile(
		"fldmu.64    vr0-vr31,  (%0)\n"
		: "+a"(fpregs)
		::"memory");
#endif
#else
	{
	unsigned long tmp3, tmp4;

	asm volatile(
		LDW_FPU_REGS(0, 4, 16, 20)
		FMTVR_FPU_REGS(vr0, vr1)
		LDW_FPU_REGS(32, 36, 48, 52)
		FMTVR_FPU_REGS(vr2, vr3)
		LDW_FPU_REGS(64, 68, 80, 84)
		FMTVR_FPU_REGS(vr4, vr5)
		LDW_FPU_REGS(96, 100, 112, 116)
		FMTVR_FPU_REGS(vr6, vr7)
		"addi	%4, 128\n"
		LDW_FPU_REGS(0, 4, 16, 20)
		FMTVR_FPU_REGS(vr8, vr9)
		LDW_FPU_REGS(32, 36, 48, 52)
		FMTVR_FPU_REGS(vr10, vr11)
		LDW_FPU_REGS(64, 68, 80, 84)
		FMTVR_FPU_REGS(vr12, vr13)
		LDW_FPU_REGS(96, 100, 112, 116)
		FMTVR_FPU_REGS(vr14, vr15)
		: "=a"(tmp1), "=a"(tmp2), "=a"(tmp3),
		  "=a"(tmp4), "+a"(fpregs)
		::"memory");
	}
#endif
	local_irq_restore(flg);
}
New file (+66 −0)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_FPU_H
#define __ASM_CSKY_FPU_H

#include <asm/sigcontext.h>
#include <asm/ptrace.h>

int fpu_libc_helper(struct pt_regs *regs);
void fpu_fpe(struct pt_regs *regs);
void __init init_fpu(void);

void save_to_user_fp(struct user_fp *user_fp);
void restore_from_user_fp(struct user_fp *user_fp);

/*
 * Define the fesr bits for FPE handling.
 */
#define  FPE_ILLE  (1 << 16)    /* Illegal instruction  */
#define  FPE_FEC   (1 << 7)     /* Input float-point arithmetic exception */
#define  FPE_IDC   (1 << 5)     /* Input denormalized exception */
#define  FPE_IXC   (1 << 4)     /* Inexact exception */
#define  FPE_UFC   (1 << 3)     /* Underflow exception */
#define  FPE_OFC   (1 << 2)     /* Overflow exception */
#define  FPE_DZC   (1 << 1)     /* Divide by zero exception */
#define  FPE_IOC   (1 << 0)     /* Invalid operation exception */
#define  FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC)

#ifdef CONFIG_OPEN_FPU_IDE
#define IDE_STAT   (1 << 5)
#else
#define IDE_STAT   0
#endif

#ifdef CONFIG_OPEN_FPU_IXE
#define IXE_STAT   (1 << 4)
#else
#define IXE_STAT   0
#endif

#ifdef CONFIG_OPEN_FPU_UFE
#define UFE_STAT   (1 << 3)
#else
#define UFE_STAT   0
#endif

#ifdef CONFIG_OPEN_FPU_OFE
#define OFE_STAT   (1 << 2)
#else
#define OFE_STAT   0
#endif

#ifdef CONFIG_OPEN_FPU_DZE
#define DZE_STAT   (1 << 1)
#else
#define DZE_STAT   0
#endif

#ifdef CONFIG_OPEN_FPU_IOE
#define IOE_STAT   (1 << 0)
#else
#define IOE_STAT   0
#endif

#endif /* __ASM_CSKY_FPU_H */
New file (+150 −0)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_MMU_CONTEXT_H
#define __ASM_CSKY_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <abi/ckmmu.h>

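/*
 * Convert the kernel virtual address of the pgd into the value
 * programmed into the MMU's pgd register: clear the top (kseg) bit,
 * relocate by PHYS_OFFSET to get the physical address, and set bit 0,
 * presumably a valid/enable flag consumed by setup_pgd().
 */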
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
	pgd &= ~(1<<31);
	pgd += PHYS_OFFSET;
	pgd |= 1;
	setup_pgd(pgd, kernel);
}

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)
#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)

static inline unsigned long tlb_get_pgd(void)
{
	return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
}

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#define ASID_FIRST_VERSION	(1 << CONFIG_CPU_ASID_BITS)
#define ASID_INC		0x1
#define ASID_MASK		(ASID_FIRST_VERSION - 1)
#define ASID_VERSION_MASK	~ASID_MASK
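
/*
 * Worked example, assuming CONFIG_CPU_ASID_BITS == 8 (illustrative
 * value only): ASID_FIRST_VERSION = 0x100 and ASID_MASK = 0xff, so the
 * low 8 bits of asid_cache hold the hardware ASID while the upper bits
 * count how many times the ASID space has wrapped (the "version").
 * switch_mm() below compares versions to decide whether a cached ASID
 * is still valid.
 */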

#define destroy_context(mm)		do {} while (0)
#define enter_lazy_tlb(mm, tsk)		do {} while (0)
#define deactivate_mm(tsk, mm)		do {} while (0)

/*
 * All upper bits not used by the hardware are treated as a
 * software ASID extension.
 */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		cpu_context(i, mm) = 0;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);
	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	/* Unconditionally get a new ASID.  */
	get_new_mmu_context(next, cpu);

	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
		get_new_mmu_context(mm, cpu);
		write_mmu_entryhi(cpu_asid(cpu, mm));
	} else {
		/* will get a new context next time */
		cpu_context(cpu, mm) = 0;
	}

	local_irq_restore(flags);
}

#endif /* __ASM_CSKY_MMU_CONTEXT_H */
New file (+121 −0)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PROCESSOR_H
#define __ASM_CSKY_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

#include <linux/bitops.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>
#include <abi/regdef.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

struct cpuinfo_csky {
	unsigned long udelay_val;
	unsigned long asid_cache;
	/*
	 * Capability and feature descriptor structure for CSKY CPU
	 */
	unsigned long options;
	unsigned int processor_id[4];
	unsigned int fpu_id;
} __aligned(SMP_CACHE_BYTES);

extern struct cpuinfo_csky cpu_data[];

/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE       0x7fff8000UL

#ifdef __KERNEL__
#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   STACK_TOP
#endif

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (TASK_SIZE / 3)

struct thread_struct {
	unsigned long  ksp;       /* kernel stack pointer */
	unsigned long  sr;        /* saved status register */
	unsigned long  esp0;      /* points to SR of stack frame */
	unsigned long  hi;
	unsigned long  lo;

	/* Other stuff associated with the thread. */
	unsigned long address;      /* Last user fault */
	unsigned long error_code;

	/* FPU regs */
	struct user_fp __aligned(16) user_fp;
};

#define INIT_THREAD  { \
	.ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
	.sr = DEFAULT_PSR_VALUE, \
}

/*
 * Do the necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
#define start_thread(_regs, _pc, _usp)					\
do {									\
	set_fs(USER_DS); /* reads from user space */			\
	(_regs)->pc = (_pc);						\
	(_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */	\
	(_regs)->regs[2] = 0;						\
	(_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */		\
	(_regs)->sr &= ~PS_S;						\
	(_regs)->usp = (_usp);						\
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)    do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)
#define forget_segments()		do { } while (0)

extern unsigned long thread_saved_pc(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)

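/*
 * The user-mode pt_regs are saved at the top of the task's kernel
 * stack, so task_pt_regs() points just below THREAD_SIZE on the stack.
 */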
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)

#define cpu_relax() barrier()

#endif /* __ASM_CSKY_PROCESSOR_H */
New file (+36 −0)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SWITCH_TO_H
#define __ASM_CSKY_SWITCH_TO_H

#include <linux/thread_info.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
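/*
 * The FPU context is saved and restored eagerly on every context
 * switch; there is no lazy-FPU handling here.
 */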
static inline void __switch_to_fpu(struct task_struct *prev,
				   struct task_struct *next)
{
	save_to_user_fp(&prev->thread.user_fp);
	restore_from_user_fp(&next->thread.user_fp);
}
#else
static inline void __switch_to_fpu(struct task_struct *prev,
				   struct task_struct *next)
{}
#endif

/*
 * Context switching is now performed out-of-line in switch_to.S
 */
extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);

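/*
 * switch_to() hands the FPU state over in C before the register switch
 * in switch_to.S. "last" receives whatever task_struct __switch_to()
 * returns, conventionally the task that ran just before the current
 * task was scheduled back in.
 */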
#define switch_to(prev, next, last)					\
	do {								\
		struct task_struct *__prev = (prev);			\
		struct task_struct *__next = (next);			\
		__switch_to_fpu(__prev, __next);			\
		((last) = __switch_to((prev), (next)));			\
	} while (0)

#endif /* __ASM_CSKY_SWITCH_TO_H */