Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7cd58e43 authored by Jeremy Kerr's avatar Jeremy Kerr Committed by Paul Mackerras
Browse files

[POWERPC] spufs: move fault, lscsa_alloc and switch code to spufs module



Currently, part of the spufs code (switch.o, lscsa_alloc.o and fault.o)
is compiled directly into the kernel.

This change moves these components into the spufs module.

The lscsa and switch objects are fairly straightforward to move in.

For the fault.o module, we split the fault-handling code into two
parts: arch/powerpc/platforms/cell/spu_fault.c and
arch/powerpc/platforms/cell/spufs/fault.c. The former is for
the in-kernel spu_handle_mm_fault function, and we move the rest of the
fault-handling code into spufs.

Signed-off-by: default avatarJeremy Kerr <jk@ozlabs.org>
Signed-off-by: default avatarPaul Mackerras <paulus@samba.org>
parent 9b1d21f8
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -19,7 +19,7 @@ spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o
spu-manage-$(CONFIG_PPC_CELL_NATIVE)	+= spu_manage.o
spu-manage-$(CONFIG_PPC_CELL_NATIVE)	+= spu_manage.o


obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
					   spu_syscalls.o \
					   spu_syscalls.o spu_fault.o \
					   $(spu-priv1-y) \
					   $(spu-priv1-y) \
					   $(spu-manage-y) \
					   $(spu-manage-y) \
					   spufs/
					   spufs/
+98 −0
Original line number Original line Diff line number Diff line
/*
 * SPU mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

/*
 * Resolve a page fault raised by an SPU's MFC DMA transfer on behalf of
 * the owning process.
 *
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that we haven't had
 * to handle fortunately.
 *
 * @mm:    mm of the faulting context; may be NULL if it has gone away
 * @ea:    faulting effective address
 * @dsisr: MFC DSISR fault status for the transfer
 * @flt:   out parameter; receives the VM_FAULT_* mask from handle_mm_fault
 *
 * Returns 0 on success, -EFAULT when no valid mapping covers @ea or the
 * access is not permitted, and -ENOMEM when the fault could not be
 * serviced due to memory pressure.
 */
int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, unsigned *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret = -EFAULT;

#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	/* The faulting context may already have exited and dropped its mm. */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto out_unlock;
	if (expand_stack(vma, ea))
		goto out_unlock;
good_area:
	/* An MFC "put" writes to process memory; everything else reads. */
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto out_unlock;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
	}
	ret = 0;
	*flt = handle_mm_fault(mm, vma, ea, is_write);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			/*
			 * Propagate -ENOMEM to the caller. Previously the
			 * shared bad_area exit hard-coded -EFAULT, silently
			 * discarding this assignment.
			 */
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & VM_FAULT_SIGBUS) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}
	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

out_unlock:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
+1 −1
Original line number Original line Diff line number Diff line
obj-y += switch.o fault.o lscsa_alloc.o


obj-$(CONFIG_SPU_FS) += spufs.o
obj-$(CONFIG_SPU_FS) += spufs.o
spufs-y += inode.o file.o context.o syscalls.o coredump.o
spufs-y += inode.o file.o context.o syscalls.o coredump.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += switch.o fault.o lscsa_alloc.o


# Rules to build switch.o with the help of SPU tool chain
# Rules to build switch.o with the help of SPU tool chain
SPU_CROSS	:= spu-
SPU_CROSS	:= spu-
+0 −71
Original line number Original line Diff line number Diff line
@@ -28,75 +28,6 @@


#include "spufs.h"
#include "spufs.h"


/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that we haven't had
 * to handle fortunately.
 */
/*
 * Resolve a page fault raised by an SPU DMA transfer against the owning
 * process's address space.
 *
 * Returns 0 on success and a negative errno on failure; the raw
 * VM_FAULT_* mask is passed back through @flt. NOTE(review): the
 * ret = -ENOMEM assignment below is discarded because the shared
 * bad_area exit returns -EFAULT unconditionally.
 */
static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, unsigned *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	/* The faulting context may already have exited and dropped its mm. */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	/* ea below the vma start is only valid for a growable stack vma. */
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;
good_area:
	/* An MFC "put" writes to process memory; everything else reads. */
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	*flt = handle_mm_fault(mm, vma, ea, is_write);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto bad_area;
		} else if (*flt & VM_FAULT_SIGBUS) {
			ret = -EFAULT;
			goto bad_area;
		}
		/* Unknown VM_FAULT_ERROR bit: no sane recovery possible. */
		BUG();
	}
	/* Account the fault against the current task's counters. */
	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

static void spufs_handle_dma_error(struct spu_context *ctx,
static void spufs_handle_dma_error(struct spu_context *ctx,
				unsigned long ea, int type)
				unsigned long ea, int type)
{
{
@@ -138,7 +69,6 @@ void spufs_dma_callback(struct spu *spu, int type)
{
{
	spufs_handle_dma_error(spu->ctx, spu->dar, type);
	spufs_handle_dma_error(spu->ctx, spu->dar, type);
}
}
EXPORT_SYMBOL_GPL(spufs_dma_callback);


/*
/*
 * bottom half handler for page faults, we can't do this from
 * bottom half handler for page faults, we can't do this from
@@ -227,4 +157,3 @@ int spufs_handle_class1(struct spu_context *ctx)
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
	return ret;
}
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);
+2 −0
Original line number Original line Diff line number Diff line
@@ -28,6 +28,8 @@
#include <asm/spu_csa.h>
#include <asm/spu_csa.h>
#include <asm/mmu.h>
#include <asm/mmu.h>


#include "spufs.h"

static int spu_alloc_lscsa_std(struct spu_state *csa)
static int spu_alloc_lscsa_std(struct spu_state *csa)
{
{
	struct spu_lscsa *lscsa;
	struct spu_lscsa *lscsa;
Loading