Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 13177c8b authored by Paul Mackerras
Browse files

Merge branch 'spufs' of master.kernel.org:/pub/scm/linux/kernel/git/arnd/cell-2.6 into for-2.6.22

parents 445c9b55 ccf17e9d
Loading
Loading
Loading
Loading
+17 −144
Original line number Diff line number Diff line
@@ -36,6 +36,8 @@
#include <asm/xmon.h>

/* Platform hooks for SPU enumeration/creation; used via
 * spu_enumerate_spus()/create_spu() during init_spu_base(). */
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

/* Hooks for privileged (priv1) SPU register access. */
const struct spu_priv1_ops *spu_priv1_ops;

/* Per-NUMA-node lists of free SPUs; spu_alloc_node() takes from the
 * head under spu_mutex. */
static struct list_head spu_list[MAX_NUMNODES];
@@ -290,7 +292,6 @@ spu_irq_class_1(int irq, void *data)

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data)
@@ -431,10 +432,11 @@ struct spu *spu_alloc_node(int node)
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);
	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);
@@ -461,108 +463,6 @@ void spu_free(struct spu *spu)
}
EXPORT_SYMBOL_GPL(spu_free);

/*
 * Resolve an SPU-side data storage fault by walking the owning
 * process' mm: locate the VMA covering the faulting address, check
 * access permissions against the MFC DSISR bits, and let
 * handle_mm_fault() bring the page in.
 *
 * Returns 0 on success, -EFAULT when there is no mm/VMA covering the
 * address or the access is not permitted, -ENOMEM when the fault
 * handler runs out of memory.
 */
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret = -EFAULT;	/* default result for all bad_area exits */

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	/* Address below the VMA: only valid for a growable stack. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	/*
	 * Return ret rather than a hard-coded -EFAULT so the -ENOMEM
	 * set in the VM_FAULT_OOM case above is not silently turned
	 * into -EFAULT.
	 */
	up_read(&mm->mmap_sem);
	return ret;
}

/*
 * Bottom half of the class 1 (data storage) SPU interrupt: first try
 * to resolve the faulting translation directly via hash_page(),
 * falling back to a full mm fault, then either restart the stalled
 * DMA or report the failure to the context owner via dma_callback.
 *
 * Returns 0 on success, or the error from spu_handle_mm_fault().
 */
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		/* Fast path: insert the HPTE directly, with interrupts
		 * off; 0x300 is the data storage interrupt vector. */
		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		/* Slow path: fault the page in through the mm; clear
		 * the error bit again if that succeeds. */
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	/* Fault state consumed; reset for the next exception. */
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
	}
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};
@@ -636,12 +536,6 @@ static int spu_create_sysdev(struct spu *spu)
	return 0;
}

/* Tear down the sysdev representation of one SPU; the node symlink
 * must be removed before the sysdev itself is unregistered. */
static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
	sysdev_unregister(&spu->sysdev);
}

static int __init create_spu(void *data)
{
	struct spu *spu;
@@ -693,58 +587,37 @@ static int __init create_spu(void *data)
	return ret;
}

/* Reverse of create_spu(): unlink the SPU from the per-node and full
 * lists, drop its sysdev, interrupts and low-level state, then free
 * it. Called under spu_mutex by cleanup_spu_base(). */
static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	kfree(spu);
}

/* Module exit path: destroy every SPU on every node while holding
 * spu_mutex, then drop the sysdev class registered at init. */
static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		/* _safe variant: destroy_spu() unlinks entries. */
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

/*
 * Initialise SPU base support: set up the per-node free lists,
 * register the "spu" sysdev class and enumerate all SPUs present.
 *
 * Returns 0 on success (including when no spu_management_ops are set,
 * i.e. the platform provides no SPUs), or a negative errno.
 *
 * NOTE(review): the original span contained both pre- and post-merge
 * diff lines fused together (duplicate declarations, duplicate
 * INIT_LIST_HEAD loop, parallel return/goto branches); this is the
 * coherent post-merge version.
 */
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	/* No management ops: platform has no SPUs, succeed trivially. */
	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);
	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		goto out_unregister_sysdev_class;
	}

	xmon_register_spus(&spu_full_list);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);
+16 −18
Original line number Diff line number Diff line
@@ -26,19 +26,18 @@

#include <asm/spu.h>

/* Currently registered coredump callback provider, or NULL when none
 * is loaded; all access is serialized by spu_coredump_mutex.
 * (The fused pre-merge struct-valued declaration is dropped; the
 * post-merge code uses the pointer form throughout.) */
static struct spu_coredump_calls *spu_coredump_calls;
static DEFINE_MUTEX(spu_coredump_mutex);

/*
 * Size of the SPU ELF coredump notes for the current process, or
 * -ENOSYS when no callback provider is registered. The provider
 * pointer is tested and its module pinned under spu_coredump_mutex,
 * closing the race present in the pre-merge (unlocked owner check)
 * version.
 */
int arch_notes_size(void)
{
	long ret;

	ret = -ENOSYS;
	mutex_lock(&spu_coredump_mutex);
	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
		ret = spu_coredump_calls->arch_notes_size();
		module_put(spu_coredump_calls->owner);
	}
	mutex_unlock(&spu_coredump_mutex);
	return ret;
}
@@ -46,36 +45,35 @@ int arch_notes_size(void)

/*
 * Write the SPU ELF notes into a coredump file; silently a no-op when
 * no callback provider is registered. Provider check and module
 * pinning happen under spu_coredump_mutex (the fused pre-merge lines
 * checked the owner outside the lock).
 */
void arch_write_notes(struct file *file)
{
	mutex_lock(&spu_coredump_mutex);
	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
		spu_coredump_calls->arch_write_notes(file);
		module_put(spu_coredump_calls->owner);
	}
	mutex_unlock(&spu_coredump_mutex);
}

/*
 * Register @calls as the coredump callback provider. Only one
 * provider may be registered at a time.
 *
 * Returns 0 on success, -EBUSY if a provider is already registered.
 * The busy check and the assignment are both done under
 * spu_coredump_mutex (the pre-merge version tested outside the lock).
 */
int register_arch_coredump_calls(struct spu_coredump_calls *calls)
{
	int ret = 0;

	mutex_lock(&spu_coredump_mutex);
	if (spu_coredump_calls)
		ret = -EBUSY;
	else
		spu_coredump_calls = calls;
	mutex_unlock(&spu_coredump_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_arch_coredump_calls);

/*
 * Unregister the coredump callback provider. @calls must be the set
 * that is currently registered; anything else is a caller bug.
 */
void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
{
	BUG_ON(spu_coredump_calls != calls);

	mutex_lock(&spu_coredump_mutex);
	spu_coredump_calls = NULL;
	mutex_unlock(&spu_coredump_mutex);
}
EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
+1 −1
Original line number Diff line number Diff line
# fault.o is new in this merge (the fused duplicate pre-merge
# "obj-y += switch.o" line is dropped).
obj-y += switch.o fault.o

obj-$(CONFIG_SPU_FS) += spufs.o
spufs-y += inode.o file.o context.o syscalls.o coredump.o
+6 −0
Original line number Diff line number Diff line
@@ -350,6 +350,11 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx,
	return ret;
}

/* restart_dma op for the backing (saved-context) ops table: a saved
 * context has no MFC transfer to restart, so this is a stub —
 * presumably the restart happens when the context is loaded back onto
 * an SPU; confirm against the hardware ops table. */
static void spu_backing_restart_dma(struct spu_context *ctx)
{
	/* nothing to do here */
}

struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -376,4 +381,5 @@ struct spu_context_ops spu_backing_ops = {
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};
+4 −41
Original line number Diff line number Diff line
@@ -41,9 +41,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
		goto out_free;
	}
	spin_lock_init(&ctx->mmio_lock);
	spin_lock_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	init_MUTEX(&ctx->run_sema);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
@@ -51,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	if (gang)
		spu_gang_add_ctx(gang, ctx);
	ctx->rt_priority = current->rt_priority;
@@ -75,6 +77,7 @@ void destroy_spu_context(struct kref *kref)
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	BUG_ON(!list_empty(&ctx->rq));
	kfree(ctx);
}

@@ -118,46 +121,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
}

/**
 * spu_acquire_exclusive - lock spu context and protect against userspace access
 * @ctx:	spu context to lock
 *
 * Note:
 *	Returns 0 and with the context locked on success
 *	Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_exclusive(struct spu_context *ctx)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	/*
	 * Context is about to be freed, so we can't acquire it anymore.
	 */
	if (!ctx->owner)
		goto out_unlock;

	if (ctx->state == SPU_STATE_SAVED) {
		/* Saved context: make it runnable while holding the
		 * lock; no live mappings to exclude in this state —
		 * NOTE(review): confirm against spu_unmap_mappings(). */
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * We need to exclude userspace access to the context.
		 *
		 * To protect against memory access we invalidate all ptes
		 * and make sure the pagefault handlers block on the mutex.
		 */
		spu_unmap_mappings(ctx);
	}

	return 0;

 out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
 * @ctx:	spu contex to lock
Loading