
Commit d6ad39bc authored by Jeremy Kerr, committed by Paul Mackerras

[POWERPC] spufs: rework class 0 and 1 interrupt handling



Based on original patches from
 Arnd Bergmann <arnd.bergman@de.ibm.com>; and
 Luke Browning <lukebr@linux.vnet.ibm.com>

Currently, spu contexts need to be loaded to the SPU in order to take
class 0 and class 1 exceptions.

This change makes the actual interrupt handlers much simpler (i.e., they
just set the exception information in the context save area), and defers
the handling to the spufs_handle_class[01] functions, called from
spufs_run_spu.

This should improve the concurrency of SPU scheduling, leading to
greater SPU utilization when SPUs are overcommitted.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 8af30675
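
For orientation, here is a minimal standalone model of the flow this commit
creates. It is an illustration only, not code from the commit: the CLASS0_*
bit values and struct layouts are simplified placeholders for the real
definitions in spu.h, and the real interrupt handler also latches dar/dsisr
and wakes the context's stop_wq.

#include <stdio.h>

/* simplified placeholders; the real values live in asm/spu.h */
#define CLASS0_DMA_ALIGNMENT_INTR	0x1UL
#define CLASS0_INVALID_DMA_COMMAND_INTR	0x2UL
#define CLASS0_SPU_ERROR_INTR		0x4UL
#define CLASS0_INTR_MASK		0x7UL

struct spu_state	{ unsigned long class_0_pending, dar, dsisr; };
struct spu_context	{ struct spu_state csa; };

/* interrupt time: latch only (cf. spu_irq_class_0 plus the copy into
 * ctx->csa done by spufs_stop_callback) */
static void irq_class_0(struct spu_context *ctx, unsigned long stat)
{
	ctx->csa.class_0_pending |= stat;
}

/* process context: the former bottom half, now driven by the runner
 * (cf. spufs_handle_class0, called from spufs_run_spu) */
static int handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (!stat)
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		printf("deliver SPE_EVENT_DMA_ALIGNMENT\n");
	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		printf("deliver SPE_EVENT_INVALID_DMA\n");
	if (stat & CLASS0_SPU_ERROR_INTR)
		printf("deliver SPE_EVENT_SPE_ERROR\n");

	return -1;	/* the kernel returns -EIO here */
}

int main(void)
{
	struct spu_context ctx = { { 0, 0, 0 } };

	irq_class_0(&ctx, CLASS0_SPU_ERROR_INTR);	/* "interrupt" fires */
	return handle_class0(&ctx) ? 1 : 0;		/* runner handles it */
}

The scheduling win described above falls out of this split: a context no
longer needs to be loaded on an SPU for its pending exceptions to be handled.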
arch/powerpc/platforms/cell/spu_base.c  +7 −50
@@ -132,27 +132,6 @@ int spu_64k_pages_available(void)
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -252,10 +231,12 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
		return 1;
	}

	spu->class_0_pending = 0;
	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();

	spu->stop_callback(spu);

	return 0;
}

@@ -335,12 +316,13 @@ spu_irq_class_0(int irq, void *data)

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);
	stat &= mask;
	stat = spu_int_stat_get(spu, 0) & mask;

	spin_lock(&spu->register_lock);
	spu->class_0_pending |= stat;
	spu->dsisr = spu_mfc_dsisr_get(spu);
	spu->dar = spu_mfc_dar_get(spu);
	spin_unlock(&spu->register_lock);

	spu->stop_callback(spu);
@@ -350,31 +332,6 @@ spu_irq_class_0(int irq, void *data)
	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long flags;
	unsigned long stat;

	spin_lock_irqsave(&spu->register_lock, flags);
	stat = spu->class_0_pending;
	spu->class_0_pending = 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		__spu_trap_dma_align(spu);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		__spu_trap_invalid_dma(spu);

	if (stat & CLASS0_SPU_ERROR_INTR)
		__spu_trap_error(spu);

	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
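
Worth noting about the reworked top half above: the register_lock critical
section now covers the reads of the interrupt status and the MFC DAR/DSISR
registers, not just the update of class_0_pending, so the snapshot taken at
interrupt time stays consistent even if another interrupt follows
immediately. A standalone sketch of this latch-and-wake pattern, using
pthreads in place of the kernel spinlock and wait queue (all names here are
illustrative, not from the commit):

#include <pthread.h>

struct latch {
	pthread_mutex_t	lock;
	pthread_cond_t	wake;
	unsigned long	pending, dar, dsisr;
};

#define LATCH_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0 }

/* producer, cf. spu_irq_class_0: accumulate status, snapshot registers */
void latch_event(struct latch *l, unsigned long stat,
		 unsigned long dar, unsigned long dsisr)
{
	pthread_mutex_lock(&l->lock);
	l->pending |= stat;	/* |= so back-to-back interrupts merge */
	l->dar = dar;
	l->dsisr = dsisr;
	pthread_mutex_unlock(&l->lock);
	pthread_cond_broadcast(&l->wake);	/* cf. the stop_callback wakeup */
}

/* consumer, cf. the runner: take and clear the snapshot atomically */
unsigned long take_pending(struct latch *l)
{
	unsigned long stat;

	pthread_mutex_lock(&l->lock);
	stat = l->pending;
	l->pending = 0;
	pthread_mutex_unlock(&l->lock);
	return stat;
}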
arch/powerpc/platforms/cell/spufs/fault.c  +65 −42
@@ -28,14 +28,23 @@

#include "spufs.h"

static void spufs_handle_dma_error(struct spu_context *ctx,
/**
 * Handle an SPE event, depending on context SPU_CREATE_EVENTS_ENABLED flag.
 *
 * If the context was created with events, we just set the return event.
 * Otherwise, send an appropriate signal to the process.
 */
static void spufs_handle_event(struct spu_context *ctx,
				unsigned long ea, int type)
{
	siginfo_t info;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		siginfo_t info;
		return;
	}

	memset(&info, 0, sizeof(info));

	switch (type) {
@@ -60,14 +69,28 @@ static void spufs_handle_dma_error(struct spu_context *ctx,
		info.si_code = ILL_ILLOPC;
		break;
	}

	if (info.si_signo)
		force_sig_info(info.si_signo, &info, current);
}
}

void spufs_dma_callback(struct spu *spu, int type)
int spufs_handle_class0(struct spu_context *ctx)
{
	spufs_handle_dma_error(spu->ctx, spu->dar, type);
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);

	return -EIO;
}

/*
@@ -95,16 +118,8 @@ int spufs_handle_class1(struct spu_context *ctx)
	 * in time, we can still expect to get the same fault
	 * the immediately after the context restore.
	 */
	if (ctx->state == SPU_STATE_RUNNABLE) {
		ea = ctx->spu->dar;
		dsisr = ctx->spu->dsisr;
		ctx->spu->dar= ctx->spu->dsisr = 0;
	} else {
		ea = ctx->csa.priv1.mfc_dar_RW;
		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
		ctx->csa.priv1.mfc_dar_RW = 0;
		ctx->csa.priv1.mfc_dsisr_RW = 0;
	}
	ea = ctx->csa.dar;
	dsisr = ctx->csa.dsisr;

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;
@@ -132,6 +147,14 @@ int spufs_handle_class1(struct spu_context *ctx)
		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

	spu_acquire(ctx);

	/*
	 * Clear dsisr under ctxt lock after handling the fault, so that
	 * time slicing will not preempt the context while the page fault
	 * handler is running. Context switch code removes mappings.
	 */
	ctx->csa.dar = ctx->csa.dsisr = 0;

	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
@@ -152,7 +175,7 @@ int spufs_handle_class1(struct spu_context *ctx)
		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
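
For contexts created without SPU_CREATE_EVENTS_ENABLED, the events above
surface as plain signals via the force_sig_info() branch. A userspace-side
sketch of observing one, assuming the usual SIGBUS delivery for DMA faults
(illustrative only; the precise si_code for each SPE_EVENT_* type is set in
the switch body this hunk leaves unchanged):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* SIGBUS covers the DMA-related events; si_addr carries the faulting
 * effective address when the kernel has one to report */
static void on_sigbus(int sig, siginfo_t *info, void *uctx)
{
	/* fprintf is not async-signal-safe; fine for an illustration */
	fprintf(stderr, "SIGBUS si_code=%d si_addr=%p\n",
		info->si_code, info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigbus;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	/* ... create an SPE context without SPU_CREATE_EVENTS_ENABLED and
	 * run it; a DMA fault that cannot be resolved is then reported
	 * to the process as a signal like this one ... */
	return 0;
}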
arch/powerpc/platforms/cell/spufs/run.c  +34 −16
@@ -15,9 +15,32 @@ void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		ctx->csa.class_0_pending = spu->class_0_pending;
		ctx->csa.dsisr = spu->dsisr;
		ctx->csa.dar = spu->dar;

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}

	/* Clear callback arguments from spu structure */
	spu->class_0_pending = 0;
	spu->dsisr = 0;
	spu->dar = 0;
}

static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	struct spu *spu;
@@ -29,9 +52,9 @@ static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
	if (ctx->state != SPU_STATE_RUNNABLE ||
	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;
	pte_fault = spu->dsisr &
	pte_fault = ctx->csa.dsisr &
	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
		1 : 0;
}

@@ -287,18 +310,6 @@ static int spu_process_callback(struct spu_context *ctx)
	return ret;
}

static inline int spu_process_events(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	int ret = 0;

	if (spu->class_0_pending)
		ret = spu_irq_class_0_bottom(spu);
	if (!ret && signal_pending(current))
		ret = -ERESTARTSYS;
	return ret;
}

long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
@@ -364,13 +375,20 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out2;
			continue;
		}
		ret = spu_process_events(ctx);

		if (signal_pending(current))
			ret = -ERESTARTSYS;


	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
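
The smp_wmb() added in spufs_stop_callback pairs with the reads in
spu_stopped(): the exception state must be visible in memory before the
woken runner tests csa.class_0_pending and csa.dsisr. A standalone C11
model of that publish/consume ordering (an analogy only; the kernel pairs
smp_wmb() with the ordering implied by the wait-queue wakeup rather than an
explicit acquire, and the fault_bits parameter below is a placeholder for
the MFC_DSISR_* mask):

#include <stdatomic.h>
#include <stdbool.h>

struct csa_model { unsigned long class_0_pending, dar, dsisr; };

static struct csa_model csa;
static atomic_bool stop_pending;

/* writer, cf. spufs_stop_callback: fill the save area, then publish */
void stop_callback_model(unsigned long pending, unsigned long dar,
			 unsigned long dsisr)
{
	csa.class_0_pending = pending;
	csa.dar = dar;
	csa.dsisr = dsisr;
	/* release = "exception status has hit memory before a thread
	 * waiting on the stop queue is woken" */
	atomic_store_explicit(&stop_pending, true, memory_order_release);
}

/* reader, cf. spu_stopped: only inspect csa once the flag is seen */
bool runner_stopped(unsigned long fault_bits)
{
	if (!atomic_load_explicit(&stop_pending, memory_order_acquire))
		return false;
	/* fault_bits stands in for PTE_NOT_FOUND | ACCESS_DENIED */
	return csa.class_0_pending != 0 || (csa.dsisr & fault_bits);
}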
arch/powerpc/platforms/cell/spufs/sched.c  +0 −2
@@ -245,7 +245,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
@@ -433,7 +432,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
arch/powerpc/platforms/cell/spufs/spufs.h  +1 −0
@@ -222,6 +222,7 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);

/* fault handling */
int spufs_handle_class1(struct spu_context *ctx);
int spufs_handle_class0(struct spu_context *ctx);

/* affinity */
struct spu *affinity_check(struct spu_context *ctx);