
Commit 26bec673 authored by Christoph Hellwig, committed by Arnd Bergmann

[POWERPC] spufs: optimize spu_run



There is no need to directly wake up contexts in spu_activate when
called from spu_run, so add a flag to suppress this wakeup.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
parent 079cdb61
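The idea behind the patch can be illustrated with a minimal, userspace-only sketch. Everything here is invented for illustration (the toy_* names, TOY_ACTIVATE_NOWAKE and the main() driver are not kernel code); only the shape of the change mirrors the patch: callers that are about to sleep waiting for the context anyway, as spu_run does, pass a NOWAKE-style flag so the activate path skips the redundant wakeup, while all other callers keep the old behaviour by passing 0.

/*
 * Minimal userspace sketch of the wakeup-suppression pattern added by this
 * patch.  The toy_* names and TOY_ACTIVATE_NOWAKE are invented stand-ins
 * for spu_activate()/SPU_ACTIVATE_NOWAKE and are not kernel code.
 */
#include <stdio.h>

enum {
	TOY_ACTIVATE_NOWAKE = 1,	/* stand-in for SPU_ACTIVATE_NOWAKE */
};

struct toy_context {
	const char *name;
	int queued;	/* context was placed on the run queue */
	int woken;	/* an immediate wakeup was delivered */
};

/* Stand-in for waking a context sleeping on its stop_wq. */
static void toy_wake(struct toy_context *ctx)
{
	ctx->woken = 1;
}

/*
 * Stand-in for spu_activate(): queue the context and, unless the caller
 * suppressed it, deliver an immediate wakeup.
 */
static int toy_activate(struct toy_context *ctx, unsigned long flags)
{
	ctx->queued = 1;
	if (!(flags & TOY_ACTIVATE_NOWAKE))
		toy_wake(ctx);
	return 0;
}

int main(void)
{
	struct toy_context fault_path = { .name = "page-fault path" };
	struct toy_context run_path   = { .name = "spu_run path" };

	/* Callers such as spufs_ps_nopfn() keep the old behaviour. */
	toy_activate(&fault_path, 0);

	/*
	 * spu_run_init() passes the NOWAKE flag: it is about to wait for
	 * the context itself, so an immediate wakeup is unnecessary.
	 */
	toy_activate(&run_path, TOY_ACTIVATE_NOWAKE);

	printf("%s: queued=%d woken=%d\n",
	       fault_path.name, fault_path.queued, fault_path.woken);
	printf("%s: queued=%d woken=%d\n",
	       run_path.name, run_path.queued, run_path.woken);
	return 0;
}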
arch/powerpc/platforms/cell/spufs/context.c  +2 −2
@@ -163,7 +163,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
  *	Returns 0 and with the context locked on success
  *	Returns negative error and with the context _unlocked_ on failure.
  */
-int spu_acquire_runnable(struct spu_context *ctx)
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
 {
 	int ret = -EINVAL;
 
@@ -174,7 +174,7 @@ int spu_acquire_runnable(struct spu_context *ctx)
 		 */
 		if (!ctx->owner)
 			goto out_unlock;
-		ret = spu_activate(ctx, 0);
+		ret = spu_activate(ctx, flags);
 		if (ret)
 			goto out_unlock;
 	}
arch/powerpc/platforms/cell/spufs/file.c  +2 −2
@@ -164,7 +164,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	/* error here usually means a signal.. we might want to test
 	 * the error code more precisely though
 	 */
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
 		return NOPFN_REFAULT;
 
@@ -1306,7 +1306,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	spu_acquire_runnable(ctx);
+	spu_acquire_runnable(ctx, 0);
 	if (file->f_flags & O_NONBLOCK) {
 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
 	} else {
arch/powerpc/platforms/cell/spufs/run.c  +2 −2
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 	int ret;
 	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	if (ret)
 		return ret;
 
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			spu_release(ctx);
 			ret = spu_setup_isolated(ctx);
 			if (!ret)
-				ret = spu_acquire_runnable(ctx);
+				ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
arch/powerpc/platforms/cell/spufs/sched.c  +6 −4
@@ -247,8 +247,8 @@ static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
-
 	if (!signal_pending(current)) {
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
@@ -256,6 +256,7 @@ static void spu_prio_wait(struct spu_context *ctx)
 	}
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
+	clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
 /**
@@ -275,7 +276,7 @@ static void spu_reschedule(struct spu *spu)
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
 		struct spu_context *ctx = spu_grab_context(best);
-		if (ctx)
+		if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
 			wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
@@ -315,7 +316,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
  * add the context to the runqueue so it gets woken up once an spu
  * is available.
  */
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
 
 	if (ctx->spu)
@@ -331,6 +332,7 @@ int spu_activate(struct spu_context *ctx, u64 flags)
 		}
 
 		spu_add_to_rq(ctx);
-		spu_prio_wait(ctx);
+		if (!(flags & SPU_ACTIVATE_NOWAKE))
+			spu_prio_wait(ctx);
 		spu_del_from_rq(ctx);
 	} while (!signal_pending(current));
arch/powerpc/platforms/cell/spufs/spufs.h  +11 −2
@@ -39,6 +39,11 @@ enum {
 struct spu_context_ops;
 struct spu_gang;
 
+/* ctx->sched_flags */
+enum {
+	SPU_SCHED_WAKE = 0,
+};
+
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -77,6 +82,7 @@ struct spu_context {
 
 	/* scheduler fields */
 	struct list_head rq;
+	unsigned long sched_flags;
 	int prio;
 };
 
@@ -179,10 +185,13 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-int spu_activate(struct spu_context *ctx, u64 flags);
+enum {
+	SPU_ACTIVATE_NOWAKE = 1,
+};
+int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
 int __init spu_sched_init(void);