
Commit 91569531 authored by Luke Browning, committed by Paul Mackerras

[POWERPC] spufs: reorganize spu_run_init



This cleans up spu_run_init so that it does all of the spu
initialization for spufs_run_spu.  It initializes the spu context as
much as possible before it activates the spu and writes the runcntl
register.

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent d6ad39bc
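
In outline, the change moves all context activation into spu_run_init() and defers the switch to user-time accounting until just before the runcntl write. The following is a condensed sketch of the resulting flow, added here as an aid to reading the diff below; it is not verbatim kernel source (the per-mode npc/privcntl setup and the isolated-loader handling are elided, and runcntl is simplified to a constant):

	static int spu_run_init(struct spu_context *ctx, u32 *npc)
	{
		unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;	/* simplified */
		int ret;

		/* Charge the setup work below to system time. */
		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		/* ... per-mode setup elided: npc, privcntl, isolated loader ... */

		/* Activate the context if it is not already loaded on a spu. */
		if (ctx->state == SPU_STATE_SAVED) {
			spu_set_timeslice(ctx);
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}

		/* Only now switch to user-time accounting and start the spu. */
		spuctx_switch_state(ctx, SPU_UTIL_USER);
		ctx->ops->runcntl_write(ctx, runcntl);

		return 0;
	}
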
arch/powerpc/platforms/cell/spufs/run.c +31 −24
@@ -152,23 +152,41 @@ static int spu_setup_isolated(struct spu_context *ctx)
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
 	unsigned long runcntl;
+	int ret;
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		/*
+		 * Force activation of spu.  Isolated state assumes that
+		 * special loader context is loaded and running on spu.
+		 */
+		if (ctx->state == SPU_STATE_SAVED) {
+			spu_set_timeslice(ctx);
+
+			ret = spu_activate(ctx, 0);
+			if (ret)
+				return ret;
+		}
 
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			int ret = spu_setup_isolated(ctx);
+			ret = spu_setup_isolated(ctx);
 			if (ret)
 				return ret;
 		}
 
-		/* if userspace has set the runcntrl register (eg, to issue an
-		 * isolated exit), we need to re-set it here */
+		/*
+		 * If userspace has set the runcntrl register (eg, to
+		 * issue an isolated exit), we need to re-set it here
+		 */
 		runcntl = ctx->ops->runcntl_read(ctx) &
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+
+		spuctx_switch_state(ctx, SPU_UTIL_USER);
+		ctx->ops->runcntl_write(ctx, runcntl);
+
 	} else {
 		unsigned long privcntl;
 
@@ -180,11 +198,17 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 
 		ctx->ops->npc_write(ctx, *npc);
 		ctx->ops->privcntl_write(ctx, privcntl);
-	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
+		if (ctx->state == SPU_STATE_SAVED) {
+			spu_set_timeslice(ctx);
+			ret = spu_activate(ctx, 0);
+			if (ret)
+				return ret;
+		}
 
-	spuctx_switch_state(ctx, SPU_UTIL_USER);
+		spuctx_switch_state(ctx, SPU_UTIL_USER);
+		ctx->ops->runcntl_write(ctx, runcntl);
+	}
 
 	return 0;
 }
@@ -323,25 +347,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	ctx->event_return = 0;
 
 	spu_acquire(ctx);
-	if (ctx->state == SPU_STATE_SAVED) {
-		__spu_update_sched_info(ctx);
-		spu_set_timeslice(ctx);
 
-		ret = spu_activate(ctx, 0);
-		if (ret) {
-			spu_release(ctx);
-			goto out;
-		}
-	} else {
-		/*
-		 * We have to update the scheduling priority under active_mutex
-		 * to protect against find_victim().
-		 *
-		 * No need to update the timeslice ASAP, it will get updated
-		 * once the current one has expired.
-		 */
-		spu_update_sched_info(ctx);
-	}
+	spu_update_sched_info(ctx);
 
 	ret = spu_run_init(ctx, npc);
 	if (ret) {
arch/powerpc/platforms/cell/spufs/sched.c +25 −10
@@ -104,6 +104,12 @@ void spu_set_timeslice(struct spu_context *ctx)
  */
 void __spu_update_sched_info(struct spu_context *ctx)
 {
+	/*
+	 * assert that the context is not on the runqueue, so it is safe
+	 * to change its scheduling parameters.
+	 */
+	BUG_ON(!list_empty(&ctx->rq));
+
 	/*
 	 * 32-Bit assignments are atomic on powerpc, and we don't care about
 	 * memory ordering here because retrieving the controlling thread is
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	ctx->policy = current->policy;
 
 	/*
-	 * A lot of places that don't hold list_mutex poke into
-	 * cpus_allowed, including grab_runnable_context which
-	 * already holds the runq_lock.  So abuse runq_lock
-	 * to protect this field as well.
+	 * TO DO: the context may be loaded, so we may need to activate
+	 * it again on a different node. But it shouldn't hurt anything
+	 * to update its parameters, because we know that the scheduler
+	 * is not actively looking at this field, since it is not on the
+	 * runqueue. The context will be rescheduled on the proper node
+	 * if it is timesliced or preempted.
 	 */
-	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
-	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-	int node = ctx->spu->node;
+	int node;
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
-	__spu_update_sched_info(ctx);
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		node = ctx->spu->node;
+		mutex_lock(&cbe_spu_info[node].list_mutex);
+		__spu_update_sched_info(ctx);
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+	} else {
+		__spu_update_sched_info(ctx);
+	}
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			 * higher priority contexts before lower priority
 			 * ones, so this is safe until we introduce
 			 * priority inheritance schemes.
+			 *
+			 * XXX if the highest priority context is locked,
+			 * this can loop a long time.  Might be better to
+			 * look at another context or give up after X retries.
 			 */
 			if (!mutex_trylock(&victim->state_mutex)) {
 				victim = NULL;
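
Taken together, the two halves of the change leave spufs_run_spu() with a single, state-independent entry sequence: spu_update_sched_info() now picks its own locking (list_mutex only when the context is runnable; a saved context is off the runqueue, as the new BUG_ON asserts), and spu_run_init() performs all activation. The following condensed sketch of the resulting entry path is an editorial illustration, not verbatim source; the run loop, event handling, and exit path are elided:

	long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
	{
		int ret;

		spu_acquire(ctx);

		/*
		 * Safe whether the context is saved or runnable: the callee
		 * takes list_mutex only in the runnable case, and a saved
		 * context is off the runqueue (see the BUG_ON added above).
		 */
		spu_update_sched_info(ctx);

		/* All activation and runcntl work now happens here. */
		ret = spu_run_init(ctx, npc);

		/* ... run loop, event handling and spu_release() elided ... */
		return ret;
	}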