
Commit 3a5c359a authored by Andi Kleen, committed by Ingo Molnar

sched: cleanup: remove unnecessary gotos



Replace loops implemented with gotos with real loops.
Replace err = ...; goto x; x: return err; with return ...;

No functional changes.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d274a4ce
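
The two rewrites described in the commit message, shown as a minimal before/after sketch. This is illustrative only and not part of the patch; try_op() and lookup() are hypothetical stand-ins for the real scheduler code.

#include <errno.h>

/* Hypothetical stubs standing in for real scheduler code. */
static int try_op(void) { return 1; }		/* non-zero once the work is done */
static int lookup(int id) { return id; }	/* placeholder lookup */

/* Pattern 1, before: a loop spelled with a label and a goto. */
static void wait_until_done_goto(void)
{
repeat:
	if (!try_op())
		goto repeat;
}

/* Pattern 1, after: the same control flow as a real loop. */
static void wait_until_done_loop(void)
{
	while (!try_op())
		;
}

/* Pattern 2, before: err = ...; goto out; out: return err; */
static int get_value_goto(int id)
{
	int err = -EINVAL;

	if (id < 0)
		goto out;
	err = lookup(id);
out:
	return err;
}

/* Pattern 2, after: return the value directly. */
static int get_value_direct(int id)
{
	if (id < 0)
		return -EINVAL;
	return lookup(id);
}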
+162 −165
@@ -562,16 +562,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 static inline struct rq *__task_rq_lock(struct task_struct *p)
 	__acquires(rq->lock)
 {
-	struct rq *rq;
-
-repeat_lock_task:
-	rq = task_rq(p);
+	for (;;) {
+		struct rq *rq = task_rq(p);
 		spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
+		if (likely(rq == task_rq(p)))
+			return rq;
 		spin_unlock(&rq->lock);
-		goto repeat_lock_task;
 	}
-	return rq;
 }
 
 /*
@@ -584,15 +581,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 {
 	struct rq *rq;
 
-repeat_lock_task:
+	for (;;) {
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
+		if (likely(rq == task_rq(p)))
+			return rq;
 		spin_unlock_irqrestore(&rq->lock, *flags);
-		goto repeat_lock_task;
 	}
-	return rq;
 }
 
 static void __task_rq_unlock(struct rq *rq)
@@ -1083,7 +1079,7 @@ void wait_task_inactive(struct task_struct *p)
 	int running, on_rq;
 	struct rq *rq;
 
-repeat:
+	for (;;) {
 		/*
 		 * We do the initial early heuristics without holding
 		 * any task-queue locks at all. We'll only try to get
@@ -1124,7 +1120,7 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		if (unlikely(running)) {
 			cpu_relax();
-		goto repeat;
+			continue;
 		}
 
 		/*
@@ -1138,7 +1134,7 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		if (unlikely(on_rq)) {
 			schedule_timeout_uninterruptible(1);
-		goto repeat;
+			continue;
 		}
 
 		/*
@@ -1146,6 +1142,8 @@ void wait_task_inactive(struct task_struct *p)
 		 * runnable, which means that it will never become
 		 * running in the future either. We're all done!
 		 */
+		break;
+	}
 }
 
 /***
@@ -1236,7 +1234,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)

 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
-			goto nextgroup;
+			continue;
 
 		local_group = cpu_isset(this_cpu, group->cpumask);
 
@@ -1264,9 +1262,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			min_load = avg_load;
 			idlest = group;
 		}
-nextgroup:
-		group = group->next;
-	} while (group != sd->groups);
+	} while (group = group->next, group != sd->groups);
 
 	if (!idlest || 100*this_load < imbalance*min_load)
 		return NULL;
@@ -3517,8 +3513,9 @@ asmlinkage void __sched preempt_schedule(void)
 	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
 
-need_resched:
+	do {
 		add_preempt_count(PREEMPT_ACTIVE);
+
 		/*
 		 * We keep the big kernel semaphore locked, but we
 		 * clear ->lock_depth so that schedule() doesnt
@@ -3534,10 +3531,12 @@ asmlinkage void __sched preempt_schedule(void)
 #endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
-	/* we could miss a preemption opportunity between schedule and now */
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
 		barrier();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-		goto need_resched;
+	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -3557,8 +3556,9 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
-need_resched:
+	do {
 		add_preempt_count(PREEMPT_ACTIVE);
+
 		/*
 		 * We keep the big kernel semaphore locked, but we
 		 * clear ->lock_depth so that schedule() doesnt
@@ -3576,10 +3576,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
 #endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
-	/* we could miss a preemption opportunity between schedule and now */
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
 		barrier();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-		goto need_resched;
+	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 
 #endif /* CONFIG_PREEMPT */
@@ -4324,10 +4326,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
 	struct task_struct *p;
-	int retval = -EINVAL;
+	int retval;
 
 	if (pid < 0)
-		goto out_nounlock;
+		return -EINVAL;
 
 	retval = -ESRCH;
 	read_lock(&tasklist_lock);
@@ -4338,8 +4340,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
 			retval = p->policy;
 	}
 	read_unlock(&tasklist_lock);
-
-out_nounlock:
 	return retval;
 }
 
@@ -4352,10 +4352,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
-	int retval = -EINVAL;
+	int retval;
 
 	if (!param || pid < 0)
-		goto out_nounlock;
+		return -EINVAL;
 
 	read_lock(&tasklist_lock);
 	p = find_process_by_pid(pid);
@@ -4375,7 +4375,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 	 */
 	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
 
-out_nounlock:
 	return retval;
 
 out_unlock:
@@ -4731,11 +4730,11 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
-	int retval = -EINVAL;
+	int retval;
 	struct timespec t;
 
 	if (pid < 0)
-		goto out_nounlock;
+		return -EINVAL;
 
 	retval = -ESRCH;
 	read_lock(&tasklist_lock);
@@ -4763,8 +4762,8 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-out_nounlock:
 	return retval;
+
 out_unlock:
 	read_unlock(&tasklist_lock);
 	return retval;
@@ -5070,7 +5069,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 	struct rq *rq;
 	int dest_cpu;
 
-restart:
+	do {
 		/* On same node? */
 		mask = node_to_cpumask(cpu_to_node(dead_cpu));
 		cpus_and(mask, mask, p->cpus_allowed);
@@ -5097,8 +5096,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
				       "longer affine to cpu%d\n",
				       p->pid, p->comm, dead_cpu);
		}
	if (!__migrate_task(p, dead_cpu, dest_cpu))
		goto restart;
	} while (!__migrate_task(p, dead_cpu, dest_cpu));
}

/*
@@ -5913,7 +5911,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)

 	if (!sg)
 		return;
-next_sg:
+	do {
 		for_each_cpu_mask(j, sg->cpumask) {
 			struct sched_domain *sd;
 
@@ -5929,8 +5927,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 			sg_inc_cpu_power(sg, sd->groups->__cpu_power);
 		}
 		sg = sg->next;
-	if (sg != group_head)
-		goto next_sg;
+	} while (sg != group_head);
 }
 #endif