Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e81b693c authored by Linus Torvalds
Browse files

Merge branch 'sched-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip

* 'sched-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  sched: Fix a memory leak in __sdt_free()
  sched: Move blk_schedule_flush_plug() out of __schedule()
  sched: Separate the scheduler entry for preemption
parents b0fb4222 feff8fa0
Loading
Loading
Loading
Loading
+26 −15
Original line number | Diff line number | Diff line
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
}

/*
 * schedule() is the main scheduler function.
 * __schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ asmlinkage void __sched schedule(void)
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}

			/*
			 * If we are going to sleep and we have plugged IO
			 * queued, make sure to submit it to avoid deadlocks.
			 */
			if (blk_needs_flush_plug(prev)) {
				raw_spin_unlock(&rq->lock);
				blk_schedule_flush_plug(prev);
				raw_spin_lock(&rq->lock);
			}
		}
		switch_count = &prev->nvcsw;
	}
@@ -4369,6 +4359,26 @@ asmlinkage void __sched schedule(void)
	if (need_resched())
		goto need_resched;
}

static inline void sched_submit_work(struct task_struct *tsk)
{
	/*
	 * A still-runnable task (state == TASK_RUNNING, i.e. 0) has
	 * nothing to submit.  Only a task about to go to sleep must
	 * flush any plugged IO it has queued, to avoid deadlocks.
	 */
	if (tsk->state && blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)

	do {
		add_preempt_count_notrace(PREEMPT_ACTIVE);
		schedule();
		__schedule();
		sub_preempt_count_notrace(PREEMPT_ACTIVE);

		/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		schedule();
		__schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
static void __cond_resched(void)
{
	add_preempt_count(PREEMPT_ACTIVE);
	schedule();
	__schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}

@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
			if (sd && (sd->flags & SD_OVERLAP))
				free_sched_groups(sd->groups, 0);
			kfree(*per_cpu_ptr(sdd->sd, j));
			kfree(*per_cpu_ptr(sdd->sg, j));
			kfree(*per_cpu_ptr(sdd->sgp, j));
		}