
Commit 31c952dc authored by Linus Torvalds


Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched_rt: don't use first_cpu on cpumask created with cpumask_and
  sched: fix buddie group latency
  sched: clear buddies more aggressively
  sched: symmetric sync vs avg_overlap
  sched: fix sync wakeups
  cpuset: fix possible deadlock in async_rebuild_sched_domains
parents 9e6235e9 3d398703
kernel/cpuset.c  +12 −1
@@ -60,6 +60,14 @@
#include <linux/workqueue.h>
#include <linux/cgroup.h>

+/*
+ * Workqueue for cpuset related tasks.
+ *
+ * Using kevent workqueue may cause deadlock when memory_migrate
+ * is set. So we create a separate workqueue thread for cpuset.
+ */
+static struct workqueue_struct *cpuset_wq;
+
/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
@@ -831,7 +839,7 @@ static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
 */
static void async_rebuild_sched_domains(void)
{
-	schedule_work(&rebuild_sched_domains_work);
+	queue_work(cpuset_wq, &rebuild_sched_domains_work);
}

/*
@@ -2111,6 +2119,9 @@ void __init cpuset_init_smp(void)

	hotcpu_notifier(cpuset_track_online_cpus, 0);
	hotplug_memory_notifier(cpuset_track_online_nodes, 10);

+	cpuset_wq = create_singlethread_workqueue("cpuset");
+	BUG_ON(!cpuset_wq);
}

/**
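The kernel/cpuset.c change boils down to one pattern: instead of handing rebuild_sched_domains_work to the shared kevent workqueue with schedule_work(), cpuset creates its own single-threaded workqueue and queues the work there, so a memory_migrate-induced flush of keventd can no longer deadlock against it. Below is a minimal sketch of that pattern, not taken from this commit; the names my_wq, my_work_fn, my_setup and my_trigger are hypothetical.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* dedicated, single-threaded */

static void my_work_fn(struct work_struct *work)
{
	/* long-running or lock-taking work goes here */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_setup(void)
{
	/* our own kernel thread, so we never wait behind the shared keventd */
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_trigger(void)
{
	/* asynchronous: returns immediately, the work runs on my_wq's thread */
	queue_work(my_wq, &my_work);
}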
kernel/sched.c  +10 −0
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
	if (!sched_feat(SYNC_WAKEUPS))
		sync = 0;

+	if (!sync) {
+		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+			  p->se.avg_overlap < sysctl_sched_migration_cost)
+			sync = 1;
+	} else {
+		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+			  p->se.avg_overlap >= sysctl_sched_migration_cost)
+			sync = 0;
+	}
+
#ifdef CONFIG_SMP
	if (sched_feat(LB_WAKEUP_UPDATE)) {
		struct sched_domain *sd;
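The ten added lines above make the overlap heuristic symmetric: a wakeup that was not flagged as synchronous is promoted to sync when both waker and wakee have a short average overlap, and a wakeup that was flagged as sync is demoted when either of them overlaps for longer than the migration cost. A standalone sketch of that decision, with a simplified struct standing in for sched_entity and the threshold passed in explicitly (hypothetical names, not kernel code):

struct ent { unsigned long avg_overlap; };

static int adjust_sync(int sync, const struct ent *waker,
		       const struct ent *wakee, unsigned long cost)
{
	if (!sync) {
		/* both tasks barely overlap: treat the wakeup as synchronous */
		if (waker->avg_overlap < cost && wakee->avg_overlap < cost)
			sync = 1;
	} else {
		/* either task overlaps heavily: don't trust the sync hint */
		if (waker->avg_overlap >= cost || wakee->avg_overlap >= cost)
			sync = 0;
	}
	return sync;
}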
kernel/sched_fair.c  +21 −11
@@ -719,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
		__enqueue_entity(cfs_rq, se);
}

-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
@@ -728,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
		cfs_rq->next = NULL;
}

+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		__clear_buddies(cfs_rq_of(se), se);
+}
+
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
@@ -768,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
+		/*
+		 * The current task ran long enough, ensure it doesn't get
+		 * re-elected due to buddy favours.
+		 */
+		clear_buddies(cfs_rq, curr);
+	}
}

static void
@@ -1179,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
	    int idx, unsigned long load, unsigned long this_load,
	    unsigned int imbalance)
{
-	struct task_struct *curr = this_rq->curr;
-	struct task_group *tg;
	unsigned long tl = this_load;
	unsigned long tl_per_task;
+	struct task_group *tg;
	unsigned long weight;
	int balanced;

	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
		return 0;

-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
-
	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
@@ -1419,9 +1426,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

-	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-			(se->avg_overlap < sysctl_sched_migration_cost &&
-			 pse->avg_overlap < sysctl_sched_migration_cost))) {
+	if (sched_feat(WAKEUP_OVERLAP) && sync) {
		resched_task(curr);
		return;
	}
@@ -1452,6 +1457,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)

	do {
		se = pick_next_entity(cfs_rq);
+		/*
+		 * If se was a buddy, clear it so that it will have to earn
+		 * the favour again.
+		 */
+		__clear_buddies(cfs_rq, se);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);
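The kernel/sched_fair.c hunks do three related things: the old clear_buddies() becomes the per-level helper __clear_buddies(), a new clear_buddies() walks every level of the group hierarchy with for_each_sched_entity(), and buddies are additionally cleared when a task exhausts its slice in check_preempt_tick() and when an entity is actually picked in pick_next_task_fair(). The hierarchy walk matters because with CONFIG_FAIR_GROUP_SCHED each level's cfs_rq keeps its own ->last/->next hints. A rough sketch of that idea with simplified, hypothetical structures (not the kernel's own types):

struct entity;

struct runqueue {
	struct entity *last;	/* buddy hints kept per hierarchy level */
	struct entity *next;
};

struct entity {
	struct runqueue *my_rq;	/* queue this entity is enqueued on */
	struct entity *parent;	/* group entity one level up, NULL at top */
};

static void clear_one_level(struct runqueue *rq, struct entity *e)
{
	if (rq->last == e)
		rq->last = NULL;
	if (rq->next == e)
		rq->next = NULL;
}

static void clear_all_levels(struct entity *e)
{
	/* analogue of for_each_sched_entity(): walk towards the root */
	for (; e; e = e->parent)
		clear_one_level(e->my_rq, e);
}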
kernel/sched_rt.c  +2 −2
@@ -968,8 +968,8 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
		return first;

	return -1;
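The kernel/sched_rt.c change is the "don't use first_cpu on cpumask created with cpumask_and" fix: cpumask_and() from the newer cpumask API only defines the first nr_cpu_ids bits of its result, while the older first_cpu() scans NR_CPUS bits and uses NR_CPUS as its empty-mask sentinel, so it could report a stale bit above nr_cpu_ids as a valid CPU. The fix scans with cpumask_first() and tests against nr_cpu_ids instead. A sketch of the corrected pattern; pick_first_allowed and its parameters are made up for illustration.

#include <linux/cpumask.h>

static int pick_first_allowed(const struct cpumask *candidates,
			      const struct cpumask *allowed,
			      struct cpumask *scratch)
{
	int cpu;

	cpumask_and(scratch, candidates, allowed);	/* defines only nr_cpu_ids bits */
	cpu = cpumask_first(scratch);			/* never scans past nr_cpu_ids */
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;					/* intersection was empty */
}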