
Commit df557936 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes all around the map: an instrumentation fix, a nohz
  usability fix, a lockdep annotation fix and two task group scheduling
  fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Add missing lockdep_unpin() annotations
  sched/deadline: Fix migration of SCHED_DEADLINE tasks
  nohz: Revert "nohz: Set isolcpus when nohz_full is set"
  sched/fair: Update task group's load_avg after task migration
  sched/fair: Fix overly small weight for interactive group entities
  sched, tracing: Stop/start critical timings around the idle=poll idle loop
parents 9f30931a 0aaafaab
kernel/sched/core.c  +8 −4
@@ -2366,8 +2366,15 @@ void wake_up_new_task(struct task_struct *p)
 	trace_sched_wakeup_new(p);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-	if (p->sched_class->task_woken)
+	if (p->sched_class->task_woken) {
+		/*
+		 * Nothing relies on rq->lock after this, so it's fine to
+		 * drop it.
+		 */
+		lockdep_unpin_lock(&rq->lock);
 		p->sched_class->task_woken(rq, p);
+		lockdep_pin_lock(&rq->lock);
+	}
 #endif
 	task_rq_unlock(rq, p, &flags);
 }
@@ -7238,9 +7245,6 @@ void __init sched_init_smp(void)
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
-	/* nohz_full won't take effect without isolating the cpus. */
-	tick_nohz_full_add_cpus_to(cpu_isolated_map);
-
 	sched_init_numa();
 
 	/*
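
Note on the lockdep annotation in the first hunk: pinning rq->lock makes lockdep warn if the lock is released while pinned, which catches callbacks that drop the lock behind the caller's back. Since ->task_woken() is allowed to drop and retake rq->lock, the pin is suspended around the call. A minimal sketch of the same pattern outside the scheduler follows; the structure, field, and function names are hypothetical, and it assumes the void-returning lockdep_pin_lock()/lockdep_unpin_lock() API that this 4.3-era code uses.

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct poller {					/* hypothetical example structure */
	raw_spinlock_t lock;
	void (*on_ready)(struct poller *p);	/* may drop and retake ->lock */
};

static void poller_notify(struct poller *p)
{
	raw_spin_lock(&p->lock);
	lockdep_pin_lock(&p->lock);	/* any unlock while pinned now warns */

	/* ... work that relies on holding p->lock throughout ... */

	/*
	 * The callback is entitled to unlock p->lock, so suspend the
	 * pin across the call, exactly as wake_up_new_task() does above.
	 */
	lockdep_unpin_lock(&p->lock);
	p->on_ready(p);
	lockdep_pin_lock(&p->lock);

	lockdep_unpin_lock(&p->lock);	/* unpin before the final unlock */
	raw_spin_unlock(&p->lock);
}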
kernel/sched/deadline.c  +13 −4
@@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	 * Queueing this task back might have overloaded rq, check if we need
 	 * to kick someone away.
 	 */
-	if (has_pushable_dl_tasks(rq))
+	if (has_pushable_dl_tasks(rq)) {
+		/*
+		 * Nothing relies on rq->lock after this, so it's safe to drop
+		 * rq->lock.
+		 */
+		lockdep_unpin_lock(&rq->lock);
 		push_dl_task(rq);
+		lockdep_pin_lock(&rq->lock);
+	}
 #endif
 
 unlock:
@@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
-				dl_time_before(p->dl.deadline,
-					cpu_rq(target)->dl.earliest_dl.curr))
+				(dl_time_before(p->dl.deadline,
+					cpu_rq(target)->dl.earliest_dl.curr) ||
+				(cpu_rq(target)->dl.dl_nr_running == 0)))
 			cpu = target;
 	}
 	rcu_read_unlock();
@@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
 		later_rq = cpu_rq(cpu);
 
-		if (!dl_time_before(task->dl.deadline,
+		if (later_rq->dl.dl_nr_running &&
+		    !dl_time_before(task->dl.deadline,
 					later_rq->dl.earliest_dl.curr)) {
 			/*
 			 * Target rq has tasks of equal or earlier deadline,
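
Both deadline.c hunks hinge on the fact that a runqueue's dl.earliest_dl.curr is only meaningful while it actually has deadline tasks queued: with dl.dl_nr_running == 0 the field is stale, so before this fix a CPU with no deadline tasks at all could be wrongly rejected as a migration target in both select_task_rq_dl() and find_lock_later_rq(). For reference, dl_time_before() is the wrap-safe comparison used on both sides of the change, as defined in include/linux/sched/deadline.h:

static inline bool dl_time_before(u64 a, u64 b)
{
	/* signed difference: correct even across u64 clock wraparound */
	return (s64)(a - b) < 0;
}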
kernel/sched/fair.c  +5 −4
@@ -2363,7 +2363,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
 	 */
 	tg_weight = atomic_long_read(&tg->load_avg);
 	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq_load_avg(cfs_rq);
+	tg_weight += cfs_rq->load.weight;
 
 	return tg_weight;
 }
@@ -2373,7 +2373,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	long tg_weight, load, shares;
 
 	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq_load_avg(cfs_rq);
+	load = cfs_rq->load.weight;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-	int decayed;
 	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed, removed = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+		removed = 1;
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	return decayed;
+	return decayed || removed;
 }
 
 /* Update task and its cfs_rq load average */
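
The two calc_* hunks revert the group-shares math to the instantaneous load.weight, i.e. effectively shares = tg->shares * load / tg_weight (modulo the clamping done later in calc_cfs_shares()). The problem with the decayed cfs_rq_load_avg() was that an interactive task that has just woken from sleep carries an almost fully decayed load average but a full instantaneous weight, so its group entity got an overly small share of CPU. A toy userspace calculation illustrates the gap; every number below is made up purely for illustration:

#include <stdio.h>

int main(void)
{
	long tg_shares = 1024;	/* default group shares */
	long tg_weight = 2048;	/* group weight summed across CPUs (toy value) */
	long weight = 1024;	/* instantaneous load.weight of a runnable task */
	long avg = 35;		/* its load_avg right after a long sleep (toy value) */

	/* shares = tg->shares * load / tg_weight, as in calc_cfs_shares() */
	printf("shares from load.weight: %ld\n", tg_shares * weight / tg_weight); /* 512 */
	printf("shares from load_avg:    %ld\n", tg_shares * avg / tg_weight);    /* 17 */
	return 0;
}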
kernel/sched/idle.c  +2 −0
@@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void)
 	rcu_idle_enter();
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
+	stop_critical_timings();
 	while (!tif_need_resched() &&
 		(cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
+	start_critical_timings();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
 	return 1;
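
The idle.c hunk brackets the polling loop with stop_critical_timings()/start_critical_timings() so the irqsoff and preemptoff latency tracers do not account the entire idle=poll busy-wait as one huge critical section. The same bracketing suits any busy-wait that is idle by design; a minimal sketch follows, in which the helper and its flag parameter are hypothetical:

#include <linux/irqflags.h>	/* stop_critical_timings()/start_critical_timings() */
#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/processor.h>	/* cpu_relax() */

static void example_wait_for(volatile int *flag)	/* hypothetical helper */
{
	stop_critical_timings();	/* exclude the spin from latency tracing */
	while (!READ_ONCE(*flag))
		cpu_relax();
	start_critical_timings();	/* resume irqsoff/preemptoff accounting */
}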