Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7c4e0f08 authored by Peter Zijlstra, committed by Todd Kjos
Browse files

UPSTREAM: sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()



Instead of adding the update_rq_clock() all the way at the bottom of
the callstack, add one at the top, this to aid later effort to
minimize update_rq_clock() calls.

  WARNING: CPU: 0 PID: 1 at ../kernel/sched/sched.h:797 detach_task_cfs_rq()
  rq->clock_update_flags < RQCF_ACT_SKIP

  Call Trace:
    dump_stack()
    __warn()
    warn_slowpath_fmt()
    detach_task_cfs_rq()
    switched_from_fair()
    __sched_setscheduler()
    _sched_setscheduler()
    sched_set_stop_task()
    cpu_stop_create()
    __smpboot_create_thread.part.2()
    smpboot_register_percpu_thread_cpumask()
    cpu_stop_init()
    do_one_initcall()
    ? print_cpu_info()
    kernel_init_freeable()
    ? rest_init()
    kernel_init()
    ret_from_fork()

Change-Id: Iee08c2ed3303ae8f0c527658f13646b02a412cad
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 80f5c1b84baa8180c3c27b7e227429712cd967b6)
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent 4b9300bf
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -3824,6 +3824,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
	BUG_ON(prio > MAX_PRIO);

	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Idle task boosting is a nono in general. There is one
@@ -4352,6 +4353,7 @@ static int __sched_setscheduler(struct task_struct *p,
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads its a very bad idea
@@ -8702,6 +8704,7 @@ static void cpu_cgroup_fork(struct task_struct *task)

	rq = task_rq_lock(task, &rf);

	update_rq_clock(rq);
	sched_change_group(task, TASK_SET_GROUP);

	task_rq_unlock(rq, task, &rf);