Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a1d85611 authored by Linus Torvalds
Browse files

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The biggest change in this cycle is the rewrite of the main SMP load
  balancing metric: the CPU load/utilization.  The main goal was to make
  the metric more precise and more representative - see the changelog of
  this commit for the gory details:

    9d89c257 ("sched/fair: Rewrite runnable load and utilization average tracking")

  It is done in a way that significantly reduces complexity of the code:

    5 files changed, 249 insertions(+), 494 deletions(-)

  and the performance testing results are encouraging.  Nevertheless we
  need to keep an eye on potential regressions, since this potentially
  affects every SMP workload in existence.

  This work comes from Yuyang Du.

  Other changes:

   - SCHED_DL updates.  (Andrea Parri)

   - Simplify architecture callbacks by removing finish_arch_switch().
     (Peter Zijlstra et al)

   - cputime accounting: guarantee stime + utime == rtime.  (Peter
     Zijlstra)

   - optimize idle CPU wakeups some more - inspired by Facebook server
     loads.  (Mike Galbraith)

   - stop_machine fixes and updates.  (Oleg Nesterov)

   - Introduce the 'trace_sched_waking' tracepoint.  (Peter Zijlstra)

   - sched/numa tweaks.  (Srikar Dronamraju)

   - misc fixes and small cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
  sched/deadline: Fix comment in enqueue_task_dl()
  sched/deadline: Fix comment in push_dl_tasks()
  sched: Change the sched_class::set_cpus_allowed() calling context
  sched: Make sched_class::set_cpus_allowed() unconditional
  sched: Fix a race between __kthread_bind() and sched_setaffinity()
  sched: Ensure a task has a non-normalized vruntime when returning back to CFS
  sched/numa: Fix NUMA_DIRECT topology identification
  tile: Reorganize _switch_to()
  sched, sparc32: Update scheduler comments in copy_thread()
  sched: Remove finish_arch_switch()
  sched, tile: Remove finish_arch_switch
  sched, sh: Fold finish_arch_switch() into switch_to()
  sched, score: Remove finish_arch_switch()
  sched, avr32: Remove finish_arch_switch()
  sched, MIPS: Get rid of finish_arch_switch()
  sched, arm: Remove finish_arch_switch()
  sched/fair: Clean up load average references
  sched/fair: Provide runnable_load_avg back to cfs_rq
  sched/fair: Remove task and group entity load when they are dead
  sched/fair: Init cfs_rq's sched_entity load average
  ...
parents 3959df1d ff277d42
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -10,7 +10,9 @@
 * CPU.
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define finish_arch_switch(prev)	dsb(ish)
#define __complete_pending_tlbi()	dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

/*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info

/*
 * switch_to(prev, next, last) - perform a context switch from 'prev' to
 * 'next'; 'last' receives the task that was running before 'next' resumed
 * (the standard kernel switch_to() contract).
 *
 * __complete_pending_tlbi() is invoked first so that any pending TLB
 * invalidation is completed (a dsb(ish) on SMP-preemptible ARMv7 per the
 * #if block above; a no-op otherwise) before control transfers to the
 * low-level __switch_to assembly routine.
 */
#define switch_to(prev,next,last)					\
do {									\
	__complete_pending_tlbi();					\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

+5 −2
Original line number Diff line number Diff line
@@ -15,11 +15,13 @@
 */
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
#define finish_arch_switch(prev)			\
#define ocd_switch(prev, next)				\
	do {						\
		ocd_write(PID, prev->pid);		\
		ocd_write(PID, current->pid);		\
		ocd_write(PID, next->pid);		\
	} while(0)
#else
#define ocd_switch(prev, next)
#endif

/*
@@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
				       struct cpu_context *);
/*
 * switch_to(prev, next, last) - avr32 context switch; 'last' receives the
 * previously-running task, per the standard kernel switch_to() contract.
 *
 * ocd_switch() is called first so the On-Chip Debug PID register tracks
 * the task change when CONFIG_OWNERSHIP_TRACE is enabled (no-op otherwise).
 * __switch_to is then handed pointers into each task's saved cpu_context;
 * NOTE(review): the '+ 1' past prev's cpu_context presumably matches the
 * assembly routine's save-area layout — confirm against __switch_to.
 */
#define switch_to(prev, next, last)					\
	do {								\
		ocd_switch(prev, next);					\
		last = __switch_to(prev, &prev->thread.cpu_context + 1,	\
				   &next->thread.cpu_context);		\
	} while (0)
+23 −25
Original line number Diff line number Diff line
@@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \
	}								\
} while (0)

/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
 * So everything that matters to new threads should be placed before resume().
 */
#define switch_to(prev, next, last)					\
do {									\
	u32 __c0_stat;							\
	s32 __fpsave = FP_SAVE_NONE;					\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
	if (cpu_has_dsp) {						\
		__save_dsp(prev);					\
	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
		__restore_dsp(next);					\
	}								\
	if (cop2_present) {						\
		set_c0_status(ST0_CU2);					\
		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
			if (cop2_lazy_restore)				\
				KSTK_STATUS(prev) &= ~ST0_CU2;		\
		__c0_stat = read_c0_status();				\
		write_c0_status(__c0_stat | ST0_CU2);			\
			cop2_save(prev);				\
		write_c0_status(__c0_stat & ~ST0_CU2);			\
		}							\
		if (KSTK_STATUS(next) & ST0_CU2 &&			\
		    !cop2_lazy_restore) {				\
			cop2_restore(next);				\
		}							\
		clear_c0_status(ST0_CU2);				\
	}								\
	__clear_software_ll_bit();					\
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
		__fpsave = FP_SAVE_SCALAR;				\
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
		__fpsave = FP_SAVE_VECTOR;				\
	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
} while (0)

#define finish_arch_switch(prev)					\
do {									\
	u32 __c0_stat;							\
	if (cop2_present && !cop2_lazy_restore &&			\
			(KSTK_STATUS(current) & ST0_CU2)) {		\
		__c0_stat = read_c0_status();				\
		write_c0_status(__c0_stat | ST0_CU2);			\
		cop2_restore(current);					\
		write_c0_status(__c0_stat & ~ST0_CU2);			\
	}								\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
		write_c0_userlocal(task_thread_info(next)->tp_value);	\
	__restore_watch();						\
	disable_msa();							\
	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
} while (0)

#endif /* _ASM_SWITCH_TO_H */
+1 −1
Original line number Diff line number Diff line
@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
		vc->runner = vcpu;
		if (n_ceded == vc->n_runnable) {
			kvmppc_vcore_blocked(vc);
		} else if (should_resched()) {
		} else if (need_resched()) {
			vc->vcore_state = VCORE_PREEMPT;
			/* Let something else run */
			cond_resched_lock(&vc->lock);
+0 −2
Original line number Diff line number Diff line
@@ -8,6 +8,4 @@ do { \
	(last) = resume(prev, next, task_thread_info(next));	\
} while (0)

#define finish_arch_switch(prev)	do {} while (0)

#endif /* _ASM_SCORE_SWITCH_TO_H */
Loading