
Commit dd050809 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Three fixes for scheduler crashes, each triggering in relatively rare,
  hardware-environment-dependent situations"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Rework sched_fair time accounting
  math64: Add mul_u64_u32_shr()
  sched: Remove PREEMPT_NEED_RESCHED from generic code
  sched: Initialize power_orig for overlapping groups
parents 1070d5ac 9dbdb155
arch/x86/Kconfig +1 −0
@@ -26,6 +26,7 @@ config X86
 	select HAVE_AOUT if X86_32
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select HAVE_IDE
 	select HAVE_OPROFILE
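
The new ARCH_SUPPORTS_INT128 select advertises that the compiler's native 128-bit integer type is usable on x86_64, which the math64.h change below keys off. As a rough userspace illustration (outside the kernel, relying only on the compiler-provided __SIZEOF_INT128__ macro), a full-width 64x64 multiply with that type looks like this:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
#ifdef __SIZEOF_INT128__
	/* Full 64x64 -> 128-bit product, the capability ARCH_SUPPORTS_INT128 advertises. */
	uint64_t a = 0xffffffffffffffffULL;
	uint64_t b = 3;
	unsigned __int128 p = (unsigned __int128)a * b;

	printf("high=%llu low=%llu\n",
	       (unsigned long long)(p >> 64),
	       (unsigned long long)(uint64_t)p);
#else
	puts("no native 128-bit integer support");
#endif
	return 0;
}
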
arch/x86/include/asm/preempt.h +11 −0
@@ -7,6 +7,12 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
 /*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
 	__this_cpu_add_4(__preempt_count, -val);
 }
 
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
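
The two new comments spell out the invariant the x86 code relies on: PREEMPT_NEED_RESCHED is stored inverted inside the per-CPU count, so the single decrement-and-test-for-zero in __preempt_count_dec_and_test() proves both that the preempt count is gone and that a reschedule is wanted. A minimal userspace sketch of that bookkeeping (the sim_* names and the 0x80000000 constant are illustrative stand-ins, not the kernel API):

#include <assert.h>
#include <stdbool.h>

#define SIM_NEED_RESCHED	0x80000000u	/* stored inverted, like PREEMPT_NEED_RESCHED */

static unsigned int sim_preempt_count = SIM_NEED_RESCHED;	/* preemptible, nothing pending */

static void sim_set_need_resched(void)   { sim_preempt_count &= ~SIM_NEED_RESCHED; }
static void sim_clear_need_resched(void) { sim_preempt_count |=  SIM_NEED_RESCHED; }
static void sim_preempt_disable(void)    { sim_preempt_count++; }

/* Mirrors __preempt_count_dec_and_test(): one decrement, one test for zero. */
static bool sim_preempt_enable_and_test(void)
{
	return --sim_preempt_count == 0;
}

int main(void)
{
	sim_preempt_disable();
	sim_set_need_resched();			/* bit cleared: resched wanted */
	assert(sim_preempt_enable_and_test());	/* count 0 and bit clear -> reschedule */

	sim_preempt_disable();
	sim_clear_need_resched();		/* bit set again: nothing pending */
	assert(!sim_preempt_enable_and_test());	/* never reaches zero while the bit is set */
	return 0;
}
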
include/asm-generic/preempt.h +11 −24
@@ -3,13 +3,11 @@
 
 #include <linux/thread_info.h>
 
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED	(0)
+
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+	return current_thread_info()->preempt_count;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
 	return &current_thread_info()->preempt_count;
 }
 
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
 } while (0)
 
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+	return false;
 }
 
 /*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--*preempt_count_ptr();
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
 }
 
 /*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!*preempt_count_ptr());
+	return unlikely(!preempt_count() && tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
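
With the folded bit gone from the generic header, the preempt count and the resched flag are tested separately: as the new in-function comment notes, a load-store architecture cannot update the counter atomically enough to keep a folded PREEMPT_NEED_RESCHED from being lost. A small userspace sketch of that two-part test (the sim_* names are illustrative, not kernel APIs):

#include <assert.h>
#include <stdbool.h>

/* A plain count plus a separate resched flag, mirroring the generic code,
 * which cannot fold the flag into the count. */
static int  sim_preempt_count;		/* PREEMPT_ENABLED is simply 0 here */
static bool sim_tif_need_resched;	/* plays the role of tif_need_resched() */

static void sim_preempt_disable(void) { sim_preempt_count++; }

/* Mirrors the generic __preempt_count_dec_and_test(): two separate checks. */
static bool sim_dec_and_test(void)
{
	return --sim_preempt_count == 0 && sim_tif_need_resched;
}

/* Mirrors the generic should_resched(). */
static bool sim_should_resched(void)
{
	return sim_preempt_count == 0 && sim_tif_need_resched;
}

int main(void)
{
	sim_preempt_disable();
	sim_tif_need_resched = true;
	assert(sim_dec_and_test());	/* count reached 0 and a resched is pending */

	sim_preempt_disable();
	sim_tif_need_resched = false;
	assert(!sim_dec_and_test());	/* count reached 0 but nothing is pending */
	assert(!sim_should_resched());
	return 0;
}
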
include/linux/math64.h +30 −0
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	u32 ah, al;
+	u64 ret;
+
+	al = a;
+	ah = a >> 32;
+
+	ret = ((u64)al * mul) >> shift;
+	if (ah)
+		ret += ((u64)ah * mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
 #endif /* _LINUX_MATH64_H */
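
mul_u64_u32_shr() computes (a * mul) >> shift without losing the high bits of the 64x32 product: with native 128-bit arithmetic it is a single widening multiply, otherwise the 64-bit operand is split into 32-bit halves that are multiplied and recombined. A self-contained userspace check (demo_mul_u64_u32_shr and the constants are illustrative, not kernel code) of the classic use, multiplying by a precomputed 2^32/divisor inverse instead of dividing:

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Userspace copy of the fallback path above, for illustration only. */
static inline u64 demo_mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 al = a, ah = a >> 32;
	u64 ret;

	ret = ((u64)al * mul) >> shift;
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);
	return ret;
}

int main(void)
{
	/* Fixed-point "divide by 3": multiply by round(2^32 / 3), then shift by 32. */
	u32 inv3 = 0x55555556u;
	u64 x = 3000000000ULL;

	assert(demo_mul_u64_u32_shr(x, inv3, 32) == x / 3);

#ifdef __SIZEOF_INT128__
	/* The __int128 path above must agree with the split 32x32 computation. */
	assert((u64)(((unsigned __int128)x * inv3) >> 32) ==
	       demo_mul_u64_u32_shr(x, inv3, 32));
#endif
	return 0;
}
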
include/linux/sched.h +2 −3
@@ -440,8 +440,6 @@ struct task_cputime {
 		.sum_exec_runtime = 0,				\
 	}
 
-#define PREEMPT_ENABLED		(PREEMPT_NEED_RESCHED)
-
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
 #else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
 struct uts_namespace;
 
 struct load_weight {
-	unsigned long weight, inv_weight;
+	unsigned long weight;
+	u32 inv_weight;
 };
 
 struct sched_avg {
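
Narrowing inv_weight to u32 matches its role as a 32-bit fixed-point inverse of weight (roughly 2^32 / weight): the reworked fair-class time accounting can then scale a runtime delta with a multiply-and-shift such as mul_u64_u32_shr() instead of a 64-bit division. A rough, self-contained illustration of that idea (the demo_* names and constants are made up for this sketch; the actual rework lives in the fair scheduler code, which is not among the hunks shown here):

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

#define DEMO_WMULT_SHIFT	32

/* Precompute a 32-bit fixed-point inverse of a load weight, so later
 * scaling by 1/weight becomes a multiply and a 32-bit shift. */
static u32 demo_calc_inv_weight(unsigned long weight)
{
	return (u32)((1ULL << DEMO_WMULT_SHIFT) / weight);	/* ~2^32 / weight */
}

static u64 demo_scale_by_inv(u64 delta, u32 inv_weight)
{
	/* The values below are chosen so the product fits in 64 bits; the
	 * kernel uses mul_u64_u32_shr() and has no such restriction. */
	return (delta * (u64)inv_weight) >> DEMO_WMULT_SHIFT;
}

int main(void)
{
	unsigned long weight = 1024;			/* a nice-0-sized weight */
	u32 inv = demo_calc_inv_weight(weight);		/* fits in 32 bits */
	u64 delta = 6000000;				/* e.g. nanoseconds of runtime */

	/* The fixed-point result tracks the exact division to within one unit. */
	assert(demo_scale_by_inv(delta, inv) <= delta / weight);
	assert(delta / weight - demo_scale_by_inv(delta, inv) <= 1);
	return 0;
}
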