
Commit f8b6d1cc authored by Peter Zijlstra, committed by Ingo Molnar

sched: Use jump_labels for sched_feat



Now that we initialize jump_labels before sched_init(), we can use them
for the debug features without having to worry about a window where
they have the wrong setting.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-vpreo4hal9e0kzqmg5y0io2k@git.kernel.org


Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent be726ffd
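
For orientation, a hedged sketch (editorial, not part of the commit): sched_feat() previously always expanded to a bitmask test against sysctl_sched_features, so every feature check in a scheduler hot path paid for a memory load and a test even though the features rarely change. With this patch, when both CONFIG_SCHED_DEBUG and HAVE_JUMP_LABEL are available, the check becomes a static_branch() backed by a per-feature jump_label_key, and toggling a feature patches the branch sites instead. The caller and the do_remote_wakeup() helper below are illustrative only; the two expansions are taken from the sched.h hunk further down.

	/* Illustrative caller; do_remote_wakeup() is a hypothetical helper. */
	if (sched_feat(TTWU_QUEUE))
		do_remote_wakeup();

	/* Fallback expansion (no SCHED_DEBUG or no HAVE_JUMP_LABEL): */
	if (sysctl_sched_features & (1UL << __SCHED_FEAT_TTWU_QUEUE))
		do_remote_wakeup();

	/* Jump-label expansion (SCHED_DEBUG && HAVE_JUMP_LABEL): the branch is
	 * patched in or out at runtime instead of being tested on every call. */
	if (static_branch_TTWU_QUEUE(&sched_feat_keys[__SCHED_FEAT_TTWU_QUEUE]))
		do_remote_wakeup();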
+39 −7
@@ -149,7 +149,7 @@ static int sched_feat_show(struct seq_file *m, void *v)
 {
 	int i;
 
-	for (i = 0; sched_feat_names[i]; i++) {
+	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 		if (!(sysctl_sched_features & (1UL << i)))
 			seq_puts(m, "NO_");
 		seq_printf(m, "%s ", sched_feat_names[i]);
@@ -159,6 +159,36 @@ static int sched_feat_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+#ifdef HAVE_JUMP_LABEL
+
+#define jump_label_key__true  jump_label_key_enabled
+#define jump_label_key__false jump_label_key_disabled
+
+#define SCHED_FEAT(name, enabled)	\
+	jump_label_key__##enabled ,
+
+struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+#include "features.h"
+};
+
+#undef SCHED_FEAT
+
+static void sched_feat_disable(int i)
+{
+	if (jump_label_enabled(&sched_feat_keys[i]))
+		jump_label_dec(&sched_feat_keys[i]);
+}
+
+static void sched_feat_enable(int i)
+{
+	if (!jump_label_enabled(&sched_feat_keys[i]))
+		jump_label_inc(&sched_feat_keys[i]);
+}
+#else
+static void sched_feat_disable(int i) { };
+static void sched_feat_enable(int i) { };
+#endif /* HAVE_JUMP_LABEL */
+
 static ssize_t
 sched_feat_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
@@ -182,17 +212,20 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 		cmp += 3;
 	}
 
-	for (i = 0; sched_feat_names[i]; i++) {
+	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 		if (strcmp(cmp, sched_feat_names[i]) == 0) {
-			if (neg)
+			if (neg) {
 				sysctl_sched_features &= ~(1UL << i);
-			else
+				sched_feat_disable(i);
+			} else {
 				sysctl_sched_features |= (1UL << i);
+				sched_feat_enable(i);
+			}
 			break;
 		}
 	}
 
-	if (!sched_feat_names[i])
+	if (i == __SCHED_FEAT_NR)
 		return -EINVAL;
 
 	*ppos += cnt;
@@ -221,8 +254,7 @@ static __init int sched_init_debug(void)
 	return 0;
 }
 late_initcall(sched_init_debug);
-
-#endif
+#endif /* CONFIG_SCHED_DEBUG */
 
 /*
  * Number of tasks to iterate in a single balance run.
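
Editorial note on the hunks above (hedged): redefining SCHED_FEAT() and re-including features.h emits one jump_label_key per feature, and the jump_label_key__##enabled paste selects jump_label_key_enabled or jump_label_key_disabled so each key starts out matching the feature's compile-time default. Writes to the sched_features debugfs file (the "NO_" handling above) then reach sched_feat_enable()/sched_feat_disable(), which take or drop a reference on the corresponding key. A sketch of the expanded initializer, assuming the defaults from features.h below:

struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
	jump_label_key_enabled,		/* SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) */
	jump_label_key_enabled,		/* SCHED_FEAT(START_DEBIT, true) */
	/* ... one entry per feature, in features.h order; features whose
	 * default is false (e.g. NEXT_BUDDY) expand to jump_label_key_disabled */
};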
+15 −15
@@ -3,13 +3,13 @@
  * them to run sooner, but does not allow tons of sleepers to
  * rip the spread apart.
  */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 
 /*
  * Place new tasks ahead so that they do not starve already running
  * tasks
  */
-SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(START_DEBIT, true)
 
 /*
  * Based on load and program behaviour, see if it makes sense to place
@@ -17,54 +17,54 @@ SCHED_FEAT(START_DEBIT, 1)
  * improve cache locality. Typically used with SYNC wakeups as
  * generated by pipes and the like, see also SYNC_WAKEUPS.
  */
-SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, true)
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, 0)
+SCHED_FEAT(NEXT_BUDDY, false)
 
 /*
  * Prefer to schedule the task that ran last (when we did
  * wake-preempt) as that likely will touch the same data, increases
  * cache locality.
  */
-SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(LAST_BUDDY, true)
 
 /*
  * Consider buddies to be cache hot, decreases the likelyness of a
  * cache buddy being migrated away, increases cache locality.
  */
-SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, true)
 
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, 0)
+SCHED_FEAT(ARCH_POWER, false)
 
-SCHED_FEAT(HRTICK, 0)
-SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(LB_BIAS, 1)
+SCHED_FEAT(HRTICK, false)
+SCHED_FEAT(DOUBLE_TICK, false)
+SCHED_FEAT(LB_BIAS, true)
 
 /*
  * Spin-wait on mutex acquisition when the mutex owner is running on
  * another cpu -- assumes that when the owner is running, it will soon
  * release the lock. Decreases scheduling overhead.
  */
-SCHED_FEAT(OWNER_SPIN, 1)
+SCHED_FEAT(OWNER_SPIN, true)
 
 /*
  * Decrement CPU power based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, 1)
+SCHED_FEAT(NONTASK_POWER, true)
 
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, 1)
+SCHED_FEAT(TTWU_QUEUE, true)
 
-SCHED_FEAT(FORCE_SD_OVERLAP, 0)
-SCHED_FEAT(RT_RUNTIME_SHARE, 1)
+SCHED_FEAT(FORCE_SD_OVERLAP, false)
+SCHED_FEAT(RT_RUNTIME_SHARE, true)
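
Hedged editorial note: the defaults change from 0/1 to false/true because the second SCHED_FEAT() argument is now token-pasted rather than merely tested. A numeric 1 would paste to names like jump_label_key__1, which do not exist; true/false resolve to the helpers defined in the core.c and sched.h hunks. Sketch of the paste for one feature:

/*
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) is pasted by its two users into:
 *
 *	jump_label_key__##enabled -> jump_label_key__true
 *				  -> jump_label_key_enabled     (key array, core.c hunk)
 *
 *	static_branch__##enabled  -> static_branch__true
 *				  -> likely(static_branch(key)) (sched_feat(), sched.h hunk)
 */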
+27 −0
@@ -581,6 +581,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
+# include <linux/jump_label.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -593,11 +594,37 @@ extern const_debug unsigned int sysctl_sched_features;
 
 enum {
 #include "features.h"
+	__SCHED_FEAT_NR,
 };
 
 #undef SCHED_FEAT
 
+#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+static __always_inline bool static_branch__true(struct jump_label_key *key)
+{
+	return likely(static_branch(key)); /* Not out of line branch. */
+}
+
+static __always_inline bool static_branch__false(struct jump_label_key *key)
+{
+	return unlikely(static_branch(key)); /* Out of line branch. */
+}
+
+#define SCHED_FEAT(name, enabled)					\
+static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+{									\
+	return static_branch__##enabled(key);				\
+}
+
+#include "features.h"
+
+#undef SCHED_FEAT
+
+extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 
 static inline u64 global_rt_period(void)
 {
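
One last hedged sketch: static_branch__true() and static_branch__false() differ only in the likely()/unlikely() hint, so a feature that defaults to true keeps the guarded code on the straight-line path while a default-false feature has it emitted out of line; sched_feat(x) simply picks the per-feature wrapper generated above. Expansion for NEXT_BUDDY (default false), assuming CONFIG_SCHED_DEBUG and HAVE_JUMP_LABEL:

/*
 * sched_feat(NEXT_BUDDY)
 *   -> static_branch_NEXT_BUDDY(&sched_feat_keys[__SCHED_FEAT_NEXT_BUDDY])
 *   -> static_branch__false(&sched_feat_keys[__SCHED_FEAT_NEXT_BUDDY])
 *   -> unlikely(static_branch(&sched_feat_keys[__SCHED_FEAT_NEXT_BUDDY]))
 *
 * i.e. the guarded block stays out of line until the feature is enabled
 * at runtime and the jump label is patched in.
 */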