Commit d14c8a68 authored by Ingo Molnar

Merge branch 'sched/for-linus' into tracing/for-linus

parents d59fdcf2 873a6ed6
Documentation/scheduler/sched-domains.txt (+2 −5)
@@ -61,10 +61,7 @@ builder by #define'ing ARCH_HASH_SCHED_DOMAIN, and exporting your
 arch_init_sched_domains function. This function will attach domains to all
 CPUs using cpu_attach_domain.
 
-Implementors should change the line
-#undef SCHED_DOMAIN_DEBUG
-to
-#define SCHED_DOMAIN_DEBUG
-in kernel/sched.c as this enables an error checking parse of the sched domains
+The sched-domains debugging infrastructure can be enabled by enabling
+CONFIG_SCHED_DEBUG. This enables an error checking parse of the sched domains
 which should catch most possible errors (described above). It also prints out
 the domain structure in a visual format.
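
For orientation (not part of the patch): with the new scheme the debug parse compiles in when CONFIG_SCHED_DEBUG is set and compiles away otherwise, roughly along these lines; this is a sketch, not the verbatim kernel/sched.c source, and the body of the check is elided.

#ifdef CONFIG_SCHED_DEBUG
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	/* walk the domain tree, validate flags/spans, print the hierarchy */
}
#else
/* compiled out: call sites need no #ifdef of their own */
# define sched_domain_debug(sd, cpu) do { } while (0)
#endif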
Documentation/scheduler/sched-rt-group.txt (+2 −2)
@@ -51,9 +51,9 @@ needs only about 3% CPU time to do so, it can do with a 0.03 * 0.005s =
 0.00015s. So this group can be scheduled with a period of 0.005s and a run time
 of 0.00015s.
 
-The remaining CPU time will be used for user input and other tass. Because
+The remaining CPU time will be used for user input and other tasks. Because
 realtime tasks have explicitly allocated the CPU time they need to perform
-their tasks, buffer underruns in the graphocs or audio can be eliminated.
+their tasks, buffer underruns in the graphics or audio can be eliminated.
 
 NOTE: the above example is not fully implemented as of yet (2.6.25). We still
 lack an EDF scheduler to make non-uniform periods usable.
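
To make the arithmetic in the documentation concrete: a group that needs 3% of the CPU over a 0.005 s period gets 0.03 * 5000 us = 150 us of runtime per period. A small userspace C sketch of the same calculation (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	const unsigned long period_us = 5000;	/* the 0.005 s period */
	const double cpu_share = 0.03;		/* the group's 3% CPU need */
	/* 0.03 * 0.005 s = 0.00015 s = 150 us of runtime per period */
	unsigned long runtime_us = (unsigned long)(cpu_share * period_us);

	printf("period = %lu us, runtime = %lu us\n", period_us, runtime_us);
	return 0;
}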
include/linux/sched.h (+30 −29)
@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
@@ -784,6 +783,8 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
+	u64 last_update;
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -823,23 +824,6 @@ extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
 
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
-	unsigned long cpu;
-
-	for_each_online_cpu(cpu) {
-		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-			return 1;
-	}
-	return 0;
-}
-
 struct io_context;			/* See blkdev.h */
 #define NGROUPS_SMALL		32
 #define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
@@ -921,8 +905,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1039,6 +1023,7 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
+	unsigned int rt_priority;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
@@ -1122,7 +1107,6 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
@@ -1141,12 +1125,12 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned securebits;
 	struct user_struct *user;
+	unsigned securebits;
 #ifdef CONFIG_KEYS
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1233,8 +1217,8 @@ struct task_struct {
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
 /* journalling filesystem info */
@@ -1262,10 +1246,6 @@ struct task_struct {
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
 	cputime_t acct_stimexpd;/* stime since last update */
 #endif
-#ifdef CONFIG_NUMA
-  	struct mempolicy *mempolicy;
-	short il_next;
-#endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
@@ -1284,6 +1264,10 @@ struct task_struct {
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
+#endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
 #endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -1504,6 +1488,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
@@ -1573,13 +1558,28 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-#else
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#endif
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
+#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -1622,6 +1622,7 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
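
A side note on the sched_clock hunk above: it follows the common kernel header idiom of pairing extern declarations with empty static inline stubs, so call sites never need #ifdefs of their own. A generic sketch of the pattern (the option and function names here are hypothetical, not from this patch):

#ifdef CONFIG_SOME_FEATURE			/* hypothetical option */
extern void feature_tick_stop(int cpu);		/* real versions live in a .c file */
extern void feature_tick_start(int cpu);
#else
/* feature compiled out: empty inline stubs keep callers #ifdef-free */
static inline void feature_tick_stop(int cpu) { }
static inline void feature_tick_start(int cpu) { }
#endif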
kernel/Makefile (+3 −2)
@@ -3,7 +3,7 @@
 #
 
 obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
-	    exit.o itimer.o time.o softirq.o resource.o \
+	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
@@ -27,7 +27,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
@@ -69,6 +69,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_SMP) += sched_cpupri.o
 
 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
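
A reminder of the kbuild semantics at work here (context, not part of the patch): objects on obj-y are always linked into the kernel, while obj-$(CONFIG_FOO) contributes to obj-y only when CONFIG_FOO=y. So the hunks above build cpu.o unconditionally, matching the kernel/cpu.c change below that makes it useful on UP as well, and build the new sched_cpupri.o only for SMP kernels:

obj-y += cpu.o                        # always built in
obj-$(CONFIG_SMP) += sched_cpupri.o   # expands to obj-y only if CONFIG_SMP=y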
kernel/cpu.c (+24 −0)
@@ -15,6 +15,28 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
+/*
+ * Represents all cpu's present in the system
+ * In systems capable of hotplug, this map could dynamically grow
+ * as new cpu's are detected in the system via any platform specific
+ * method, such as ACPI for e.g.
+ */
+cpumask_t cpu_present_map __read_mostly;
+EXPORT_SYMBOL(cpu_present_map);
+
+#ifndef CONFIG_SMP
+
+/*
+ * Represents all cpu's that are currently online.
+ */
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_online_map);
+
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
+
+#else /* CONFIG_SMP */
+
 /* Serializes the updates to cpu_online_map, cpu_present_map */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
@@ -403,3 +425,5 @@ out:
 	cpu_maps_update_done();
 }
 #endif /* CONFIG_PM_SLEEP_SMP */
+
+#endif /* CONFIG_SMP */
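
For context, a sketch of how kernel code typically consumes the maps this file now defines for both SMP and UP builds; for_each_online_cpu(), for_each_possible_cpu(), and cpu_present() are the standard cpumask accessors of this era, and the function name here is only illustrative:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void report_cpus(void)
{
	int cpu;

	/* iterates cpu_online_map */
	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu%d: online\n", cpu);

	/* iterates cpu_possible_map; cpu_present() tests cpu_present_map */
	for_each_possible_cpu(cpu)
		if (!cpu_present(cpu))
			printk(KERN_INFO "cpu%d: possible but not present\n", cpu);
}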