Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f66ffded authored by Linus Torvalds
Browse files

Merge branch 'sched-core-for-linus' of...

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix SCHED_MC regression caused by change in sched cpu_power
  sched: Don't use possibly stale sched_class
  kthread, sched: Remove reference to kthread_create_on_cpu
  sched: cpuacct: Use bigger percpu counter batch values for stats counters
  percpu_counter: Make __percpu_counter_add an inline function on UP
  sched: Remove member rt_se from struct rt_rq
  sched: Change usage of rt_rq->rt_se to rt_rq->tg->rt_se[cpu]
  sched: Remove unused update_shares_locked()
  sched: Use for_each_bit
  sched: Queue a deboosted task to the head of the RT prio queue
  sched: Implement head queueing for sched_rt
  sched: Extend enqueue_task to allow head queueing
  sched: Remove USER_SCHED
  sched: Fix the place where group powers are updated
  sched: Assume *balance is valid
  sched: Remove load_balance_newidle()
  sched: Unify load_balance{,_newidle}()
  sched: Add a lock break for PREEMPT=y
  sched: Remove from fwd decls
  sched: Remove rq_iterator from move_one_task
  ...

Fix up trivial conflicts in kernel/sched.c
parents 2531216f dd5feea1
Loading
Loading
Loading
Loading
+0 −15
Original line number Diff line number Diff line
@@ -6,21 +6,6 @@ be removed from this file.

---------------------------

What:	USER_SCHED
When:	2.6.34

Why:	USER_SCHED was implemented as a proof of concept for group scheduling.
	The effect of USER_SCHED can already be achieved from userspace with
	the help of libcgroup. The removal of USER_SCHED will also simplify
	the scheduler code with the removal of one major ifdef. There are also
	issues USER_SCHED has with USER_NS. A decision was taken not to fix
	those and instead remove USER_SCHED. Also new group scheduling
	features will not be implemented for USER_SCHED.

Who:	Dhaval Giani <dhaval@linux.vnet.ibm.com>

---------------------------

What:	PRISM54
When:	2.6.34

+3 −2
Original line number Diff line number Diff line
@@ -124,7 +124,7 @@ extern int _cond_resched(void);
#endif

#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
  void __might_sleep(char *file, int line, int preempt_offset);
  void __might_sleep(const char *file, int line, int preempt_offset);
/**
 * might_sleep - annotation for functions that can sleep
 *
@@ -138,7 +138,8 @@ extern int _cond_resched(void);
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
  static inline void __might_sleep(char *file, int line, int preempt_offset) { }
  static inline void __might_sleep(const char *file, int line,
				   int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif

+6 −3
Original line number Diff line number Diff line
@@ -98,9 +98,6 @@ static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
	fbc->count = amount;
}

#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -109,6 +106,12 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
+3 −22
Original line number Diff line number Diff line
@@ -740,14 +740,6 @@ struct user_struct {
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct delayed_work work;
#endif
#endif

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
@@ -1087,7 +1079,8 @@ struct sched_domain;
struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
			      bool head);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

@@ -1099,14 +1092,6 @@ struct sched_class {
#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -2520,13 +2505,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED

extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
+30 −51
Original line number Diff line number Diff line
@@ -461,57 +461,6 @@ config LOG_BUF_SHIFT
config HAVE_UNSTABLE_SCHED_CLOCK
	bool

config GROUP_SCHED
	bool "Group CPU scheduler"
	depends on EXPERIMENTAL
	default n
	help
	  This feature lets CPU scheduler recognize task groups and control CPU
	  bandwidth allocation to such task groups.
	  In order to create a group from arbitrary set of processes, use
	  CONFIG_CGROUPS. (See Control Group support.)

config FAIR_GROUP_SCHED
	bool "Group scheduling for SCHED_OTHER"
	depends on GROUP_SCHED
	default GROUP_SCHED

config RT_GROUP_SCHED
	bool "Group scheduling for SCHED_RR/FIFO"
	depends on EXPERIMENTAL
	depends on GROUP_SCHED
	default n
	help
	  This feature lets you explicitly allocate real CPU bandwidth
	  to users or control groups (depending on the "Basis for grouping tasks"
	  setting below. If enabled, it will also make it impossible to
	  schedule realtime tasks for non-root users until you allocate
	  realtime bandwidth for them.
	  See Documentation/scheduler/sched-rt-group.txt for more information.

choice
	depends on GROUP_SCHED
	prompt "Basis for grouping tasks"
	default USER_SCHED

config USER_SCHED
	bool "user id"
	help
	  This option will choose userid as the basis for grouping
	  tasks, thus providing equal CPU bandwidth to each user.

config CGROUP_SCHED
	bool "Control groups"
 	depends on CGROUPS
 	help
	  This option allows you to create arbitrary task groups
	  using the "cgroup" pseudo filesystem and control
	  the cpu bandwidth allocated to each such task group.
	  Refer to Documentation/cgroups/cgroups.txt for more
	  information on "cgroup" pseudo filesystem.

endchoice

menuconfig CGROUPS
	boolean "Control Group support"
	help
@@ -632,6 +581,36 @@ config CGROUP_MEM_RES_CTLR_SWAP
	  Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
	  size is 4096bytes, 512k per 1Gbytes of swap.

menuconfig CGROUP_SCHED
	bool "Group CPU scheduler"
	depends on EXPERIMENTAL && CGROUPS
	default n
	help
	  This feature lets CPU scheduler recognize task groups and control CPU
	  bandwidth allocation to such task groups. It uses cgroups to group
	  tasks.

if CGROUP_SCHED
config FAIR_GROUP_SCHED
	bool "Group scheduling for SCHED_OTHER"
	depends on CGROUP_SCHED
	default CGROUP_SCHED

config RT_GROUP_SCHED
	bool "Group scheduling for SCHED_RR/FIFO"
	depends on EXPERIMENTAL
	depends on CGROUP_SCHED
	default n
	help
	  This feature lets you explicitly allocate real CPU bandwidth
	  to users or control groups (depending on the "Basis for grouping tasks"
	  setting below). If enabled, it will also make it impossible to
	  schedule realtime tasks for non-root users until you allocate
	  realtime bandwidth for them.
	  See Documentation/scheduler/sched-rt-group.txt for more information.

endif #CGROUP_SCHED

endif # CGROUPS

config MM_OWNER
Loading