Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7c941438 authored by Dhaval Giani, committed by Ingo Molnar
Browse files

sched: Remove USER_SCHED

Remove the USER_SCHED feature. It has been scheduled to be removed in
2.6.34 as per http://marc.info/?l=linux-kernel&m=125728479022976&w=2



Signed-off-by: Dhaval Giani <dhaval.giani@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1263990378.24844.3.camel@localhost>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 871e35bc
Loading
Loading
Loading
Loading
+0 −15
Original line number Diff line number Diff line
@@ -6,21 +6,6 @@ be removed from this file.

---------------------------

What:	USER_SCHED
When:	2.6.34

Why:	USER_SCHED was implemented as a proof of concept for group scheduling.
	The effect of USER_SCHED can already be achieved from userspace with
	the help of libcgroup. The removal of USER_SCHED will also simplify
	the scheduler code with the removal of one major ifdef. There are also
	issues USER_SCHED has with USER_NS. A decision was taken not to fix
	those and instead remove USER_SCHED. Also new group scheduling
	features will not be implemented for USER_SCHED.

Who:	Dhaval Giani <dhaval@linux.vnet.ibm.com>

---------------------------

What:	PRISM54
When:	2.6.34

+1 −13
Original line number Diff line number Diff line
@@ -731,14 +731,6 @@ struct user_struct {
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct delayed_work work;
#endif
#endif

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
@@ -2502,13 +2494,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED

extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
+30 −51
Original line number Diff line number Diff line
@@ -435,57 +435,6 @@ config LOG_BUF_SHIFT
config HAVE_UNSTABLE_SCHED_CLOCK
	bool

config GROUP_SCHED
	bool "Group CPU scheduler"
	depends on EXPERIMENTAL
	default n
	help
	  This feature lets CPU scheduler recognize task groups and control CPU
	  bandwidth allocation to such task groups.
	  In order to create a group from arbitrary set of processes, use
	  CONFIG_CGROUPS. (See Control Group support.)

config FAIR_GROUP_SCHED
	bool "Group scheduling for SCHED_OTHER"
	depends on GROUP_SCHED
	default GROUP_SCHED

config RT_GROUP_SCHED
	bool "Group scheduling for SCHED_RR/FIFO"
	depends on EXPERIMENTAL
	depends on GROUP_SCHED
	default n
	help
	  This feature lets you explicitly allocate real CPU bandwidth
	  to users or control groups (depending on the "Basis for grouping tasks"
	  setting below. If enabled, it will also make it impossible to
	  schedule realtime tasks for non-root users until you allocate
	  realtime bandwidth for them.
	  See Documentation/scheduler/sched-rt-group.txt for more information.

choice
	depends on GROUP_SCHED
	prompt "Basis for grouping tasks"
	default USER_SCHED

config USER_SCHED
	bool "user id"
	help
	  This option will choose userid as the basis for grouping
	  tasks, thus providing equal CPU bandwidth to each user.

config CGROUP_SCHED
	bool "Control groups"
 	depends on CGROUPS
 	help
	  This option allows you to create arbitrary task groups
	  using the "cgroup" pseudo filesystem and control
	  the cpu bandwidth allocated to each such task group.
	  Refer to Documentation/cgroups/cgroups.txt for more
	  information on "cgroup" pseudo filesystem.

endchoice

menuconfig CGROUPS
	boolean "Control Group support"
	help
@@ -606,6 +555,36 @@ config CGROUP_MEM_RES_CTLR_SWAP
	  Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
	  size is 4096bytes, 512k per 1Gbytes of swap.

menuconfig CGROUP_SCHED
	bool "Group CPU scheduler"
	depends on EXPERIMENTAL && CGROUPS
	default n
	help
	  This feature lets CPU scheduler recognize task groups and control CPU
	  bandwidth allocation to such task groups. It uses cgroups to group
	  tasks.

if CGROUP_SCHED
config FAIR_GROUP_SCHED
	bool "Group scheduling for SCHED_OTHER"
	depends on CGROUP_SCHED
	default CGROUP_SCHED

config RT_GROUP_SCHED
	bool "Group scheduling for SCHED_RR/FIFO"
	depends on EXPERIMENTAL
	depends on CGROUP_SCHED
	default n
	help
	  This feature lets you explicitly allocate real CPU bandwidth
	  to users or control groups (depending on the "Basis for grouping tasks"
	  setting below. If enabled, it will also make it impossible to
	  schedule realtime tasks for non-root users until you allocate
	  realtime bandwidth for them.
	  See Documentation/scheduler/sched-rt-group.txt for more information.

endif #CGROUP_SCHED

endif # CGROUPS

config MM_OWNER
+0 −8
Original line number Diff line number Diff line
@@ -197,16 +197,8 @@ static int __init ksysfs_init(void)
			goto group_exit;
	}

	/* create the /sys/kernel/uids/ directory */
	error = uids_sysfs_init();
	if (error)
		goto notes_exit;

	return 0;

notes_exit:
	if (notes_size > 0)
		sysfs_remove_bin_file(kernel_kobj, &notes_attr);
group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
+7 −107
Original line number Diff line number Diff line
@@ -233,7 +233,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

@@ -243,13 +243,7 @@ static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
#ifdef CONFIG_CGROUP_SCHED
	struct cgroup_subsys_state css;
#endif

#ifdef CONFIG_USER_SCHED
	uid_t uid;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
@@ -274,35 +268,7 @@ struct task_group {
	struct list_head children;
};

#ifdef CONFIG_USER_SCHED

/* Helper function to pass uid information to create_sched_user() */
void set_tg_uid(struct user_struct *user)
{
	user->tg->uid = user->uid;
}

/*
 * Root task group.
 *	Every UID task group (including init_task_group aka UID-0) will
 *	be a child to this group.
 */
struct task_group root_task_group;

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
#endif /* CONFIG_USER_SCHED */

/* task_group_lock serializes add/remove of task groups and also changes to
 * a task group's cpu shares.
@@ -318,11 +284,7 @@ static int root_task_group_empty(void)
}
#endif

#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
#else /* !CONFIG_USER_SCHED */
# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
#endif /* CONFIG_USER_SCHED */

/*
 * A weight of 0 or 1 can cause arithmetics problems.
@@ -348,11 +310,7 @@ static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;

#ifdef CONFIG_USER_SCHED
	rcu_read_lock();
	tg = __task_cred(p)->user->tg;
	rcu_read_unlock();
#elif defined(CONFIG_CGROUP_SCHED)
#ifdef CONFIG_CGROUP_SCHED
	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
				struct task_group, css);
#else
@@ -383,7 +341,7 @@ static inline struct task_group *task_group(struct task_struct *p)
	return NULL;
}

#endif	/* CONFIG_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
@@ -7678,9 +7636,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_USER_SCHED
	alloc_size *= 2;
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	alloc_size += num_possible_cpus() * cpumask_size();
#endif
@@ -7694,13 +7649,6 @@ void __init sched_init(void)
		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#ifdef CONFIG_USER_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -7709,13 +7657,6 @@ void __init sched_init(void)
		init_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#ifdef CONFIG_USER_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
		for_each_possible_cpu(i) {
@@ -7735,22 +7676,13 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&init_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#ifdef CONFIG_USER_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), RUNTIME_INF);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED
	list_add(&init_task_group.list, &task_groups);
	INIT_LIST_HEAD(&init_task_group.children);

#ifdef CONFIG_USER_SCHED
	INIT_LIST_HEAD(&root_task_group.children);
	init_task_group.parent = &root_task_group;
	list_add(&init_task_group.siblings, &root_task_group.children);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_GROUP_SCHED */
#endif /* CONFIG_CGROUP_SCHED */

#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
@@ -7790,25 +7722,6 @@ void __init sched_init(void)
		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
		 */
		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
		root_task_group.shares = NICE_0_LOAD;
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
		/*
		 * In case of task-groups formed thr' the user id of tasks,
		 * init_task_group represents tasks belonging to root user.
		 * Hence it forms a sibling of all subsequent groups formed.
		 * In this case, init_task_group gets only a fraction of overall
		 * system cpu resource, based on the weight assigned to root
		 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
		 * by letting tasks of init_task_group sit in a separate cfs_rq
		 * (init_tg_cfs_rq) and having one entity represent this group of
		 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
		 */
		init_tg_cfs_entry(&init_task_group,
				&per_cpu(init_tg_cfs_rq, i),
				&per_cpu(init_sched_entity, i), i, 1,
				root_task_group.se[i]);

#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */

@@ -7817,12 +7730,6 @@ void __init sched_init(void)
		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
#ifdef CONFIG_CGROUP_SCHED
		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
		init_tg_rt_entry(&init_task_group,
				&per_cpu(init_rt_rq_var, i),
				&per_cpu(init_sched_rt_entity, i), i, 1,
				root_task_group.rt_se[i]);
#endif
#endif

@@ -8218,7 +8125,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
@@ -8327,7 +8234,7 @@ void sched_move_task(struct task_struct *tsk)

	task_rq_unlock(rq, &flags);
}
#endif /* CONFIG_GROUP_SCHED */
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
@@ -8469,13 +8376,6 @@ static int tg_schedulable(struct task_group *tg, void *data)
		runtime = d->rt_runtime;
	}

#ifdef CONFIG_USER_SCHED
	if (tg == &root_task_group) {
		period = global_rt_period();
		runtime = global_rt_runtime();
	}
#endif

	/*
	 * Cannot have more runtime than the period.
	 */
Loading