Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eff766a6 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

sched: fix the task_group hierarchy for UID grouping



UID grouping doesn't actually have a task_group representing the root of
the task_group tree. Add one.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ec7dc8ac
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -2051,6 +2051,9 @@ extern void normalize_rt_tasks(void);
#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_GROUP_SCHED


extern struct task_group init_task_group;
extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
#endif


extern struct task_group *sched_create_group(struct task_group *parent);
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_destroy_group(struct task_group *tg);
+41 −2
Original line number Original line Diff line number Diff line
@@ -274,6 +274,14 @@ struct task_group {
};
};


#ifdef CONFIG_USER_SCHED
#ifdef CONFIG_USER_SCHED

/*
 * Root task group.
 * 	Every UID task group (including init_task_group aka UID-0) will
 * 	be a child to this group.
 */
struct task_group root_task_group;

#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Default task group's sched entity on each cpu */
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
@@ -285,6 +293,8 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
#endif
#endif
#else
#define root_task_group init_task_group
#endif
#endif


/* task_group_lock serializes add/remove of task groups and also changes to
/* task_group_lock serializes add/remove of task groups and also changes to
@@ -7507,6 +7517,9 @@ void __init sched_init(void)
#endif
#endif
#ifdef CONFIG_RT_GROUP_SCHED
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_USER_SCHED
	alloc_size *= 2;
#endif
#endif
	/*
	/*
	 * As sched_init() is called before page_alloc is setup,
	 * As sched_init() is called before page_alloc is setup,
@@ -7521,12 +7534,29 @@ void __init sched_init(void)


		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
		ptr += nr_cpu_ids * sizeof(void **);

#ifdef CONFIG_USER_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
#endif
#endif
#endif
#ifdef CONFIG_RT_GROUP_SCHED
#ifdef CONFIG_RT_GROUP_SCHED
		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
		ptr += nr_cpu_ids * sizeof(void **);


		init_task_group.rt_rq = (struct rt_rq **)ptr;
		init_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#ifdef CONFIG_USER_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
#endif
#endif
#endif
	}
	}


@@ -7540,6 +7570,10 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&init_task_group.rt_bandwidth,
	init_rt_bandwidth(&init_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
			global_rt_period(), global_rt_runtime());
#ifdef CONFIG_USER_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), RUNTIME_INF);
#endif
#endif
#endif


#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_GROUP_SCHED
@@ -7582,6 +7616,8 @@ void __init sched_init(void)
		 */
		 */
		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
#elif defined CONFIG_USER_SCHED
		root_task_group.shares = NICE_0_LOAD;
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
		/*
		/*
		 * In case of task-groups formed thr' the user id of tasks,
		 * In case of task-groups formed thr' the user id of tasks,
		 * init_task_group represents tasks belonging to root user.
		 * init_task_group represents tasks belonging to root user.
@@ -7595,7 +7631,8 @@ void __init sched_init(void)
		 */
		 */
		init_tg_cfs_entry(&init_task_group,
		init_tg_cfs_entry(&init_task_group,
				&per_cpu(init_cfs_rq, i),
				&per_cpu(init_cfs_rq, i),
				&per_cpu(init_sched_entity, i), i, 1, NULL);
				&per_cpu(init_sched_entity, i), i, 1,
				root_task_group.se[i]);


#endif
#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -7606,9 +7643,11 @@ void __init sched_init(void)
#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED
		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
#elif defined CONFIG_USER_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
		init_tg_rt_entry(&init_task_group,
		init_tg_rt_entry(&init_task_group,
				&per_cpu(init_rt_rq, i),
				&per_cpu(init_rt_rq, i),
				&per_cpu(init_sched_rt_entity, i), i, 1, NULL);
				&per_cpu(init_sched_rt_entity, i), i, 1,
				root_task_group.rt_se[i]);
#endif
#endif
#endif
#endif


+1 −1
Original line number Original line Diff line number Diff line
@@ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up)
{
{
	int rc = 0;
	int rc = 0;


	up->tg = sched_create_group(NULL);
	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
	if (IS_ERR(up->tg))
		rc = -ENOMEM;
		rc = -ENOMEM;