Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 86de6cbe authored by Olav Haugan's avatar Olav Haugan
Browse files

sched/core: Add protection against null-pointer dereference



p->grp is being accessed outside of the lock, which can cause a null-pointer
dereference. Fix this and also add an RCU critical section around accesses
of this data structure.

CRs-fixed: 985379
Change-Id: Ic82de6ae2821845d704f0ec18046cc6a24f98e39
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
parent 9aa2c45c
Loading
Loading
Loading
Loading
+14 −9
Original line number Diff line number Diff line
@@ -3370,7 +3370,7 @@ static void remove_task_from_group(struct task_struct *p)

	rq = __task_rq_lock(p);
	list_del_init(&p->grp_list);
	p->grp = NULL;
	rcu_assign_pointer(p->grp, NULL);
	__task_rq_unlock(rq);

	if (!list_empty(&grp->tasks)) {
@@ -3400,7 +3400,7 @@ add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
	 * reference of p->grp in various hot-paths
	 */
	rq = __task_rq_lock(p);
	p->grp = grp;
	rcu_assign_pointer(p->grp, grp);
	list_add(&p->grp_list, &grp->tasks);
	__task_rq_unlock(rq);

@@ -3469,12 +3469,13 @@ done:

/*
 * sched_get_group_id - return the id of the related-thread-group @p
 * belongs to, or 0 if the task is in no group.
 *
 * p->grp may be cleared concurrently (see remove_task_from_group(), which
 * publishes NULL with rcu_assign_pointer()), so the pointer must be read
 * exactly once via rcu_dereference() inside an RCU read-side critical
 * section instead of being dereferenced twice outside any protection.
 */
unsigned int sched_get_group_id(struct task_struct *p)
{
	unsigned int group_id;
	struct related_thread_group *grp;

	rcu_read_lock();
	grp = task_related_thread_group(p);
	group_id = grp ? grp->id : 0;
	rcu_read_unlock();

	return group_id;
}
@@ -3664,7 +3665,7 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
	 * has passed since we last updated preference
	 */
	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
		sched_ktime_clock() - p->grp->last_update > sched_ravg_window)
		sched_ktime_clock() - grp->last_update > sched_ravg_window)
		return 1;

	return 0;
@@ -4398,15 +4399,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)

	raw_spin_lock(&rq->lock);
	old_load = task_load(p);
	grp = task_related_thread_group(p);
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	heavy_task = heavy_task_wakeup(p, rq, TASK_WAKE);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	raw_spin_unlock(&rq->lock);

	rcu_read_lock();
	grp = task_related_thread_group(p);
	if (update_preferred_cluster(grp, p, old_load))
		set_preferred_cluster(grp);
	rcu_read_unlock();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;
@@ -5340,7 +5343,6 @@ void scheduler_tick(void)

	raw_spin_lock(&rq->lock);
	old_load = task_load(curr);
	grp = task_related_thread_group(curr);
	set_window_start(rq);
	update_rq_clock(rq);
	curr->sched_class->task_tick(rq, curr, 0);
@@ -5362,8 +5364,11 @@ void scheduler_tick(void)
#endif
	rq_last_tick_reset(rq);

	rcu_read_lock();
	grp = task_related_thread_group(curr);
	if (update_preferred_cluster(grp, curr, old_load))
		set_preferred_cluster(grp);
	rcu_read_unlock();

	if (curr->sched_class == &fair_sched_class)
		check_for_migration(rq, curr);
+14 −6
Original line number Diff line number Diff line
@@ -2910,7 +2910,7 @@ preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)

	rcu_read_lock();

	grp = p->grp;
	grp = task_related_thread_group(p);
	if (!grp || !sysctl_sched_enable_colocation)
		rc = 1;
	else
@@ -3283,7 +3283,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,

	rcu_read_lock();

	grp = p->grp;
	grp = task_related_thread_group(p);

	if (grp && grp->preferred_cluster) {
		pref_cluster = grp->preferred_cluster;
@@ -3827,6 +3827,7 @@ static inline void reset_balance_interval(int cpu)
static inline int migration_needed(struct task_struct *p, int cpu)
{
	int nice;
	struct related_thread_group *grp;

	if (!sched_enable_hmp || p->state != TASK_RUNNING)
		return 0;
@@ -3839,12 +3840,19 @@ static inline int migration_needed(struct task_struct *p, int cpu)
		return IRQLOAD_MIGRATION;

	nice = task_nice(p);
	if (!p->grp && (nice > sched_upmigrate_min_nice ||
		 upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity)
	rcu_read_lock();
	grp = task_related_thread_group(p);
	if (!grp && (nice > sched_upmigrate_min_nice ||
	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
		rcu_read_unlock();
		return DOWN_MIGRATION;
	}

	if (!p->grp && !task_will_fit(p, cpu))
	if (!grp && !task_will_fit(p, cpu)) {
		rcu_read_unlock();
		return UP_MIGRATION;
	}
	rcu_read_unlock();

	return 0;
}
@@ -4007,7 +4015,7 @@ void init_new_task_load(struct task_struct *p)
	p->init_load_pct = 0;
	memset(&p->ravg, 0, sizeof(struct ravg));
	p->se.avg.decay_count	= 0;
	p->grp = NULL;
	rcu_assign_pointer(p->grp, NULL);
	INIT_LIST_HEAD(&p->grp_list);

	if (init_load_pct) {
+1 −1
Original line number Diff line number Diff line
@@ -1157,7 +1157,7 @@ static inline int sched_cpu_high_irqload(int cpu)
/*
 * task_related_thread_group - RCU-safe accessor for p->grp.
 *
 * Callers must hold rcu_read_lock() (or otherwise pin the group);
 * rcu_dereference() pairs with the rcu_assign_pointer() updates of
 * p->grp and prevents the compiler/CPU from re-reading a pointer that
 * a concurrent remove_task_from_group() may set to NULL.
 */
static inline
struct related_thread_group *task_related_thread_group(struct task_struct *p)
{
	return rcu_dereference(p->grp);
}

#else	/* CONFIG_SCHED_HMP */