
Commit 7437cd7c authored by Joonwoo Park

sched: pre-allocate colocation groups



At present, sched_set_group_id() dynamically allocates the structure for a
colocation group when assigning the given task to that group.  However
this can cause a deadlock, because the memory allocator can wake up a task
which also tries to acquire related_thread_group_lock.

Avoid such deadlocks by pre-allocating the colocation group structures.  This
limits the maximum number of colocation groups to a static value, but that is
acceptable since the number of groups is never expected to be large.

Change-Id: Ifc32ab4ead63c382ae390358ed86f7cc5b6eb2dc
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 9aa1df0c
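
The fix follows a common pattern: do all allocation at boot, where the allocator is free to sleep and wake other tasks, and turn the path that runs under related_thread_group_lock into a plain table lookup. Below is a minimal user-space sketch of that pre-allocate-then-lookup idea; it only illustrates the pattern, and the names group_pool, pool_init() and pool_lookup() are invented for this sketch, not taken from the patch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_GROUPS 20   /* mirrors MAX_NUM_CGROUP_COLOC_ID; id 0 stays unused */

struct group {
	int id;
};

/* Table filled once at init time; later paths only read it. */
static struct group *group_pool[MAX_GROUPS];

/* Init-time path: allocating here is harmless, nothing holds the hot lock yet. */
static int pool_init(void)
{
	int i;

	for (i = 1; i < MAX_GROUPS; i++) {
		group_pool[i] = calloc(1, sizeof(*group_pool[i]));
		if (!group_pool[i])
			goto err;
		group_pool[i]->id = i;
	}
	return 0;

err:
	while (--i >= 1) {
		free(group_pool[i]);
		group_pool[i] = NULL;
	}
	return -ENOMEM;
}

/* Hot path: a bounds check and an array read, no allocation under any lock. */
static struct group *pool_lookup(unsigned int id)
{
	if (id == 0 || id >= MAX_GROUPS)
		return NULL;
	return group_pool[id];
}

int main(void)
{
	if (pool_init())
		return 1;
	printf("group 3 has id %d\n", pool_lookup(3)->id);
	return 0;
}

The kernel patch applies the same split: alloc_related_thread_groups() is called once from sched_init(), and lookup_related_thread_group() is reduced to an array index.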
+2 −0
@@ -2401,6 +2401,8 @@ struct cpu_cycle_counter_cb {
 	u64 (*get_cpu_cycle_counter)(int cpu);
 };
 
+#define MAX_NUM_CGROUP_COLOC_ID	20
+
 #ifdef CONFIG_SCHED_HMP
 extern void free_task_load_ptrs(struct task_struct *p);
 extern int sched_set_window(u64 window_start, unsigned int window_size);
+3 −0
@@ -8071,6 +8071,9 @@ void __init sched_init(void)
 		atomic_set(&rq->nr_iowait, 0);
 	}
 
+	i = alloc_related_thread_groups();
+	BUG_ON(i);
+
 	set_hmp_defaults();
 
 	set_load_weight(&init_task);
+100 −95
@@ -784,11 +784,12 @@ __read_mostly unsigned int sched_major_task_runtime = 10000000;
 
 static unsigned int sync_cpu;
 
-static LIST_HEAD(related_thread_groups);
+struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
+static LIST_HEAD(active_related_thread_groups);
 static DEFINE_RWLOCK(related_thread_group_lock);
 
 #define for_each_related_thread_group(grp) \
-	list_for_each_entry(grp, &related_thread_groups, list)
+	list_for_each_entry(grp, &active_related_thread_groups, list)
 
 /*
  * Task load is categorized into buckets for the purpose of top task tracking.
@@ -3052,7 +3053,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 
 	read_unlock(&tasklist_lock);
 
-	list_for_each_entry(grp, &related_thread_groups, list) {
+	list_for_each_entry(grp, &active_related_thread_groups, list) {
 		int j;
 
 		for_each_possible_cpu(j) {
@@ -3965,47 +3966,54 @@ _group_cpu_time(struct related_thread_group *grp, int cpu)
 	return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
 }
 
-struct related_thread_group *alloc_related_thread_group(int group_id)
+static inline struct related_thread_group*
+lookup_related_thread_group(unsigned int group_id)
 {
-	struct related_thread_group *grp;
-
-	grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
-	if (!grp)
-		return ERR_PTR(-ENOMEM);
-
-	if (alloc_group_cputime(grp)) {
-		kfree(grp);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	grp->id = group_id;
-	INIT_LIST_HEAD(&grp->tasks);
-	INIT_LIST_HEAD(&grp->list);
-	raw_spin_lock_init(&grp->lock);
-
-	return grp;
+	return related_thread_groups[group_id];
 }
 
-struct related_thread_group *lookup_related_thread_group(unsigned int group_id)
+int alloc_related_thread_groups(void)
 {
+	int i, ret;
 	struct related_thread_group *grp;
 
-	list_for_each_entry(grp, &related_thread_groups, list) {
-		if (grp->id == group_id)
-			return grp;
+	/* groupd_id = 0 is invalid as it's special id to remove group. */
+	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+		grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
+		if (!grp) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		if (alloc_group_cputime(grp)) {
+			kfree(grp);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		grp->id = i;
+		INIT_LIST_HEAD(&grp->tasks);
+		INIT_LIST_HEAD(&grp->list);
+		raw_spin_lock_init(&grp->lock);
+
+		related_thread_groups[i] = grp;
 	}
 
-	return NULL;
-}
+	return 0;
 
-/* See comments before preferred_cluster() */
-static void free_related_thread_group(struct rcu_head *rcu)
-{
-	struct related_thread_group *grp = container_of(rcu, struct
-			related_thread_group, rcu);
-
-	free_group_cputime(grp);
-	kfree(grp);
+err:
+	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+		grp = lookup_related_thread_group(i);
+		if (grp) {
+			free_group_cputime(grp);
+			kfree(grp);
+			related_thread_groups[i] = NULL;
+		} else {
+			break;
+		}
+	}
+
+	return ret;
 }
 
 static void remove_task_from_group(struct task_struct *p)
@@ -4030,10 +4038,12 @@ static void remove_task_from_group(struct task_struct *p)
 	raw_spin_unlock(&grp->lock);
 
 	/* Reserved groups cannot be destroyed */
-	if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID) {
-		list_del(&grp->list);
-		call_rcu(&grp->rcu, free_related_thread_group);
-	}
+	if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
+		 /*
+		  * We test whether grp->list is attached with list_empty()
+		  * hence re-init the list after deletion.
+		  */
+		list_del_init(&grp->list);
 }
 
 static int
@@ -4105,53 +4115,15 @@ void add_new_task_to_grp(struct task_struct *new)
 	write_unlock_irqrestore(&related_thread_group_lock, flags);
 }
 
-#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
-/*
- * We create a default colocation group at boot. There is no need to
- * synchronize tasks between cgroups at creation time because the
- * correct cgroup hierarchy is not available at boot. Therefore cgroup
- * colocation is turned off by default even though the colocation group
- * itself has been allocated. Furthermore this colocation group cannot
- * be destroyted once it has been created. All of this has been as part
- * of runtime optimizations.
- *
- * The job of synchronizing tasks to the colocation group is done when
- * the colocation flag in the cgroup is turned on.
- */
-static int __init create_default_coloc_group(void)
-{
-	struct related_thread_group *grp = NULL;
-	unsigned long flags;
-
-	grp = alloc_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
-	if (IS_ERR(grp)) {
-		WARN_ON(1);
-		return -ENOMEM;
-	}
-
-	write_lock_irqsave(&related_thread_group_lock, flags);
-	list_add(&grp->list, &related_thread_groups);
-	write_unlock_irqrestore(&related_thread_group_lock, flags);
-
-	update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
-	return 0;
-}
-late_initcall(create_default_coloc_group);
-
-int sync_cgroup_colocation(struct task_struct *p, bool insert)
-{
-	unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
-
-	return sched_set_group_id(p, grp_id);
-}
-#endif
-
-int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
 {
 	int rc = 0;
 	unsigned long flags;
 	struct related_thread_group *grp = NULL;
 
+	if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
+		return -EINVAL;
+
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	write_lock(&related_thread_group_lock);
 
@@ -4167,29 +4139,26 @@ int sched_set_group_id(struct task_struct *p, unsigned int group_id)
 	}
 
 	grp = lookup_related_thread_group(group_id);
-	if (!grp) {
-		/* This is a reserved id */
-		if (group_id == DEFAULT_CGROUP_COLOC_ID) {
-			rc = -EINVAL;
-			goto done;
-		}
-
-		grp = alloc_related_thread_group(group_id);
-		if (IS_ERR(grp)) {
-			rc = -ENOMEM;
-			goto done;
-		}
-
-		list_add(&grp->list, &related_thread_groups);
-	}
+	if (list_empty(&grp->list))
+		list_add(&grp->list, &active_related_thread_groups);
 
 	rc = add_task_to_group(p, grp);
 done:
 	write_unlock(&related_thread_group_lock);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	return rc;
 }
 
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+	/* DEFAULT_CGROUP_COLOC_ID is a reserved id */
+	if (group_id == DEFAULT_CGROUP_COLOC_ID)
+		return -EINVAL;
+
+	return __sched_set_group_id(p, group_id);
+}
+
 unsigned int sched_get_group_id(struct task_struct *p)
 {
 	unsigned int group_id;
@@ -4203,6 +4172,42 @@ unsigned int sched_get_group_id(struct task_struct *p)
 	return group_id;
 }
 
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+/*
+ * We create a default colocation group at boot. There is no need to
+ * synchronize tasks between cgroups at creation time because the
+ * correct cgroup hierarchy is not available at boot. Therefore cgroup
+ * colocation is turned off by default even though the colocation group
+ * itself has been allocated. Furthermore this colocation group cannot
+ * be destroyted once it has been created. All of this has been as part
+ * of runtime optimizations.
+ *
+ * The job of synchronizing tasks to the colocation group is done when
+ * the colocation flag in the cgroup is turned on.
+ */
+static int __init create_default_coloc_group(void)
+{
+	struct related_thread_group *grp = NULL;
+	unsigned long flags;
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+	write_lock_irqsave(&related_thread_group_lock, flags);
+	list_add(&grp->list, &active_related_thread_groups);
+	write_unlock_irqrestore(&related_thread_group_lock, flags);
+
+	update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
+	return 0;
+}
+late_initcall(create_default_coloc_group);
+
+int sync_cgroup_colocation(struct task_struct *p, bool insert)
+{
+	unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
+
+	return __sched_set_group_id(p, grp_id);
+}
+#endif
+
 static void update_cpu_cluster_capacity(const cpumask_t *cpus)
 {
 	int i;
+3 −0
@@ -1448,6 +1448,8 @@ static inline void update_cgroup_boost_settings(void) { }
 static inline void restore_cgroup_boost_settings(void) { }
 #endif
 
+extern int alloc_related_thread_groups(void);
+
 #else	/* CONFIG_SCHED_HMP */
 
 struct hmp_sched_stats;
@@ -1638,6 +1640,7 @@ static inline void set_hmp_defaults(void) { }
 
 static inline void clear_reserved(int cpu) { }
 static inline void sched_boost_parse_dt(void) {}
+static inline int alloc_related_thread_groups(void) { return 0; }
 
 #define trace_sched_cpu_load(...)
 #define trace_sched_cpu_load_lb(...)