Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a62214e8 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/walt: Improve the scheduler"

parents f277fa98 3d049096
Loading
Loading
Loading
Loading
+3 −4
Original line number Diff line number Diff line
@@ -2944,15 +2944,14 @@ extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#if defined(CONFIG_SCHED_TUNE)
extern bool task_sched_boost(struct task_struct *p);
extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
extern bool schedtune_task_colocated(struct task_struct *p);
extern void update_cgroup_boost_settings(void);
extern void restore_cgroup_boost_settings(void);

#else
static inline bool
same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
static inline bool schedtune_task_colocated(struct task_struct *p)
{
	return true;
	return false;
}

static inline bool task_sched_boost(struct task_struct *p)
+17 −5
Original line number Diff line number Diff line
@@ -208,11 +208,6 @@ static inline void init_sched_boost(struct schedtune *st)
	st->colocate_update_disabled = false;
}

/* Return true iff both tasks belong to the same schedtune cgroup. */
bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
{
	struct schedtune *st1 = task_schedtune(tsk1);
	struct schedtune *st2 = task_schedtune(tsk2);

	return st1 == st2;
}

void update_cgroup_boost_settings(void)
{
	int i;
@@ -546,6 +541,23 @@ static int sched_colocate_write(struct cgroup_subsys_state *css,
	return 0;
}

bool schedtune_task_colocated(struct task_struct *p)
{
	struct schedtune *st;
	bool colocated;

	if (unlikely(!schedtune_initialized))
		return false;

	/* Get task boost value */
	rcu_read_lock();
	st = task_schedtune(p);
	colocated = st->colocate;
	rcu_read_unlock();

	return colocated;
}

#else /* CONFIG_SCHED_WALT */

static inline void init_sched_boost(struct schedtune *st) { }
+11 −21
Original line number Diff line number Diff line
@@ -2648,7 +2648,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 * Enable colocation and frequency aggregation for all threads in a process.
 * The children inherits the group id from the parent.
 */
unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns;

struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
@@ -2920,34 +2919,25 @@ void add_new_task_to_grp(struct task_struct *new)
{
	unsigned long flags;
	struct related_thread_group *grp;
	struct task_struct *leader = new->group_leader;
	unsigned int leader_grp_id = sched_get_group_id(leader);

	if (!sysctl_sched_enable_thread_grouping &&
	    leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
		return;

	if (thread_group_leader(new))
	/*
	 * If the task does not belong to colocated schedtune
	 * cgroup, nothing to do. We are checking this without
	 * lock. Even if there is a race, it will be added
	 * to the co-located cgroup via cgroup attach.
	 */
	if (!schedtune_task_colocated(new))
		return;

	if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
		if (!same_schedtune(new, leader))
			return;
	}

	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
	write_lock_irqsave(&related_thread_group_lock, flags);

	rcu_read_lock();
	grp = task_related_thread_group(leader);
	rcu_read_unlock();

	/*
	 * It's possible that someone already added the new task to the
	 * group. A leader's thread group is updated prior to calling
	 * this function. It's also possible that the leader has exited
	 * the group. In either case, there is nothing else to do.
	 * group. or it might have taken out from the colocated schedtune
	 * cgroup. check these conditions under lock.
	 */
	if (!grp || new->grp) {
	if (!schedtune_task_colocated(new) || new->grp) {
		write_unlock_irqrestore(&related_thread_group_lock, flags);
		return;
	}