Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b7af31ed authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "Merge remote-tracking branch '318/dev/msm-3.18-sched' into msm318"

parents 6b7ceb20 90efbf1a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2760,7 +2760,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SCHED_HMP
	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
#ifndef CONFIG_SCHED_QHMP
	REG("sched_group_id",      S_IRUGO|S_IWUSR, proc_pid_sched_group_id_operations),
	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
#endif
#endif
#ifdef CONFIG_SCHED_DEBUG
+1 −0
Original line number Diff line number Diff line
@@ -77,6 +77,7 @@ extern unsigned int sysctl_sched_small_task_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_enable_colocation;
extern unsigned int sysctl_sched_restrict_cluster_spill;
extern unsigned int sysctl_sched_enable_thread_grouping;
#if defined(CONFIG_SCHED_FREQ_INPUT)
extern unsigned int sysctl_sched_new_task_windows;
extern unsigned int sysctl_sched_pred_alert_freq;
+43 −0
Original line number Diff line number Diff line
@@ -1672,6 +1672,12 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);

unsigned int __read_mostly sysctl_sched_enable_colocation = 1;

/*
 * Enable colocation for all threads in a process. Children
 * inherit the group id from the parent.
 */
unsigned int __read_mostly sysctl_sched_enable_thread_grouping = 0;

#ifdef CONFIG_SCHED_FREQ_INPUT

__read_mostly unsigned int sysctl_sched_new_task_windows = 5;
@@ -3420,6 +3426,42 @@ add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
	return 0;
}

/*
 * add_new_task_to_grp() - place a newly forked thread into its parent's
 * related_thread_group, so all threads of a process are colocated.
 *
 * Gated by the sysctl_sched_enable_thread_grouping sysctl. Only non-leader
 * threads are added: the group leader is skipped, since it either set its
 * own group id explicitly or has no group to inherit.
 *
 * Called from wake_up_new_task() with the new task's pi_lock held (see the
 * caller at the hunk around L92-96).
 */
static void add_new_task_to_grp(struct task_struct *new)
{
	unsigned long flags;
	struct related_thread_group *grp;
	struct task_struct *parent;

	/* Feature disabled: children do not inherit the group id. */
	if (!sysctl_sched_enable_thread_grouping)
		return;

	/* The group leader has nothing to inherit from. */
	if (thread_group_leader(new))
		return;

	parent = new->group_leader;

	/*
	 * The parent's pi_lock is required here to protect race
	 * against the parent task being removed from the
	 * group.
	 */
	raw_spin_lock_irqsave(&parent->pi_lock, flags);

	/* protected by pi_lock. */
	grp = task_related_thread_group(parent);
	if (!grp) {
		/* Parent is not in a group; nothing to inherit. */
		raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
		return;
	}
	/*
	 * Lock-order handoff: take grp->lock while still holding pi_lock
	 * so grp cannot be torn down in the window, then drop pi_lock
	 * before touching the group's task list.
	 */
	raw_spin_lock(&grp->lock);
	raw_spin_unlock_irqrestore(&parent->pi_lock, flags);

	/* Publish the group pointer for RCU readers, then link the task. */
	rcu_assign_pointer(new->grp, grp);
	list_add(&new->grp_list, &grp->tasks);

	raw_spin_unlock(&grp->lock);
}

int sched_set_group_id(struct task_struct *p, unsigned int group_id)
{
	int rc = 0, destroy = 0;
@@ -4894,6 +4936,7 @@ void wake_up_new_task(struct task_struct *p)

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	init_new_task_load(p);
	add_new_task_to_grp(p);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
+3 −0
Original line number Diff line number Diff line
@@ -1217,6 +1217,9 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
	return 0;
}

/* No-op stub used when CONFIG_SCHED_HMP is disabled. */
static inline void
add_new_task_to_grp(struct task_struct *new) {}

#endif	/* CONFIG_SCHED_HMP */

/*
+7 −0
Original line number Diff line number Diff line
@@ -474,6 +474,13 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler   = sched_hmp_proc_update_handler,
	},
	{
		.procname       = "sched_enable_thread_grouping",
		.data           = &sysctl_sched_enable_thread_grouping,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
#ifdef CONFIG_SCHED_FREQ_INPUT
	{
		.procname       = "sched_new_task_windows",