Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 19430adb authored by Srivatsa Vaddagiri's avatar Srivatsa Vaddagiri Committed by Matt Wagantall
Browse files

sched: Add userspace interface to set PF_WAKE_UP_IDLE



sched_prefer_idle flag controls whether tasks can be woken to any
available idle cpu. It may be desirable to set sched_prefer_idle to 0
so that most tasks wake up to non-idle cpus under mostly_idle
threshold and have specialized tasks override this behavior through
other means. PF_WAKE_UP_IDLE flag per task provides exactly that. It
lets tasks with PF_WAKE_UP_IDLE flag set be woken up to any available
idle cpu independent of sched_prefer_idle flag setting. Currently
only kernel-space API exists to set PF_WAKE_UP_IDLE flag for a task.
This patch adds a user-space API (in /proc filesystem) to set
PF_WAKE_UP_IDLE flag for a given task. /proc/[pid]/sched_wake_up_idle
file can be written to set or clear PF_WAKE_UP_IDLE flag for a given
task.

Change-Id: I13a37e740195e503f457ebe291d54e83b230fbeb
Signed-off-by: default avatarSrivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: default avatarSyed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent c73191b7
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
@@ -825,7 +825,16 @@ power-efficient cpu found while scanning cluster's online cpus.
- PF_WAKE_UP_IDLE
	Any task that has this flag set in its 'task_struct.flags' field will be
always woken to idle cpu. Further any task woken by such tasks will be also
placed on idle cpu.
placed on idle cpu. PF_WAKE_UP_IDLE flag is inherited by children of a task.
It can be modified for a task in two ways:

	> kernel-space interface
		set_wake_up_idle() needs to be called in the context of a task
		to set or clear its PF_WAKE_UP_IDLE flag.

	> user-space interface
		/proc/[pid]/sched_wake_up_idle file needs to be written to for
		setting or clearing PF_WAKE_UP_IDLE flag for a given task

For some low band of frequency, spread of task on all available cpus can be
grossly power-inefficient. As an example, consider two tasks that each need
+72 −2
Original line number Diff line number Diff line
@@ -1215,11 +1215,78 @@ static const struct file_operations proc_pid_sched_operations = {

#endif

#ifdef CONFIG_SCHED_HMP

/*
 * Print out various scheduling related per-task fields:
 */

#ifdef CONFIG_SMP

/*
 * seq_file show handler for /proc/[pid]/sched_wake_up_idle.
 * Prints "1\n" if the target task has PF_WAKE_UP_IDLE set, "0\n" otherwise.
 * Returns -ESRCH if the task no longer exists.
 */
static int sched_wake_up_idle_show(struct seq_file *m, void *v)
{
	struct task_struct *task = get_proc_task((struct inode *)m->private);

	if (!task)
		return -ESRCH;

	seq_printf(m, "%d\n", sched_get_wake_up_idle(task));
	put_task_struct(task);

	return 0;
}

/*
 * Write handler for /proc/[pid]/sched_wake_up_idle.
 *
 * Parses a numeric string from userspace: non-zero sets PF_WAKE_UP_IDLE
 * on the target task, zero clears it.
 *
 * Returns the number of bytes consumed on success, or a negative errno:
 * -EFAULT on copy_from_user() failure, a kstrtoint() error on malformed
 * input, or -ESRCH if the task has gone away.
 */
static ssize_t
sched_wake_up_idle_write(struct file *file, const char __user *buf,
	    size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	char buffer[PROC_NUMBUF];
	int wake_up_idle, err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &wake_up_idle);
	if (err)
		goto out;

	p = get_proc_task(inode);
	if (!p) {
		/* Exit via the common path so every failure returns through 'out'. */
		err = -ESRCH;
		goto out;
	}

	err = sched_set_wake_up_idle(p, wake_up_idle);

	put_task_struct(p);

out:
	return err < 0 ? err : count;
}

/* Open handler: bind the show callback to this inode via single_open(). */
static int sched_wake_up_idle_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_wake_up_idle_show, inode);
}

/* File operations for /proc/[pid]/sched_wake_up_idle (seq_file backed). */
static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
	.open		= sched_wake_up_idle_open,
	.read		= seq_read,
	.write		= sched_wake_up_idle_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif	/* CONFIG_SMP */

#ifdef CONFIG_SCHED_HMP

static int sched_init_task_load_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
@@ -2568,6 +2635,9 @@ static const struct pid_entry tgid_base_stuff[] = {
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",	  S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SMP
	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
#endif
#ifdef CONFIG_SCHED_HMP
	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
#endif
+3 −0
Original line number Diff line number Diff line
@@ -2157,6 +2157,9 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
}
#endif

extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
extern u32 sched_get_wake_up_idle(struct task_struct *p);

#ifdef CONFIG_SCHED_HMP

extern int sched_set_boost(int enable);
+20 −1
Original line number Diff line number Diff line
@@ -83,7 +83,7 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
 * Controls whether, when SD_SHARE_PKG_RESOURCES is on, if all
 * tasks go to idle CPUs when woken. If this is off, note that the
 * per-task flag PF_WAKE_ON_IDLE can still cause a task to go to an
 * per-task flag PF_WAKE_UP_IDLE can still cause a task to go to an
 * idle CPU upon being woken.
 */
unsigned int __read_mostly sysctl_sched_wake_to_idle;
@@ -2209,6 +2209,25 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * Return 1 if @p has PF_WAKE_UP_IDLE set in task_struct.flags,
 * 0 otherwise.
 */
u32 sched_get_wake_up_idle(struct task_struct *p)
{
	return (p->flags & PF_WAKE_UP_IDLE) ? 1 : 0;
}

/*
 * Set (non-zero @wake_up_idle) or clear (zero) PF_WAKE_UP_IDLE on @p.
 * Always returns 0.
 */
int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
{
	if (wake_up_idle)
		p->flags |= PF_WAKE_UP_IDLE;
	else
		p->flags &= ~PF_WAKE_UP_IDLE;

	return 0;
}

/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.