Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7ed681a9 authored by blong, committed by Gerrit - the friendly Code Review server
Browse files

sched: Add task boost feature



This change adds a per-task scheduler boost interface: /proc/<pid>/sched_boost and /proc/<pid>/sched_boost_period_ms let userspace boost an individual task, optionally for a limited time window, and set_task_boost() provides the same control from kernel code. Task placement (task_fits_max() and find_best_target()) honours the per-task boost.

Change-Id: I3fc3976316350f9b2c392b8484d5390240a04782
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
Signed-off-by: blong <blong@codeaurora.org>
parent f84134c2
Loading
Loading
Loading
Loading
+112 −0
Original line number Diff line number Diff line
@@ -2953,6 +2953,116 @@ static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */

/*
 * Read handler for /proc/<pid>/sched_boost: reports the task's current
 * boost level as a decimal integer followed by a newline.
 * Returns -ESRCH when the task no longer exists.
 */
static ssize_t proc_sched_task_boost_read(struct file *file,
			   char __user *buf, size_t count, loff_t *ppos)
{
	struct task_struct *p = get_proc_task(file_inode(file));
	char kbuf[PROC_NUMBUF];
	size_t n;
	int boost;

	if (!p)
		return -ESRCH;

	/* snapshot the value, then drop the task reference */
	boost = p->boost;
	put_task_struct(p);

	n = snprintf(kbuf, sizeof(kbuf), "%d\n", boost);
	return simple_read_from_buffer(buf, count, ppos, kbuf, n);
}

/*
 * Write handler for /proc/<pid>/sched_boost.
 * Accepts an integer in [0, 2]; 0 disables the boost and also clears
 * any pending boost period.  Returns the consumed byte count on
 * success or a negative errno (-ESRCH/-EFAULT/-EINVAL/kstrtoint error).
 */
static ssize_t proc_sched_task_boost_write(struct file *file,
		   const char __user *buf, size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	int sched_boost;
	int err;

	if (!task)
		return -ESRCH;
	/* zero-fill and bound the copy so the buffer stays NUL-terminated */
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &sched_boost);
	if (err)
		goto out;
	/* only boost levels 0 (off), 1 and 2 are defined */
	if (sched_boost < 0 || sched_boost > 2) {
		err = -EINVAL;
		goto out;
	}

	task->boost = sched_boost;
	/* turning boost off also cancels any remaining timed boost window */
	if (sched_boost == 0)
		task->boost_period = 0;
out:
	put_task_struct(task);
	return err < 0 ? err : count;
}

/*
 * Read handler for /proc/<pid>/sched_boost_period_ms: reports the
 * configured boost period, converted from nanoseconds (the unit it is
 * stored in) to milliseconds.  Returns -ESRCH when the task is gone.
 */
static ssize_t proc_sched_task_boost_period_read(struct file *file,
			   char __user *buf, size_t count, loff_t *ppos)
{
	struct task_struct *p = get_proc_task(file_inode(file));
	char kbuf[PROC_NUMBUF];
	u64 period_ms;
	size_t n;

	if (!p)
		return -ESRCH;

	/* boost_period is kept in ns; report it in ms */
	period_ms = div64_ul(p->boost_period, 1000000UL);
	put_task_struct(p);

	n = snprintf(kbuf, sizeof(kbuf), "%llu\n", period_ms);
	return simple_read_from_buffer(buf, count, ppos, kbuf, n);
}

/*
 * Write handler for /proc/<pid>/sched_boost_period_ms.
 * Accepts an unsigned period in milliseconds; a non-zero period is only
 * valid while the task's boost level is non-zero.  Returns the consumed
 * byte count on success or a negative errno.
 */
static ssize_t proc_sched_task_boost_period_write(struct file *file,
		   const char __user *buf, size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	unsigned int sched_boost_period;
	int err;

	/*
	 * Fix: get_proc_task() returns NULL for an exited task; the
	 * original handler went on to dereference task->boost and call
	 * put_task_struct(NULL).  Match the sibling handlers and bail
	 * out with -ESRCH, as proc_sched_task_boost_write() does.
	 */
	if (!task)
		return -ESRCH;
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtouint(strstrip(buffer), 0, &sched_boost_period);
	if (err)
		goto out;
	if (task->boost == 0 && sched_boost_period) {
		/* setting boost period without boost is invalid */
		err = -EINVAL;
		goto out;
	}

	/* store the period in nanoseconds, matching sched_clock() */
	task->boost_period = (u64)sched_boost_period * 1000 * 1000;
	task->boost_expires = sched_clock() + task->boost_period;
out:
	put_task_struct(task);
	return err < 0 ? err : count;
}

/* File ops backing /proc/<pid>/sched_boost (per-task boost level). */
static const struct file_operations proc_task_boost_enabled_operations = {
	.read       = proc_sched_task_boost_read,
	.write      = proc_sched_task_boost_write,
	.llseek     = generic_file_llseek,
};

/* File ops backing /proc/<pid>/sched_boost_period_ms (boost window, ms). */
static const struct file_operations proc_task_boost_period_operations = {
	.read		= proc_sched_task_boost_period_read,
	.write		= proc_sched_task_boost_period_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_USER_NS
static int proc_id_map_open(struct inode *inode, struct file *file,
	const struct seq_operations *seq_ops)
@@ -3131,6 +3241,8 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SCHED_WALT
	REG("sched_init_task_load", 00644, proc_pid_sched_init_task_load_operations),
	REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
	REG("sched_boost", 0666,  proc_task_boost_enabled_operations),
	REG("sched_boost_period_ms", 0666, proc_task_boost_period_operations),
#endif
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+6 −1
Original line number Diff line number Diff line
@@ -273,6 +273,7 @@ extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
extern int set_task_boost(int boost, u64 period);

/**
 * struct prev_cputime - snapshot of system and user cputime
@@ -783,6 +784,10 @@ struct task_struct {
	struct sched_entity		se;
	struct sched_rt_entity		rt;
	u64				 last_sleep_ts;

	int				boost;
	u64				boost_period;
	u64				boost_expires;
#ifdef CONFIG_SCHED_WALT
	struct ravg ravg;
	/*
+23 −0
Original line number Diff line number Diff line
@@ -2258,6 +2258,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;
	p->last_sleep_ts		= 0;
	p->boost			= 0;
	p->boost_expires		= 0;
	p->boost_period			= 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -7790,6 +7793,26 @@ const u32 sched_prio_to_wmult[40] = {

#undef CREATE_TRACE_POINTS

/**
 * set_task_boost() - set the calling task's scheduler boost level.
 * @boost:  boost level; must be 0 (off), 1 or 2.
 * @period: boost duration in milliseconds; only used when @boost is
 *          non-zero.
 *
 * Return: 0 on success, -EINVAL when @boost is out of range.
 */
int set_task_boost(int boost, u64 period)
{
	struct task_struct *p = current;

	if (boost < 0 || boost > 2)
		return -EINVAL;

	if (!boost) {
		/* disable: clear the level and any timed window */
		p->boost = 0;
		p->boost_expires = 0;
		p->boost_period = 0;
		return 0;
	}

	p->boost = boost;
	/* keep the period in ns, the unit used by sched_clock() */
	p->boost_period = (u64)period * 1000 * 1000;
	p->boost_expires = sched_clock() + p->boost_period;
	return 0;
}

#ifdef CONFIG_SCHED_WALT
/*
 * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
+23 −5
Original line number Diff line number Diff line
@@ -3119,6 +3119,18 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
	}
}

/*
 * Return the task's effective boost level, first lazily expiring a
 * time-limited boost whose window (boost_expires) has already passed.
 */
static inline int per_task_boost(struct task_struct *p)
{
	/* a non-zero boost_period means the boost is time-limited */
	if (p->boost_period && sched_clock() > p->boost_expires) {
		p->boost_period = 0;
		p->boost_expires = 0;
		p->boost = 0;
	}
	return p->boost;
}

#ifdef CONFIG_SMP
#ifdef CONFIG_FAIR_GROUP_SCHED
/**
@@ -3839,14 +3851,20 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
{
	unsigned long capacity = capacity_orig_of(cpu);
	unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
	unsigned long task_boost = per_task_boost(p);

	if (capacity == max_capacity)
		return true;

	if ((task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
			schedtune_task_boost(p) > 0) &&
			is_min_capacity_cpu(cpu))
	if (is_min_capacity_cpu(cpu)) {
		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
			task_boost > 0 ||
			schedtune_task_boost(p) > 0)
			return false;
	} else { /* mid cap cpu */
		if (task_boost > 1)
			return false;
	}

	return task_fits_capacity(p, capacity, cpu);
}
@@ -6761,7 +6779,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
	unsigned long best_active_cuml_util = ULONG_MAX;
	unsigned long best_idle_cuml_util = ULONG_MAX;
	bool prefer_idle = schedtune_prefer_idle(p);
	bool boosted = schedtune_task_boost(p) > 0;
	bool boosted = schedtune_task_boost(p) > 0 || per_task_boost(p) > 0;
	/* Initialise with deepest possible cstate (INT_MAX) */
	int shallowest_idle_cstate = INT_MAX;
	struct sched_domain *start_sd;