Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1d462075 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: Improve the scheduler"

parents 06375914 b2cc9bc5
Loading
Loading
Loading
Loading
+7 −7
Original line number Diff line number Diff line
@@ -2913,7 +2913,7 @@ static ssize_t proc_sched_task_boost_read(struct file *file,

	if (!task)
		return -ESRCH;
	sched_boost = task->boost;
	sched_boost = task->wts.boost;
	put_task_struct(task);
	len = scnprintf(buffer, sizeof(buffer), "%d\n", sched_boost);
	return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -2945,9 +2945,9 @@ static ssize_t proc_sched_task_boost_write(struct file *file,
		goto out;
	}

	task->boost = sched_boost;
	task->wts.boost = sched_boost;
	if (sched_boost == 0)
		task->boost_period = 0;
		task->wts.boost_period = 0;
out:
	put_task_struct(task);
	return err < 0 ? err : count;
@@ -2963,7 +2963,7 @@ static ssize_t proc_sched_task_boost_period_read(struct file *file,

	if (!task)
		return -ESRCH;
	sched_boost_period_ms = div64_ul(task->boost_period, 1000000UL);
	sched_boost_period_ms = div64_ul(task->wts.boost_period, 1000000UL);
	put_task_struct(task);
	len = snprintf(buffer, sizeof(buffer), "%llu\n", sched_boost_period_ms);
	return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -2991,14 +2991,14 @@ static ssize_t proc_sched_task_boost_period_write(struct file *file,
	err = kstrtouint(strstrip(buffer), 0, &sched_boost_period);
	if (err)
		goto out;
	if (task->boost == 0 && sched_boost_period) {
	if (task->wts.boost == 0 && sched_boost_period) {
		/* setting boost period without boost is invalid */
		err = -EINVAL;
		goto out;
	}

	task->boost_period = (u64)sched_boost_period * 1000 * 1000;
	task->boost_expires = sched_clock() + task->boost_period;
	task->wts.boost_period = (u64)sched_boost_period * 1000 * 1000;
	task->wts.boost_expires = sched_clock() + task->wts.boost_period;
out:
	put_task_struct(task);
	return err < 0 ? err : count;
+34 −36
Original line number Diff line number Diff line
@@ -556,7 +556,7 @@ static inline int hh_vcpu_populate_affinity_info(u32 cpu_index, u64 cap_id)
#endif /* CONFIG_QCOM_HYP_CORE_CTL */

#ifdef CONFIG_SCHED_WALT
extern void sched_exit(struct task_struct *p);
extern void walt_task_dead(struct task_struct *p);
extern int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void
@@ -569,8 +569,7 @@ extern void walt_update_cluster_topology(void);
#define RAVG_HIST_SIZE_MAX  5
#define NUM_BUSY_BUCKETS 10

/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
struct walt_task_struct {
	/*
	 * 'mark_start' marks the beginning of an event (task waking up, task
	 * starting to execute, task being preempted) within a window
@@ -616,9 +615,24 @@ struct ravg {
	u16				pred_demand_scaled;
	u64				active_time;
	u64				last_win_size;
	int				boost;
	bool				wake_up_idle;
	bool				misfit;
	u64				boost_period;
	u64				boost_expires;
	u64				last_sleep_ts;
	u32				init_load_pct;
	u32				unfilter;
	u64				last_wake_ts;
	u64				last_enqueued_ts;
	struct walt_related_thread_group __rcu	*grp;
	struct list_head		grp_list;
	u64				cpu_cycles;
	cpumask_t			cpus_requested;
};

#else
static inline void sched_exit(struct task_struct *p) { }
static inline void walt_task_dead(struct task_struct *p) { }

static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
@@ -841,20 +855,7 @@ struct task_struct {
	struct sched_rt_entity		rt;

#ifdef CONFIG_SCHED_WALT
	int boost;
	u64 boost_period;
	u64 boost_expires;
	u64 last_sleep_ts;
	bool wake_up_idle;
	struct ravg ravg;
	u32 init_load_pct;
	u64 last_wake_ts;
	u64 last_enqueued_ts;
	struct related_thread_group *grp;
	struct list_head grp_list;
	u64 cpu_cycles;
	bool misfit;
	u32 unfilter;
	struct walt_task_struct		wts;
#endif

#ifdef CONFIG_CGROUP_SCHED
@@ -882,9 +883,6 @@ struct task_struct {
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			cpus_mask;
#ifdef CONFIG_SCHED_WALT
	cpumask_t			cpus_requested;
#endif

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
@@ -2214,19 +2212,19 @@ const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
#define PF_WAKE_UP_IDLE	1
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
{
	return p->wake_up_idle;
	return p->wts.wake_up_idle;
}

static inline int sched_set_wake_up_idle(struct task_struct *p,
						int wake_up_idle)
{
	p->wake_up_idle = !!wake_up_idle;
	p->wts.wake_up_idle = !!wake_up_idle;
	return 0;
}

static inline void set_wake_up_idle(bool enabled)
{
	current->wake_up_idle = enabled;
	current->wts.wake_up_idle = enabled;
}
#else
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
+3 −3
Original line number Diff line number Diff line
@@ -475,11 +475,11 @@ TRACE_EVENT(sched_load_balance_skip_tasks,
	TP_fast_assign(
		__entry->scpu           = scpu;
		__entry->src_util_cum   =
					cpu_rq(scpu)->cum_window_demand_scaled;
				cpu_rq(scpu)->wrq.cum_window_demand_scaled;
		__entry->grp_type       = grp_type;
		__entry->dcpu           = dcpu;
		__entry->dst_util_cum   =
					cpu_rq(dcpu)->cum_window_demand_scaled;
				cpu_rq(dcpu)->wrq.cum_window_demand_scaled;
		__entry->pid            = pid;
		__entry->affinity       = affinity;
		__entry->task_util      = task_util;
@@ -1044,7 +1044,7 @@ TRACE_EVENT(sched_task_util,
		__entry->rtg_skip_min           = rtg_skip_min;
		__entry->start_cpu              = start_cpu;
#ifdef CONFIG_SCHED_WALT
		__entry->unfilter               = p->unfilter;
		__entry->unfilter               = p->wts.unfilter;
#else
		__entry->unfilter               = 0;
#endif
+4 −4
Original line number Diff line number Diff line
@@ -75,7 +75,10 @@ struct task_struct init_task
	.cpus_mask	= CPU_MASK_ALL,
	.nr_cpus_allowed= NR_CPUS,
#ifdef CONFIG_SCHED_WALT
	.wts		= {
		.cpus_requested	= CPU_MASK_ALL,
		.wake_up_idle	= false,
	},
#endif
	.mm		= NULL,
	.active_mm	= &init_mm,
@@ -95,9 +98,6 @@ struct task_struct init_task
#endif
#ifdef CONFIG_CGROUP_SCHED
	.sched_task_group = &root_task_group,
#endif
#ifdef CONFIG_SCHED_WALT
	.wake_up_idle	= false,
#endif
	.ptraced	= LIST_HEAD_INIT(init_task.ptraced),
	.ptrace_entry	= LIST_HEAD_INIT(init_task.ptrace_entry),
+2 −2
Original line number Diff line number Diff line
@@ -1017,8 +1017,8 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
#ifdef CONFIG_SCHED_WALT
	int ret;

	if (cpumask_subset(&p->cpus_requested, cs->cpus_allowed)) {
		ret = set_cpus_allowed_ptr(p, &p->cpus_requested);
	if (cpumask_subset(&p->wts.cpus_requested, cs->cpus_allowed)) {
		ret = set_cpus_allowed_ptr(p, &p->wts.cpus_requested);
		if (!ret)
			return ret;
	}
Loading