Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 441fed3e authored by Pavankumar Kondeti
Browse files

sched/walt: Improve the scheduler



This change is for general scheduler improvements.

Change-Id: I80606a09c3d09c65b7bcdf3940bee5e155d6318c
Co-developed-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent fbad67d3
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 */

#include "sched.h"
@@ -78,12 +78,10 @@ static void sched_full_throttle_boost_exit(void)
/*
 * Enter CONSERVATIVE_BOOST: apply the boosted cgroup settings, then raise
 * the task-filter threshold to the boost-specific minimum utilization so
 * small tasks are exempted from boost placement (see task_boost_policy()).
 */
static void sched_conservative_boost_enter(void)
{
	/* NOTE(review): presumably adjusts per-cgroup boost knobs — defined elsewhere. */
	update_cgroup_boost_settings();
	sched_task_filter_util = sysctl_sched_min_task_util_for_boost;
}

/*
 * Exit CONSERVATIVE_BOOST: restore the task-filter threshold to the
 * colocation minimum utilization, then undo the cgroup boost settings.
 * Mirrors sched_conservative_boost_enter() in reverse order.
 */
static void sched_conservative_boost_exit(void)
{
	sched_task_filter_util = sysctl_sched_min_task_util_for_colocation;
	/* NOTE(review): presumably reverts update_cgroup_boost_settings() — defined elsewhere. */
	restore_cgroup_boost_settings();
}

+0 −1
Original line number Diff line number Diff line
@@ -175,7 +175,6 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = {
unsigned int sysctl_sched_min_task_util_for_boost = 51;
/* 0.68ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
unsigned int sched_task_filter_util = 35;
__read_mostly unsigned int sysctl_sched_prefer_spread;
#endif
unsigned int sched_small_task_threshold = 102;
+1 −2
Original line number Diff line number Diff line
@@ -2800,7 +2800,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
#define	CPU_RESERVED	1

extern enum sched_boost_policy boost_policy;
extern unsigned int sched_task_filter_util;
static inline enum sched_boost_policy sched_boost_policy(void)
{
	return boost_policy;
@@ -2926,7 +2925,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
		 * under conservative boost.
		 */
		if (sched_boost() == CONSERVATIVE_BOOST &&
				task_util(p) <= sched_task_filter_util)
			task_util(p) <= sysctl_sched_min_task_util_for_boost)
			policy = SCHED_BOOST_NONE;
	}

+1 −1
Original line number Diff line number Diff line
@@ -1862,7 +1862,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
	p->ravg.pred_demand = pred_demand;
	p->ravg.pred_demand_scaled = pred_demand_scaled;

	if (demand_scaled > sched_task_filter_util)
	if (demand_scaled > sysctl_sched_min_task_util_for_colocation)
		p->unfilter = sysctl_sched_task_unfilter_period;
	else
		if (p->unfilter)