Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cc64f132 authored by Pavankumar Kondeti
Browse files

sched: Remove the unused BOOST_KICK machinery



Remove the per-rq BOOST_KICK flag together with the boost_kick(), boost_kick_cpus(), got_boost_kick() and clear_boost_kick() helpers, and drop their call sites in _sched_set_boost(), scheduler_ipi(), walt_check_for_rotation() and clear_walt_request().

Change-Id: If04d5d50971a18652a0f3dde22dca8725d20a632
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent ca81ce5a
Loading
Loading
Loading
Loading
+2 −44
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */

#include "sched.h"
@@ -22,45 +22,6 @@ static DEFINE_MUTEX(boost_mutex);
static unsigned int freq_aggr_threshold_backup;
static int boost_refcount[MAX_NUM_BOOST_TYPE];

/*
 * Request a reschedule IPI on @cpu for scheduler boost.
 *
 * BOOST_KICK in rq->walt_flags acts as a latch: only the caller that
 * flips the bit from 0 to 1 sends the IPI, so concurrent kick requests
 * for the same CPU collapse into a single interrupt.
 */
static inline void boost_kick(int cpu)
{
	struct rq *target_rq = cpu_rq(cpu);

	if (test_and_set_bit(BOOST_KICK, &target_rq->walt_flags))
		return;

	smp_send_reschedule(cpu);
}

static void boost_kick_cpus(void)
{
	int i;
	struct cpumask kick_mask;

	if (boost_policy != SCHED_BOOST_ON_BIG)
		return;

	cpumask_andnot(&kick_mask, cpu_online_mask, cpu_isolated_mask);

	for_each_cpu(i, &kick_mask) {
		if (cpu_capacity(i) != max_capacity)
			boost_kick(i);
	}
}

/*
 * Return non-zero when a boost kick is pending on the local CPU,
 * i.e. BOOST_KICK is set in this CPU's rq->walt_flags.
 */
int got_boost_kick(void)
{
	struct rq *rq = cpu_rq(smp_processor_id());

	return test_bit(BOOST_KICK, &rq->walt_flags);
}

void clear_boost_kick(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	clear_bit(BOOST_KICK, &rq->walt_flags);
}

/*
 * Scheduler boost type and boost policy might at first seem unrelated,
 * however, there exists a connection between them that will allow us
@@ -126,17 +87,14 @@ static void _sched_set_boost(int type)
		if (boost_refcount[FULL_THROTTLE_BOOST] == 1) {
			core_ctl_set_boost(true);
			restore_cgroup_boost_settings();
			boost_kick_cpus();
		}
		break;

	case CONSERVATIVE_BOOST:
	    boost_refcount[CONSERVATIVE_BOOST]++;
		if ((boost_refcount[CONSERVATIVE_BOOST] == 1) &&
				!boost_refcount[FULL_THROTTLE_BOOST]) {
				!boost_refcount[FULL_THROTTLE_BOOST])
			update_cgroup_boost_settings();
			boost_kick_cpus();
		}
		break;

	case RESTRAINED_BOOST:
+1 −10
Original line number Diff line number Diff line
@@ -1802,18 +1802,9 @@ void scheduler_ipi(void)
	 */
	preempt_fold_need_resched();

	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
		&& !got_boost_kick())
	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	if (got_boost_kick()) {
		struct rq *rq = cpu_rq(cpu);

		if (rq->curr->sched_class == &fair_sched_class)
			check_for_migration(rq, rq->curr);
		clear_boost_kick(cpu);
	}

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
+0 −3
Original line number Diff line number Diff line
@@ -12154,9 +12154,6 @@ static void walt_check_for_rotation(struct rq *src_rq)
	if (!walt_rotation_enabled)
		return;

	if (got_boost_kick())
		return;

	if (!is_min_capacity_cpu(src_cpu))
		return;

+0 −10
Original line number Diff line number Diff line
@@ -2984,7 +2984,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

#define	BOOST_KICK	0
#define	CPU_RESERVED	1

extern int sched_boost(void);
@@ -3064,8 +3063,6 @@ extern unsigned long thermal_cap(int cpu);

extern void clear_walt_request(int cpu);

extern int got_boost_kick(void);
extern void clear_boost_kick(int cpu);
extern enum sched_boost_policy sched_boost_policy(void);
extern void sched_boost_parse_dt(void);
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
@@ -3230,13 +3227,6 @@ static inline int is_reserved(int cpu)
	return 0;
}

/*
 * Stub: always reports "no boost kick pending".  Lives in the inline-stub
 * section of the header — presumably the build configuration without WALT
 * boost support; TODO confirm against the surrounding #ifdef.
 */
static inline int got_boost_kick(void)
{
	return 0;
}

/* Stub counterpart of clear_boost_kick() for builds without boost kicks. */
static inline void clear_boost_kick(int cpu) { }

static inline enum sched_boost_policy sched_boost_policy(void)
{
	return SCHED_BOOST_NONE;
+1 −2
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/syscore_ops.h>
@@ -405,7 +405,6 @@ void clear_walt_request(int cpu)
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	clear_boost_kick(cpu);
	clear_reserved(cpu);
	if (rq->push_task) {
		struct task_struct *push_task = NULL;