Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d2c3f6b authored by Pavankumar Kondeti
Browse files

sched/core_ctl: Improve the scheduler



Extend the core_ctl notifier payload with per-cluster task-placement
utilization and current capacity percentages: replace
walt_get_default_coloc_group_load() with walt_fill_ta_data(), which fills
coloc_load_pct, ta_util_pct[] and cur_cap_pct[] in core_ctl_notif_data,
and add a core_ctl_notif_data tracepoint that records the values sent to
the notifier chain.

Change-Id: I05a6645db80cc04993b45d7ec25a3fb7a112cf3e
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent bf0b314d
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
 */

#ifndef __CORE_CTL_H
#define __CORE_CTL_H

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 3

/*
 * Payload delivered to core_ctl notifier-chain clients (see
 * core_ctl_call_notifier()).  All values are snapshots taken at
 * notification time.
 */
struct core_ctl_notif_data {
	unsigned int nr_big;		/* count of "big" tasks (from last_nr_big) */
	unsigned int coloc_load_pct;	/* default colocation group load, in percent
					 * of a scheduler window (filled by
					 * walt_fill_ta_data()) */
	unsigned int ta_util_pct[MAX_CLUSTERS];	/* per-cluster aggregated group-load
						 * utilization, percent of window */
	unsigned int cur_cap_pct[MAX_CLUSTERS];	/* per-cluster current frequency
						 * capacity, percent of max (1024) */
};

#ifdef CONFIG_SCHED_CORE_CTL
+27 −0
Original line number Diff line number Diff line
@@ -1425,6 +1425,33 @@ TRACE_EVENT(core_ctl_update_nr_need,
		__entry->nrrun, __entry->max_nr, __entry->nr_prev_assist)
);

/*
 * Tracepoint for the data handed to the core_ctl notifier chain.
 * Emitted from core_ctl_call_notifier() just before the chain runs,
 * so trace consumers see exactly what notifier clients receive.
 * The ta_util/cur_cap arrays are copied by value (MAX_CLUSTERS = 3
 * entries each), matching the three-slot format string below.
 */
TRACE_EVENT(core_ctl_notif_data,

	TP_PROTO(u32 nr_big, u32 ta_load, u32 *ta_util, u32 *cur_cap),

	TP_ARGS(nr_big, ta_load, ta_util, cur_cap),

	TP_STRUCT__entry(
		__field(u32, nr_big)
		__field(u32, ta_load)
		__array(u32, ta_util, MAX_CLUSTERS)
		__array(u32, cur_cap, MAX_CLUSTERS)
	),

	TP_fast_assign(
		__entry->nr_big = nr_big;
		__entry->ta_load = ta_load;
		/* snapshot the per-cluster arrays; callers pass live buffers */
		memcpy(__entry->ta_util, ta_util, MAX_CLUSTERS * sizeof(u32));
		memcpy(__entry->cur_cap, cur_cap, MAX_CLUSTERS * sizeof(u32));
	),

	TP_printk("nr_big=%u ta_load=%u ta_util=(%u %u %u) cur_cap=(%u %u %u)",
		  __entry->nr_big, __entry->ta_load,
		  __entry->ta_util[0], __entry->ta_util[1],
		  __entry->ta_util[2], __entry->cur_cap[0],
		  __entry->cur_cap[1], __entry->cur_cap[2])
);

/*
 * Tracepoint for schedtune_tasks_update
 */
+5 −6
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"core_ctl: " fmt
@@ -20,9 +20,6 @@
#include "sched.h"
#include "walt.h"

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 3

struct cluster_data {
	bool inited;
	unsigned int min_cpus;
@@ -915,7 +912,7 @@ void core_ctl_notifier_unregister(struct notifier_block *n)

static void core_ctl_call_notifier(void)
{
	struct core_ctl_notif_data ndata;
	struct core_ctl_notif_data ndata = {0};
	struct notifier_block *nb;

	/*
@@ -930,7 +927,9 @@ static void core_ctl_call_notifier(void)
		return;

	ndata.nr_big = last_nr_big;
	ndata.coloc_load_pct = walt_get_default_coloc_group_load();
	walt_fill_ta_data(&ndata);
	trace_core_ctl_notif_data(ndata.nr_big, ndata.coloc_load_pct,
			ndata.ta_util_pct, ndata.cur_cap_pct);

	atomic_notifier_call_chain(&core_ctl_notifier, 0, &ndata);
}
+22 −5
Original line number Diff line number Diff line
@@ -7,7 +7,6 @@
#include <linux/cpufreq.h>
#include <linux/list_sort.h>
#include <linux/jiffies.h>
#include <linux/sched/core_ctl.h>
#include <linux/sched/stat.h>
#include <trace/events/sched.h>
#include "sched.h"
@@ -3185,20 +3184,22 @@ void walt_rotation_checkpoint(int nr_big)
	walt_rotation_enabled = nr_big >= num_possible_cpus();
}

unsigned int walt_get_default_coloc_group_load(void)
void walt_fill_ta_data(struct core_ctl_notif_data *data)
{
	struct related_thread_group *grp;
	unsigned long flags;
	u64 total_demand = 0, wallclock;
	struct task_struct *p;
	int min_cap_cpu, scale = 1024;
	struct sched_cluster *cluster;
	int i = 0;

	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);

	raw_spin_lock_irqsave(&grp->lock, flags);
	if (list_empty(&grp->tasks)) {
		raw_spin_unlock_irqrestore(&grp->lock, flags);
		return 0;
		goto fill_util;
	}

	wallclock = sched_ktime_clock();
@@ -3224,8 +3225,24 @@ unsigned int walt_get_default_coloc_group_load(void)
	if (min_cap_cpu != -1)
		scale = arch_scale_cpu_capacity(NULL, min_cap_cpu);

	return div64_u64(total_demand * 1024 * 100,
	data->coloc_load_pct = div64_u64(total_demand * 1024 * 100,
			       (u64)sched_ravg_window * scale);

fill_util:
	for_each_sched_cluster(cluster) {
		int fcpu = cluster_first_cpu(cluster);

		if (i == MAX_CLUSTERS)
			break;

		scale = arch_scale_cpu_capacity(NULL, fcpu);
		data->ta_util_pct[i] = div64_u64(cluster->aggr_grp_load * 1024 *
				       100, (u64)sched_ravg_window * scale);

		scale = arch_scale_freq_capacity(fcpu);
		data->cur_cap_pct[i] = (scale * 100)/1024;
		i++;
	}
}

int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
+2 −5
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#ifdef CONFIG_SCHED_WALT

#include <linux/sched/sysctl.h>
#include <linux/sched/core_ctl.h>

#define MAX_NR_CLUSTERS			3

@@ -297,7 +298,7 @@ static inline void walt_update_last_enqueue(struct task_struct *p)
extern void walt_rotate_work_init(void);
extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;
extern unsigned int walt_get_default_coloc_group_load(void);
extern void walt_fill_ta_data(struct core_ctl_notif_data *data);

extern __read_mostly bool sched_freq_aggr_en;
static inline void walt_enable_frequency_aggregation(bool enable)
@@ -312,10 +313,6 @@ static inline void walt_sched_init_rq(struct rq *rq) { }
/*
 * No-op stubs — presumably the !CONFIG_SCHED_WALT branch of walt.h
 * (the #else is outside this view; TODO confirm).  They let callers
 * compile unchanged when WALT is disabled.
 */
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
static inline unsigned int walt_get_default_coloc_group_load(void)
{
	return 0;
}

static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				int event, u64 wallclock, u64 irqtime) { }