Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 99a5b57c authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: core_ctl: Add core_ctl_notifier_register()"

parents d53d3a92 3206cd0e
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -14,14 +14,23 @@
#ifndef __CORE_CTL_H
#define __CORE_CTL_H

/*
 * Snapshot of core_ctl statistics handed to callbacks registered via
 * core_ctl_notifier_register(); filled in by core_ctl_call_notifier().
 */
struct core_ctl_notif_data {
	unsigned int nr_big;		/* last big-task average (last_nr_big) */
	unsigned int coloc_load_pct;	/* default coloc group load, in percent */
};

#ifdef CONFIG_SCHED_CORE_CTL
void core_ctl_check(u64 wallclock);
int core_ctl_set_boost(bool boost);
void core_ctl_notifier_register(struct notifier_block *n);
void core_ctl_notifier_unregister(struct notifier_block *n);
#else
/* CONFIG_SCHED_CORE_CTL disabled: all entry points collapse to no-ops. */
static inline void core_ctl_check(u64 wallclock) {}
static inline int core_ctl_set_boost(bool boost)
{
	/* Nothing to boost without core control; report success. */
	return 0;
}
static inline void core_ctl_notifier_register(struct notifier_block *n) {}
static inline void core_ctl_notifier_unregister(struct notifier_block *n) {}
#endif
#endif
+39 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/sched/rt.h>
#include <linux/syscore_ops.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/core_ctl.h>

#include <trace/events/sched.h>
#include "sched.h"
@@ -81,6 +82,9 @@ static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

ATOMIC_NOTIFIER_HEAD(core_ctl_notifier);
static unsigned int last_nr_big;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
static void cpuset_next(struct cluster_data *cluster);

@@ -660,6 +664,7 @@ static void update_running_avg(void)
	}
	spin_unlock_irqrestore(&state_lock, flags);

	last_nr_big = big_avg;
	walt_rotation_checkpoint(big_avg);
}

@@ -851,6 +856,38 @@ int core_ctl_set_boost(bool boost)
}
EXPORT_SYMBOL(core_ctl_set_boost);

/**
 * core_ctl_notifier_register - subscribe to core_ctl stat updates.
 * @n: notifier block whose callback receives a struct core_ctl_notif_data *.
 *
 * Callbacks are invoked from core_ctl_check() via an atomic notifier
 * chain, so they must be safe to run in atomic context.
 */
void core_ctl_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&core_ctl_notifier, n);
}

/**
 * core_ctl_notifier_unregister - remove a previously registered listener.
 * @n: notifier block passed earlier to core_ctl_notifier_register().
 */
void core_ctl_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&core_ctl_notifier, n);
}

/*
 * Publish the latest core_ctl stats (big-task average and default
 * colocation group load) to listeners registered on core_ctl_notifier.
 * Called from core_ctl_check().
 */
static void core_ctl_call_notifier(void)
{
	struct core_ctl_notif_data ndata;

	/*
	 * Don't bother querying the stats when the notifier chain is
	 * empty.  rcu_access_pointer() only tests the pointer value and
	 * never dereferences it, so no RCU read-side critical section is
	 * required for this check (unlike the previous
	 * rcu_read_lock()/rcu_dereference_raw() dance, which also ended
	 * the read section before the pointer was inspected).
	 */
	if (!rcu_access_pointer(core_ctl_notifier.head))
		return;

	ndata.nr_big = last_nr_big;
	ndata.coloc_load_pct = walt_get_default_coloc_group_load();

	atomic_notifier_call_chain(&core_ctl_notifier, 0, &ndata);
}

void core_ctl_check(u64 window_start)
{
	int cpu;
@@ -886,6 +923,8 @@ void core_ctl_check(u64 window_start)
		if (eval_need(cluster))
			wake_up_core_ctl_thread(cluster);
	}

	core_ctl_call_notifier();
}

static void move_cpu_lru(struct cpu_data *cpu_data)
+43 −0
Original line number Diff line number Diff line
@@ -3274,6 +3274,49 @@ void walt_rotation_checkpoint(int nr_big)
	walt_rotation_enabled = nr_big >= num_possible_cpus();
}

/*
 * Report the aggregate demand of the default colocation group as a
 * percentage of the minimum-capacity CPU:
 *
 *	P = demand_sum / sched_ravg_window * 1024 / capacity * 100
 *
 * Returns 0 when the group has no member tasks.
 */
unsigned int walt_get_default_coloc_group_load(void)
{
	struct related_thread_group *grp =
		lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
	struct task_struct *p;
	unsigned long flags;
	u64 demand_sum = 0;
	u64 now;
	int cpu, capacity = 1024;

	raw_spin_lock_irqsave(&grp->lock, flags);
	if (list_empty(&grp->tasks)) {
		raw_spin_unlock_irqrestore(&grp->lock, flags);
		return 0;
	}

	now = sched_ktime_clock();

	list_for_each_entry(p, &grp->tasks, grp_list) {
		/* Ignore tasks whose window stats have gone stale. */
		if (p->ravg.mark_start <
		    now - (sched_ravg_window * sched_ravg_hist_size))
			continue;

		demand_sum += p->ravg.coloc_demand;
	}

	raw_spin_unlock_irqrestore(&grp->lock, flags);

	/* Scale against the lowest-capacity CPU, if one is known. */
	cpu = this_rq()->rd->min_cap_orig_cpu;
	if (cpu != -1)
		capacity = arch_scale_cpu_capacity(NULL, cpu);

	return div64_u64(demand_sum * 1024 * 100,
			 (u64)sched_ravg_window * capacity);
}

int walt_proc_update_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
+5 −0
Original line number Diff line number Diff line
@@ -304,6 +304,7 @@ static inline void walt_update_last_enqueue(struct task_struct *p)
extern void walt_rotate_work_init(void);
extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;
extern unsigned int walt_get_default_coloc_group_load(void);

#else /* CONFIG_SCHED_WALT */

@@ -312,6 +313,10 @@ static inline void walt_sched_init_rq(struct rq *rq) { }
/* !CONFIG_SCHED_WALT: WALT hooks compile away to no-ops. */
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
static inline unsigned int walt_get_default_coloc_group_load(void)
{
	/* No colocation tracking without WALT; report zero load. */
	return 0;
}

static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				int event, u64 wallclock, u64 irqtime) { }