Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 930fb4f5 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: improve trace prints"

parents 5822da10 2dba40ba
Loading
Loading
Loading
Loading
+18 −6 include/trace/events/sched.h
Original line number	Diff line number	Diff line
@@ -657,6 +657,10 @@ TRACE_EVENT(sched_cpu_util,
		__field(unsigned int, capacity_orig		)
		__field(int, idle_state				)
		__field(u64, irqload				)
		__field(int, online				)
		__field(int, isolated				)
		__field(int, reserved				)
		__field(int, high_irq_load			)
	),

	TP_fast_assign(
@@ -669,10 +673,14 @@ TRACE_EVENT(sched_cpu_util,
		__entry->capacity_orig		= capacity_orig_of(cpu);
		__entry->idle_state		= idle_get_state_idx(cpu_rq(cpu));
		__entry->irqload		= sched_irqload(cpu);
		__entry->online			= cpu_online(cpu);
		__entry->isolated		= cpu_isolated(cpu);
		__entry->reserved		= is_reserved(cpu);
		__entry->high_irq_load          = sched_cpu_high_irqload(cpu);
	),

	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu",
		__entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload)
	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u isolated=%u reserved=%u high_irq_load=%u",
		__entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload, __entry->online, __entry->isolated, __entry->reserved, __entry->high_irq_load)
);

TRACE_EVENT(sched_energy_diff,
@@ -1637,10 +1645,11 @@ TRACE_EVENT(sched_find_best_target,

	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
		unsigned long min_util, int start_cpu,
		int best_idle, int best_active, int target),
		int best_idle, int best_active, int target,
		int backup_cpu),

	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
		best_idle, best_active, target),
		best_idle, best_active, target, backup_cpu),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
@@ -1651,6 +1660,7 @@ TRACE_EVENT(sched_find_best_target,
		__field( int,	best_idle		)
		__field( int,	best_active		)
		__field( int,	target			)
		__field( int,	backup_cpu		)
	),

	TP_fast_assign(
@@ -1662,14 +1672,16 @@ TRACE_EVENT(sched_find_best_target,
		__entry->best_idle	= best_idle;
		__entry->best_active	= best_active;
		__entry->target		= target;
		__entry->backup_cpu	= backup_cpu;
	),

	TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
		  "best_idle=%d best_active=%d target=%d",
		  "best_idle=%d best_active=%d target=%d backup=%d",
		__entry->pid, __entry->comm,
		__entry->prefer_idle, __entry->start_cpu,
		__entry->best_idle, __entry->best_active,
		__entry->target)
		__entry->target,
		__entry->backup_cpu)
);

TRACE_EVENT(sched_group_energy,
+27 −1
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
#include <linux/sched_energy.h>
#include <linux/stddef.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>

@@ -49,6 +50,17 @@ static void free_resources(void)
	}
}

/*
 * Flag returned by arch_update_cpu_topology() below.  It is set to 1 only
 * around the rebuild_sched_domains() call in sched_energy_probe(), so that
 * the scheduler core believes the CPU topology changed and actually
 * rebuilds the sched domains with the newly probed capacities.
 */
static int update_topology;

/*
 * Ideally this should be arch specific implementation,
 * let's define here to help rebuild sched_domain with new capacities.
 *
 * Returns non-zero while update_topology is set, i.e. only during the
 * explicit domain rebuild triggered from sched_energy_probe().
 */
int arch_update_cpu_topology(void)
{
	return update_topology;
}

void init_sched_energy_costs(void)
{
	struct device_node *cn, *cp;
@@ -273,8 +285,22 @@ static int sched_energy_probe(struct platform_device *pdev)

	kfree(max_frequencies);

	if (is_sge_valid)
	if (is_sge_valid) {
		/*
		 * Sched domains might have been built with default cpu
		 * capacity values on bootup.
		 *
		 * Rebuild them now with the actual cpu capacities.
		 * partition_sched_domains() only rebuilds the domains when it
		 * sees an update in cpu topology, so satisfy that expectation.
		 */
		update_topology = 1;
		rebuild_sched_domains();
		update_topology = 0;

		walt_sched_energy_populated_callback();
	}

	dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n");
	return 0;

+4 −4
Original line number Diff line number Diff line
@@ -6976,6 +6976,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

			cpumask_clear_cpu(i, &search_cpus);

			trace_sched_cpu_util(i);
			if (!cpu_online(i) || cpu_isolated(i))
				continue;

@@ -6987,8 +6988,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			if (walt_cpu_high_irqload(i) || is_reserved(i))
				continue;

			trace_sched_cpu_util(i);

			/*
			 * p's blocked utilization is still accounted for on prev_cpu
			 * so prev_cpu will receive a negative bias due to the double
@@ -7057,7 +7056,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
					trace_sched_find_best_target(p,
							prefer_idle, min_util,
							cpu, best_idle_cpu,
							best_active_cpu, i);
							best_active_cpu,
							i, -1);

					return i;
				}
@@ -7278,7 +7278,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
				     best_idle_cpu, best_active_cpu,
				     target_cpu);
				     target_cpu, *backup_cpu);

	schedstat_inc(p->se.statistics.nr_wakeups_fbt_count);
	schedstat_inc(this_rq()->eas_stats.fbt_count);