Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 41bb745c authored by qctecmdr, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "PM / EM: Micro optimization in em_pd_energy"

parents ac21732f 8a306ac7
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -83,6 +83,9 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
	struct em_cap_state *cs;
	int i, cpu;

	if (!sum_util)
		return 0;

	/*
	 * In order to predict the capacity state, map the utilization of the
	 * most utilized CPU of the performance domain to a requested frequency,
+36 −4
Original line number Diff line number Diff line
@@ -275,11 +275,12 @@ TRACE_EVENT(sched_load_balance,
	TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
		unsigned long group_mask, int busiest_nr_running,
		unsigned long imbalance, unsigned int env_flags, int ld_moved,
		unsigned int balance_interval, int active_balance),
		unsigned int balance_interval, int active_balance,
		int overutilized),

	TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
		imbalance, env_flags, ld_moved, balance_interval,
		active_balance),
		active_balance, overutilized),

	TP_STRUCT__entry(
		__field(int,                    cpu)
@@ -292,6 +293,7 @@ TRACE_EVENT(sched_load_balance,
		__field(int,                    ld_moved)
		__field(unsigned int,           balance_interval)
		__field(int,                    active_balance)
		__field(int,                    overutilized)
	),

	TP_fast_assign(
@@ -305,16 +307,18 @@ TRACE_EVENT(sched_load_balance,
		__entry->ld_moved               = ld_moved;
		__entry->balance_interval       = balance_interval;
		__entry->active_balance		= active_balance;
		__entry->overutilized		= overutilized;
	),

	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d active_balance=%d",
	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d active_balance=%d sd_overutilized=%d",
		__entry->cpu,
		__entry->idle == CPU_IDLE ? "idle" :
		(__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
		__entry->balance,
		__entry->group_mask, __entry->busiest_nr_running,
		__entry->imbalance, __entry->env_flags, __entry->ld_moved,
		__entry->balance_interval, __entry->active_balance)
		__entry->balance_interval, __entry->active_balance,
		__entry->overutilized)
);

TRACE_EVENT(sched_load_balance_nohz_kick,
@@ -1105,6 +1109,34 @@ TRACE_EVENT(sched_find_best_target,
		  __entry->target, __entry->backup)
);

/*
 * Tracepoint for system overutilized flag
 */
#ifdef CONFIG_SCHED_WALT
struct sched_domain;
TRACE_EVENT_CONDITION(sched_overutilized,

	TP_PROTO(struct sched_domain *sd, bool was_overutilized, bool overutilized),

	TP_ARGS(sd, was_overutilized, overutilized),

	TP_CONDITION(overutilized != was_overutilized),

	TP_STRUCT__entry(
		__field( bool,	overutilized	  )
		__array( char,  cpulist , 32      )
	),

	TP_fast_assign(
		__entry->overutilized	= overutilized;
		scnprintf(__entry->cpulist, sizeof(__entry->cpulist), "%*pbl", cpumask_pr_args(sched_domain_span(sd)));
	),

	TP_printk("overutilized=%d sd_span=%s",
		__entry->overutilized ? 1 : 0, __entry->cpulist)
);
#endif

TRACE_EVENT(sched_preempt_disable,

	TP_PROTO(u64 delta, bool irqs_disabled,
+31 −1
Original line number Diff line number Diff line
@@ -5305,11 +5305,13 @@ static bool sd_overutilized(struct sched_domain *sd)

static void set_sd_overutilized(struct sched_domain *sd)
{
	trace_sched_overutilized(sd, sd->shared->overutilized, true);
	sd->shared->overutilized = true;
}

static void clear_sd_overutilized(struct sched_domain *sd)
{
	trace_sched_overutilized(sd, sd->shared->overutilized, false);
	sd->shared->overutilized = false;
}
#endif
@@ -10251,7 +10253,12 @@ static int load_balance(int this_cpu, struct rq *this_rq,
				 group ? group->cpumask[0] : 0,
				 busiest ? busiest->nr_running : 0,
				 env.imbalance, env.flags, ld_moved,
				 sd->balance_interval, active_balance);
				 sd->balance_interval, active_balance,
#ifdef CONFIG_SCHED_WALT
				 sd_overutilized(sd));
#else
				 READ_ONCE(this_rq->rd->overutilized));
#endif
	return ld_moved;
}

@@ -10259,6 +10266,9 @@ static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
{
	unsigned long interval = sd->balance_interval;
#ifdef CONFIG_SCHED_WALT
	unsigned int cpu;
#endif

	if (cpu_busy)
		interval *= sd->busy_factor;
@@ -10267,6 +10277,26 @@ get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
	interval = msecs_to_jiffies(interval);
	interval = clamp(interval, 1UL, max_load_balance_interval);

	/*
	 * check if sched domain is marked as overutilized
	 * we ought to only do this on systems which have SD_ASYMCAPACITY
	 * but we want to do it for all sched domains in those systems
	 * So for now, just check if overutilized as a proxy.
	 */
	/*
	 * If we are overutilized and we have a misfit task, then
	 * we want to balance as soon as practically possible, so
	 * we return an interval of zero, except for busy balance.
	 */
#ifdef CONFIG_SCHED_WALT
	if (sd_overutilized(sd) && !cpu_busy) {
		/* we know the root is overutilized, let's check for a misfit task */
		for_each_cpu(cpu, sched_domain_span(sd)) {
			if (cpu_rq(cpu)->misfit_task_load)
				return 1;
		}
	}
#endif
	return interval;
}