Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 84f72d7e authored by Pavankumar Kondeti
Browse files

sched: remove "hmp" references in the code



HMP specific code is removed in the previous patches. There are
still references to HMP in the code that is shared with WALT.
Rename hmp_stats and hmp_flags methods in rq structure to
walt_stats and walt_flags.

Change-Id: I37f7ef1893e479de0e8a2c3b809a7e2e631adead
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 0f38b7ea
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -245,7 +245,7 @@ TRACE_EVENT(sched_update_history,
		__entry->pred_demand     = p->ravg.pred_demand;
		memcpy(__entry->hist, p->ravg.sum_history,
					RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
		__entry->nr_big_tasks   = rq->walt_stats.nr_big_tasks;
		__entry->cpu            = rq->cpu;
	),

@@ -563,10 +563,10 @@ DECLARE_EVENT_CLASS(sched_cpu_load,
		__entry->cpu			= rq->cpu;
		__entry->idle			= idle;
		__entry->nr_running		= rq->nr_running;
		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
		__entry->nr_big_tasks		= rq->walt_stats.nr_big_tasks;
		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
		__entry->capacity		= cpu_capacity(rq->cpu);
		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
		__entry->cumulative_runnable_avg = rq->walt_stats.cumulative_runnable_avg;
		__entry->irqload		= irqload;
		__entry->max_freq		= cpu_max_freq(rq->cpu);
		__entry->power_cost		= power_cost;
@@ -618,7 +618,7 @@ TRACE_EVENT(sched_load_to_gov,
		__entry->grp_rq_ps	= rq->grp_time.prev_runnable_sum;
		__entry->nt_ps		= rq->nt_prev_runnable_sum;
		__entry->grp_nt_ps	= rq->grp_time.nt_prev_runnable_sum;
		__entry->pl		= rq->hmp_stats.pred_demands_sum;
		__entry->pl		= rq->walt_stats.pred_demands_sum;
		__entry->load		= load;
	),

+3 −3
Original line number Diff line number Diff line
@@ -32,7 +32,7 @@ static inline void boost_kick(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/*
	 * Only send the reschedule IPI once per pending kick: the bit stays
	 * set until clear_boost_kick() acknowledges it on the target CPU.
	 * (Post-rename field: walt_flags, formerly hmp_flags.)
	 */
	if (!test_and_set_bit(BOOST_KICK, &rq->walt_flags))
		smp_send_reschedule(cpu);
}

@@ -57,14 +57,14 @@ int got_boost_kick(void)
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);

	/*
	 * Non-destructive test of the pending-kick bit; the caller must use
	 * clear_boost_kick() to acknowledge it. The duplicated pre-rename
	 * hmp_flags return (dead code after the first return) is dropped.
	 */
	return test_bit(BOOST_KICK, &rq->walt_flags);
}

/* Acknowledge a pending boost kick on @cpu by clearing its flag bit. */
void clear_boost_kick(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/* Keep only the post-rename walt_flags word (was hmp_flags). */
	clear_bit(BOOST_KICK, &rq->walt_flags);
}

/*
+2 −3
Original line number Diff line number Diff line
@@ -5958,7 +5958,7 @@ int do_isolation_work_cpu_stop(void *data)
	 */
	nohz_balance_clear_nohz_mask(cpu);

	clear_hmp_request(cpu);
	clear_walt_request(cpu);
	local_irq_enable();
	return 0;
}
@@ -8068,7 +8068,7 @@ int sched_cpu_dying(unsigned int cpu)
	BUG_ON(rq->nr_running != 1);
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	clear_hmp_request(cpu);
	clear_walt_request(cpu);

	calc_load_migrate(rq);
	update_max_interval();
@@ -8308,7 +8308,6 @@ void __init sched_init(void)
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
		rq->push_task = NULL;

		walt_sched_init(rq);

		INIT_LIST_HEAD(&rq->cfs_tasks);
+11 −11
Original line number Diff line number Diff line
@@ -22,35 +22,35 @@
#ifdef CONFIG_SCHED_WALT

static void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
inc_walt_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
	inc_cumulative_runnable_avg(&rq->walt_stats, p);
}

static void
dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
dec_walt_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
	dec_cumulative_runnable_avg(&rq->walt_stats, p);
}

static void
fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
fixup_walt_sched_stats_dl(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
	fixup_cumulative_runnable_avg(&rq->walt_stats, p, task_load_delta,
				      pred_demand_delta);
}

#else	/* CONFIG_SCHED_WALT */

/* !CONFIG_SCHED_WALT: WALT accounting compiles out to no-ops. */
static inline void
inc_walt_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

static inline void
dec_walt_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_WALT */

@@ -865,7 +865,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);
	inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
	inc_walt_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
@@ -880,7 +880,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
	dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
	dec_walt_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
@@ -1845,7 +1845,7 @@ const struct sched_class dl_sched_class = {

	.update_curr		= update_curr_dl,
#ifdef CONFIG_SCHED_WALT
	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_dl,
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_dl,
#endif
};

+3 −3
Original line number Diff line number Diff line
@@ -700,10 +700,10 @@ do { \
	P(cluster->max_freq);
	P(cluster->exec_scale_factor);
#ifdef CONFIG_SCHED_WALT
	P(hmp_stats.nr_big_tasks);
	P(walt_stats.nr_big_tasks);
#endif
	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
			rq->hmp_stats.cumulative_runnable_avg);
	SEQ_printf(m, "  .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
			rq->walt_stats.cumulative_runnable_avg);
#endif
#undef P
#undef PN
Loading