Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 708c46ba authored by Pavankumar Kondeti
Browse files

sched: walt: Free per-cpu prev_window/curr_window upon task exit



The task's per-cpu prev_window/curr_window memory is allocated but
not freed when HMP is not defined. Move free_task_load_ptrs()
to walt.c

Change-Id: Ia70fa0cfb020f0d1b8c127eab8f68930e044e714
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent efc62c2e
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -2637,7 +2637,6 @@ struct cpu_cycle_counter_cb {
#define MAX_NUM_CGROUP_COLOC_ID	20

#ifdef CONFIG_SCHED_HMP
extern void free_task_load_ptrs(struct task_struct *p);
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
@@ -2661,8 +2660,6 @@ extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);

#else /* CONFIG_SCHED_HMP */
static inline void free_task_load_ptrs(struct task_struct *p) { }

static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
	return -EINVAL;
@@ -2700,6 +2697,7 @@ extern u32 sched_get_init_task_load(struct task_struct *p);
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
					  u32 fmax);
extern int sched_set_boost(int enable);
extern void free_task_load_ptrs(struct task_struct *p);
#else
static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
@@ -2712,6 +2710,7 @@ static inline int sched_set_boost(int enable)
{
	return -EINVAL;
}
static inline void free_task_load_ptrs(struct task_struct *p) { }
#endif /* CONFIG_SCHED_WALT */

#ifndef CONFIG_SCHED_WALT
+0 −14
Original line number Diff line number Diff line
@@ -783,20 +783,6 @@ unsigned int cpu_temp(int cpu)
		return 0;
}

/*
 * free_task_load_ptrs - release @p's per-cpu window-tracking buffers.
 *
 * Frees ravg.curr_window_cpu and ravg.prev_window_cpu (kfree(NULL) is a
 * no-op, so this is safe if they were never allocated) and clears both
 * pointers, since update_task_ravg() can still run for an exiting task.
 */
void free_task_load_ptrs(struct task_struct *p)
{
	kfree(p->ravg.curr_window_cpu);
	kfree(p->ravg.prev_window_cpu);

	/*
	 * update_task_ravg() can be called for exiting tasks. While the
	 * function itself ensures correct behavior, the corresponding
	 * trace event requires that these pointers be NULL.
	 */
	p->ravg.curr_window_cpu = NULL;
	p->ravg.prev_window_cpu = NULL;
}

/* Return task demand in percentage scale */
unsigned int pct_task_load(struct task_struct *p)
{
+14 −0
Original line number Diff line number Diff line
@@ -2025,6 +2025,20 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
	p->misfit = false;
}

/*
 * free_task_load_ptrs - release @p's per-cpu window-tracking buffers.
 *
 * Frees the curr/prev window arrays and clears each pointer right after
 * freeing it: update_task_ravg() may still run for an exiting task, and
 * its trace event expects these pointers to be NULL once freed.
 */
void free_task_load_ptrs(struct task_struct *p)
{
	kfree(p->ravg.curr_window_cpu);
	p->ravg.curr_window_cpu = NULL;

	kfree(p->ravg.prev_window_cpu);
	p->ravg.prev_window_cpu = NULL;
}

void reset_task_stats(struct task_struct *p)
{
	u32 sum = 0;