Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c73cdda4 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/walt: Fix the memory leak of idle task load pointers"

parents f826765a 736630c5
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -392,7 +392,7 @@ extern int lockdep_tasklist_lock_is_held(void);
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;
+1 −1
Original line number Diff line number Diff line
@@ -1926,7 +1926,7 @@ struct task_struct *fork_idle(int cpu)
			    cpu_to_node(cpu));
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu, false);
		init_idle(task, cpu);
	}

	return task;
+4 −7
Original line number Diff line number Diff line
@@ -2505,7 +2505,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
	unsigned long flags;
	int cpu;

	init_new_task_load(p, false);
	init_new_task_load(p);
	cpu = get_cpu();

	__sched_fork(clone_flags, p);
@@ -5529,21 +5529,17 @@ void init_idle_bootup_task(struct task_struct *idle)
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 * @cpu_up: differentiate between initial boot vs hotplug
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
void init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	__sched_fork(0, idle);

	if (!cpu_up)
		init_new_task_load(idle, true);

	raw_spin_lock_irqsave(&idle->pi_lock, flags);
	raw_spin_lock(&rq->lock);

@@ -8368,7 +8364,8 @@ void __init sched_init(void)
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id(), false);
	init_idle(current, smp_processor_id());
	init_new_task_load(current);

	calc_load_update = jiffies + LOAD_FREQ;

+1 −4
Original line number Diff line number Diff line
@@ -2001,7 +2001,7 @@ int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
	return 0;
}

void init_new_task_load(struct task_struct *p, bool idle_task)
void init_new_task_load(struct task_struct *p)
{
	int i;
	u32 init_load_windows;
@@ -2019,9 +2019,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
	/* Don't have much choice. CPU frequency would be bogus */
	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);

	if (idle_task)
		return;

	if (current->init_load_pct)
		init_load_pct = current->init_load_pct;
	else
+2 −2
Original line number Diff line number Diff line
@@ -150,7 +150,7 @@ extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p, bool idle_task);
extern void init_new_task_load(struct task_struct *p);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
@@ -337,7 +337,7 @@ static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
}

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void init_new_task_load(struct task_struct *p, bool idle_task)
static inline void init_new_task_load(struct task_struct *p)
{
}

Loading