
Commit 1cea0ce0 authored by Pavankumar Kondeti

sched: clean up idle task's mark_start restoring in init_idle()



The idle task's mark_start can get updated even while the CPU is offline.
Hence the mark_start is restored when the CPU comes online.

The idle task's mark_start is reset in init_idle()->__sched_fork()->
init_new_task_load(). The original mark_start is saved and restored
later. This can be avoided by moving init_new_task_load() to
wake_up_new_task(), which never gets called for an idle task.
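
To make the problem concrete, below is a minimal sketch of what
init_new_task_load() does to a task's load-tracking state. The body is
illustrative only (the real HMP code initializes more of the window-based
load-tracking fields); the key point is that it clears ravg.mark_start:

/* Illustrative sketch only -- not the verbatim HMP implementation. */
static void init_new_task_load(struct task_struct *p)
{
	/*
	 * Resetting the window-based load-tracking state also clears
	 * ravg.mark_start. When this ran from __sched_fork(), it wiped the
	 * idle task's mark_start, which is why init_idle() had to save and
	 * restore it. Calling it from wake_up_new_task() instead avoids the
	 * problem, since that path is never taken for the idle task.
	 */
	memset(&p->ravg, 0, sizeof(p->ravg));
}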

We only care about the idle task's ravg.mark_start, so leaving the other
fields of the ravg struct uninitialized has no side effects.

This cleanup allows subsequent patches to drop rq->lock while calling
__sched_fork() in init_idle().
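
As a rough illustration of the direction those follow-up patches could take
(not part of this commit): once __sched_fork() no longer resets the idle
task's load-tracking state, init_idle() could call it before taking rq->lock,
roughly along these lines:

/* Hypothetical shape enabled by later patches -- shown only to illustrate
 * why removing the mark_start save/restore matters for locking.
 */
void init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	__sched_fork(0, idle);		/* no longer needs rq->lock held */

	raw_spin_lock_irqsave(&rq->lock, flags);
	idle->state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();
	/* ... rest of init_idle() unchanged ... */
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}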

CRs-Fixed: 965873
Change-Id: I41de6d69944d7d44b9c4d11b2d97ad01bd8fe96d
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 74240891
+1 −26
@@ -3651,16 +3651,6 @@ static int register_sched_callback(void)
  */
 core_initcall(register_sched_callback);
 
-static u64 orig_mark_start(struct task_struct *p)
-{
-	return p->ravg.mark_start;
-}
-
-static void restore_orig_mark_start(struct task_struct *p, u64 mark_start)
-{
-	p->ravg.mark_start = mark_start;
-}
-
 static inline int update_preferred_cluster(struct related_thread_group *grp,
 		struct task_struct *p, u32 old_load)
 {
@@ -3702,13 +3692,6 @@ static inline void set_window_start(struct rq *rq) {}
 
 static inline void migrate_sync_cpu(int cpu) {}
 
-static inline u64 orig_mark_start(struct task_struct *p) { return 0; }
-
-static inline void
-restore_orig_mark_start(struct task_struct *p, u64 mark_start)
-{
-}
-
 #endif	/* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SMP
@@ -4605,7 +4588,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
-	init_new_task_load(p);
 
 	INIT_LIST_HEAD(&p->se.group_node);
 
@@ -4899,6 +4881,7 @@ void wake_up_new_task(struct task_struct *p)
 	struct rq *rq;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	init_new_task_load(p);
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
@@ -7468,18 +7451,10 @@ void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
-	u64 mark_start;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
-	mark_start = orig_mark_start(idle);
-
 	__sched_fork(0, idle);
-	/*
-	 * Restore idle thread's original mark_start as we rely on it being
-	 * correct for maintaining per-cpu counters, curr/prev_runnable_sum.
-	 */
-	restore_orig_mark_start(idle, mark_start);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
+1 −26
@@ -2734,16 +2734,6 @@ static int register_sched_callback(void)
  */
 core_initcall(register_sched_callback);
 
-static u64 orig_mark_start(struct task_struct *p)
-{
-	return p->ravg.mark_start;
-}
-
-static void restore_orig_mark_start(struct task_struct *p, u64 mark_start)
-{
-	p->ravg.mark_start = mark_start;
-}
-
 /*
  * Note down when task started running on a cpu. This information will be handy
  * to avoid "too" frequent task migrations for a running task on account of
@@ -2783,13 +2773,6 @@ static inline void set_window_start(struct rq *rq) {}
 
 static inline void migrate_sync_cpu(int cpu) {}
 
-static inline u64 orig_mark_start(struct task_struct *p) { return 0; }
-
-static inline void
-restore_orig_mark_start(struct task_struct *p, u64 mark_start)
-{
-}
-
 static inline void note_run_start(struct task_struct *p, u64 wallclock) { }
 
 #endif	/* CONFIG_SCHED_HMP */
@@ -3681,7 +3664,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
-	init_new_task_load(p);
 
 	INIT_LIST_HEAD(&p->se.group_node);
 
@@ -3975,6 +3957,7 @@ void wake_up_new_task(struct task_struct *p)
 	struct rq *rq;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	init_new_task_load(p);
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
@@ -6494,18 +6477,10 @@ void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
-	u64 mark_start;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
-	mark_start = orig_mark_start(idle);
-
 	__sched_fork(0, idle);
-	/*
-	 * Restore idle thread's original mark_start as we rely on it being
-	 * correct for maintaining per-cpu counters, curr/prev_runnable_sum.
-	 */
-	restore_orig_mark_start(idle, mark_start);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();