Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aa483808 authored by Venkatesh Pallipadi's avatar Venkatesh Pallipadi Committed by Ingo Molnar
Browse files

sched: Remove irq time from available CPU power

The idea was suggested by Peter Zijlstra here:

  http://marc.info/?l=linux-kernel&m=127476934517534&w=2



irq time is technically not available to the tasks running on the CPU.
This patch removes irq time from CPU power piggybacking on
sched_rt_avg_update().

Tested this by keeping CPU X busy with a network intensive task having 75%
of a single CPU's irq processing (hard+soft) on a 4-way system, and started seven
cycle soakers on the system. Without this change, there will be two tasks on
each CPU. With this change, there is a single task on irq busy CPU X and
remaining 7 tasks are spread around among other 3 CPUs.

Signed-off-by: default avatarVenkatesh Pallipadi <venki@google.com>
Signed-off-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-8-git-send-email-venki@google.com>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent 305e6835
Loading
Loading
Loading
Loading
+18 −0
Original line number Original line Diff line number Diff line
@@ -519,6 +519,10 @@ struct rq {
	u64 avg_idle;
	u64 avg_idle;
#endif
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif

	/* calc_load related fields */
	/* calc_load related fields */
	unsigned long calc_load_update;
	unsigned long calc_load_update;
	long calc_load_active;
	long calc_load_active;
@@ -643,6 +647,7 @@ static inline struct task_group *task_group(struct task_struct *p)
#endif /* CONFIG_CGROUP_SCHED */
#endif /* CONFIG_CGROUP_SCHED */


static u64 irq_time_cpu(int cpu);
static u64 irq_time_cpu(int cpu);
static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);


inline void update_rq_clock(struct rq *rq)
inline void update_rq_clock(struct rq *rq)
{
{
@@ -654,6 +659,8 @@ inline void update_rq_clock(struct rq *rq)
		irq_time = irq_time_cpu(cpu);
		irq_time = irq_time_cpu(cpu);
		if (rq->clock - irq_time > rq->clock_task)
		if (rq->clock - irq_time > rq->clock_task)
			rq->clock_task = rq->clock - irq_time;
			rq->clock_task = rq->clock - irq_time;

		sched_irq_time_avg_update(rq, irq_time);
	}
	}
}
}


@@ -1985,6 +1992,15 @@ void account_system_vtime(struct task_struct *curr)
	local_irq_restore(flags);
	local_irq_restore(flags);
}
}


/*
 * Fold the irq time accrued on this rq since the last update into
 * rt_avg via sched_rt_avg_update(), so that scale_rt_power() will
 * subtract it from the CPU power available to regular tasks.
 * Gated on irq time accounting being active (sched_clock_irqtime)
 * and the NONIRQ_POWER scheduler feature.
 *
 * NOTE(review): curr_irq_time is assumed to be monotonically
 * non-decreasing per CPU, so the u64 subtraction below cannot
 * underflow — confirm against irq_time_cpu()'s definition.
 */
static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
{
	if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
		u64 delta_irq = curr_irq_time - rq->prev_irq_time;
		rq->prev_irq_time = curr_irq_time;
		sched_rt_avg_update(rq, delta_irq);
	}
}

#else
#else


static u64 irq_time_cpu(int cpu)
static u64 irq_time_cpu(int cpu)
@@ -1992,6 +2008,8 @@ static u64 irq_time_cpu(int cpu)
	return 0;
	return 0;
}
}


/* Stub for builds without irq time accounting (#else branch —
 * presumably !CONFIG_IRQ_TIME_ACCOUNTING; the #ifdef head is
 * outside this hunk): no irq time exists, so nothing to fold in. */
static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }

#endif
#endif


#include "sched_idletask.c"
#include "sched_idletask.c"
+7 −1
Original line number Original line Diff line number Diff line
@@ -2275,7 +2275,13 @@ unsigned long scale_rt_power(int cpu)
	u64 total, available;
	u64 total, available;


	total = sched_avg_period() + (rq->clock - rq->age_stamp);
	total = sched_avg_period() + (rq->clock - rq->age_stamp);

	if (unlikely(total < rq->rt_avg)) {
		/* Ensures that power won't end up being negative */
		available = 0;
	} else {
		available = total - rq->rt_avg;
		available = total - rq->rt_avg;
	}


	if (unlikely((s64)total < SCHED_LOAD_SCALE))
	if (unlikely((s64)total < SCHED_LOAD_SCALE))
		total = SCHED_LOAD_SCALE;
		total = SCHED_LOAD_SCALE;
+5 −0
Original line number Original line Diff line number Diff line
@@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
 * release the lock. Decreases scheduling overhead.
 * release the lock. Decreases scheduling overhead.
 */
 */
SCHED_FEAT(OWNER_SPIN, 1)
SCHED_FEAT(OWNER_SPIN, 1)

/*
 * Decrement CPU power based on irq activity
 */
SCHED_FEAT(NONIRQ_POWER, 1)