
Commit 14e07f02 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
 "A single fix for a cputime accounting regression which got introduced
  in the 4.11 cycle"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cputime: Fix ksoftirqd cputime accounting regression
parents 8c9a694d 25e2d8c1
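
Editorial note on the fix: the old irq_time_read() in this diff sums cpustat[CPUTIME_IRQ] + cpustat[CPUTIME_SOFTIRQ], and the softirq bucket can also receive the time charged to ksoftirqd by the regular task accounting path, so subtracting that sum from a task's runtime could end up taking ksoftirqd's own runtime back out. The patch instead keeps a dedicated irqtime->total that only grows for hard-irq time and for softirq time not served by ksoftirqd. What follows is a minimal userspace sketch of that accounting rule, not kernel code: toy_irqtime and toy_account() are invented names, and the booleans stand in for hardirq_count(), in_serving_softirq() and the curr != this_cpu_ksoftirqd() check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the decision made in irqtime_account_irq(): hard-irq time
 * is always charged to the irqtime total, softirq time only when it was
 * not served by ksoftirqd.
 */
struct toy_irqtime {
	uint64_t total;		/* what irq_time_read() would hand back */
	uint64_t hardirq;	/* kcpustat-style per-class buckets     */
	uint64_t softirq;
};

static void toy_account(struct toy_irqtime *it, uint64_t delta,
			bool in_hardirq, bool in_softirq, bool is_ksoftirqd)
{
	if (in_hardirq) {
		it->hardirq += delta;
		it->total += delta;
	} else if (in_softirq && !is_ksoftirqd) {
		it->softirq += delta;
		it->total += delta;
	}
	/* softirq time served by ksoftirqd is deliberately left out of total */
}

int main(void)
{
	struct toy_irqtime it = { 0 };

	toy_account(&it, 100, true,  false, false);	/* hard irq         */
	toy_account(&it, 250, false, true,  false);	/* ordinary softirq */
	toy_account(&it, 300, false, true,  true);	/* ksoftirqd        */

	printf("total=%llu (ksoftirqd's 300 excluded)\n",
	       (unsigned long long)it.total);
	return 0;
}

With these inputs total ends up at 350 (100 + 250); the 300 units attributed to ksoftirqd stay out, which mirrors the intent of the comment the patch adds above irq_time_read().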
kernel/sched/cputime.c  +16 −11
@@ -34,6 +34,18 @@ void disable_sched_clock_irqtime(void)
	sched_clock_irqtime = 0;
}

+static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
+				  enum cpu_usage_stat idx)
+{
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+	u64_stats_update_begin(&irqtime->sync);
+	cpustat[idx] += delta;
+	irqtime->total += delta;
+	irqtime->tick_delta += delta;
+	u64_stats_update_end(&irqtime->sync);
+}
+
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
@@ -41,7 +53,6 @@ void disable_sched_clock_irqtime(void)
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
	s64 delta;
	int cpu;

@@ -52,22 +63,16 @@ void irqtime_account_irq(struct task_struct *curr)
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

-	u64_stats_update_begin(&irqtime->sync);
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse scheduler with a special task
	 * that do not consume any time, but still wants to run.
	 */
-	if (hardirq_count()) {
-		cpustat[CPUTIME_IRQ] += delta;
-		irqtime->tick_delta += delta;
-	} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
-		cpustat[CPUTIME_SOFTIRQ] += delta;
-		irqtime->tick_delta += delta;
-	}
-
-	u64_stats_update_end(&irqtime->sync);
+	if (hardirq_count())
+		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
+	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

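Editorial note: the new irqtime_account_delta() helper above folds every counter update into a single u64_stats_update_begin()/u64_stats_update_end() section, so cpustat[idx], irqtime->total and irqtime->tick_delta always advance together. Below is a rough userspace analogue of that writer pattern; toy_sync, toy_update_begin() and toy_update_end() are invented stand-ins for the kernel's u64_stats API, and the memory barriers the real helpers provide for concurrent readers are omitted in this single-threaded demo.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for struct u64_stats_sync (a seqcount on 32-bit kernels). */
struct toy_sync {
	atomic_uint seq;		/* odd while an update is in flight */
};

struct toy_irqtime {
	uint64_t cpustat_irq;		/* stands in for cpustat[CPUTIME_IRQ]     */
	uint64_t cpustat_softirq;	/* stands in for cpustat[CPUTIME_SOFTIRQ] */
	uint64_t total;
	uint64_t tick_delta;
	struct toy_sync sync;
};

/*
 * Bracket the update like u64_stats_update_begin()/end().  The kernel
 * versions also supply the barriers a concurrent reader relies on; this
 * demo only keeps the sequence-count protocol itself.
 */
static void toy_update_begin(struct toy_sync *s)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);
}

static void toy_update_end(struct toy_sync *s)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);
}

/* Shape of irqtime_account_delta(): every counter moves in one write section. */
static void toy_account_delta(struct toy_irqtime *it, uint64_t *bucket,
			      uint64_t delta)
{
	toy_update_begin(&it->sync);
	*bucket += delta;
	it->total += delta;
	it->tick_delta += delta;
	toy_update_end(&it->sync);
}

int main(void)
{
	struct toy_irqtime it = { 0 };

	toy_account_delta(&it, &it.cpustat_irq, 100);		/* hard-irq delta */
	toy_account_delta(&it, &it.cpustat_softirq, 250);	/* softirq delta  */

	printf("total=%llu tick_delta=%llu\n",
	       (unsigned long long)it.total,
	       (unsigned long long)it.tick_delta);
	return 0;
}

Bundling all three increments between one begin/end pair is what lets a reader sample total without ever seeing it half-updated relative to the per-class buckets.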
kernel/sched/sched.h  +7 −2
@@ -1869,6 +1869,7 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
+	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
@@ -1876,16 +1877,20 @@ struct irqtime {

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

+/*
+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
+ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime
+ * and never move forward.
+ */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-	u64 *cpustat = kcpustat_cpu(cpu).cpustat;
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
-		total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
+		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
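
Editorial note: after this change irq_time_read() samples the single irqtime->total field under the same sequence counter the writer uses, retrying if an update raced with the read (on 64-bit kernels the u64_stats machinery is effectively a no-op, since 64-bit loads are atomic there). A compact userspace sketch of that retry loop follows; the toy_* names are invented, and the kernel's __u64_stats_fetch_begin()/__u64_stats_fetch_retry() additionally supply the barriers this single-threaded demo leaves out.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for struct u64_stats_sync and its fetch helpers. */
struct toy_sync {
	atomic_uint seq;		/* odd while a writer is mid-update */
};

struct toy_irqtime {
	uint64_t total;
	struct toy_sync sync;
};

static unsigned int toy_fetch_begin(const struct toy_sync *s)
{
	/* Just snapshot the counter; oddness is handled by the retry check. */
	return atomic_load_explicit(&s->seq, memory_order_acquire);
}

static int toy_fetch_retry(const struct toy_sync *s, unsigned int start)
{
	/* Retry if a writer began or finished while we were reading. */
	return atomic_load_explicit(&s->seq, memory_order_acquire) != start ||
	       (start & 1);
}

/* Shape of irq_time_read(): sample 'total' consistently, retry on a race. */
static uint64_t toy_irq_time_read(const struct toy_irqtime *it)
{
	unsigned int seq;
	uint64_t total;

	do {
		seq = toy_fetch_begin(&it->sync);
		total = it->total;
	} while (toy_fetch_retry(&it->sync, seq));

	return total;
}

int main(void)
{
	struct toy_irqtime it = { .total = 350 };

	printf("irq time = %llu\n",
	       (unsigned long long)toy_irq_time_read(&it));
	return 0;
}

Reading one 64-bit field under the counter replaces the old two-bucket sum, which is what keeps ksoftirqd's tick-accounted softirq time out of the result.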