
Commit 19d23dbf authored by Frederic Weisbecker, committed by Ingo Molnar

sched/irqtime: Consolidate accounting synchronization with u64_stats API



The irqtime accounting currently implements its own ad hoc version of the
u64_stats API. Let's consolidate it with the appropriate library instead.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1474849761-12678-5-git-send-email-fweisbec@gmail.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 68107df5
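
For context, the patch below moves the irqtime counters onto the generic seqcount helpers from <linux/u64_stats_sync.h>. The following is a minimal, hypothetical sketch of that writer/reader pattern, using the same calls the patch adopts (u64_stats_update_begin/end on the write side, __u64_stats_fetch_begin/__u64_stats_fetch_retry on the read side); the example_stats struct and functions are invented for illustration and are not part of this patch.

/*
 * Minimal sketch of the u64_stats_sync pattern this patch adopts.
 * The struct and functions below are hypothetical, not kernel code.
 */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64			events;	/* 64-bit counter, single local writer */
	struct u64_stats_sync	sync;	/* seqcount on 32-bit, empty on 64-bit */
};

static DEFINE_PER_CPU(struct example_stats, example_stats);

/* Writer side: runs on the local CPU, e.g. from irq/softirq context. */
static void example_account(void)
{
	struct example_stats *stats = this_cpu_ptr(&example_stats);

	u64_stats_update_begin(&stats->sync);
	stats->events++;
	u64_stats_update_end(&stats->sync);
}

/* Reader side: lockless snapshot from any CPU, retried if a writer raced. */
static u64 example_read(int cpu)
{
	struct example_stats *stats = &per_cpu(example_stats, cpu);
	unsigned int seq;
	u64 events;

	do {
		seq = __u64_stats_fetch_begin(&stats->sync);
		events = stats->events;
	} while (__u64_stats_fetch_retry(&stats->sync, seq));

	return events;
}

On 64-bit kernels the update and fetch helpers compile down to nothing, since 64-bit loads and stores are already atomic there; on 32-bit they wrap the counters in a seqcount, which is what the removed irq_time_write_begin()/irq_time_write_end() and irq_time_seq code did by hand.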
+14 −17
@@ -23,10 +23,8 @@
  * task when irq is in progress while we read rq->clock. That is a worthy
  * compromise in place of having locks on each irq in account_system_time.
  */
-DEFINE_PER_CPU(u64, cpu_hardirq_time);
-DEFINE_PER_CPU(u64, cpu_softirq_time);
+DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
 
-static DEFINE_PER_CPU(u64, irq_start_time);
 static int sched_clock_irqtime;
 
 void enable_sched_clock_irqtime(void)
@@ -39,16 +37,13 @@ void disable_sched_clock_irqtime(void)
 	sched_clock_irqtime = 0;
 }
 
-#ifndef CONFIG_64BIT
-DEFINE_PER_CPU(seqcount_t, irq_time_seq);
-#endif /* CONFIG_64BIT */
-
 /*
  * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
  */
 void irqtime_account_irq(struct task_struct *curr)
 {
+	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 	s64 delta;
 	int cpu;
 
@@ -56,10 +51,10 @@ void irqtime_account_irq(struct task_struct *curr)
 		return;
 
 	cpu = smp_processor_id();
-	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
-	__this_cpu_add(irq_start_time, delta);
+	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
+	irqtime->irq_start_time += delta;
 
-	irq_time_write_begin();
+	u64_stats_update_begin(&irqtime->sync);
 	/*
 	 * We do not account for softirq time from ksoftirqd here.
 	 * We want to continue accounting softirq time to ksoftirqd thread
@@ -67,11 +62,11 @@ void irqtime_account_irq(struct task_struct *curr)
 	 * that do not consume any time, but still wants to run.
 	 */
 	if (hardirq_count())
-		__this_cpu_add(cpu_hardirq_time, delta);
+		irqtime->hardirq_time += delta;
 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
-		__this_cpu_add(cpu_softirq_time, delta);
+		irqtime->softirq_time += delta;
 
-	irq_time_write_end();
+	u64_stats_update_end(&irqtime->sync);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
@@ -79,9 +74,10 @@ static cputime_t irqtime_account_hi_update(cputime_t maxtime)
 {
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	cputime_t irq_cputime;
+	u64 nsecs;
 
-	irq_cputime = nsecs_to_cputime64(__this_cpu_read(cpu_hardirq_time)) -
-		      cpustat[CPUTIME_IRQ];
+	nsecs = __this_cpu_read(cpu_irqtime.hardirq_time);
+	irq_cputime = nsecs_to_cputime64(nsecs) - cpustat[CPUTIME_IRQ];
 	irq_cputime = min(irq_cputime, maxtime);
 	cpustat[CPUTIME_IRQ] += irq_cputime;
 
@@ -92,9 +88,10 @@ static cputime_t irqtime_account_si_update(cputime_t maxtime)
 {
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	cputime_t softirq_cputime;
+	u64 nsecs;
 
-	softirq_cputime = nsecs_to_cputime64(__this_cpu_read(cpu_softirq_time)) -
-			  cpustat[CPUTIME_SOFTIRQ];
+	nsecs = __this_cpu_read(cpu_irqtime.softirq_time);
+	softirq_cputime = nsecs_to_cputime64(nsecs) - cpustat[CPUTIME_SOFTIRQ];
 	softirq_cputime = min(softirq_cputime, maxtime);
 	cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
 
+15 −38
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/binfmts.h>
 #include <linux/mutex.h>
@@ -1735,52 +1736,28 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+struct irqtime {
+	u64			hardirq_time;
+	u64			softirq_time;
+	u64			irq_start_time;
+	struct u64_stats_sync	sync;
+};
 
-DECLARE_PER_CPU(u64, cpu_hardirq_time);
-DECLARE_PER_CPU(u64, cpu_softirq_time);
-
-#ifndef CONFIG_64BIT
-DECLARE_PER_CPU(seqcount_t, irq_time_seq);
-
-static inline void irq_time_write_begin(void)
-{
-	__this_cpu_inc(irq_time_seq.sequence);
-	smp_wmb();
-}
-
-static inline void irq_time_write_end(void)
-{
-	smp_wmb();
-	__this_cpu_inc(irq_time_seq.sequence);
-}
+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 
 static inline u64 irq_time_read(int cpu)
 {
-	u64 irq_time;
-	unsigned seq;
+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+	unsigned int seq;
+	u64 total;
 
 	do {
-		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
-		irq_time = per_cpu(cpu_softirq_time, cpu) +
-			   per_cpu(cpu_hardirq_time, cpu);
-	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
-
-	return irq_time;
-}
-#else /* CONFIG_64BIT */
-static inline void irq_time_write_begin(void)
-{
-}
+		seq = __u64_stats_fetch_begin(&irqtime->sync);
+		total = irqtime->softirq_time + irqtime->hardirq_time;
+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
-static inline void irq_time_write_end(void)
-{
-}
-
-static inline u64 irq_time_read(int cpu)
-{
-	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+	return total;
 }
-#endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ