
Commit cd85d551 authored by Helge Deller

parisc: more irq statistics in /proc/interrupts



Add framework and initial values for more fine-grained statistics in
/proc/interrupts.

Signed-off-by: Helge Deller <deller@gmx.de>
parent 200c8804
arch/parisc/include/asm/hardirq.h  +35 −1
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
-#include <asm-generic/hardirq.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	unsigned int kernel_stack_usage;
+#endif
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+	/*
+	 * irq_tlb_count is double-counted in irq_call_count, so it must be
+	 * subtracted from irq_call_count when displaying irq_call_count
+	 */
+	unsigned int irq_tlb_count;
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
+
+#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
 
 #endif /* _PARISC_HARDIRQ_H */
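
This header is the whole accounting framework: each CPU gets a cache-aligned irq_cpustat_t, and inc_irq_stat() wraps this_cpu_inc(), so a counter can be bumped from interrupt context without explicit locking. A minimal sketch of how an interrupt path would use it (my_ipi_handler is a made-up name for illustration, not part of this patch):

/* Sketch only: bump a per-CPU statistic from an interrupt handler,
 * the same way ipi_interrupt() does further down in this commit. */
#include <linux/interrupt.h>
#include <asm/hardirq.h>

static irqreturn_t my_ipi_handler(int irq, void *dev_id)
{
	/* expands to this_cpu_inc(irq_stat.irq_call_count) */
	inc_irq_stat(irq_call_count);
	/* ... actual interrupt handling ... */
	return IRQ_HANDLED;
}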
arch/parisc/include/asm/processor.h  +0 −1
@@ -112,7 +112,6 @@ struct cpuinfo_parisc {
 	unsigned long txn_addr;     /* MMIO addr of EIR or id_eid */
 #ifdef CONFIG_SMP
 	unsigned long pending_ipi;  /* bitmap of type ipi_message_type */
-	unsigned long ipi_count;    /* number ipi Interrupts */
 #endif
 	unsigned long bh_count;     /* number of times bh was invoked */
 	unsigned long prof_counter; /* per CPU profiling support */
arch/parisc/kernel/irq.c  +47 −1
@@ -152,6 +152,40 @@ static struct irq_chip cpu_interrupt_type = {
 	.irq_retrigger	= NULL,
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x)		(&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	seq_printf(p, "%*s: ", prec, "STK");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+	seq_printf(p, "  Kernel stack usage\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "%*s: ", prec, "RES");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, "  Rescheduling interrupts\n");
+	seq_printf(p, "%*s: ", prec, "CAL");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
+					irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  Function call interrupts\n");
+	seq_printf(p, "%*s: ", prec, "TLB");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  TLB shootdowns\n");
+#endif
+	return 0;
+}
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -219,6 +253,9 @@ int show_interrupts(struct seq_file *p, void *v)
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
+	if (i == NR_IRQS)
+		arch_show_interrupts(p, 3);
+
 	return 0;
 }
 
@@ -340,13 +377,22 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	/* Our stack starts directly behind the thread_info struct. */
 	unsigned long stack_start = (unsigned long) current_thread_info();
 	unsigned long sp = regs->gr[30];
+	unsigned long stack_usage;
+	unsigned int *last_usage;
 
 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
 	if (regs->sr[7])
 		return;
 
-	if (likely((sp - stack_start) < (THREAD_SIZE - STACK_MARGIN)))
+	/* calculate kernel stack usage */
+	stack_usage = sp - stack_start;
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
 		return;
 
 	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
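
Once show_interrupts() reaches i == NR_IRQS it calls arch_show_interrupts(p, 3), appending the architecture-specific rows after the numbered IRQ lines. Given the seq_printf() formats above, the new tail of /proc/interrupts on a two-CPU machine would look roughly like this (the counts are invented for illustration):

STK:       5468       5104   Kernel stack usage
RES:      12962       9370   Rescheduling interrupts
CAL:        512        498   Function call interrupts
TLB:         86         79   TLB shootdowns

The STK row appears only with CONFIG_DEBUG_STACKOVERFLOW, and RES/CAL/TLB only on CONFIG_SMP kernels, mirroring the #ifdefs in arch_show_interrupts().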
arch/parisc/kernel/smp.c  +2 −1
@@ -127,7 +127,7 @@ ipi_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 
 	/* Count this now; we may make a call that never returns. */
-	p->ipi_count++;
+	inc_irq_stat(irq_call_count);
 
 	mb();	/* Order interrupt and bit testing. */
 
@@ -155,6 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
 
 			case IPI_RESCHEDULE:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
+				inc_irq_stat(irq_resched_count);
 				scheduler_ipi();
 				break;
 
arch/parisc/mm/init.c  +2 −0
@@ -1069,6 +1069,7 @@ void flush_tlb_all(void)
 {
 	int do_recycle;
 
+	inc_irq_stat(irq_tlb_count);
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1089,6 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
+	inc_irq_stat(irq_tlb_count);
 	spin_lock(&sid_lock);
 	flush_tlb_all_local(NULL);
 	recycle_sids();
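
These last two hunks also show why the header comment warns that irq_tlb_count is double-counted in irq_call_count: on SMP, a global TLB flush is distributed over the same cross-CPU call machinery whose IPIs are counted as irq_call_count in ipi_interrupt(). A simplified view of the flow, assuming flush_tlb_all() fans out via on_each_cpu() as in mainline parisc:

/* Illustrative call chain, simplified:
 *
 *   flush_tlb_all()                      inc_irq_stat(irq_tlb_count)
 *     -> on_each_cpu(flush_tlb_all_local, NULL, 1)
 *          -> IPI_CALL_FUNC on each CPU
 *               -> ipi_interrupt()       inc_irq_stat(irq_call_count)
 *
 * A TLB shootdown therefore appears in both counters, which is why
 * arch_show_interrupts() prints CAL as irq_call_count - irq_tlb_count
 * and reports the TLB share on its own line.
 */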