Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a782a7e4 authored by Thomas Gleixner
Browse files

x86/irq: Store irq descriptor in vector array



We can spare the irq_desc lookup in the interrupt entry code if we
store the descriptor pointer in the vector array instead of the
interrupt number.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.717724106@linutronix.de


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f61ae4fb
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -182,10 +182,10 @@ extern char irq_entries_start[];
#define trace_irq_entries_start irq_entries_start
#endif

#define VECTOR_UNUSED		(-1)
#define VECTOR_RETRIGGERED	(-2)
#define VECTOR_UNUSED		NULL
#define VECTOR_RETRIGGERED	((void *)~0UL)

typedef int vector_irq_t[NR_VECTORS];
typedef struct irq_desc* vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);

#endif /* !__ASSEMBLY__ */
+3 −1
Original line number Diff line number Diff line
@@ -36,7 +36,9 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));

extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);

struct irq_desc;
extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);

extern __visible unsigned int do_IRQ(struct pt_regs *regs);

+24 −27
Original line number Diff line number Diff line
@@ -169,7 +169,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			goto next;

		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNUSED)
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
@@ -181,7 +181,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			   cpumask_intersects(d->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		d->cfg.vector = vector;
		cpumask_copy(d->domain, vector_cpumask);
		err = 0;
@@ -223,8 +223,9 @@ static int assign_irq_vector_policy(int irq, int node,

static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	int cpu, vector;
	struct irq_desc *desc;
	unsigned long flags;
	int cpu, vector;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!data->cfg.vector);
@@ -241,10 +242,11 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
		return;
	}

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
@@ -402,30 +404,30 @@ int __init arch_early_irq_init(void)
	return arch_early_ioapic_init();
}

/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		data = apic_chip_data(irq_get_irq_data(irq));
		if (!data)
			continue;
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		if (!cpumask_test_cpu(cpu, data->domain))
		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNUSED)
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_get_irq_data(irq));
		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
@@ -447,7 +449,7 @@ void setup_vector_irq(int cpu)
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}
@@ -543,19 +545,13 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNUSED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
@@ -565,9 +561,10 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
			goto retry;
		}

		data = apic_chip_data(&desc->irq_data);
		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
+16 −21
Original line number Diff line number Diff line
@@ -211,22 +211,21 @@ u64 arch_irq_stat(void)
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	struct irq_desc * desc;
	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	entering_irq();

	irq = __this_cpu_read(vector_irq[vector]);
	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector, irq);
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
@@ -330,10 +329,10 @@ static struct cpumask affinity_new, online_new;
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
@@ -341,24 +340,21 @@ int check_irq_vectors_for_cpu_disable(void)

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
		cpumask_copy(&affinity_new,
			     irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}
@@ -399,7 +395,7 @@ int check_irq_vectors_for_cpu_disable(void)
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    per_cpu(vector_irq, cpu)[vector] <= VECTOR_UNUSED)
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
			    count++;
		}
	}
@@ -504,14 +500,13 @@ void fixup_irqs(void)
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNUSED)
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr  & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);
			desc = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
+4 −5
Original line number Diff line number Diff line
@@ -148,21 +148,20 @@ void do_softirq_own_stack(void)
	call_on_stack(__do_softirq, isp);
}

bool handle_irq(unsigned irq, struct pt_regs *regs)
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	struct irq_desc *desc;
	unsigned int irq = irq_desc_get_irq(desc);
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
	if (IS_ERR_OR_NULL(desc))
		return false;

	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
		generic_handle_irq_desc(irq, desc);
	}

	return true;
Loading