Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 34471a91 authored by Russell King's avatar Russell King
Browse files

Merge branch 'ppi-irq-core-for-rmk' of git://github.com/mzyngier/arm-platforms into devel-stable

parents cefd3e71 28af690a
Loading
Loading
Loading
Loading
+25 −0
Original line number Diff line number Diff line
@@ -1420,6 +1420,31 @@ config SMP_ON_UP

	  If you don't know what to do here, say Y.

# CPU topology support derived from the MPIDR affinity fields; the
# scheduler options below (SCHED_MC, SCHED_SMT) depend on this symbol.
config ARM_CPU_TOPOLOGY
	bool "Support cpu topology definition"
	depends on SMP && CPU_V7
	default y
	help
	  Support ARM cpu topology definition. The MPIDR register defines
	  affinity between processors which is then used to describe the cpu
	  topology of an ARM System.

# Multi-core-aware scheduling; only meaningful when the CPU topology
# (ARM_CPU_TOPOLOGY above) is available to describe core groupings.
config SCHED_MC
	bool "Multi-core scheduler support"
	depends on ARM_CPU_TOPOLOGY
	help
	  Multi-core scheduler support improves the CPU scheduler's decision
	  making when dealing with multi-core CPU chips at a cost of slightly
	  increased overhead in some places. If unsure say N here.

# SMT-aware scheduling; like SCHED_MC it requires the MPIDR-derived
# topology information provided by ARM_CPU_TOPOLOGY.
config SCHED_SMT
	bool "SMT scheduler support"
	depends on ARM_CPU_TOPOLOGY
	help
	  Improves the CPU scheduler's decision making when dealing with
	  MultiThreading at a cost of slightly increased overhead in some
	  places. If unsure say N here.

config HAVE_ARM_SCU
	bool
	help
+46 −14
Original line number Diff line number Diff line
@@ -29,6 +29,9 @@
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/mach/irq.h>
@@ -181,7 +184,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu + shift);
	bit = 1 << (cpu_logical_map(cpu) + shift);

	spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
@@ -260,9 +263,16 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	u32 cpumask;
	void __iomem *base = gic->dist_base;
	u32 cpumask = 1 << smp_processor_id();
	u32 cpu = 0;
	u32 nrppis = 0, ppi_base = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

@@ -279,6 +289,23 @@ static void __init gic_dist_init(struct gic_chip_data *gic,

	gic->gic_irqs = gic_irqs;

	/*
	 * Nobody would be insane enough to use PPIs on a secondary
	 * GIC, right?
	 */
	if (gic == &gic_data[0]) {
		nrppis = (32 - irq_start) & 31;

		/* The GIC only supports up to 16 PPIs. */
		if (nrppis > 16)
			BUG();

		ppi_base = gic->irq_offset + 32 - nrppis;
	}

	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
@@ -314,7 +341,17 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
	/*
	 * Setup the Linux IRQ subsystem.
	 */
	for (i = irq_start; i < irq_limit; i++) {
	for (i = 0; i < nrppis; i++) {
		int ppi = i + ppi_base;

		irq_set_percpu_devid(ppi);
		irq_set_chip_and_handler(ppi, &gic_chip,
					 handle_percpu_devid_irq);
		irq_set_chip_data(ppi, gic);
		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
	}

	for (i = irq_start + nrppis; i < irq_limit; i++) {
		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
		irq_set_chip_data(i, gic);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
@@ -557,20 +594,15 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
	gic_cpu_init(&gic_data[gic_nr]);
}

/*
 * gic_enable_ppi - enable a per-CPU peripheral interrupt on the calling CPU.
 * @irq: Linux IRQ number of the PPI to enable.
 *
 * Marks the IRQ as not probeable and unmasks it via the GIC chip's
 * unmask operation.  Both steps run with local interrupts disabled so
 * the PPI cannot fire on this CPU midway through the sequence.
 * NOTE(review): __cpuinit suggests this is called from CPU bring-up
 * paths — confirm against callers.
 */
void __cpuinit gic_enable_ppi(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	/* PPIs are per-CPU; exclude them from generic IRQ autoprobing. */
	irq_set_status_flags(irq, IRQ_NOPROBE);
	gic_unmask_irq(irq_get_irq_data(irq));
	local_irq_restore(flags);
}

#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long map = *cpus_addr(*mask);
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
+6 −0
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#define CPUID_MPIDR	5

#define CPUID_EXT_PFR0	"c1, 0"
#define CPUID_EXT_PFR1	"c1, 1"
@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
	return read_cpuid(CPUID_TCM);
}

/*
 * Read the MPIDR (Multiprocessor Affinity) register via the CP15 CPUID
 * accessor (CPUID_MPIDR).  The MPIDR encodes the affinity fields used to
 * describe the CPU topology of the system.
 */
static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
	return read_cpuid(CPUID_MPIDR);
}

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
+0 −7
Original line number Diff line number Diff line
@@ -25,13 +25,6 @@
	movne	r1, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r2, r6, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#endif
#endif
9997:
	.endm
+19 −0
Original line number Diff line number Diff line
/*
 * Annotations for marking C functions as exception handlers.
 *
 * These should only be used for C functions that are called from the low
 * level exception entry code and not any intervening C code.
 */
#ifndef __ASM_ARM_EXCEPTION_H
#define __ASM_ARM_EXCEPTION_H

/* Needed for __irq_entry when CONFIG_FUNCTION_GRAPH_TRACER is set. */
#include <linux/ftrace.h>

/* Place exception handlers in their own ".exception.text" section. */
#define __exception	__attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * With the function graph tracer enabled, use ftrace's __irq_entry
 * annotation so IRQ entry points are tracked; otherwise fall back to
 * the plain section placement above.
 */
#define __exception_irq_entry	__irq_entry
#else
#define __exception_irq_entry	__exception
#endif

#endif /* __ASM_ARM_EXCEPTION_H */
Loading