Commit 27a513ca authored by Russell King

Merge branch 'devel-stable' into for-next

Conflicts:
	arch/arm/kernel/perf_event_cpu.c
parents 970d96f9 bcc8fa83
arch/arm/common/mcpm_entry.c +126 −155
@@ -20,6 +20,126 @@
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arm/cluster-pm-race-avoidance.txt.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTRL.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTRL.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

static int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}
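
The helpers above implement only the outbound (teardown) side of the race-avoidance protocol; the inbound side is handled in early assembly (mcpm_head.S). Purely to illustrate how the two sides pair up over the wfe()/sev() and sync_cache_*() calls, a hypothetical C rendition of the inbound checks might look like this (a sketch, not part of this commit):

static bool __mcpm_inbound_enter_sketch(unsigned int cluster)
{
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Advertise that this CPU is coming up before looking around: */
	c->inbound = INBOUND_COMING_UP;
	sync_cache_w(&c->inbound);
	sev();

	/*
	 * Wait for any outbound "last man" to either finish tearing the
	 * cluster down or back out of its critical section:
	 */
	sync_cache_r(&c->cluster);
	while (c->cluster == CLUSTER_GOING_DOWN) {
		wfe();
		sync_cache_r(&c->cluster);
	}

	/*
	 * CLUSTER_DOWN: this CPU is the first man and must set the
	 * cluster up; CLUSTER_UP: it can simply join the live cluster.
	 */
	return c->cluster == CLUSTER_DOWN;
}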

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
	bool cpu_is_down, cluster_is_down;
	int ret = 0;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();

	/* backward compatibility callback */
	if (platform_ops->power_up)
		return platform_ops->power_up(cpu, cluster);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
	bool cpu_going_down, last_man;
	phys_reset_t phys_reset;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (WARN_ON_ONCE(!platform_ops))
	       return;
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	/* backward compatibility callback */
	if (platform_ops->power_down) {
		platform_ops->power_down();
		goto not_dead;
	}

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
	if (cpu_going_down)
		wfi();

not_dead:
	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
	return ret;
}

void mcpm_cpu_suspend(u64 expected_residency)
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* backward compatibility callback */
	if (platform_ops->suspend) {
		phys_reset_t phys_reset;
		BUG_ON(!irqs_disabled());
		setup_mm_for_reboot();
		platform_ops->suspend(expected_residency);
		phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
		phys_reset(virt_to_phys(mcpm_entry_point));
		BUG();
	}

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
@@ -256,12 +347,6 @@ int mcpm_cpu_powered_up(void)
	if (!platform_ops)
		return -EUNATCH;

	/* backward compatibility callback */
	if (platform_ops->powered_up) {
		platform_ops->powered_up();
		return 0;
	}

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
@@ -334,120 +419,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))

#endif

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTRL.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTRL.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
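
The public entry points in this file (mcpm_set_entry_vector(), mcpm_cpu_power_up(), mcpm_cpu_power_down(), ...) are normally driven from a platform's SMP operations; mcpm_smp_set_ops() installs a generic set. A rough sketch of such a caller, with placeholder foo_ names (illustration only, not part of this commit):

static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	extern void secondary_startup(void);
	unsigned int mpidr = cpu_logical_map(cpu);
	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	/* Tell the target CPU where to jump once it is powered up ... */
	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	/* ... then ask the MCPM backend to actually power it up. */
	return mcpm_cpu_power_up(pcpu, pcluster);
}

static void foo_cpu_die(unsigned int cpu)
{
	/* Only returns if a concurrent power-up request won the race. */
	mcpm_cpu_power_down();
}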
arch/arm/include/asm/mcpm.h +28 −45
@@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
/**
 * mcpm_cpu_suspend - bring the calling CPU in a suspended state
 *
 * @expected_residency: duration in microseconds the CPU is expected
 *			to remain suspended, or 0 if unknown/infinity.
 *
 * The calling CPU is suspended.  The expected residency argument is used
 * as a hint by the platform specific backend to implement the appropriate
 * sleep state level according to the knowledge it has on wake-up latency
 * for the given hardware.
 * The calling CPU is suspended.  This is similar to mcpm_cpu_power_down()
 * except for possible extra platform specific configuration steps to allow
 * an asynchronous wake-up e.g. with a pending interrupt.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster may be prepared for power-down too, if the expected
 * residency makes it worthwhile.
 * then the cluster may be prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
@@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
 * This will return if mcpm_platform_register() has not been called
 * previously in which case the caller should take appropriate action.
 */
void mcpm_cpu_suspend(u64 expected_residency);
void mcpm_cpu_suspend(void);
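
A hedged illustration of what the prototype change means for callers (the wrapper below is a placeholder, not a real driver):

static void foo_enter_deep_idle(void)
{
	/*
	 * Old API: callers passed a residency hint in microseconds,
	 * 0 meaning unknown/infinite:
	 *
	 *	mcpm_cpu_suspend(0);
	 *
	 * New API: no hint is passed; any platform specific wake-up
	 * setup now happens in the backend's cpu_suspend_prepare()
	 * callback.  Interrupts must already be disabled here.
	 */
	mcpm_cpu_suspend();
}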

/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
@@ -234,12 +229,6 @@ struct mcpm_platform_ops {
	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
	void (*cluster_is_up)(unsigned int cluster);
	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);

	/* deprecated callbacks */
	int (*power_up)(unsigned int cpu, unsigned int cluster);
	void (*power_down)(void);
	void (*suspend)(u64);
	void (*powered_up)(void);
};

/**
@@ -251,35 +240,6 @@ struct mcpm_platform_ops {
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);

/* Synchronisation structures for coordinating safe cluster setup/teardown: */

/*
 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
 * to match.
 */
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};

void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
int __mcpm_cluster_state(unsigned int cluster);

/**
 * mcpm_sync_init - Initialize the cluster synchronization support
 *
@@ -318,6 +278,29 @@ int __init mcpm_loopback(void (*cache_disable)(void));

void __init mcpm_smp_set_ops(void);

/*
 * Synchronisation structures for coordinating safe cluster setup/teardown.
 * This is private to the MCPM core code and shared between C and assembly.
 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
 * to match.
 */
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};
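
For reference, the MCPM_SYNC_ defines that the comment above asks to be kept in step live further down this header, in the assembly-only section, and look roughly like this (quoted from memory, not part of this hunk):

#define MCPM_SYNC_CLUSTER_CPUS		0
#define MCPM_SYNC_CPU_SIZE		__CACHE_WRITEBACK_GRANULE
#define MCPM_SYNC_CLUSTER_CLUSTER \
	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define MCPM_SYNC_CLUSTER_INBOUND \
	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
#define MCPM_SYNC_CLUSTER_SIZE \
	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)

Each field is padded to a full __CACHE_WRITEBACK_GRANULE so that CPUs running with and without their caches enabled can clean or invalidate one state flag without clobbering its neighbours.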

#else

/* 
arch/arm/include/asm/perf_event.h +7 −0
@@ -19,4 +19,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#endif

#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->ARM_pc = (__ip); \
	(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
	(regs)->ARM_sp = current_stack_pointer; \
	(regs)->ARM_cpsr = SVC_MODE; \
}

#endif /* __ARM_PERF_EVENT_H__ */
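
perf_arch_fetch_caller_regs() fabricates a minimal pt_regs snapshot describing the call site, for software events that have no real exception frame; the perf core reaches it through perf_fetch_caller_regs(). A hedged usage sketch (illustrative function, not part of this commit):

static void example_caller_regs(void)
{
	struct pt_regs regs;

	/* Synthesize a register snapshot for this very call site. */
	perf_arch_fetch_caller_regs(&regs, _THIS_IP_);
	pr_debug("synthetic pc=%lx sp=%lx\n", regs.ARM_pc, regs.ARM_sp);
}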
arch/arm/include/asm/pmu.h +5 −14
@@ -24,22 +24,10 @@
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};

#ifdef CONFIG_HW_PERF_EVENTS
@@ -92,6 +80,7 @@ struct pmu_hw_events {
struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	cpumask_t	supported_cpus;
	int		*irq_affinity;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
@@ -122,8 +111,6 @@ struct arm_pmu {

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

extern const struct dev_pm_ops armpmu_dev_pm_ops;

int armpmu_register(struct arm_pmu *armpmu, int type);

u64 armpmu_event_update(struct perf_event *event);
@@ -158,6 +145,10 @@ struct pmu_probe_info {
#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */
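
arm_pmu_device_probe() lets the CPU PMU code register as an ordinary platform driver and pick its init function from a DT match table (or, failing that, from a CPUID-based probe table). A rough, hypothetical sketch of a user; the driver name, compatible string and init function are placeholders, and the NULL probe table assumes a DT-only platform:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static int foo_pmu_init(struct arm_pmu *pmu)
{
	pmu->name = "foo_pmu";
	/* fill in ->handle_irq, ->map_event, counter accessors, etc. */
	return 0;
}

static const struct of_device_id foo_pmu_of_ids[] = {
	{ .compatible = "foo,foo-pmu", .data = foo_pmu_init },
	{ /* sentinel */ }
};

static int foo_pmu_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, foo_pmu_of_ids, NULL);
}

static struct platform_driver foo_pmu_driver = {
	.driver = {
		.name		= "foo-pmu",
		.of_match_table	= foo_pmu_of_ids,
	},
	.probe = foo_pmu_probe,
};

static int __init foo_pmu_driver_init(void)
{
	return platform_driver_register(&foo_pmu_driver);
}
device_initcall(foo_pmu_driver_init);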
arch/arm/kernel/Makefile +3 −1
@@ -71,7 +71,9 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
				   perf_event_xscale.o perf_event_v6.o \
				   perf_event_v7.o
CFLAGS_pj4-cp0.o		:= -marm
AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o