Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 667832da authored by Russell King's avatar Russell King
Browse files

Merge branch 'perf/updates' of...

Merge branch 'perf/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
parents 3587b1b0 2ac29a14
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -21,4 +21,9 @@
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF
#define CACHE_OP_UNSUPPORTED		0xFFFF


struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#endif /* __ARM_PERF_EVENT_H__ */
#endif /* __ARM_PERF_EVENT_H__ */
+12 −16
Original line number Original line Diff line number Diff line
@@ -67,19 +67,19 @@ struct arm_pmu {
	cpumask_t	active_irqs;
	cpumask_t	active_irqs;
	char		*name;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct hw_perf_event *hwc);
					 struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
					    struct perf_event_attr *attr);
	u32		(*read_counter)(int idx);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(int idx, u32 val);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(void);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(void);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	void		(*reset)(void *);
	int		(*request_irq)(irq_handler_t handler);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(void);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	int		num_events;
	atomic_t	active_events;
	atomic_t	active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {


extern const struct dev_pm_ops armpmu_dev_pm_ops;
extern const struct dev_pm_ops armpmu_dev_pm_ops;


int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
int armpmu_register(struct arm_pmu *armpmu, int type);


u64 armpmu_event_update(struct perf_event *event,
u64 armpmu_event_update(struct perf_event *event);
			struct hw_perf_event *hwc,
			int idx);


int armpmu_event_set_period(struct perf_event *event,
int armpmu_event_set_period(struct perf_event *event);
			    struct hw_perf_event *hwc,
			    int idx);


int armpmu_map_event(struct perf_event *event,
int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+58 −27
Original line number Original line Diff line number Diff line
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event,
	return -ENOENT;
	return -ENOENT;
}
}


int
int armpmu_event_set_period(struct perf_event *event)
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	s64 period = hwc->sample_period;
	int ret = 0;
	int ret = 0;
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,


	local64_set(&hwc->prev_count, (u64)-left);
	local64_set(&hwc->prev_count, (u64)-left);


	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);


	perf_event_update_userpage(event);
	perf_event_update_userpage(event);


	return ret;
	return ret;
}
}


u64
u64 armpmu_event_update(struct perf_event *event)
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 delta, prev_raw_count, new_raw_count;


again:
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);
	new_raw_count = armpmu->read_counter(event);


	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
			     new_raw_count) != prev_raw_count)
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event)
	if (hwc->idx < 0)
	if (hwc->idx < 0)
		return;
		return;


	armpmu_event_update(event, hwc, hwc->idx);
	armpmu_event_update(event);
}
}


static void
static void
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags)
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		armpmu->disable(event);
		armpmu_event_update(event, hwc, hwc->idx);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
	}
}
}


static void
static void armpmu_start(struct perf_event *event, int flags)
armpmu_start(struct perf_event *event, int flags)
{
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event *hwc = &event->hw;
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags)
	 * get an interrupt too soon or *way* too late if the overflow has
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 * happened since disabling.
	 */
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu_event_set_period(event);
	armpmu->enable(hwc, hwc->idx);
	armpmu->enable(event);
}
}


static void
static void
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags)
	perf_pmu_disable(event->pmu);
	perf_pmu_disable(event->pmu);


	/* If we don't have a space for the counter then finish early. */
	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
	if (idx < 0) {
		err = idx;
		err = idx;
		goto out;
		goto out;
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags)
	 * sure it is disabled.
	 * sure it is disabled.
	 */
	 */
	event->hw.idx = idx;
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	armpmu->disable(event);
	hw_events->events[idx] = event;
	hw_events->events[idx] = event;


	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
	       struct perf_event *event)
{
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;
	struct pmu *leader_pmu = event->group_leader->pmu;


	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;
		return 1;


	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
	return armpmu->get_event_idx(hw_events, event) >= 0;
}
}


static int
static int
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
static void
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
armpmu_release_hardware(struct arm_pmu *armpmu)
{
{
	armpmu->free_irq();
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}
}


@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
		return -ENODEV;
		return -ENODEV;


	pm_runtime_get_sync(&pmu_device->dev);
	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu_dispatch_irq);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
	if (err) {
		armpmu_release_hardware(armpmu);
		armpmu_release_hardware(armpmu);
		return err;
		return err;
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu)
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);


	if (enabled)
	if (enabled)
		armpmu->start();
		armpmu->start(armpmu);
}
}


static void armpmu_disable(struct pmu *pmu)
static void armpmu_disable(struct pmu *pmu)
{
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
	armpmu->stop(armpmu);
}
}


#ifdef CONFIG_PM_RUNTIME
#ifdef CONFIG_PM_RUNTIME
@@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu)
	};
	};
}
}


int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
int armpmu_register(struct arm_pmu *armpmu, int type)
{
{
	armpmu_init(armpmu);
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, name, type);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
}


/*
/*
@@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
{
	struct frame_tail __user *tail;
	struct frame_tail __user *tail;


	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}


	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
	tail = (struct frame_tail __user *)regs->ARM_fp - 1;


@@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
{
	struct stackframe fr;
	struct stackframe fr;


	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
	walk_stackframe(&fr, callchain_trace, entry);
}
}

/*
 * Return the instruction pointer to record for a perf sample.  If the
 * sample was taken while a virtual machine guest was running, ask the
 * registered guest callbacks for the guest IP; otherwise use the host
 * pt_regs.  NOTE(review): assumes perf_guest_cbs, once non-NULL, stays
 * valid for the duration of the call — confirm against the registration
 * side.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

/*
 * Classify the context a perf sample was taken in, for the
 * PERF_RECORD_MISC_* field of the sample: guest vs host, and user vs
 * kernel mode within each.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* Guest was running: the callbacks report the guest's mode. */
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		/* Host sample: derive the mode from the trapped registers. */
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}
+49 −25
Original line number Original line Diff line number Diff line
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/spinlock.h>


#include <asm/cputype.h>
#include <asm/cputype.h>
@@ -45,7 +46,7 @@ const char *perf_pmu_name(void)
	if (!cpu_pmu)
	if (!cpu_pmu)
		return NULL;
		return NULL;


	return cpu_pmu->pmu.name;
	return cpu_pmu->name;
}
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
EXPORT_SYMBOL_GPL(perf_pmu_name);


@@ -70,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
	return &__get_cpu_var(cpu_hw_events);
	return &__get_cpu_var(cpu_hw_events);
}
}


static void cpu_pmu_free_irq(void)
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
{
	int i, irq, irqs;
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -86,7 +87,7 @@ static void cpu_pmu_free_irq(void)
	}
	}
}
}


static int cpu_pmu_request_irq(irq_handler_t handler)
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
{
	int i, err, irq, irqs;
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -147,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)


	/* Ensure the PMU has sane values out of reset. */
	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu && cpu_pmu->reset)
	if (cpu_pmu && cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, NULL, 1);
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}
}


/*
/*
@@ -163,7 +164,9 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
		return NOTIFY_DONE;
		return NOTIFY_DONE;


	if (cpu_pmu && cpu_pmu->reset)
	if (cpu_pmu && cpu_pmu->reset)
		cpu_pmu->reset(NULL);
		cpu_pmu->reset(cpu_pmu);
	else
		return NOTIFY_DONE;


	return NOTIFY_OK;
	return NOTIFY_OK;
}
}
@@ -195,13 +198,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
/*
/*
 * CPU PMU identification and probing.
 * CPU PMU identification and probing.
 */
 */
static struct arm_pmu *__devinit probe_current_pmu(void)
static int __devinit probe_current_pmu(struct arm_pmu *pmu)
{
{
	struct arm_pmu *pmu = NULL;
	int cpu = get_cpu();
	int cpu = get_cpu();
	unsigned long cpuid = read_cpuid_id();
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);
	unsigned long part_number = (cpuid & 0xFFF0);
	int ret = -ENODEV;


	pr_info("probing PMU on CPU %d\n", cpu);
	pr_info("probing PMU on CPU %d\n", cpu);


@@ -211,25 +214,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
		case 0xB360:	/* ARM1136 */
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
		case 0xB760:	/* ARM1176 */
			pmu = armv6pmu_init();
			ret = armv6pmu_init(pmu);
			break;
			break;
		case 0xB020:	/* ARM11mpcore */
		case 0xB020:	/* ARM11mpcore */
			pmu = armv6mpcore_pmu_init();
			ret = armv6mpcore_pmu_init(pmu);
			break;
			break;
		case 0xC080:	/* Cortex-A8 */
		case 0xC080:	/* Cortex-A8 */
			pmu = armv7_a8_pmu_init();
			ret = armv7_a8_pmu_init(pmu);
			break;
			break;
		case 0xC090:	/* Cortex-A9 */
		case 0xC090:	/* Cortex-A9 */
			pmu = armv7_a9_pmu_init();
			ret = armv7_a9_pmu_init(pmu);
			break;
			break;
		case 0xC050:	/* Cortex-A5 */
		case 0xC050:	/* Cortex-A5 */
			pmu = armv7_a5_pmu_init();
			ret = armv7_a5_pmu_init(pmu);
			break;
			break;
		case 0xC0F0:	/* Cortex-A15 */
		case 0xC0F0:	/* Cortex-A15 */
			pmu = armv7_a15_pmu_init();
			ret = armv7_a15_pmu_init(pmu);
			break;
			break;
		case 0xC070:	/* Cortex-A7 */
		case 0xC070:	/* Cortex-A7 */
			pmu = armv7_a7_pmu_init();
			ret = armv7_a7_pmu_init(pmu);
			break;
			break;
		}
		}
	/* Intel CPUs [xscale]. */
	/* Intel CPUs [xscale]. */
@@ -237,43 +240,54 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
		part_number = (cpuid >> 13) & 0x7;
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		switch (part_number) {
		case 1:
		case 1:
			pmu = xscale1pmu_init();
			ret = xscale1pmu_init(pmu);
			break;
			break;
		case 2:
		case 2:
			pmu = xscale2pmu_init();
			ret = xscale2pmu_init(pmu);
			break;
			break;
		}
		}
	}
	}


	put_cpu();
	put_cpu();
	return pmu;
	return ret;
}
}


static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
{
{
	const struct of_device_id *of_id;
	const struct of_device_id *of_id;
	struct arm_pmu *(*init_fn)(void);
	int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;


	if (cpu_pmu) {
	if (cpu_pmu) {
		pr_info("attempt to register multiple PMU devices!");
		pr_info("attempt to register multiple PMU devices!");
		return -ENOSPC;
		return -ENOSPC;
	}
	}


	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!");
		return -ENOMEM;
	}

	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
		init_fn = of_id->data;
		init_fn = of_id->data;
		cpu_pmu = init_fn();
		ret = init_fn(pmu);
	} else {
	} else {
		cpu_pmu = probe_current_pmu();
		ret = probe_current_pmu(pmu);
	}
	}


	if (!cpu_pmu)
	if (ret) {
		return -ENODEV;
		pr_info("failed to register PMU devices!");
		kfree(pmu);
		return ret;
	}


	cpu_pmu = pmu;
	cpu_pmu->plat_device = pdev;
	cpu_pmu->plat_device = pdev;
	cpu_pmu_init(cpu_pmu);
	cpu_pmu_init(cpu_pmu);
	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
	armpmu_register(cpu_pmu, PERF_TYPE_RAW);
	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);


	return 0;
	return 0;
}
}
@@ -290,6 +304,16 @@ static struct platform_driver cpu_pmu_driver = {


static int __init register_pmu_driver(void)
static int __init register_pmu_driver(void)
{
{
	return platform_driver_register(&cpu_pmu_driver);
	int err;

	err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
	if (err)
		return err;

	err = platform_driver_register(&cpu_pmu_driver);
	if (err)
		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

	return err;
}
}
device_initcall(register_pmu_driver);
device_initcall(register_pmu_driver);
+63 −63
Original line number Original line Diff line number Diff line
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
	return ret;
	return ret;
}
}


static inline u32
static inline u32 armv6pmu_read_counter(struct perf_event *event)
armv6pmu_read_counter(int counter)
{
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	unsigned long value = 0;
	unsigned long value = 0;


	if (ARMV6_CYCLE_COUNTER == counter)
	if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
	return value;
	return value;
}
}


static inline void
static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
armv6pmu_write_counter(int counter,
		       u32 value)
{
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == counter)
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
	else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
}


static void
static void armv6pmu_enable_event(struct perf_event *event)
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
{
	unsigned long val, mask, evt, flags;
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;


	if (ARMV6_CYCLE_COUNTER == idx) {
	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		mask	= 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
{
{
	unsigned long pmcr = armv6_pmcr_read();
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	struct pt_regs *regs;
	int idx;
	int idx;


@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
	 */
	 */
	armv6_pmcr_write(pmcr);
	armv6_pmcr_write(pmcr);


	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;
		struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
			continue;
			continue;


		hwc = &event->hw;
		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
		if (!armpmu_event_set_period(event))
			continue;
			continue;


		if (perf_event_overflow(event, &data, regs))
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
			cpu_pmu->disable(event);
	}
	}


	/*
	/*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
	return IRQ_HANDLED;
	return IRQ_HANDLED;
}
}


static void
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
armv6pmu_start(void)
{
{
	unsigned long flags, val;
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
}


static void
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
armv6pmu_stop(void)
{
{
	unsigned long flags, val;
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)


static int
static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct hw_perf_event *event)
				struct perf_event *event)
{
{
	struct hw_perf_event *hwc = &event->hw;
	/* Always place a cycle counter into the cycle counter. */
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;
			return -EAGAIN;


@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
	}
	}
}
}


static void
static void armv6pmu_disable_event(struct perf_event *event)
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
{
	unsigned long val, mask, evt, flags;
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;


	if (ARMV6_CYCLE_COUNTER == idx) {
	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
}


static void
static void armv6mpcore_pmu_disable_event(struct perf_event *event)
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
{
	unsigned long val, mask, flags, evt = 0;
	unsigned long val, mask, flags, evt = 0;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;


	if (ARMV6_CYCLE_COUNTER == idx) {
	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event)
				&armv6_perf_cache_map, 0xFF);
				&armv6_perf_cache_map, 0xFF);
}
}


static struct arm_pmu armv6pmu = {
static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
	.name			= "v6",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.map_event		= armv6_map_event,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

static struct arm_pmu *__devinit armv6pmu_init(void)
{
{
	return &armv6pmu;
	cpu_pmu->name		= "v6";
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}
}


/*
/*
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
				&armv6mpcore_perf_cache_map, 0xFF);
				&armv6mpcore_perf_cache_map, 0xFF);
}
}


static struct arm_pmu armv6mpcore_pmu = {
static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
	.name			= "v6mpcore",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6mpcore_pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.map_event		= armv6mpcore_map_event,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
{
{
	return &armv6mpcore_pmu;
	cpu_pmu->name		= "v6mpcore";
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6mpcore_pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6mpcore_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}
}
#else
#else
static struct arm_pmu *__devinit armv6pmu_init(void)
static int armv6pmu_init(struct arm_pmu *cpu_pmu)
{
{
	return NULL;
	return -ENODEV;
}
}


static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
{
	return NULL;
	return -ENODEV;
}
}
#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
Loading