
Commit ef790fe0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc: using HZ needs an include of linux/param.h
  sparc32: convert to asm-generic/hardirq.h
  sparc64: Cache per-cpu %pcr register value in perf code.
  sparc64: Fix comment typo in perf_event.c
  sparc64: Minor coding style fixups in perf code.
  sparc64: Add a basic conflict engine in preparation for multi-counter support.
  sparc64: Increase vmalloc size to fix percpu regressions.
  sparc64: Add initial perf event conflict resolution and checks.
  sparc: Niagara1 perf event support.
  sparc: Add Niagara2 HW cache event support.
  sparc: Support all ultra3 and ultra4 derivatives.
  sparc: Support HW cache events.
parents ed3c6614 c4a57435
+1 −11
@@ -7,17 +7,7 @@
 #ifndef __SPARC_HARDIRQ_H
 #define __SPARC_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
 #define HARDIRQ_BITS    8
+#include <asm-generic/hardirq.h>
 
 #endif /* __SPARC_HARDIRQ_H */
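Note: the conversion is safe because asm-generic/hardirq.h supplies the same minimal irq_cpustat_t that this sparc32 header used to open-code, while HARDIRQ_BITS stays defined here before the include. A rough sketch of what the generic header provided around 2.6.32 (approximate, from memory; see include/asm-generic/hardirq.h in the tree for the authoritative version):

/* include/asm-generic/hardirq.h, approximate 2.6.32-era contents */
#ifndef __ASM_GENERIC_HARDIRQ_H
#define __ASM_GENERIC_HARDIRQ_H

#include <linux/cache.h>
#include <linux/threads.h>

typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

#endif /* __ASM_GENERIC_HARDIRQ_H */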
+2 −2
@@ -6,10 +6,10 @@
 #ifndef _SPARC_IRQ_H
 #define _SPARC_IRQ_H
 
-#include <linux/interrupt.h>
-
 #define NR_IRQS    16
 
+#include <linux/interrupt.h>
+
 #define irq_canonicalize(irq)	(irq)
 
 extern void __init init_IRQ(void);
+2 −2
@@ -41,8 +41,8 @@
 #define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
 #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
 #define VMALLOC_START		_AC(0x0000000100000000,UL)
-#define VMALLOC_END		_AC(0x0000000200000000,UL)
-#define VMEMMAP_BASE		_AC(0x0000000200000000,UL)
+#define VMALLOC_END		_AC(0x0000010000000000,UL)
+#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)
 
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
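Note: VMALLOC_START stays at 4GB while VMALLOC_END jumps from 8GB to 1TB, growing the vmalloc window from 4GB to roughly 1TB; this is the "Increase vmalloc size to fix percpu regressions" change from the shortlog, since the dynamic percpu allocator carves its chunks out of this window. A standalone check of the arithmetic (ordinary user-space C, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long long start   = 0x0000000100000000ULL; /* VMALLOC_START */
	unsigned long long old_end = 0x0000000200000000ULL; /* old VMALLOC_END */
	unsigned long long new_end = 0x0000010000000000ULL; /* new VMALLOC_END */

	printf("old vmalloc window: %llu GB\n", (old_end - start) >> 30); /* 4 */
	printf("new vmalloc window: %llu GB\n", (new_end - start) >> 30); /* 1020 */
	return 0;
}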
+4 −4
@@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear:
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/* Do not use the TSB for vmemmap.  */
-	mov		(VMEMMAP_BASE >> 24), %g5
-	sllx		%g5, 24, %g5
+	mov		(VMEMMAP_BASE >> 40), %g5
+	sllx		%g5, 40, %g5
 	cmp		%g4,%g5
 	bgeu,pn		%xcc, kvmap_vmemmap
 	 nop
@@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss:
 	sethi		%hi(MODULES_VADDR), %g5
 	cmp		%g4, %g5
 	blu,pn		%xcc, kvmap_dtlb_longpath
-	 mov		(VMALLOC_END >> 24), %g5
-	sllx		%g5, 24, %g5
+	 mov		(VMALLOC_END >> 40), %g5
+	sllx		%g5, 40, %g5
 	cmp		%g4, %g5
 	bgeu,pn		%xcc, kvmap_dtlb_longpath
 	 nop
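Note: these constants are materialized as a small immediate plus a shift, so the shift width must track the alignment of the constant. SPARC's mov expands to or %g0, imm, reg with a 13-bit signed immediate; (C >> N) << N only rebuilds C when its low N bits are zero. With the new 1TB VMALLOC_END/VMEMMAP_BASE, a 24-bit shift would leave an immediate of 0x1000000, far outside the mov range, while a 40-bit shift leaves 1; hence 24 becomes 40 in lock-step with the pgtable_64.h change above. A quick standalone check (plain C, illustrative only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long c = 0x0000010000000000ULL; /* new VMALLOC_END/VMEMMAP_BASE */

	/* "mov (c >> 40), %g5; sllx %g5, 40, %g5" must rebuild c exactly,
	 * and the mov immediate must fit a 13-bit signed field. */
	assert(((c >> 40) << 40) == c);
	assert((c >> 40) <= 4095);

	printf("immediate with 24-bit shift: %#llx (too big)\n", c >> 24);
	printf("immediate with 40-bit shift: %#llx (fits)\n", c >> 40);
	return 0;
}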
+537 −40
@@ -56,6 +56,7 @@ struct cpu_hw_events {
 	struct perf_event	*events[MAX_HWEVENTS];
 	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
 	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+	u64			pcr;
 	int			enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
@@ -68,8 +69,30 @@ struct perf_event_map {
 #define PIC_LOWER	0x02
 };
 
+static unsigned long perf_event_encode(const struct perf_event_map *pmap)
+{
+	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
+}
+
+static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
+{
+	*msk = val & 0xff;
+	*enc = val >> 16;
+}
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+#define CACHE_OP_UNSUPPORTED	0xfffe
+#define CACHE_OP_NONSENSE	0xffff
+
+typedef struct perf_event_map cache_map_t
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX];
+
 struct sparc_pmu {
 	const struct perf_event_map	*(*event_map)(int);
+	const cache_map_t		*cache_map;
 	int				max_events;
 	int				upper_shift;
 	int				lower_shift;
@@ -80,21 +103,109 @@ struct sparc_pmu {
 	int				lower_nop;
 };
 
-static const struct perf_event_map ultra3i_perfmon_event_map[] = {
+static const struct perf_event_map ultra3_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 };
 
-static const struct perf_event_map *ultra3i_event_map(int event_id)
+static const struct perf_event_map *ultra3_event_map(int event_id)
 {
-	return &ultra3i_perfmon_event_map[event_id];
+	return &ultra3_perfmon_event_map[event_id];
 }
 
-static const struct sparc_pmu ultra3i_pmu = {
-	.event_map	= ultra3i_event_map,
-	.max_events	= ARRAY_SIZE(ultra3i_perfmon_event_map),
+static const cache_map_t ultra3_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
+static const struct sparc_pmu ultra3_pmu = {
+	.event_map	= ultra3_event_map,
+	.cache_map	= &ultra3_cache_map,
+	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
 	.upper_shift	= 11,
 	.lower_shift	= 4,
 	.event_mask	= 0x3f,
@@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = {
 	.lower_nop	= 0x14,
 };
 
+/* Niagara1 is very limited.  The upper PIC is hard-locked to count
+ * only instructions, so it is free running which creates all kinds of
+ * problems.  Some hardware designs make one wonder if the creator
+ * even looked at how this stuff gets used by software.
+ */
+static const struct perf_event_map niagara1_perfmon_event_map[] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
+};
+
+static const struct perf_event_map *niagara1_event_map(int event_id)
+{
+	return &niagara1_perfmon_event_map[event_id];
+}
+
+static const cache_map_t niagara1_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
+		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
+static const struct sparc_pmu niagara1_pmu = {
+	.event_map	= niagara1_event_map,
+	.cache_map	= &niagara1_cache_map,
+	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
+	.upper_shift	= 0,
+	.lower_shift	= 4,
+	.event_mask	= 0x7,
+	.upper_nop	= 0x0,
+	.lower_nop	= 0x0,
+};
+
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id)
 	return &niagara2_perfmon_event_map[event_id];
 }
 
+static const cache_map_t niagara2_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
 static const struct sparc_pmu niagara2_pmu = {
 	.event_map	= niagara2_event_map,
+	.cache_map	= &niagara2_cache_map,
 	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
 	.upper_shift	= 19,
 	.lower_shift	= 6,
@@ -151,23 +465,30 @@ static u64 nop_for_index(int idx)
 			      sparc_pmu->lower_nop, idx);
 }
 
-static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
-					    int idx)
+static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
 	u64 val, mask = mask_for_index(idx);
 
-	val = pcr_ops->read();
-	pcr_ops->write((val & ~mask) | hwc->config);
+	val = cpuc->pcr;
+	val &= ~mask;
+	val |= hwc->config;
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
-static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
-					     int idx)
+static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
 	u64 mask = mask_for_index(idx);
 	u64 nop = nop_for_index(idx);
-	u64 val = pcr_ops->read();
+	u64 val;
+
+	val = cpuc->pcr;
+	val &= ~mask;
+	val |= nop;
+	cpuc->pcr = val;
 
-	pcr_ops->write((val & ~mask) | nop);
+	pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_enable(void)
@@ -182,7 +503,7 @@ void hw_perf_enable(void)
 	cpuc->enabled = 1;
 	barrier();
 
-	val = pcr_ops->read();
+	val = cpuc->pcr;
 
 	for (i = 0; i < MAX_HWEVENTS; i++) {
 		struct perf_event *cp = cpuc->events[i];
@@ -194,7 +515,9 @@ void hw_perf_enable(void)
 		val |= hwc->config_base;
 	}
 
-	pcr_ops->write(val);
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_disable(void)
@@ -207,10 +530,12 @@
 
 	cpuc->enabled = 0;
 
-	val = pcr_ops->read();
+	val = cpuc->pcr;
 	val &= ~(PCR_UTRACE | PCR_STRACE |
 		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-	pcr_ops->write(val);
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
 static u32 read_pmc(int idx)
@@ -282,13 +607,13 @@ static int sparc_pmu_enable(struct perf_event *event)
 	if (test_and_set_bit(idx, cpuc->used_mask))
 		return -EAGAIN;
 
-	sparc_pmu_disable_event(hwc, idx);
+	sparc_pmu_disable_event(cpuc, hwc, idx);
 
 	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);
 
 	sparc_perf_event_set_period(event, hwc, idx);
-	sparc_pmu_enable_event(hwc, idx);
+	sparc_pmu_enable_event(cpuc, hwc, idx);
 	perf_event_update_userpage(event);
 	return 0;
 }
@@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event)
 	int idx = hwc->idx;
 
 	clear_bit(idx, cpuc->active_mask);
-	sparc_pmu_disable_event(hwc, idx);
+	sparc_pmu_disable_event(cpuc, hwc, idx);
 
 	barrier();
 
@@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event)
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+
 	sparc_perf_event_update(event, hwc, hwc->idx);
 }
 
 static void sparc_pmu_unthrottle(struct perf_event *event)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	sparc_pmu_enable_event(hwc, hwc->idx);
+
+	sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
+static void perf_stop_nmi_watchdog(void *unused)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	stop_nmi_watchdog(NULL);
+	cpuc->pcr = pcr_ops->read();
+}
+
 void perf_event_grab_pmc(void)
 {
 	if (atomic_inc_not_zero(&active_events))
@@ -358,7 +694,7 @@ void perf_event_grab_pmc(void)
 	mutex_lock(&pmc_grab_mutex);
 	if (atomic_read(&active_events) == 0) {
 		if (atomic_read(&nmi_active) > 0) {
-			on_each_cpu(stop_nmi_watchdog, NULL, 1);
+			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
 			BUG_ON(atomic_read(&nmi_active) != 0);
 		}
 		atomic_inc(&active_events);
@@ -375,29 +711,159 @@
 	}
 }
 
+static const struct perf_event_map *sparc_map_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+	const struct perf_event_map *pmap;
+
+	if (!sparc_pmu->cache_map)
+		return ERR_PTR(-ENOENT);
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
+
+	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
+		return ERR_PTR(-ENOENT);
+
+	if (pmap->encoding == CACHE_OP_NONSENSE)
+		return ERR_PTR(-EINVAL);
+
+	return pmap;
+}
+
 static void hw_perf_event_destroy(struct perf_event *event)
 {
 	perf_event_release_pmc();
 }
 
+/* Make sure all events can be scheduled into the hardware at
+ * the same time.  This is simplified by the fact that we only
+ * need to support 2 simultaneous HW events.
+ */
+static int sparc_check_constraints(unsigned long *events, int n_ev)
+{
+	if (n_ev <= perf_max_events) {
+		u8 msk1, msk2;
+		u16 dummy;
+
+		if (n_ev == 1)
+			return 0;
+		BUG_ON(n_ev != 2);
+		perf_event_decode(events[0], &dummy, &msk1);
+		perf_event_decode(events[1], &dummy, &msk2);
+
+		/* If both events can go on any counter, OK.  */
+		if (msk1 == (PIC_UPPER | PIC_LOWER) &&
+		    msk2 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+
+		/* If one event is limited to a specific counter,
+		 * and the other can go on both, OK.
+		 */
+		if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
+		    msk2 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+		if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
+		    msk1 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+
+		/* If the events are fixed to different counters, OK.  */
+		if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
+		    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
+			return 0;
+
+		/* Otherwise, there is a conflict.  */
+	}
+
+	return -1;
+}
+
+static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
+{
+	int eu = 0, ek = 0, eh = 0;
+	struct perf_event *event;
+	int i, n, first;
+
+	n = n_prev + n_new;
+	if (n <= 1)
+		return 0;
+
+	first = 1;
+	for (i = 0; i < n; i++) {
+		event = evts[i];
+		if (first) {
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
+			first = 0;
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
+			return -EAGAIN;
+		}
+	}
+
+	return 0;
+}
+
+static int collect_events(struct perf_event *group, int max_count,
+			  struct perf_event *evts[], unsigned long *events)
+{
+	struct perf_event *event;
+	int n = 0;
+
+	if (!is_software_event(group)) {
+		if (n >= max_count)
+			return -1;
+		evts[n] = group;
+		events[n++] = group->hw.event_base;
+	}
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
+		if (!is_software_event(event) &&
+		    event->state != PERF_EVENT_STATE_OFF) {
+			if (n >= max_count)
+				return -1;
+			evts[n] = event;
+			events[n++] = event->hw.event_base;
+		}
+	}
+	return n;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
+	struct perf_event *evts[MAX_HWEVENTS];
 	struct hw_perf_event *hwc = &event->hw;
+	unsigned long events[MAX_HWEVENTS];
 	const struct perf_event_map *pmap;
 	u64 enc;
+	int n;
 
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	if (attr->type != PERF_TYPE_HARDWARE)
-		return -EOPNOTSUPP;
-
-	if (attr->config >= sparc_pmu->max_events)
-		return -EINVAL;
-
-	perf_event_grab_pmc();
-	event->destroy = hw_perf_event_destroy;
+	if (attr->type == PERF_TYPE_HARDWARE) {
+		if (attr->config >= sparc_pmu->max_events)
+			return -EINVAL;
+		pmap = sparc_pmu->event_map(attr->config);
+	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		pmap = sparc_map_cache_event(attr->config);
+		if (IS_ERR(pmap))
+			return PTR_ERR(pmap);
+	} else
+		return -EOPNOTSUPP;
 
 	/* We save the enable bits in the config_base.  So to
 	 * turn off sampling just write 'config', and to enable
@@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;
 
+	hwc->event_base = perf_event_encode(pmap);
+
+	enc = pmap->encoding;
+
+	n = 0;
+	if (event->group_leader != event) {
+		n = collect_events(event->group_leader,
+				   perf_max_events - 1,
+				   evts, events);
+		if (n < 0)
+			return -EINVAL;
+	}
+	events[n] = hwc->event_base;
+	evts[n] = event;
+
+	if (check_excludes(evts, n, 1))
+		return -EINVAL;
+
+	if (sparc_check_constraints(events, n + 1))
+		return -EINVAL;
+
+	/* Try to do all error checking before this point, as unwinding
+	 * state after grabbing the PMC is difficult.
+	 */
+	perf_event_grab_pmc();
+	event->destroy = hw_perf_event_destroy;
+
 	if (!hwc->sample_period) {
 		hwc->sample_period = MAX_PERIOD;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	pmap = sparc_pmu->event_map(attr->config);
-
-	enc = pmap->encoding;
 	if (pmap->pic_mask & PIC_UPPER) {
 		hwc->idx = PIC_UPPER_INDEX;
 		enc <<= sparc_pmu->upper_shift;
@@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(hwc, idx);
+			sparc_pmu_disable_event(cpuc, hwc, idx);
 	}
 
 	return NOTIFY_STOP;
@@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 
 static bool __init supported_pmu(void)
 {
-	if (!strcmp(sparc_pmu_type, "ultra3i")) {
-		sparc_pmu = &ultra3i_pmu;
+	if (!strcmp(sparc_pmu_type, "ultra3") ||
+	    !strcmp(sparc_pmu_type, "ultra3+") ||
+	    !strcmp(sparc_pmu_type, "ultra3i") ||
+	    !strcmp(sparc_pmu_type, "ultra4+")) {
+		sparc_pmu = &ultra3_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "niagara")) {
+		sparc_pmu = &niagara1_pmu;
+		return true;
+	}
 	if (!strcmp(sparc_pmu_type, "niagara2")) {
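Note: the cache_map tables added above are indexed by the generic perf ABI encoding for PERF_TYPE_HW_CACHE events: attr.config carries the cache id in bits 0-7, the operation in bits 8-15 and the result in bits 16-23, which is exactly what sparc_map_cache_event() unpacks before rejecting CACHE_OP_UNSUPPORTED/CACHE_OP_NONSENSE entries. A minimal user-space sketch of the packing (make_cache_config is a hypothetical helper, not kernel code):

#include <stdio.h>
#include <linux/perf_event.h>

/* Hypothetical helper mirroring the unpacking in sparc_map_cache_event():
 * cache id in bits 0-7, op in bits 8-15, result in bits 16-23. */
static unsigned long long make_cache_config(unsigned int type,
					    unsigned int op,
					    unsigned int result)
{
	return type | (op << 8) | (result << 16);
}

int main(void)
{
	/* L1D read misses: 0 | (0 << 8) | (1 << 16) == 0x10000 */
	printf("%#llx\n",
	       make_cache_config(PERF_COUNT_HW_CACHE_L1D,
				 PERF_COUNT_HW_CACHE_OP_READ,
				 PERF_COUNT_HW_CACHE_RESULT_MISS));
	return 0;
}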