Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c0d362a8 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branch 'master' of...

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/perfcounters into perfcounters/core
parents 506c10f2 f7862837
Loading
Loading
Loading
Loading
+31 −0
Original line number Diff line number Diff line
@@ -131,5 +131,36 @@ static inline int irqs_disabled_flags(unsigned long flags)
 */
struct hw_interrupt_type;

#ifdef CONFIG_PERF_COUNTERS
/*
 * Read the current CPU's perf_counter_pending flag straight out of
 * the paca.  The "13" in the asm template is GPR r13, which on 64-bit
 * PowerPC holds the address of the current CPU's paca_struct, so
 * "lbz %0,%1(13)" loads the byte at
 * offsetof(struct paca_struct, perf_counter_pending) from that base.
 * Going through r13 directly avoids any preemption window that a
 * get_paca()-style access could introduce.
 */
static inline unsigned long get_perf_counter_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
	return x;
}

/*
 * Store @x into the current CPU's paca perf_counter_pending byte
 * ("stb" = store byte), using the same r13-relative addressing as
 * get_perf_counter_pending() above.  Only the low byte of @x is
 * stored; callers are expected to pass 0 or 1.
 */
static inline void set_perf_counter_pending(int x)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (x),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}

/* Process a PMU interrupt that was deferred while irqs were soft-disabled. */
extern void perf_counter_do_pending(void);

#else

/*
 * CONFIG_PERF_COUNTERS=n: no pending state exists, so the flag always
 * reads as 0 and the setters/handler compile away to nothing.
 */
static inline unsigned long get_perf_counter_pending(void)
{
	return 0;
}

static inline void set_perf_counter_pending(int x) {}
static inline void perf_counter_do_pending(void) {}
#endif /* CONFIG_PERF_COUNTERS */

#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */
+1 −0
Original line number Diff line number Diff line
@@ -99,6 +99,7 @@ struct paca_struct {
	u8 soft_enabled;		/* irq soft-enable flag */
	u8 hard_enabled;		/* set if irqs are enabled in MSR */
	u8 io_sync;			/* writel() needs spin_unlock sync */
	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */

	/* Stuff for accurate time accounting */
	u64 user_time;			/* accumulated usermode TB ticks */
+72 −0
Original line number Diff line number Diff line
/*
 * Performance counter support - PowerPC-specific definitions.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>

#define MAX_HWCOUNTERS		8
#define MAX_EVENT_ALTERNATIVES	8

/*
 * This struct provides the constants and functions needed to
 * describe the PMU on a particular POWER-family CPU.  One instance
 * is defined per CPU model and published through the global ppmu
 * pointer.  The mask/value constraint scheme used by get_constraint,
 * add_fields and test_adder is described in detail in the long
 * comment following this struct.
 */
struct power_pmu {
	int	n_counter;		/* number of hardware counters (<= MAX_HWCOUNTERS) */
	int	max_alternatives;	/* most alternative codes any event has
					 * (<= MAX_EVENT_ALTERNATIVES) */
	u64	add_fields;		/* per-"add field": 1 in the LSB of the field
					 * (see constraint comment below) */
	u64	test_adder;		/* per-field test constant added during
					 * constraint checking (see comment below) */
	/* Compute MMCR register settings for the n_ev events in events[];
	 * fills hwc[] with the counter assigned to each event and mmcr[]
	 * with the register values.  Returns nonzero on failure. */
	int	(*compute_mmcr)(unsigned int events[], int n_ev,
				unsigned int hwc[], u64 mmcr[]);
	/* Return the constraint (mask in *mskp, value in *valp) for an
	 * event code; nonzero return means the event is not valid. */
	int	(*get_constraint)(unsigned int event, u64 *mskp, u64 *valp);
	/* Fill alt[] with equivalent codes for the event (including the
	 * event itself); returns how many were stored. */
	int	(*get_alternatives)(unsigned int event, unsigned int alt[]);
	/* Adjust mmcr[] so that the given PMC counts nothing. */
	void	(*disable_pmc)(unsigned int pmc, u64 mmcr[]);
	int	n_generic;		/* entries in generic_events[] */
	int	*generic_events;	/* map: generic event index -> raw event code
					 * (presumably indexed by the generic perf
					 * counter event ids — verify against users) */
};

extern struct power_pmu *ppmu;

/*
 * The power_pmu.get_constraint function returns a 64-bit value and
 * a 64-bit mask that express the constraints between this event and
 * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
 * in MMCR* needs to be set to a specific value for this event.  For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits.  The constraint checking code will ensure that two events
 * that set the same field in their masks have the same value in their
 * value dwords.
 *
 * Add field: this expresses the constraint that there can be at most
 * N events in a particular class.  A field of k bits can be used for
 * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
 * set (and the other bits 0), and the value has only the least significant
 * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
 * in the struct power_pmu for this processor come into play.  The
 * add_fields value contains 1 in the LSB of the field, and the
 * test_adder contains 2^(k-1) - 1 - N in the field.
 *
 * NAND field: this expresses the constraint that you may not have events
 * in all of a set of classes.  (For example, on PPC970, you can't select
 * events from the FPU, ISU and IDU simultaneously, although any two are
 * possible.)  For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits.  The mask has
 * only the most-significant bit set, and the value has only the bit
 * for the event's class set.  The test_adder has the least significant
 * bit set in the field.
 *
 * If an event is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */
+1 −0
Original line number Diff line number Diff line
@@ -322,3 +322,4 @@ SYSCALL_SPU(epoll_create1)
SYSCALL_SPU(dup3)
SYSCALL_SPU(pipe2)
SYSCALL(inotify_init1)
SYSCALL(perf_counter_open)
+2 −1
Original line number Diff line number Diff line
@@ -341,10 +341,11 @@
#define __NR_dup3		316
#define __NR_pipe2		317
#define __NR_inotify_init1	318
#define __NR_perf_counter_open	319

#ifdef __KERNEL__

#define __NR_syscalls		319
#define __NR_syscalls		320

#define __NR__exit __NR_exit
#define NR_syscalls	__NR_syscalls
Loading