Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 61f63e38 authored by Ingo Molnar
Browse files

Merge tag 'perf-core-for-mingo-4.12-20170316' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

New features:

 - Add 'brstackinsn' field in 'perf script' to reuse the x86 instruction
   decoder used in the Intel PT code to study hot paths to samples (Andi Kleen)

Kernel changes:

 - Default UPROBE_EVENTS to Y (Alexei Starovoitov)

 - Fix check for kretprobe offset within function entry (Naveen N. Rao)

Infrastructure changes:

 - Introduce util func is_sdt_event() (Ravi Bangoria)

 - Make perf_event__synthesize_mmap_events() scale on older kernels where
   reading /proc/pid/maps is way slower than reading /proc/pid/task/pid/maps (Stephane Eranian)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents ee368428 61f35d75
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -268,6 +268,7 @@ extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern bool arch_function_offset_within_entry(unsigned long offset);
extern bool arch_function_offset_within_entry(unsigned long offset);
extern bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);


extern bool within_kprobe_blacklist(unsigned long addr);
extern bool within_kprobe_blacklist(unsigned long addr);


+26 −14
Original line number Original line Diff line number Diff line
@@ -1391,21 +1391,19 @@ bool within_kprobe_blacklist(unsigned long addr)
 * This returns encoded errors if it fails to look up symbol or invalid
 * This returns encoded errors if it fails to look up symbol or invalid
 * combination of parameters.
 * combination of parameters.
 */
 */
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
			const char *symbol_name, unsigned int offset)
{
{
	kprobe_opcode_t *addr = p->addr;
	if ((symbol_name && addr) || (!symbol_name && !addr))

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;
		goto invalid;


	if (p->symbol_name) {
	if (symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		kprobe_lookup_name(symbol_name, addr);
		if (!addr)
		if (!addr)
			return ERR_PTR(-ENOENT);
			return ERR_PTR(-ENOENT);
	}
	}


	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
	if (addr)
		return addr;
		return addr;


@@ -1413,6 +1411,11 @@ static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
	return ERR_PTR(-EINVAL);
	return ERR_PTR(-EINVAL);
}
}


static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
{
@@ -1881,19 +1884,28 @@ bool __weak arch_function_offset_within_entry(unsigned long offset)
	return !offset;
	return !offset;
}
}


bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
						!arch_function_offset_within_entry(offset))
		return false;

	return true;
}

int register_kretprobe(struct kretprobe *rp)
int register_kretprobe(struct kretprobe *rp)
{
{
	int ret = 0;
	int ret = 0;
	struct kretprobe_instance *inst;
	struct kretprobe_instance *inst;
	int i;
	int i;
	void *addr;
	void *addr;
	unsigned long offset;

	addr = kprobe_addr(&rp->kp);
	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
		return -EINVAL;


	if (!arch_function_offset_within_entry(offset))
	if (!function_offset_within_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;
		return -EINVAL;


	if (kretprobe_blacklist_size) {
	if (kretprobe_blacklist_size) {
+1 −1
Original line number Original line Diff line number Diff line
@@ -455,7 +455,7 @@ config UPROBE_EVENTS
	select UPROBES
	select UPROBES
	select PROBE_EVENTS
	select PROBE_EVENTS
	select TRACING
	select TRACING
	default n
	default y
	help
	help
	  This allows the user to add tracing events on top of userspace
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  dynamic events (similar to tracepoints) on the fly via the trace
+1 −1
Original line number Original line Diff line number Diff line
@@ -697,7 +697,7 @@ static int create_trace_kprobe(int argc, char **argv)
			return ret;
			return ret;
		}
		}
		if (offset && is_return &&
		if (offset && is_return &&
		    !arch_function_offset_within_entry(offset)) {
		    !function_offset_within_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
			return -EINVAL;
		}
		}
+2 −3
Original line number Original line Diff line number Diff line
@@ -100,7 +100,7 @@
#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_CPUID	( 3*32+25) /* CPU has CPUID instruction itself */
#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
@@ -186,7 +186,7 @@
 *
 *
 * Reuse free bits when adding new feature flags!
 * Reuse free bits when adding new feature flags!
 */
 */

#define X86_FEATURE_RING3MWAIT	( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_CAT_L3	( 7*32+ 4) /* Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L3	( 7*32+ 4) /* Cache Allocation Technology L3 */
@@ -321,5 +321,4 @@
#define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */
#define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */
#define X86_BUG_AMD_E400	X86_BUG(13) /* CPU is among the affected by Erratum 400 */
#define X86_BUG_AMD_E400	X86_BUG(13) /* CPU is among the affected by Erratum 400 */

#endif /* _ASM_X86_CPUFEATURES_H */
#endif /* _ASM_X86_CPUFEATURES_H */
Loading