Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d207ea8e authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Thomas Gleixner:
 "Kernel:
   - Improve kallsyms coverage
   - Add x86 entry trampolines to kcore
   - Fix ARM SPE handling
   - Correct PPC event post processing

  Tools:
   - Make the build system more robust
   - Small fixes and enhancements all over the place
   - Update kernel ABI header copies
   - Preparatory work for converting libtraceevent to a shared library
   - License cleanups"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (100 commits)
  tools arch: Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy'
  tools arch x86: Update tools's copy of cpufeatures.h
  perf python: Fix pyrf_evlist__read_on_cpu() interface
  perf mmap: Store real cpu number in 'struct perf_mmap'
  perf tools: Remove ext from struct kmod_path
  perf tools: Add gzip_is_compressed function
  perf tools: Add lzma_is_compressed function
  perf tools: Add is_compressed callback to compressions array
  perf tools: Move the temp file processing into decompress_kmodule
  perf tools: Use compression id in decompress_kmodule()
  perf tools: Store compression id into struct dso
  perf tools: Add compression id into 'struct kmod_path'
  perf tools: Make is_supported_compression() static
  perf tools: Make decompress_to_file() function static
  perf tools: Get rid of dso__needs_decompress() call in __open_dso()
  perf tools: Get rid of dso__needs_decompress() call in symbol__disassemble()
  perf tools: Get rid of dso__needs_decompress() call in read_object_code()
  tools lib traceevent: Change to SPDX License format
  perf llvm: Allow passing options to llc in addition to clang
  perf parser: Improve error message for PMU address filters
  ...
parents 2a8a2b7c 66e5db4a
Loading
Loading
Loading
Loading
+33 −0
Original line number Original line Diff line number Diff line
@@ -2,6 +2,8 @@


#include <linux/spinlock.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>


#include <asm/cpu_entry_area.h>
#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/pgtable.h>
@@ -13,6 +15,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
#endif
#endif


struct cpu_entry_area *get_cpu_entry_area(int cpu)
struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -146,10 +149,40 @@ static void __init setup_cpu_entry_area(int cpu)


	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
	/*
	 * The cpu_entry_area alias addresses are not in the kernel binary
	 * so they do not show up in /proc/kcore normally.  This adds entries
	 * for them manually.
	 */
	kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
			 _entry_trampoline,
			 &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
#endif
#endif
	percpu_setup_debug_store(cpu);
	percpu_setup_debug_store(cpu);
}
}


#ifdef CONFIG_X86_64
/*
 * Report the per-CPU entry-trampoline aliases as extra kallsyms entries.
 *
 * The trampoline alias addresses live in the cpu_entry_area, not in the
 * kernel image, so they would otherwise be invisible to kallsyms.  symnum
 * indexes the possible CPUs; the alias address of that CPU's trampoline
 * page is returned in *value.
 *
 * Returns 0 on success, -EINVAL once symnum runs past the last possible CPU
 * (which terminates the arch-symbol enumeration in kallsyms).
 */
int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		     char *name)
{
	unsigned int cpu, ncpu = 0;

	if (symnum >= num_possible_cpus())
		return -EINVAL;

	/* Walk to the symnum'th possible CPU — CPU ids may be sparse. */
	for_each_possible_cpu(cpu) {
		if (ncpu++ >= symnum)
			break;
	}

	*value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
	*type = 't';	/* local text symbol */
	strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);

	return 0;
}
#endif

static __init void setup_cpu_entry_area_ptes(void)
static __init void setup_cpu_entry_area_ptes(void)
{
{
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32
+5 −2
Original line number Original line Diff line number Diff line
@@ -359,8 +359,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
			phdr->p_type = PT_LOAD;
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			if (m->type == KCORE_REMAP)
				phdr->p_vaddr = (size_t)m->vaddr;
			else
				phdr->p_vaddr = (size_t)m->addr;
				phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
			if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
				phdr->p_paddr = __pa(m->addr);
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
				phdr->p_paddr = __pa_symbol(m->addr);
+13 −0
Original line number Original line Diff line number Diff line
@@ -12,11 +12,13 @@ enum kcore_type {
	KCORE_VMEMMAP,
	KCORE_VMEMMAP,
	KCORE_USER,
	KCORE_USER,
	KCORE_OTHER,
	KCORE_OTHER,
	KCORE_REMAP,
};
};


struct kcore_list {
struct kcore_list {
	struct list_head list;
	struct list_head list;
	unsigned long addr;
	unsigned long addr;
	unsigned long vaddr;
	size_t size;
	size_t size;
	int type;
	int type;
};
};
@@ -36,11 +38,22 @@ struct vmcoredd_node {


#ifdef CONFIG_PROC_KCORE
#ifdef CONFIG_PROC_KCORE
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
/*
 * Register a /proc/kcore entry whose ELF p_vaddr differs from the address
 * the data is read from: @addr is where the contents actually live (and is
 * used for the physical-address translation), while @vaddr is the alias
 * address the entry is reported at.  Stores the alias and delegates to
 * kclist_add() with type KCORE_REMAP.
 */
static inline
void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
{
	m->vaddr = (unsigned long)vaddr;
	kclist_add(m, addr, sz, KCORE_REMAP);
}
#else
#else
static inline
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
{
}
}

/* No-op stub when CONFIG_PROC_KCORE is disabled. */
static inline
void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
{
}
#endif
#endif


#endif /* _LINUX_KCORE_H */
#endif /* _LINUX_KCORE_H */
+37 −14
Original line number Original line Diff line number Diff line
@@ -432,6 +432,7 @@ int sprint_backtrace(char *buffer, unsigned long address)
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
struct kallsym_iter {
	loff_t pos;
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	loff_t pos_ftrace_mod_end;
	unsigned long value;
	unsigned long value;
@@ -443,9 +444,29 @@ struct kallsym_iter {
	int show_value;
	int show_value;
};
};


/*
 * Default (weak) implementation: no architecture-specific kallsyms entries.
 * Architectures that expose extra symbols (e.g. x86 entry trampolines)
 * override this; returning -EINVAL means "no symbol at this index".
 */
int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

/*
 * Fetch the next arch-specific symbol for the kallsyms iterator.
 * The arch index is the iterator position minus the count of built-in
 * symbols.  Returns 1 when a symbol was produced; on exhaustion records
 * the end position in iter->pos_arch_end and returns 0 so iteration can
 * move on to the next symbol source.
 */
static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
				   &iter->value, &iter->type,
				   iter->name);

	if (ret < 0) {
		iter->pos_arch_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
static int get_ksymbol_mod(struct kallsym_iter *iter)
{
{
	int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     iter->name, iter->module_name,
				     &iter->exported);
				     &iter->exported);
@@ -501,32 +522,34 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
	iter->nameoff = get_symbol_offset(new_pos);
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	iter->pos = new_pos;
	if (new_pos == 0) {
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
	}
	}
}
}


/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
{
	iter->pos = pos;
	iter->pos = pos;


	if (iter->pos_ftrace_mod_end > 0 &&
	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    iter->pos_ftrace_mod_end < iter->pos)
	    get_ksymbol_arch(iter))
		return get_ksymbol_bpf(iter);

	if (iter->pos_mod_end > 0 &&
	    iter->pos_mod_end < iter->pos) {
		if (!get_ksymbol_ftrace_mod(iter))
			return get_ksymbol_bpf(iter);
		return 1;
		return 1;
	}


	if (!get_ksymbol_mod(iter)) {
	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
		if (!get_ksymbol_ftrace_mod(iter))
	    get_ksymbol_mod(iter))
			return get_ksymbol_bpf(iter);
		return 1;
	}


	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;
		return 1;

	return get_ksymbol_bpf(iter);
}
}


/* Returns false if pos at or past end of file. */
/* Returns false if pos at or past end of file. */
+2 −1
Original line number Original line Diff line number Diff line
@@ -220,6 +220,7 @@
#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED	( 7*32+30) /* Enhanced IBRS */


/* Virtualization flags: Linux defined, word 8 */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -230,7 +231,7 @@


#define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */
#define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */

#define X86_FEATURE_EPT_AD		( 8*32+17) /* Intel Extended Page Table access-dirty bit */


/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE		( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_FSGSBASE		( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
Loading