Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5804b110 authored by Ingo Molnar
Browse files

Merge tag 'perf-core-for-mingo-4.19-20180815' of...

Merge tag 'perf-core-for-mingo-4.19-20180815' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

 into perf/urgent

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

kernel:

- kallsyms, x86: Export addresses of PTI entry trampolines (Alexander Shishkin)

- kallsyms: Simplify update_iter_mod() (Adrian Hunter)

- x86: Add entry trampolines to kcore (Adrian Hunter)

Hardware tracing:

- Fix auxtrace queue resize (Adrian Hunter)

Arch specific:

- Fix uninitialized ARM SPE record error variable (Kim Phillips)

- Fix trace event post-processing in powerpc (Sandipan Das)

Build:

- Fix check-headers.sh AND list path of execution (Alexander Kapshuk)

- Remove -mcet and -fcf-protection when building the python binding
  with older clang versions (Arnaldo Carvalho de Melo)

- Make check-headers.sh check based on kernel dir (Jiri Olsa)

- Move syscall_64.tbl check into check-headers.sh (Jiri Olsa)

Infrastructure:

- Check for null when copying nsinfo (Benno Evers)

Libraries:

- Rename libtraceevent prefixes, prep work for making it a shared
  library generally available (Tzvetomir Stoyanov (VMware))

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parents 13e091b6 6855dc41
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
@@ -2,6 +2,8 @@

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
@@ -13,6 +15,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
#endif

struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -146,10 +149,40 @@ static void __init setup_cpu_entry_area(int cpu)

	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
	/*
	 * The cpu_entry_area alias addresses are not in the kernel binary
	 * so they do not show up in /proc/kcore normally.  This adds entries
	 * for them manually.
	 */
	kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
			 _entry_trampoline,
			 &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
#endif
	percpu_setup_debug_store(cpu);
}

#ifdef CONFIG_X86_64
/*
 * Report the per-CPU entry trampoline aliases to kallsyms.
 *
 * The cpu_entry_area trampoline mappings are aliases outside the kernel
 * image, so they are invisible to the normal kallsyms tables; this hook
 * exposes one synthetic symbol per possible CPU.
 *
 * @symnum: index of the arch symbol to fetch (0-based).
 * @value:  out — address of that CPU's entry trampoline alias.
 * @type:   out — symbol type character ('t' = local text).
 * @name:   out — symbol name, at least KSYM_NAME_LEN bytes.
 *
 * Returns 0 on success, -EINVAL when symnum is past the last possible CPU
 * (the caller uses this to detect the end of the arch symbol range).
 */
int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		     char *name)
{
	unsigned int cpu, ncpu = 0;

	if (symnum >= num_possible_cpus())
		return -EINVAL;

	/*
	 * Walk the possible-CPU mask to find the symnum'th possible CPU;
	 * CPU ids need not be contiguous, so a plain index won't do.
	 */
	for_each_possible_cpu(cpu) {
		if (ncpu++ >= symnum)
			break;
	}

	*value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
	*type = 't';
	strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);

	return 0;
}
#endif

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
+5 −2
Original line number Diff line number Diff line
@@ -383,8 +383,11 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		if (m->type == KCORE_REMAP)
			phdr->p_vaddr	= (size_t)m->vaddr;
		else
			phdr->p_vaddr	= (size_t)m->addr;
		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
		if (m->type == KCORE_RAM || m->type == KCORE_TEXT || m->type == KCORE_REMAP)
			phdr->p_paddr	= __pa(m->addr);
		else
			phdr->p_paddr	= (elf_addr_t)-1;
+13 −0
Original line number Diff line number Diff line
@@ -12,11 +12,13 @@ enum kcore_type {
	KCORE_VMEMMAP,
	KCORE_USER,
	KCORE_OTHER,
	KCORE_REMAP,
};

/*
 * One /proc/kcore ELF program header entry.  For KCORE_REMAP entries,
 * @addr is the address inside the kernel image and @vaddr is the alias
 * address the PT_LOAD header should advertise; other types use @addr only.
 */
struct kcore_list {
	struct list_head list;	/* linkage on the global kclist */
	unsigned long addr;	/* start address of the region */
	unsigned long vaddr;	/* alias address (KCORE_REMAP only) */
	size_t size;		/* region length in bytes */
	int type;		/* enum kcore_type */
};
@@ -36,11 +38,22 @@ struct vmcoredd_node {

#ifdef CONFIG_PROC_KCORE
extern void kclist_add(struct kcore_list *, void *, size_t, int type);
/*
 * Register a remapped region with /proc/kcore: the data lives at @addr
 * (inside the kernel image) but is advertised under the alias @vaddr.
 * Used for mappings such as the cpu_entry_area trampolines, which are
 * not covered by the normal kernel text/data ranges.
 */
static inline
void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
{
	m->vaddr = (unsigned long)vaddr;
	kclist_add(m, addr, sz, KCORE_REMAP);
}
#else
/* No-op stubs when /proc/kcore support (CONFIG_PROC_KCORE) is disabled. */
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
}

static inline
void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
{
}
#endif

#endif /* _LINUX_KCORE_H */
+37 −14
Original line number Diff line number Diff line
@@ -432,6 +432,7 @@ int sprint_backtrace(char *buffer, unsigned long address)
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	unsigned long value;
@@ -443,9 +444,29 @@ struct kallsym_iter {
	int show_value;
};

/*
 * Default (weak) implementation: architectures that have no extra
 * arch-specific symbols simply report an empty range.  x86-64 overrides
 * this to expose the per-CPU entry trampoline aliases.
 */
int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

/*
 * Fill @iter with the next arch-specific symbol, if any.
 *
 * Returns 1 when a symbol was produced; returns 0 when the arch range is
 * exhausted, recording the end position in iter->pos_arch_end so later
 * sections (modules, ftrace, BPF) know where their indices start.
 */
static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	unsigned int symnum = iter->pos - kallsyms_num_syms;

	if (arch_get_kallsym(symnum, &iter->value, &iter->type,
			     iter->name) >= 0)
		return 1;

	/* Arch symbols ran out: remember the end of this section. */
	iter->pos_arch_end = iter->pos;
	return 0;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
@@ -501,32 +522,34 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if (iter->pos_ftrace_mod_end > 0 &&
	    iter->pos_ftrace_mod_end < iter->pos)
		return get_ksymbol_bpf(iter);

	if (iter->pos_mod_end > 0 &&
	    iter->pos_mod_end < iter->pos) {
		if (!get_ksymbol_ftrace_mod(iter))
			return get_ksymbol_bpf(iter);
	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    get_ksymbol_arch(iter))
		return 1;
	}

	if (!get_ksymbol_mod(iter)) {
		if (!get_ksymbol_ftrace_mod(iter))
			return get_ksymbol_bpf(iter);
	}
	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	return get_ksymbol_bpf(iter);
}

/* Returns false if pos at or past end of file. */
+2 −2
Original line number Diff line number Diff line
@@ -129,12 +129,12 @@ $(OUTPUT)liblockdep.a: $(LIB_IN)
tags:	force
	$(RM) tags
	find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
	--regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
	--regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'

TAGS:	force
	$(RM) TAGS
	find . -name '*.[ch]' | xargs etags \
	--regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
	--regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'

define do_install
	$(print_install)				\
Loading