Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 937779db authored by Ingo Molnar
Browse files

Merge branch 'perf/urgent' into perf/core



Merge reason: We want to queue up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents 6230f2c7 9f591fd7
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1298,7 +1298,7 @@ static void power_pmu_setup(int cpu)
}

static int __cpuinit
power_pmu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

+16 −9
Original line number Diff line number Diff line
@@ -787,7 +787,6 @@ void hw_perf_enable(void)
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

@@ -802,21 +801,16 @@ void hw_perf_enable(void)
				continue;

			x86_pmu_stop(event);

			hwc->idx = -1;
		}

		for (i = 0; i < cpuc->n_events; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (i < n_running &&
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			if (hwc->idx == -1)
			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			x86_pmu_start(event);
		}
@@ -1685,3 +1679,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)

	return entry;
}

/*
 * Fill in a minimal pt_regs snapshot describing the caller's context, for
 * use by the perf callchain code when no hardware-captured regs exist.
 *
 * @regs: output register set; only ip, bp, cs and flags are populated.
 * @ip:   instruction pointer to record as the sample origin.
 * @skip: number of stack frames to skip when locating the caller's
 *        frame pointer.
 *
 * NOTE(review): regs->bp is derived from the live stack at this exact call
 * depth via rewind_frame_pointer() (defined in dumpstack.h), so this
 * function's own frame must be accounted for — hence skip + 1 below.
 */
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	regs->ip = ip;
	/*
	 * perf_arch_fetch_caller_regs adds another call, we need to increment
	 * the skip level
	 */
	regs->bp = rewind_frame_pointer(skip + 1);
	regs->cs = __KERNEL_CS;	/* samples taken here are always kernel-mode */
	local_save_flags(regs->flags);
}
EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
+15 −0
Original line number Diff line number Diff line
@@ -29,4 +29,19 @@ struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

/*
 * Walk n frames up the saved-frame-pointer chain and return the resulting
 * frame pointer value.
 *
 * @n: number of stack frames to rewind past, starting from this
 *     function's own frame.
 *
 * Returns the frame pointer n levels up as an unsigned long.
 *
 * NOTE(review): get_bp() presumably loads the current base pointer into
 * 'frame' — it is a macro defined elsewhere; confirm it fully initializes
 * 'frame' before the walk. Without CONFIG_FRAME_POINTER the chain cannot
 * be followed, so the current bp is returned unmodified.
 */
static inline unsigned long rewind_frame_pointer(int n)
{
	struct stack_frame *frame;

	get_bp(frame);

#ifdef CONFIG_FRAME_POINTER
	/* Each next_frame link is the caller's saved frame pointer. */
	while (n--)
		frame = frame->next_frame;
#endif

	return (unsigned long)frame;
}

#endif /* DUMPSTACK_H */
+2 −2
Original line number Diff line number Diff line
@@ -208,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
				bp = ops->walk_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
@@ -229,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
+13 −10
Original line number Diff line number Diff line
@@ -131,12 +131,12 @@ struct ftrace_event_call {
	void			*mod;
	void			*data;

	int			profile_count;
	int			(*profile_enable)(struct ftrace_event_call *);
	void			(*profile_disable)(struct ftrace_event_call *);
	int			perf_refcount;
	int			(*perf_event_enable)(struct ftrace_event_call *);
	void			(*perf_event_disable)(struct ftrace_event_call *);
};

#define FTRACE_MAX_PROFILE_SIZE	2048
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_PRED		32
#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
@@ -187,22 +187,25 @@ do { \

#ifdef CONFIG_PERF_EVENTS
struct perf_event;
extern int ftrace_profile_enable(int event_id);
extern void ftrace_profile_disable(int event_id);

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int perf_trace_enable(int event_id);
extern void perf_trace_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *
ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
			 unsigned long *irq_flags);

static inline void
ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, unsigned long irq_flags)
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, unsigned long irq_flags, struct pt_regs *regs)
{
	struct trace_entry *entry = raw_data;

	perf_tp_event(entry->type, addr, count, raw_data, size);
	perf_tp_event(entry->type, addr, count, raw_data, size, regs);
	perf_swevent_put_recursion_context(rctx);
	local_irq_restore(irq_flags);
}
Loading