
Commit cb04ff9a authored by Peter Zijlstra, committed by Ingo Molnar

sched, perf: Use a single callback into the scheduler
We can easily use a single callback for both sched-in and sched-out. This
reduces the code footprint in the scheduler path and removes the PMU
black spot otherwise present between the out and in callbacks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-o56ajxp1edwqg6x9d31wb805@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8b1e1363
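
The pattern at work: one static-key-guarded wrapper replaces the separate
sched-in and sched-out hooks, so the scheduler takes a single branch and
call per context switch and leaves no unmonitored window between the two
halves. Below is a minimal standalone C sketch of that pattern, assuming
a plain bool in place of the kernel's static_key jump label; task_sched,
__task_sched and sched_events_enabled are hypothetical stand-ins, not
kernel symbols.

#include <stdbool.h>
#include <stdio.h>

struct task { int pid; };

/* Stand-in for perf_sched_events.key; in the kernel this is a jump
 * label, so the disabled case is a patched no-op rather than a load
 * and branch. */
static bool sched_events_enabled;

/* Combined callback: sched-out and sched-in work run back to back,
 * so there is no window between them where the hook is dark. */
static void __task_sched(struct task *prev, struct task *next)
{
	printf("sched-out %d, sched-in %d\n", prev->pid, next->pid);
}

/* The one inline wrapper the scheduler calls per context switch. */
static inline void task_sched(struct task *prev, struct task *next)
{
	if (sched_events_enabled)
		__task_sched(prev, next);
}

int main(void)
{
	struct task a = { .pid = 1 }, b = { .pid = 2 };

	sched_events_enabled = true;
	task_sched(&a, &b);	/* one call site instead of two */
	return 0;
}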
include/linux/perf_event.h  +6 −18
@@ -1084,9 +1084,7 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *prev,
-				       struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *prev,
+extern void __perf_event_task_sched(struct task_struct *prev,
 				    struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
@@ -1207,20 +1205,13 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *prev,
+static inline void perf_event_task_sched(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched_in(prev, task);
-}
-
-static inline void perf_event_task_sched_out(struct task_struct *prev,
-					     struct task_struct *next)
-{
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
 	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched_out(prev, next);
+		__perf_event_task_sched(prev, task);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1295,11 +1286,8 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *prev,
+perf_event_task_sched(struct task_struct *prev,
 		      struct task_struct *task)				{ }
-static inline void
-perf_event_task_sched_out(struct task_struct *prev,
-			  struct task_struct *next)			{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
kernel/events/core.c  +10 −4
@@ -2039,7 +2039,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void __perf_event_task_sched_out(struct task_struct *task,
+static void __perf_event_task_sched_out(struct task_struct *task,
 					struct task_struct *next)
 {
 	int ctxn;
@@ -2279,7 +2279,7 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *prev,
+static void __perf_event_task_sched_in(struct task_struct *prev,
 				       struct task_struct *task)
 {
 	struct perf_event_context *ctx;
@@ -2305,6 +2305,12 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 		perf_branch_stack_sched_in(prev, task);
 }
 
+void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
+{
+	__perf_event_task_sched_out(prev, next);
+	__perf_event_task_sched_in(prev, next);
+}
+
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
kernel/sched/core.c  +1 −8
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	sched_info_switch(prev, next);
-	perf_event_task_sched_out(prev, next);
+	perf_event_task_sched(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1956,13 +1956,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_disable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(prev, current);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_enable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();