Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 05a0c86d authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "Merge remote-tracking branch '318/dev/msm-3.18-sched' into msm318"

parents 3ea11417 cbb2f8b5
Loading
Loading
Loading
Loading
+8 −12
Original line number Diff line number Diff line
@@ -1999,13 +1999,7 @@ struct sched_load {
	unsigned long predicted_load;
};

#if defined(CONFIG_SCHED_FREQ_INPUT)
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
				const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
#ifdef CONFIG_SCHED_QHMP
#if defined(CONFIG_SCHED_QHMP) || !defined(CONFIG_SCHED_HMP)
static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
{
	return 0;
@@ -2013,6 +2007,13 @@ static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
#else
int sched_update_freq_max_load(const cpumask_t *cpumask);
#endif

#if defined(CONFIG_SCHED_FREQ_INPUT)
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
				const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
#else
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
@@ -2025,11 +2026,6 @@ static inline unsigned long sched_get_busy(int cpu)
static inline void sched_get_cpus_busy(struct sched_load *busy,
				       const struct cpumask *query_cpus) {};
static inline void sched_set_io_is_busy(int val) {};

static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
{
	return 0;
}
#endif

/*
+4 −6
Original line number Diff line number Diff line
@@ -274,7 +274,7 @@ TRACE_EVENT(sched_set_boost,
TRACE_EVENT(sched_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime, u32 cycles, u32 exec_time,
		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
		 struct group_cpu_time *cpu_time),

	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
@@ -317,8 +317,7 @@ TRACE_EVENT(sched_update_task_ravg,
		__entry->evt            = evt;
		__entry->cpu            = rq->cpu;
		__entry->cur_pid        = rq->curr->pid;
		__entry->cur_freq       = cpu_cycles_to_freq(rq->cpu, cycles,
							     exec_time);
		__entry->cur_freq       = cpu_cycles_to_freq(cycles, exec_time);
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid            = p->pid;
		__entry->mark_start     = p->ravg.mark_start;
@@ -365,7 +364,7 @@ TRACE_EVENT(sched_update_task_ravg,

TRACE_EVENT(sched_get_task_cpu_cycles,

	TP_PROTO(int cpu, int event, u64 cycles, u32 exec_time),
	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),

	TP_ARGS(cpu, event, cycles, exec_time),

@@ -383,8 +382,7 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
		__entry->event 		= event;
		__entry->cycles 	= cycles;
		__entry->exec_time 	= exec_time;
		__entry->freq 		= cpu_cycles_to_freq(cpu, cycles,
							     exec_time);
		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
		__entry->legacy_freq 	= cpu_cur_freq(cpu);
	),

+176 −146

File changed.

Preview size limit exceeded, changes collapsed.

+2 −0
Original line number Diff line number Diff line
@@ -81,6 +81,8 @@ void irqtime_account_irq(struct task_struct *curr)

	if (account)
		sched_account_irqtime(cpu, curr, delta, wallclock);
	else if (curr != this_cpu_ksoftirqd())
		sched_account_irqstart(cpu, curr, wallclock);

	local_irq_restore(flags);
}
+1 −0
Original line number Diff line number Diff line
@@ -332,6 +332,7 @@ do { \
	P(cluster->efficiency);
	P(cluster->cur_freq);
	P(cluster->max_freq);
	P(cluster->exec_scale_factor);
#endif
#endif
#ifdef CONFIG_SCHED_HMP
Loading