
Commit 91ac727e authored by Pavankumar Kondeti

sched: fix compilation errors with !SCHED_FREQ_INPUT

There are a few compilation errors with SCHED_HMP &&
!SCHED_FREQ_INPUT. Fix them by refactoring the code.

Change-Id: I62747baddcbe66fa8e6e1d58ed0a74af5f4e95cd
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 5b42fe0b
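
The failure mode being fixed is a classic one for config-gated kernel code: with SCHED_HMP enabled but SCHED_FREQ_INPUT disabled, code that is always built still referenced symbols that were only declared or defined inside #ifdef CONFIG_SCHED_FREQ_INPUT, so the build broke. The refactoring below keeps the real declarations under the #ifdef and ensures every configuration sees a definition, typically via a static inline stub in the #else branch. The following is only a minimal standalone sketch of that pattern, with hypothetical names, not the kernel code itself:

#include <stdio.h>

/*
 * Sketch of the ifdef-plus-stub pattern (made-up names). Compile with
 * -DCONFIG_SCHED_FREQ_INPUT to get the "real" variant; without the
 * flag, the static inline stub keeps callers compiling and linking.
 */
#ifdef CONFIG_SCHED_FREQ_INPUT
static inline int example_freq_max_load(int cpu)
{
	/* a real implementation would compute a per-cpu max load */
	return cpu * 2;
}
#else
/* feature disabled: callers still compile; the call folds away to 0 */
static inline int example_freq_max_load(int cpu)
{
	(void)cpu;
	return 0;
}
#endif

int main(void)
{
	printf("%d\n", example_freq_max_load(4));
	return 0;
}

Built either way, the caller in main() never needs an #ifdef of its own, which is exactly the property the hunks below restore for the scheduler's window-statistics API.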
+8 −12
@@ -1998,13 +1998,7 @@ struct sched_load {
 	unsigned long predicted_load;
 };
 
-#if defined(CONFIG_SCHED_FREQ_INPUT)
-extern int sched_set_window(u64 window_start, unsigned int window_size);
-extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(struct sched_load *busy,
-				const struct cpumask *query_cpus);
-extern void sched_set_io_is_busy(int val);
-#ifdef CONFIG_SCHED_QHMP
+#if defined(CONFIG_SCHED_QHMP) || !defined(CONFIG_SCHED_HMP)
 static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
 {
 	return 0;
@@ -2012,6 +2006,13 @@ static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
 #else
 int sched_update_freq_max_load(const cpumask_t *cpumask);
 #endif
+
+#if defined(CONFIG_SCHED_FREQ_INPUT)
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+				const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
 #else
 static inline int sched_set_window(u64 window_start, unsigned int window_size)
 {
@@ -2024,11 +2025,6 @@ static inline unsigned long sched_get_busy(int cpu)
 static inline void sched_get_cpus_busy(struct sched_load *busy,
 				       const struct cpumask *query_cpus) {};
 static inline void sched_set_io_is_busy(int val) {};
-
-static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
-{
-	return 0;
-}
 #endif
 
 /*
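
The second file applies the same discipline to the definitions. Declarations needed by both configurations (sync_cpu, the related_thread_groups list, and its rwlock) move out of the CONFIG_SCHED_FREQ_INPUT-only region, _group_cpu_time() gains an early forward declaration visible to both branches, and the group-time reset loop in reset_all_window_stats() is factored into _reset_all_group_time(), which gets an empty stub when the option is off so the caller stays #ifdef-free. A hedged sketch of that extract-a-helper-with-a-stub refactor, again with hypothetical names and a simplified stand-in type:

#include <stddef.h>

/* simplified stand-in for the per-group, per-cpu window state */
struct example_cpu_time {
	unsigned long long window_start;
};

#ifdef CONFIG_SCHED_FREQ_INPUT
/* "real" variant: clear each entry and restart its window, loosely
 * mirroring the loop this commit moves out of reset_all_window_stats() */
static inline void example_reset_group_time(struct example_cpu_time *ct,
					    size_t nr_cpus,
					    unsigned long long window_start)
{
	size_t i;

	for (i = 0; i < nr_cpus; i++) {
		ct[i].window_start = 0;
		if (window_start)
			ct[i].window_start = window_start;
	}
}
#else
/* feature off: empty stub, so the caller needs no #ifdef of its own */
static inline void example_reset_group_time(struct example_cpu_time *ct,
					    size_t nr_cpus,
					    unsigned long long window_start)
{
	(void)ct;
	(void)nr_cpus;
	(void)window_start;
}
#endif

int main(void)
{
	struct example_cpu_time ct[4] = { { 0 } };

	/* compiles with or without -DCONFIG_SCHED_FREQ_INPUT */
	example_reset_group_time(ct, 4, 100ULL);
	return 0;
}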
+73 −61
@@ -1766,15 +1766,6 @@ unsigned int __read_mostly sched_disable_window_stats;
  */
 #ifdef CONFIG_SCHED_FREQ_INPUT
 __read_mostly unsigned int sched_major_task_runtime = 10000000;
-#endif
-
-static unsigned int sync_cpu;
-
-static LIST_HEAD(related_thread_groups);
-static DEFINE_RWLOCK(related_thread_group_lock);
-
-#define for_each_related_thread_group(grp) \
-	list_for_each_entry(grp, &related_thread_groups, list)
 
 /*
  * Demand aggregation for frequency purpose:
@@ -1825,6 +1816,16 @@ static DEFINE_RWLOCK(related_thread_group_lock);
 static __read_mostly unsigned int sched_freq_aggregate;
 __read_mostly unsigned int sysctl_sched_freq_aggregate;
 
+#endif
+
+static unsigned int sync_cpu;
+
+static LIST_HEAD(related_thread_groups);
+static DEFINE_RWLOCK(related_thread_group_lock);
+
+#define for_each_related_thread_group(grp) \
+	list_for_each_entry(grp, &related_thread_groups, list)
+
 #define EXITING_TASK_MARKER	0xdeaddead
 
 static inline int exiting_task(struct task_struct *p)
@@ -1876,6 +1877,9 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq)
 	return delta;
 }
 
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu);
+
 #ifdef CONFIG_SCHED_FREQ_INPUT
 
 static inline int cpu_is_waiting_on_io(struct rq *rq)
@@ -1915,9 +1919,6 @@ static inline unsigned int load_to_freq(struct rq *rq, u64 load)
 	return freq;
 }
 
-static inline struct group_cpu_time *
-_group_cpu_time(struct related_thread_group *grp, int cpu);
-
 /*
  * Return load from all related group in given cpu.
  * Caller must ensure that related_thread_group_lock is held.
@@ -2583,6 +2584,41 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 	BUG();
 }
 
+static inline u32 predict_and_update_buckets(struct rq *rq,
+			struct task_struct *p, u32 runtime) {
+
+	int bidx;
+	u32 pred_demand;
+
+	bidx = busy_to_bucket(runtime);
+	pred_demand = get_pred_busy(rq, p, bidx, runtime);
+	bucket_increase(p->ravg.busy_buckets, bidx);
+
+	return pred_demand;
+}
+#define assign_ravg_pred_demand(x) (p->ravg.pred_demand = x)
+
+#else	/* CONFIG_SCHED_FREQ_INPUT */
+
+static inline void
+update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
+{
+}
+
+static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+	     int event, u64 wallclock, u64 irqtime)
+{
+}
+
+static inline u32 predict_and_update_buckets(struct rq *rq,
+			struct task_struct *p, u32 runtime)
+{
+	return 0;
+}
+#define assign_ravg_pred_demand(x)
+
+#endif	/* CONFIG_SCHED_FREQ_INPUT */
+
 u32 __weak get_freq_max_load(int cpu, u32 freq)
 {
 	/* 100% by default */
@@ -2673,41 +2709,6 @@ fail:
 	return ret;
 }
 
-static inline u32 predict_and_update_buckets(struct rq *rq,
-			struct task_struct *p, u32 runtime) {
-
-	int bidx;
-	u32 pred_demand;
-
-	bidx = busy_to_bucket(runtime);
-	pred_demand = get_pred_busy(rq, p, bidx, runtime);
-	bucket_increase(p->ravg.busy_buckets, bidx);
-
-	return pred_demand;
-}
-#define assign_ravg_pred_demand(x) (p->ravg.pred_demand = x)
-
-#else	/* CONFIG_SCHED_FREQ_INPUT */
-
-static inline void
-update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
-{
-}
-
-static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
-	     int event, u64 wallclock, u64 irqtime)
-{
-}
-
-static inline u32 predict_and_update_buckets(struct rq *rq,
-			struct task_struct *p, u32 runtime)
-{
-	return 0;
-}
-#define assign_ravg_pred_demand(x)
-
-#endif	/* CONFIG_SCHED_FREQ_INPUT */
-
 static void update_task_cpu_cycles(struct task_struct *p, int cpu)
 {
 	if (use_cycle_counter)
@@ -3184,6 +3185,7 @@ const char *sched_window_reset_reasons[] = {
	"HIST_SIZE_CHANGE",
};

static inline void _reset_all_group_time(u64 window_start);
/* Called with IRQs enabled */
void reset_all_window_stats(u64 window_start, unsigned int window_size)
{
@@ -3192,7 +3194,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 	u64 start_ts = sched_ktime_clock();
 	int reason = WINDOW_CHANGE;
 	unsigned int old = 0, new = 0;
-	struct related_thread_group *grp;
 
 	disable_window_stats();
 
@@ -3207,18 +3208,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 		raw_spin_lock(&rq->lock);
 	}
 
-	list_for_each_entry(grp, &related_thread_groups, list) {
-		int j;
-
-		for_each_possible_cpu(j) {
-			struct group_cpu_time *cpu_time;
-			/* Protected by rq lock */
-			cpu_time = _group_cpu_time(grp, j);
-			memset(cpu_time, 0, sizeof(struct group_cpu_time));
-			if (window_start)
-				cpu_time->window_start = window_start;
-		}
-	}
+	_reset_all_group_time(window_start);
 
 	if (window_size) {
 		sched_ravg_window = window_size * TICK_NSEC;
@@ -3836,6 +3826,26 @@ _group_cpu_time(struct related_thread_group *grp, int cpu)
 	return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
 }
 
+/* must be called with all rq lock held */
+static inline void _reset_all_group_time(u64 window_start)
+{
+	struct related_thread_group *grp;
+
+	list_for_each_entry(grp, &related_thread_groups, list) {
+		int j;
+
+		for_each_possible_cpu(j) {
+			struct group_cpu_time *cpu_time;
+
+			/* Protected by rq lock */
+			cpu_time = _group_cpu_time(grp, j);
+			memset(cpu_time, 0, sizeof(struct group_cpu_time));
+			if (window_start)
+				cpu_time->window_start = window_start;
+		}
+	}
+}
+
 #else	/* CONFIG_SCHED_FREQ_INPUT */
 
 static inline void free_group_cputime(struct related_thread_group *grp) { }
@@ -3850,7 +3860,7 @@ static inline void transfer_busy_time(struct rq *rq,
 {
 }
 
-static struct group_cpu_time *
+static inline struct group_cpu_time *
 task_group_cpu_time(struct task_struct *p, int cpu)
 {
 	return NULL;
@@ -3862,6 +3872,8 @@ _group_cpu_time(struct related_thread_group *grp, int cpu)
 	return NULL;
 }
 
+static inline void _reset_all_group_time(u64 window_start) {}
+
 #endif
 
 struct related_thread_group *alloc_related_thread_group(int group_id)