Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d41481ee authored by Satya Durga Srinivasu Prabhala, committed by Todd Kjos
Browse files

ANDROID: sched/walt: Fix compilation issue for x86_64



The compilation errors below are observed when SCHED_WALT is enabled and
FAIR_GROUP_SCHED is disabled; fix them.

  CC      kernel/sched/walt.o
kernel/sched/walt.c: In function 'walt_inc_cfs_cumulative_runnable_avg':
kernel/sched/walt.c:157:8: error: 'struct cfs_rq' has no member named 'cumulative_runnable_avg'
  cfs_rq->cumulative_runnable_avg += p->ravg.demand;
        ^
kernel/sched/walt.c: In function 'walt_dec_cfs_cumulative_runnable_avg':
kernel/sched/walt.c:163:8: error: 'struct cfs_rq' has no member named 'cumulative_runnable_avg'
  cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
        ^
make[2]: *** [kernel/sched/walt.o] Error 1
make[1]: *** [kernel/sched] Error 2
make: *** [kernel] Error 2

Change-Id: I52d506aa5494f4c351f19e6bb2f1a75c8333f321
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
(cherry-picked for android-4.14)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
parent 572cdb99
Loading
Loading
Loading
Loading
+3 −4
Original line number Diff line number Diff line
@@ -483,10 +483,6 @@ struct cfs_rq {
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SCHED_WALT
	u64 cumulative_runnable_avg;
#endif

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
@@ -496,6 +492,9 @@ struct cfs_rq {
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#ifdef CONFIG_SCHED_WALT
	u64 cumulative_runnable_avg;
#endif /* CONFIG_SCHED_WALT */
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
+2 −0
Original line number Diff line number Diff line
@@ -148,6 +148,7 @@ static int __init walt_init_ops(void)
}
late_initcall(walt_init_ops);

#ifdef CONFIG_CFS_BANDWIDTH
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
@@ -159,6 +160,7 @@ void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
{
	cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}
#endif

static int exiting_task(struct task_struct *p)
{
+13 −8
Original line number Diff line number Diff line
@@ -20,10 +20,7 @@ void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
		u64 wallclock, u64 irqtime);
void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p);
void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p);

void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
void walt_init_new_task_load(struct task_struct *p);
void walt_mark_task_starting(struct task_struct *p);
@@ -42,10 +39,6 @@ static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
		int event, u64 wallclock, u64 irqtime) { }
static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p) { }
static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p) { }
static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void walt_init_new_task_load(struct task_struct *p) { }
static inline void walt_mark_task_starting(struct task_struct *p) { }
@@ -57,6 +50,18 @@ static inline u64 walt_ktime_clock(void) { return 0; }

#endif /* CONFIG_SCHED_WALT */

#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SCHED_WALT)
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p);
void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p);
#else
static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p) { }
static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
		struct task_struct *p) { }
#endif

extern bool walt_disabled;

#endif