Commit 576259be authored by Syed Rameez Mustafa

sched/hmp: Use GFP_KERNEL for top task memory allocations



Task load structure allocations can consume a lot of memory as the
number of tasks begins to increase. They can also exhaust the atomic
memory pool quickly when a workload spawns many threads in a short
amount of time, increasing the possibility of failed allocations.
Move the call to init_new_task_load() outside atomic context and use
GFP_KERNEL for these allocations; there is no need for them to happen
in atomic context.

Change-Id: I357772e10bf8958804d9cd0c78eda27139054b21
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent ecd8f780
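For context on the flag change below: GFP_ATOMIC allocations draw on a small reserved pool and must not sleep, so they fail readily under memory pressure, whereas GFP_KERNEL allocations may block to reclaim memory and are far more likely to succeed, but are only legal in contexts that are allowed to sleep. get_cpu() disables preemption and thus begins an atomic section, which is why the patch moves init_new_task_load() ahead of it in sched_fork(). A minimal sketch of that ordering follows; the helper alloc_window_arrays() and the example function are hypothetical, not part of the patch:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

/*
 * Hypothetical helper mirroring the patch's approach: allocate the
 * per-CPU window arrays with GFP_KERNEL *before* entering atomic
 * context. GFP_KERNEL may sleep, so it must not be used once
 * preemption has been disabled.
 */
static int alloc_window_arrays(u32 **curr, u32 **prev)
{
	*curr = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
	*prev = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
	if (!*curr || !*prev) {
		kfree(*curr);		/* kfree(NULL) is a no-op */
		kfree(*prev);
		*curr = *prev = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int example_fork_path(void)
{
	u32 *curr, *prev;
	int cpu, ret;

	/* Sleepable GFP_KERNEL allocations first, as the patch does. */
	ret = alloc_window_arrays(&curr, &prev);
	if (ret)
		return ret;

	cpu = get_cpu();	/* disables preemption: atomic from here */
	(void)cpu;		/* per-CPU setup that must not sleep */
	put_cpu();		/* preemption re-enabled */

	kfree(curr);
	kfree(prev);
	return 0;
}

The patch itself retains the BUG_ON() on allocation failure; the -ENOMEM path above exists only to keep the sketch self-contained.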
kernel/sched/core.c  +5 −13
@@ -2269,17 +2269,7 @@ void sched_exit(struct task_struct *p)
 	reset_task_stats(p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-
-	kfree(p->ravg.curr_window_cpu);
-	kfree(p->ravg.prev_window_cpu);
-
-	/*
-	 * update_task_ravg() can be called for exiting tasks. While the
-	 * function itself ensures correct behavior, the corresponding
-	 * trace event requires that these pointers be NULL.
-	 */
-	p->ravg.curr_window_cpu = NULL;
-	p->ravg.prev_window_cpu = NULL;
+	free_task_load_ptrs(p);
 
 	enqueue_task(rq, p, 0);
 	clear_ed_task(p, rq);
@@ -2384,10 +2374,12 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long flags;
-	int cpu = get_cpu();
+	int cpu;
 
+	__sched_fork(clone_flags, p);
+	init_new_task_load(p, false);
+	cpu = get_cpu();
 
-	__sched_fork(clone_flags, p);
 	/*
 	 * We mark the process as running here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
kernel/sched/hmp.c  +16 −2
@@ -1624,6 +1624,20 @@ unsigned int cpu_temp(int cpu)
 		return 0;
 }
 
+void free_task_load_ptrs(struct task_struct *p)
+{
+	kfree(p->ravg.curr_window_cpu);
+	kfree(p->ravg.prev_window_cpu);
+
+	/*
+	 * update_task_ravg() can be called for exiting tasks. While the
+	 * function itself ensures correct behavior, the corresponding
+	 * trace event requires that these pointers be NULL.
+	 */
+	p->ravg.curr_window_cpu = NULL;
+	p->ravg.prev_window_cpu = NULL;
+}
+
 void init_new_task_load(struct task_struct *p, bool idle_task)
 {
 	int i;
@@ -1636,8 +1650,8 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
 	memset(&p->ravg, 0, sizeof(struct ravg));
 	p->cpu_cycles = 0;
 
-	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
-	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
+	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
 
 	/* Don't have much choice. CPU frequency would be bogus */
 	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
kernel/sched/sched.h  +3 −0
@@ -1079,6 +1079,7 @@ extern unsigned int __read_mostly sched_downmigrate;
 extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int  __read_mostly sched_load_granule;
 
+extern void free_task_load_ptrs(struct task_struct *p);
 extern void init_new_task_load(struct task_struct *p, bool idle_task);
 extern u64 sched_ktime_clock(void);
 extern int got_boost_kick(void);
@@ -1527,6 +1528,8 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 	return NULL;
 }
 
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
 static inline void init_new_task_load(struct task_struct *p, bool idle_task)
 {
 }
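The sched.h hunks follow the kernel's usual config-stub idiom: the extern declaration sits on the HMP-enabled side of the header, and an empty static inline stub is supplied on the other side, so callers such as sched_exit() need no #ifdef guards. A generic sketch of the pattern, with CONFIG_FEATURE_X and feature_x_cleanup() as illustrative stand-ins rather than symbols from this patch:

#include <linux/sched.h>

#ifdef CONFIG_FEATURE_X
extern void feature_x_cleanup(struct task_struct *p);
#else
static inline void feature_x_cleanup(struct task_struct *p) { }
#endif

With the stub in place, a call site can invoke feature_x_cleanup(p) unconditionally; when the feature is disabled the compiler inlines the empty body and emits no code.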