Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fd641941 authored by Park Ju Hyung, committed by KakatkarAkshay
Browse files

sched: do not allocate window cpu arrays separately



These are allocated extremely frequently.

Allocate them with CONFIG_NR_CPUS upon struct ravg's allocation.

This will break WALT debug tracing.

Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
Signed-off-by: LibXZR <xzr467706992@163.com>
parent f2d6d2fa
Loading
Loading
Loading
Loading
+1 −4
Original line number Diff line number Diff line
@@ -664,7 +664,6 @@ extern u32 sched_get_init_task_load(struct task_struct *p);
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
					  u32 fmax);
extern int sched_set_boost(int enable);
extern void free_task_load_ptrs(struct task_struct *p);
extern void sched_set_refresh_rate(enum fps fps);

#define RAVG_HIST_SIZE_MAX 5
@@ -709,7 +708,7 @@ struct ravg {
	u32 sum, demand;
	u32 coloc_demand;
	u32 sum_history[RAVG_HIST_SIZE_MAX];
	u32 *curr_window_cpu, *prev_window_cpu;
	u32 curr_window_cpu[CONFIG_NR_CPUS], prev_window_cpu[CONFIG_NR_CPUS];
	u32 curr_window, prev_window;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -731,8 +730,6 @@ static inline int sched_set_boost(int enable)
{
	return -EINVAL;
}
static inline void free_task_load_ptrs(struct task_struct *p) { }

static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
					u32 fmin, u32 fmax) { }

+0 −1
Original line number Diff line number Diff line
@@ -2375,7 +2375,6 @@ static __latent_entropy struct task_struct *copy_process(
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
	free_task_load_ptrs(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
+0 −1
Original line number Diff line number Diff line
@@ -8805,7 +8805,6 @@ void sched_exit(struct task_struct *p)
	enqueue_task(rq, p, 0);
	clear_ed_task(p, rq);
	task_rq_unlock(rq, p, &rf);
	free_task_load_ptrs(p);
}
#endif /* CONFIG_SCHED_WALT */

+14 −37
Original line number Diff line number Diff line
@@ -2152,11 +2152,6 @@ void init_new_task_load(struct task_struct *p)
	memset(&p->ravg, 0, sizeof(struct ravg));
	p->cpu_cycles = 0;

	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
					  GFP_KERNEL | __GFP_NOFAIL);
	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
					  GFP_KERNEL | __GFP_NOFAIL);

	if (init_load_pct) {
		init_load_windows = div64_u64((u64)init_load_pct *
			  (u64)sched_ravg_window, 100);
@@ -2174,46 +2169,28 @@ void init_new_task_load(struct task_struct *p)
	p->unfilter = sysctl_sched_task_unfilter_period;
}

/*
 * kfree() may wakeup kswapd. So this function should NOT be called
 * with any CPU's rq->lock acquired.
 */
void free_task_load_ptrs(struct task_struct *p)
{
	kfree(p->ravg.curr_window_cpu);
	kfree(p->ravg.prev_window_cpu);

	/*
	 * update_task_ravg() can be called for exiting tasks. While the
	 * function itself ensures correct behavior, the corresponding
	 * trace event requires that these pointers be NULL.
	 */
	p->ravg.curr_window_cpu = NULL;
	p->ravg.prev_window_cpu = NULL;
}

/*
 * reset_task_stats - reinitialize @p's WALT accounting state.
 *
 * For an exiting task, zero the whole ravg struct but keep the
 * EXITING_TASK_MARKER in sum_history[0] so that late calls into
 * update_task_ravg() can still recognize the task as exiting.
 *
 * For a live task, the embedded curr/prev per-CPU window arrays are
 * saved across the memset of struct ravg and then restored.
 *
 * NOTE(review): restoring the saved window contents preserves them
 * across the reset, whereas the pointer-based predecessor zeroed the
 * windows here — confirm preserving (rather than zeroing) is intended.
 */
void reset_task_stats(struct task_struct *p)
{
	u32 sum;
	u32 curr_window_saved[CONFIG_NR_CPUS];
	u32 prev_window_saved[CONFIG_NR_CPUS];

	if (exiting_task(p)) {
		sum = EXITING_TASK_MARKER;

		memset(&p->ravg, 0, sizeof(struct ravg));

		/* Retain EXITING_TASK marker */
		p->ravg.sum_history[0] = sum;
	} else {
		memcpy(curr_window_saved, p->ravg.curr_window_cpu,
		       sizeof(curr_window_saved));
		memcpy(prev_window_saved, p->ravg.prev_window_cpu,
		       sizeof(prev_window_saved));

		memset(&p->ravg, 0, sizeof(struct ravg));

		memcpy(p->ravg.curr_window_cpu, curr_window_saved,
		       sizeof(curr_window_saved));
		memcpy(p->ravg.prev_window_cpu, prev_window_saved,
		       sizeof(prev_window_saved));
	}
}

void mark_task_starting(struct task_struct *p)