
Commit 147c5fc2 authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Shrink sg_lb_stats and play memset games



We can shrink sg_lb_stats because rq::nr_running is an unsigned int and CPU
numbers are 'int', so the per-group task and CPU counts (sum_nr_running,
group_capacity, idle_cpus, group_weight) all fit in an unsigned int.

Before:
  sgs:        /* size: 72, cachelines: 2, members: 10 */
  sds:        /* size: 184, cachelines: 3, members: 7 */

After:
  sgs:        /* size: 56, cachelines: 1, members: 10 */
  sds:        /* size: 152, cachelines: 3, members: 7 */
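
For illustration only, not part of the patch: a minimal user-space sketch of
the layout change, with field names copied from sg_lb_stats. The sizes assume
an LP64 target, where unsigned long is 8 bytes and unsigned int is 4.

  #include <stdio.h>

  /* Old layout: every counter is an unsigned long. */
  struct sgs_old {
  	unsigned long avg_load, group_load, sum_nr_running, sum_weighted_load;
  	unsigned long load_per_task, group_capacity, idle_cpus, group_weight;
  	int group_imb, group_has_capacity;
  };

  /* New layout: counts that fit in 32 bits become unsigned int. */
  struct sgs_new {
  	unsigned long avg_load, group_load, sum_weighted_load, load_per_task;
  	unsigned int sum_nr_running, group_capacity, idle_cpus, group_weight;
  	int group_imb, group_has_capacity;
  };

  int main(void)
  {
  	printf("old: %zu\n", sizeof(struct sgs_old));	/* 72 on LP64 */
  	printf("new: %zu\n", sizeof(struct sgs_new));	/* 56 on LP64 */
  	return 0;
  }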

Further, we can avoid clearing all of sds, since update_sg_lb_stats() does a
full clear/assignment of each group's sg_lb_stats. The one exception is
busiest_stat.avg_load, which update_sd_pick_busiest() reads before it is
assigned, so it must still start out zero.
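
As a standalone sketch of the trick, with simplified stand-in types rather
than the kernel's: a compound-literal assignment with designated initializers
replaces the blanket memset(), naming exactly the one read-before-write field
that must start out zero.

  struct grp_stats { unsigned long avg_load; unsigned int sum_nr_running; };

  struct dom_stats {
  	struct grp_stats busiest_stat;	/* avg_load read before assignment */
  	struct grp_stats local_stat;	/* fully overwritten before any read */
  };

  static inline void init_dom_stats(struct dom_stats *ds)
  {
  	/*
  	 * Stand-in for init_sd_lb_stats(): instead of
  	 * memset(ds, 0, sizeof(*ds)), name only the field that is read
  	 * before it is written.  C99 still zero-initializes the unnamed
  	 * members of the compound literal, but the initializer documents
  	 * the intent and may let the compiler elide provably dead stores.
  	 */
  	*ds = (struct dom_stats){
  		.busiest_stat = { .avg_load = 0UL },
  	};
  }

In the patch itself this becomes init_sd_lb_stats(), which
find_busiest_group() calls in place of the old memset() (see the last hunk
below).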

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-0klzmz9okll8wc0nsudguc9p@git.kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 56cf515b
kernel/sched/fair.c: +26 −7
@@ -4237,12 +4237,12 @@ static unsigned long task_h_load(struct task_struct *p)
 struct sg_lb_stats {
 	unsigned long avg_load; /*Avg load across the CPUs of the group */
 	unsigned long group_load; /* Total load over the CPUs of the group */
-	unsigned long sum_nr_running; /* Nr tasks running in the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long load_per_task;
-	unsigned long group_capacity;
-	unsigned long idle_cpus;
-	unsigned long group_weight;
+	unsigned int sum_nr_running; /* Nr tasks running in the group */
+	unsigned int group_capacity;
+	unsigned int idle_cpus;
+	unsigned int group_weight;
 	int group_imb; /* Is there an imbalance in the group ? */
 	int group_has_capacity; /* Is there extra capacity in the group? */
 };
@@ -4258,9 +4258,28 @@ struct sd_lb_stats {
 	unsigned long total_pwr;	/* Total power of all groups in sd */
 	unsigned long avg_load;	/* Average load across all groups in sd */
 
-	struct sg_lb_stats local_stat;	/* Statistics of the local group */
 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
+	struct sg_lb_stats local_stat;	/* Statistics of the local group */
 };
 
+static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
+{
+	/*
+	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
+	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
+	 * We must however clear busiest_stat::avg_load because
+	 * update_sd_pick_busiest() reads this before assignment.
+	 */
+	*sds = (struct sd_lb_stats){
+		.busiest = NULL,
+		.local = NULL,
+		.total_load = 0UL,
+		.total_pwr = 0UL,
+		.busiest_stat = {
+			.avg_load = 0UL,
+		},
+	};
+}
+
 /**
  * get_sd_load_idx - Obtain the load index for a given sched domain.
@@ -4615,7 +4634,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 		 */
 		if (prefer_sibling && !local_group &&
 				sds->local && sds->local_stat.group_has_capacity)
-			sgs->group_capacity = min(sgs->group_capacity, 1UL);
+			sgs->group_capacity = min(sgs->group_capacity, 1U);
 
 		/* Now, start updating sd_lb_stats */
 		sds->total_load += sgs->group_load;
@@ -4846,7 +4865,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	struct sg_lb_stats *local, *busiest;
 	struct sd_lb_stats sds;
 
-	memset(&sds, 0, sizeof(sds));
+	init_sd_lb_stats(&sds);
 
 	/*
 	 * Compute the various statistics relavent for load balancing at
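
For context, a simplified stand-in, not the kernel's code, for why
busiest_stat.avg_load is the one field that must be cleared: the first call to
update_sd_pick_busiest() compares a candidate group against
busiest_stat.avg_load before any busiest group has been recorded, so a stale
non-zero value could make every candidate lose.

  struct sg_stats { unsigned long avg_load; };
  struct sd_stats { struct sg_stats busiest_stat; };

  /* Stand-in for the comparison pattern in update_sd_pick_busiest(). */
  static int pick_busiest(struct sd_stats *sds, struct sg_stats *sgs)
  {
  	if (sgs->avg_load <= sds->busiest_stat.avg_load)
  		return 0;	/* not busier than the recorded busiest */
  	return 1;		/* new busiest candidate */
  }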