
Commit 1f8c553d authored by Gautham R Shenoy, committed by Ingo Molnar

sched: Create a helper function to calculate sched_group stats for fbg()



Impact: cleanup

Create a helper function named update_sg_lb_stats() which
can be invoked to calculate the individual group's statistics
in find_busiest_group().

This reduces the length of find_busiest_group() considerably.
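
To make the shape of the change easier to follow, here is a minimal standalone sketch of the refactoring pattern, using hypothetical types and names (struct group, update_stats() and find_busiest() are illustrative only, not the kernel's): the per-group statistics pass moves out of the caller's loop into a helper that fills a stats struct through a pointer, which is how update_sg_lb_stats() slots into find_busiest_group() in the diff below.

/* Hypothetical sketch of the extraction pattern; not the kernel code. */
#include <string.h>

struct sg_stats {
	unsigned long group_load;	/* summed load of the group's CPUs */
	unsigned long nr_running;	/* summed runnable tasks */
};

struct group {
	const unsigned long *cpu_load;	/* per-CPU load samples */
	const unsigned int *cpu_tasks;	/* per-CPU task counts */
	int nr_cpus;
	struct group *next;		/* groups form a circular list */
};

/* Helper: tally one group's statistics (the update_sg_lb_stats() role). */
static void update_stats(const struct group *g, struct sg_stats *sgs)
{
	int i;

	memset(sgs, 0, sizeof(*sgs));
	for (i = 0; i < g->nr_cpus; i++) {
		sgs->group_load += g->cpu_load[i];
		sgs->nr_running += g->cpu_tasks[i];
	}
}

/* Caller: what remains of the busiest-group loop after the extraction. */
static struct group *find_busiest(struct group *head)
{
	struct group *g = head, *busiest = NULL;
	unsigned long max_load = 0;

	do {
		struct sg_stats sgs;

		update_stats(g, &sgs);
		if (sgs.group_load > max_load) {
			max_load = sgs.group_load;
			busiest = g;
		}
		g = g->next;
	} while (g != head);

	return busiest;
}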

Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091351.13992.43461.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 381be78f
+100 −75
@@ -3237,56 +3237,36 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 
 	return load_idx;
 }
-/******* find_busiest_group() helpers end here *********************/
 
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
- */
-static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum cpu_idle_type idle,
-		   int *sd_idle, const struct cpumask *cpus, int *balance)
-{
-	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
-	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
-	unsigned long max_pull;
-	unsigned long busiest_load_per_task, busiest_nr_running;
-	unsigned long this_load_per_task, this_nr_running;
-	int load_idx, group_imb = 0;
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	int power_savings_balance = 1;
-	unsigned long leader_nr_running = 0, min_load_per_task = 0;
-	unsigned long min_nr_running = ULONG_MAX;
-	struct sched_group *group_min = NULL, *group_leader = NULL;
-#endif
-
-	max_load = this_load = total_load = total_pwr = 0;
-	busiest_load_per_task = busiest_nr_running = 0;
-	this_load_per_task = this_nr_running = 0;
-
-	load_idx = get_sd_load_idx(sd, idle);
-
-	do {
-		struct sg_lb_stats sgs;
+/**
+ * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @group: sched_group whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @load_idx: Load index of sched_domain of this_cpu for load calc.
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @local_group: Does group contain this_cpu.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sgs: variable to hold the statistics for this group.
+ */
+static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+			enum cpu_idle_type idle, int load_idx, int *sd_idle,
+			int local_group, const struct cpumask *cpus,
+			int *balance, struct sg_lb_stats *sgs)
+{
 	unsigned long load, max_cpu_load, min_cpu_load;
-		int local_group;
 	int i;
 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
 	unsigned long sum_avg_load_per_task;
 	unsigned long avg_load_per_task;
 
-		local_group = cpumask_test_cpu(this_cpu,
-					       sched_group_cpus(group));
-		memset(&sgs, 0, sizeof(sgs));
-
 	if (local_group)
 		balance_cpu = group_first_cpu(group);
 
 	/* Tally up the load of all CPUs in the group */
 	sum_avg_load_per_task = avg_load_per_task = 0;
 
 	max_cpu_load = 0;
 	min_cpu_load = ~0UL;
 
@@ -3312,9 +3292,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 				min_cpu_load = load;
 		}
 
-			sgs.group_load += load;
-			sgs.sum_nr_running += rq->nr_running;
-			sgs.sum_weighted_load += weighted_cpuload(i);
+		sgs->group_load += load;
+		sgs->sum_nr_running += rq->nr_running;
+		sgs->sum_weighted_load += weighted_cpuload(i);
 
 		sum_avg_load_per_task += cpu_avg_load_per_task(i);
 	}
@@ -3328,15 +3308,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (idle != CPU_NEWLY_IDLE && local_group &&
 	    balance_cpu != this_cpu && balance) {
 		*balance = 0;
-			goto ret;
+		return;
 	}
 
-		total_load += sgs.group_load;
-		total_pwr += group->__cpu_power;
-
 	/* Adjust by relative CPU power of the group */
-		sgs.avg_load = sg_div_cpu_power(group,
-				sgs.group_load * SCHED_LOAD_SCALE);
+	sgs->avg_load = sg_div_cpu_power(group,
+			sgs->group_load * SCHED_LOAD_SCALE);
 
 
 	/*
@@ -3352,9 +3329,57 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 			sum_avg_load_per_task * SCHED_LOAD_SCALE);
 
 	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
-			sgs.group_imb = 1;
+		sgs->group_imb = 1;
 
-		sgs.group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+	sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+}
+/******* find_busiest_group() helpers end here *********************/
+
+/*
+ * find_busiest_group finds and returns the busiest CPU group within the
+ * domain. It calculates and returns the amount of weighted load which
+ * should be moved to restore balance via the imbalance parameter.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+		   unsigned long *imbalance, enum cpu_idle_type idle,
+		   int *sd_idle, const struct cpumask *cpus, int *balance)
+{
+	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
+	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+	unsigned long max_pull;
+	unsigned long busiest_load_per_task, busiest_nr_running;
+	unsigned long this_load_per_task, this_nr_running;
+	int load_idx, group_imb = 0;
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+	int power_savings_balance = 1;
+	unsigned long leader_nr_running = 0, min_load_per_task = 0;
+	unsigned long min_nr_running = ULONG_MAX;
+	struct sched_group *group_min = NULL, *group_leader = NULL;
+#endif
+
+	max_load = this_load = total_load = total_pwr = 0;
+	busiest_load_per_task = busiest_nr_running = 0;
+	this_load_per_task = this_nr_running = 0;
+
+	load_idx = get_sd_load_idx(sd, idle);
+
+	do {
+		struct sg_lb_stats sgs;
+		int local_group;
+
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
+		memset(&sgs, 0, sizeof(sgs));
+		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+				local_group, cpus, balance, &sgs);
+
+		if (balance && !(*balance))
+			goto ret;
+
+		total_load += sgs.group_load;
+		total_pwr += group->__cpu_power;
+
+		if (local_group) {
+			this_load = sgs.avg_load;