Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit feb4d37f authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "cpuidle: Wakeup only cpus for which qos has changed."

parents 366b24d4 769d28ca
Loading
Loading
Loading
Loading
+24 −4
Original line number Diff line number Diff line
@@ -613,16 +613,36 @@ EXPORT_SYMBOL_GPL(cpuidle_register);

#ifdef CONFIG_SMP

static void wake_up_idle_cpus(void *v)
{
	int cpu;
	struct cpumask cpus;

	if (v) {
		cpumask_andnot(&cpus, v, cpu_isolated_mask);
		cpumask_and(&cpus, &cpus, cpu_online_mask);
	} else
		cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);

	preempt_disable();
	for_each_cpu(cpu, &cpus) {
		if (cpu == smp_processor_id())
			continue;
		wake_up_if_idle(cpu);
	}
	preempt_enable();
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get only those processors out of their
 * C-state for which qos requirement is changed, and then recalculate a new
 * suitable C-state. Just do a cross-cpu IPI; that wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	/*
	 * v carries the cpumask of CPUs affected by the qos change (passed
	 * by the pm_qos notifier chain); wake only those, rather than every
	 * idle CPU as the pre-patch wake_up_all_idle_cpus() call did.
	 */
	wake_up_idle_cpus(v);
	return NOTIFY_OK;
}

+17 −6
Original line number Diff line number Diff line
@@ -267,7 +267,8 @@ static const struct file_operations pm_qos_debug_fops = {
	.release        = single_release,
};

static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
		struct cpumask *cpus)
{
	struct pm_qos_request *req = NULL;
	int cpu;
@@ -294,9 +295,12 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
		}
	}

	for_each_possible_cpu(cpu)
	for_each_possible_cpu(cpu) {
		if (c->target_per_cpu[cpu] != qos_val[cpu])
			cpumask_set_cpu(cpu, cpus);
		c->target_per_cpu[cpu] = qos_val[cpu];
	}
}

/**
 * pm_qos_update_target - manages the constraints list and calls the notifiers
@@ -316,6 +320,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c,
	unsigned long flags;
	int prev_value, curr_value, new_value;
	struct plist_node *node = &req->node;
	struct cpumask cpus;
	int ret;

	spin_lock_irqsave(&pm_qos_lock, flags);
@@ -346,18 +351,24 @@ int pm_qos_update_target(struct pm_qos_constraints *c,
	}

	curr_value = pm_qos_get_value(c);
	cpumask_clear(&cpus);
	pm_qos_set_value(c, curr_value);
	pm_qos_set_value_for_cpus(c);
	pm_qos_set_value_for_cpus(c, &cpus);

	spin_unlock_irqrestore(&pm_qos_lock, flags);

	trace_pm_qos_update_target(action, prev_value, curr_value);
	if (prev_value != curr_value) {

	/*
	 * if cpu mask bits are set, call the notifier call chain
	 * to update the new qos restriction for the cores
	 */

	if (!cpumask_empty(&cpus)) {
		ret = 1;
		if (c->notifiers)
			blocking_notifier_call_chain(c->notifiers,
						     (unsigned long)curr_value,
						     NULL);
				     (unsigned long)curr_value, &cpus);
	} else {
		ret = 0;
	}