Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 40a34266 authored by Linus Torvalds
Browse files
* 'release-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-acpi-2.6:
  cpuidle: Make ladder governor honor latency requirements fully
  cpuidle: Menu governor fix wrong usage of measured_us
  cpuidle: Do not use poll_idle unless user asks for it
  x86: Fix ioremap off by one BUG
parents 4ad193b4 22d9aac2
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

-	retval = reserve_memtype(phys_addr, phys_addr + size,
+	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
+22 −4
Original line number Diff line number Diff line
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
	struct ladder_device_state *last_state;
	int last_residency, last_idx = ldev->last_state_idx;
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 
 	if (unlikely(!ldev))
 		return 0;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		ladder_do_selection(ldev, last_idx, 0);
+		return 0;
+	}

	last_state = &ldev->states[last_idx];

	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
	/* consider promotion */
	if (last_idx < dev->state_count - 1 &&
	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <=
-			pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+	    dev->states[last_idx + 1].exit_latency <= latency_req) {
		last_state->stats.promotion_count++;
		last_state->stats.demotion_count = 0;
		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
	}

	/* consider demotion */
-	if (last_idx > 0 &&
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+	    dev->states[last_idx].exit_latency > latency_req) {
+		int i;
+
+		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+			if (dev->states[i].exit_latency <= latency_req)
+				break;
+		}
+		ladder_do_selection(ldev, last_idx, i);
+		return i;
+	}
+
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    last_residency < last_state->threshold.demotion_time) {
		last_state->stats.demotion_count++;
		last_state->stats.promotion_count = 0;
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
	struct ladder_device_state *lstate;
	struct cpuidle_state *state;

-	ldev->last_state_idx = 0;
+	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	for (i = 0; i < dev->state_count; i++) {
		state = &dev->states[i];
+28 −14
Original line number Diff line number Diff line
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		data->last_state_idx = 0;
+		return 0;
+	}

	/* determine the expected residency time */
	data->expected_us =
		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;

	/* find the deepest idle state that satisfies our constraints */
-	for (i = 1; i < dev->state_count; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->expected_us)
			break;
+		if (s->target_residency > data->predicted_us)
+			break;
-		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+		if (s->exit_latency > latency_req)
			break;
	}

@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
-	unsigned int measured_us =
-		cpuidle_get_last_residency(dev) + data->elapsed_us;
+	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &dev->states[last_idx];
+	unsigned int measured_us;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
	 * for one full standard timer tick.  However, be aware that this
	 * could potentially result in a suboptimal state transition.
	 */
-	if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
-		measured_us = USEC_PER_SEC / HZ;
+	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+		last_idle_us = USEC_PER_SEC / HZ;

+	/*
+	 * measured_us and elapsed_us are the cumulative idle time, since the
+	 * last time we were woken out of idle by an interrupt.
+	 */
+	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+		measured_us = data->elapsed_us + last_idle_us;
+	else
+		measured_us = -1;

-	/* Predict time remaining until next break event */
-	if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
+	/* Predict time until next break event */
+	data->predicted_us = max(measured_us, data->last_measured_us);
+
+	if (last_idle_us + BREAK_FUZZ <
+	    data->expected_us - target->exit_latency) {
 		data->last_measured_us = measured_us;
 		data->elapsed_us = 0;
 	} else {
-		if (data->elapsed_us < data->elapsed_us + measured_us)
+		data->elapsed_us = measured_us;
-		else
-			data->elapsed_us = -1;
-		data->predicted_us = max(measured_us, data->last_measured_us);
	}
}