
Commit 4f262acf authored by Linus Torvalds
Pull ARM updates from Russell King.

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7358/1: perf: add PMU hotplug notifier
  ARM: 7357/1: perf: fix overflow handling for xscale2 PMUs
  ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers
  ARM: 7355/1: perf: clear overflow flag when disabling counter on ARMv7 PMU
  ARM: 7354/1: perf: limit sample_period to half max_period in non-sampling mode
  ARM: ecard: ensure fake vma vm_flags is setup
  ARM: 7346/1: errata: fix PL310 erratum #753970 workaround selection
  ARM: 7345/1: errata: update workaround for A9 erratum #743622
  ARM: 7348/1: arm/spear600: fix one-shot timer
  ARM: 7339/1: amba/serial.h: Include types.h for resolving dependency of type bool
parents d09b3c96 a0feb6db
+1 −1
@@ -1280,7 +1280,7 @@ config ARM_ERRATA_743622
 	depends on CPU_V7
 	help
 	  This option enables the workaround for the 743622 Cortex-A9
-	  (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+	  (r2p*) erratum. Under very rare conditions, a faulty
 	  optimisation in the Cortex-A9 Store Buffer may lead to data
 	  corruption. This workaround sets a specific bit in the diagnostic
 	  register of the Cortex-A9 which disables the Store Buffer
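Note that this Kconfig change only widens the set of affected revisions (all r2p revisions rather than r2p0..r2p2); the workaround itself is applied at CPU setup time by setting a bit in the Cortex-A9 diagnostic register. Purely as an illustrative sketch (the function name is invented here, mainline does this in assembly, and bit 6 is the position the mainline 743622 workaround uses):

	/*
	 * Illustrative sketch only, not the kernel's code: set the bit in
	 * the Cortex-A9 diagnostic register (cp15, c15, c0, 1) that
	 * disables the faulty Store Buffer optimisation. Privileged,
	 * ARM32 kernel context only.
	 */
	static inline void a9_743622_apply_workaround(void)
	{
		unsigned int diag;

		asm volatile("mrc p15, 0, %0, c15, c0, 1" : "=r" (diag));
		diag |= 1 << 6;		/* Store Buffer optimisation disable */
		asm volatile("mcr p15, 0, %0, c15, c0, 1" : : "r" (diag));
	}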
+1 −1
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
-			int idx, int overflow);
+			int idx);
 
 int armpmu_event_set_period(struct perf_event *event,
 			    struct hw_perf_event *hwc,
+1 −0
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma.vm_flags = VM_EXEC;
 	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
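The point of this one-liner: vma is a fake, on-stack structure, so any field the TLB range-flush path reads must be initialised by hand, and an uninitialised vm_flags holds stack garbage. On ARM the flush helpers consult vm_flags (VM_EXEC, for instance, indicates an executable mapping whose instruction TLB entries need flushing too), which is why the fix sets it before the flush. A minimal sketch of the completed pattern, assuming only the two fields the flush path consumes:

	/* Fake vma for flush_tlb_range(): on-stack, so every field the
	 * flush path may read has to be set explicitly. */
	struct vm_area_struct vma;

	vma.vm_flags = VM_EXEC;	/* executable mapping: flush the I-TLB too */
	vma.vm_mm = mm;

	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);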
+34 −11
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	    |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -679,6 +679,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 	armpmu->type = ARM_PMU_DEVICE_CPU;
 }
 
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+					unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
 /*
  * CPU PMU identification and registration.
  */
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
+3 −19
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
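The deleted helper re-derived the enable state from the pmcr snapshot taken at the top of the handler; the hazard these patches guard against (see "ARM: 7356/1" in the list above) is an overflow firing for a counter that has no perf event attached, in which case cpuc->events[idx] is NULL and the old code would go on to dereference it. Checking the event pointer directly covers that case. A sketch of the resulting defensive pattern, with loop bounds and locals assumed from context:

	for (idx = 0; idx < armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		/* Hardware may raise an overflow for a counter perf is
		 * not using; skip before touching event->hw. */
		if (!event)
			continue;

		/* ... update the count and re-arm the period ... */
	}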