
Commit 0bbdb707 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "msm: perf: Refine disable/enable in tracecounters"

parents 61616ffc 45b05826
perf_debug.c +1 −0
@@ -51,6 +51,7 @@ static char *descriptions =
	"26 Perf: interrupt disable without bringing cpus up\n"
	"27 Perf: stop counters when going into hotplug\n"
	"28 ARM: dts: msm: add perf-events support for msm8909\n"
	"29 msm: perf: Refine disable/enable in tracecounters\n"
;

static ssize_t desc_read(struct file *fp, char __user *buf,
perf_trace_counters.c +45 −11
@@ -17,7 +17,8 @@
#include "perf_trace_counters.h"

static unsigned int tp_pid_state;

+DEFINE_PER_CPU(u32, cntenset_val);
+DEFINE_PER_CPU(unsigned long, l2_enmask);
DEFINE_PER_CPU(u32, previous_ccnt);
DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
DEFINE_PER_CPU(u32[NUM_L2_PERCPU], previous_l2_cnts);
@@ -40,15 +41,10 @@ static struct notifier_block tracectr_cpu_hotplug_notifier_block = {
	.notifier_call = tracectr_cpu_hotplug_notifier,
};

-static void setup_prev_cnts(u32 cpu)
+static void setup_prev_cnts(u32 cpu, u32 cnten_val)
{
	int i;
-	u32 cnten_val;

-	/* Read PMCNTENSET */
-	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(cnten_val));
-	/* Disable all the counters that were enabled */
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r"(cnten_val));
	if (cnten_val & CC) {
		/* Read value */
		asm volatile("mrc p15, 0, %0, c9, c13, 0"
@@ -65,28 +61,66 @@ static void setup_prev_cnts(u32 cpu)
				: "=r"(per_cpu(previous_l1_cnts[i], cpu)));
		}
	}
-	/* Enable all the counters that were disabled */
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r"(cnten_val));
}

static int tracectr_notifier(struct notifier_block *self, unsigned long cmd,
		void *v)
{
+	u32 cnten_val;
+	u32 l2_cnten_val;
+	u32 val;
+	u32 bit;
+	int i;
+	int num_l2ctrs;
	struct thread_info *thread = v;
	int current_pid;
	u32 cpu = thread->cpu;
+	unsigned long idx;
+	u32 num_cores = nr_cpu_ids;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return -EFAULT;

	current_pid = thread->task->pid;
	if (per_cpu(old_pid, cpu) != -1) {
+		/* Read PMCNTENSET */
+		asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(cnten_val));
+		per_cpu(cntenset_val, cpu) = cnten_val;
+		/* Disable all the counters that were enabled */
+		asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r"(cnten_val));

		if (per_cpu(hotplug_flag, cpu) == 1) {
			per_cpu(hotplug_flag, cpu) = 0;
-			setup_prev_cnts(cpu);
-		} else
+			setup_prev_cnts(cpu, cnten_val);
+		} else {
+			/* check # L2 counters */
+			val = get_l2_indirect_reg(L2PMCR);
+			num_l2ctrs = ((val >> 11) & 0x1f) + 1;
+			l2_cnten_val = get_l2_indirect_reg(L2PMCNTENSET);
+			per_cpu(l2_enmask, cpu) = 0;
+			for (i = 0; i < NUM_L2_PERCPU; i++) {
+				/*
+				 * Assign L2 counters to cores sequentially
+				 * starting from zero. A core could have
+				 * multiple L2 counters allocated if # L2
+				 * counters is more than the # cores
+				 */
+				idx = cpu + (num_cores * i);
+				bit = BIT(idx);
+				if (idx < num_l2ctrs && (l2_cnten_val & bit)) {
+					/* Disable */
+					set_l2_indirect_reg(L2PMCNTENCLR, bit);
+					per_cpu(l2_enmask, cpu) |= bit;
+				}
+			}
			trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
				current_pid);
+			/* Enable L2*/
+			set_l2_indirect_reg(L2PMCNTENSET,
+					per_cpu(l2_enmask, cpu));
+		}
+		/* Enable all the counters that were disabled */
+		asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r"(cnten_val));
	}
	per_cpu(old_pid, cpu) = current_pid;
	return NOTIFY_OK;
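
The block comment added in the hunk above explains that L2 counters are handed out to cores sequentially starting from zero, so a core can own more than one counter when there are more L2 counters than cores. A minimal user-space sketch of that index and mask arithmetic (plain C; the core and counter counts below are hypothetical stand-ins for nr_cpu_ids and the value decoded from L2PMCR):

#include <stdio.h>

#define NUM_L2_PERCPU 2				/* per-core counter slots, as in the driver */

int main(void)
{
	unsigned int num_cores = 4;		/* hypothetical core count (nr_cpu_ids) */
	unsigned int num_l2ctrs = 5;		/* hypothetical count decoded from L2PMCR */

	for (unsigned int cpu = 0; cpu < num_cores; cpu++) {
		unsigned long enmask = 0;

		for (unsigned int i = 0; i < NUM_L2_PERCPU; i++) {
			unsigned long idx = cpu + (num_cores * i);

			if (idx < num_l2ctrs)
				enmask |= 1UL << idx;	/* BIT(idx) in the kernel */
		}
		printf("cpu %u owns L2 counter mask 0x%lx\n", cpu, enmask);
	}
	return 0;
}

With four cores and five counters, cpu 0 ends up with counters 0 and 4 (mask 0x11), while cpus 1-3 each own a single counter.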
perf_trace_counters.h +24 −42
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,8 @@
#include <linux/tracepoint.h>
#include <mach/msm-krait-l2-accessors.h>

+DECLARE_PER_CPU(u32, cntenset_val);
+DECLARE_PER_CPU(unsigned long, l2_enmask);
DECLARE_PER_CPU(u32, previous_ccnt);
DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
DECLARE_PER_CPU(u32[NUM_L2_PERCPU], previous_l2_cnts);
@@ -54,13 +56,13 @@ TRACE_EVENT(sched_switch_with_ctrs,

		TP_fast_assign(
			u32 cpu = smp_processor_id();
-			u32 idx;
+			unsigned long idx;
			u32 i;
			u32 counter_reg;
			u32 val;
			u32 cnten_val;
+			unsigned long l2_cnten_val;
			u32 num_l2ctrs;
-			u32 num_cores = nr_cpu_ids;
			u32 total_ccnt = 0;
			u32 total_cnt = 0;
			u32 delta_l1_cnts[NUM_L1_CTRS];
@@ -71,12 +73,7 @@ TRACE_EVENT(sched_switch_with_ctrs,
			val = get_l2_indirect_reg(L2PMCR);
			num_l2ctrs = ((val >> 11) & 0x1f) + 1;

-			/* Read PMCNTENSET */
-			asm volatile("mrc p15, 0, %0, c9, c12, 1"
-						: "=r"(cnten_val));
-			/* Disable all the counters that were enabled */
-			asm volatile("mcr p15, 0, %0, c9, c12, 2"
-					: : "r"(cnten_val));
+			cnten_val = per_cpu(cntenset_val, cpu);
			if (cnten_val & CC) {
				/* Read value */
				asm volatile("mrc p15, 0, %0, c9, c13, 0"
@@ -103,23 +100,13 @@ TRACE_EVENT(sched_switch_with_ctrs,
				} else
					delta_l1_cnts[i] = 0;
			}
-			/* Enable all the counters that were disabled */
-			asm volatile("mcr p15, 0, %0, c9, c12, 1"
-					: : "r"(cnten_val));

			/* L2 counters */
-			/* Assign L2 counters to cores sequentially starting
-			 * from zero. A core could have multiple L2 counters
-			 * allocated if # L2 counters is more than the # cores
-			 */
-			cnten_val = get_l2_indirect_reg(L2PMCNTENSET);
-			for (i = 0; i < NUM_L2_PERCPU; i++) {
-				idx = cpu + (num_cores * i);
-				if (idx < num_l2ctrs &&
-						(cnten_val & (1 << idx))) {
-					/* Disable */
-					set_l2_indirect_reg(L2PMCNTENCLR,
-						(1 << idx));
+			i = 0;
+			delta_l2_cnts[0] = 0;
+			delta_l2_cnts[1] = 0;
+			l2_cnten_val = per_cpu(l2_enmask, cpu);
+			for_each_set_bit(idx, &l2_cnten_val, num_l2ctrs) {
				/* L2PMEVCNTR values go from 0x421,
				 * 0x431..
				 * So we multiply idx by 16 to get the
@@ -127,17 +114,12 @@ TRACE_EVENT(sched_switch_with_ctrs,
				 */
				counter_reg = (idx * 16) +
					IA_L2PMXEVCNTR_BASE;
-					total_cnt =
-					  get_l2_indirect_reg(counter_reg);
-					/* Enable */
-					set_l2_indirect_reg(L2PMCNTENSET,
-						(1 << idx));
+				total_cnt = get_l2_indirect_reg(counter_reg);
				delta_l2_cnts[i] = total_cnt -
				  per_cpu(previous_l2_cnts[i], cpu);
				per_cpu(previous_l2_cnts[i], cpu) =
					total_cnt;
-				} else
-					delta_l2_cnts[i] = 0;
+				i++;
			}
			__entry->ctr0 = delta_l1_cnts[0];
			__entry->ctr1 = delta_l1_cnts[1];
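
In the final hunk the trace event no longer toggles the L2 counters itself; it walks the enable mask saved by the notifier with for_each_set_bit, reads each counter at (idx * 16) + IA_L2PMXEVCNTR_BASE, and records the delta against the previous snapshot. A rough user-space sketch of that delta bookkeeping (plain C; read_counter() is a made-up stand-in for get_l2_indirect_reg(), and the mask and previous values are invented):

#include <stdio.h>

#define NUM_L2_PERCPU 2

/* Stand-in for get_l2_indirect_reg(); returns a fake monotonically growing count. */
static unsigned int read_counter(unsigned long idx)
{
	return 1000u * (unsigned int)(idx + 1);
}

int main(void)
{
	unsigned long l2_enmask = 0x5;			/* saved mask: counters 0 and 2 */
	unsigned long num_l2ctrs = 5;
	unsigned int previous_l2_cnts[NUM_L2_PERCPU] = { 900u, 2500u };
	unsigned int delta_l2_cnts[NUM_L2_PERCPU] = { 0, 0 };
	unsigned int i = 0;

	/* Open-coded equivalent of for_each_set_bit(idx, &l2_enmask, num_l2ctrs) */
	for (unsigned long idx = 0; idx < num_l2ctrs; idx++) {
		unsigned int total_cnt;

		if (!(l2_enmask & (1UL << idx)))
			continue;

		total_cnt = read_counter(idx);		/* counter_reg = idx*16 + base in the driver */
		delta_l2_cnts[i] = total_cnt - previous_l2_cnts[i];
		previous_l2_cnts[i] = total_cnt;
		i++;
	}

	for (i = 0; i < NUM_L2_PERCPU; i++)
		printf("delta_l2_cnts[%u] = %u\n", i, delta_l2_cnts[i]);
	return 0;
}

With the mask 0x5, only counters 0 and 2 are read, and their deltas land in delta_l2_cnts[0] and delta_l2_cnts[1] in the order their bits appear, mirroring how the i index advances in the trace event.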