Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 61502d56 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "drivers: llcc_perfmon: support for dual memory controller added"

parents 7acb3dd9 48fb6a1d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@ enum event_port_select {
	EVENT_PORT_TRP,
	EVENT_PORT_DRP,
	EVENT_PORT_PMGR,
	EVENT_PORT_BEAC1,
	EVENT_PORT_TENURE,
	EVENT_PORT_TLAT,
};
+197 −129
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@
#include "llcc_perfmon.h"

#define LLCC_PERFMON_NAME		"qcom_llcc_perfmon"
#define LLCC_PERFMON_COUNTER_MAX	16
#define MAX_CNTR			16
#define MAX_NUMBER_OF_PORTS		8
#define NUM_CHANNELS			16
#define DELIM_CHAR			" "
@@ -45,7 +45,7 @@ struct llcc_perfmon_private;
 */
struct event_port_ops {
	void (*event_config)(struct llcc_perfmon_private *priv,
			unsigned int type, unsigned int num, bool enable);
			unsigned int type, unsigned int *num, bool enable);
	void (*event_enable)(struct llcc_perfmon_private *priv, bool enable);
	void (*event_filter_config)(struct llcc_perfmon_private *priv,
			enum filter_type filter, unsigned long match,
@@ -55,12 +55,12 @@ struct event_port_ops {
/**
 * struct llcc_perfmon_private	- llcc perfmon private
 * @llcc_map:		llcc register address space map
 * @llcc_bcast_map:	llcc broadcast register address space map
 * @bank_off:		Offset of llcc banks
 * @num_banks:		Number of banks supported
 * @port_ops:		struct event_port_ops
 * @configured:		Mapping of configured event counters
 * @configured_counters:
 *			Count of configured counters.
 * @configured_cntrs:	Count of configured counters.
 * @enables_port:	Port enabled for perfmon configuration
 * @filtered_ports:	Port filter enabled
 * @port_configd:	Number of perfmon port configuration supported
@@ -76,8 +76,8 @@ struct llcc_perfmon_private {
	unsigned int bank_off[NUM_CHANNELS];
	unsigned int num_banks;
	struct event_port_ops *port_ops[MAX_NUMBER_OF_PORTS];
	struct llcc_perfmon_counter_map configured[LLCC_PERFMON_COUNTER_MAX];
	unsigned int configured_counters;
	struct llcc_perfmon_counter_map configured[MAX_CNTR];
	unsigned int configured_cntrs;
	unsigned int enables_port;
	unsigned int filtered_ports;
	unsigned int port_configd;
@@ -113,18 +113,20 @@ static void llcc_bcast_modify(struct llcc_perfmon_private *llcc_priv,

static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv)
{
	struct llcc_perfmon_counter_map *counter_map;
	uint32_t val;
	unsigned int i, j;

	if (!llcc_priv->configured_counters)
	if (!llcc_priv->configured_cntrs)
		return;

	llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP);
	for (i = 0; i < llcc_priv->configured_counters; i++) {
	for (i = 0; i < llcc_priv->configured_cntrs; i++) {
		counter_map = &llcc_priv->configured[i];
		for (j = 0; j < llcc_priv->num_banks; j++) {
			regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j]
					+ LLCC_COUNTER_n_VALUE(i), &val);
			llcc_priv->configured[i].counter_dump[j] += val;
			counter_map->counter_dump[j] += val;
		}
	}
}
@@ -133,26 +135,39 @@ static ssize_t perfmon_counter_dump_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct llcc_perfmon_counter_map *counter_map;
	unsigned int i, j;
	unsigned long long total;
	ssize_t cnt = 0;

	if (llcc_priv->configured_counters == 0) {
	if (llcc_priv->configured_cntrs == 0) {
		pr_err("counters not configured\n");
		return cnt;
	}

	perfmon_counter_dump(llcc_priv);
	for (i = 0; i < llcc_priv->configured_counters - 1; i++) {
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "Port %02d,",
				llcc_priv->configured[i].port_sel);
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "Event %02d,",
				llcc_priv->configured[i].event_sel);

	for (i = 0; i < llcc_priv->configured_cntrs - 1; i++) {
		total = 0;
		counter_map = &llcc_priv->configured[i];
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
				"Port %02d,Event %02d,",
				counter_map->port_sel, counter_map->event_sel);

		if ((counter_map->port_sel == EVENT_PORT_BEAC) &&
				(llcc_priv->num_mc > 1)) {
			/* DBX uses 2 counters for BEAC 0 & 1 */
			i++;
			for (j = 0; j < llcc_priv->num_banks; j++) {
			total += llcc_priv->configured[i].counter_dump[j];
			llcc_priv->configured[i].counter_dump[j] = 0;
				total += counter_map->counter_dump[j];
				counter_map->counter_dump[j] = 0;
				total += counter_map[1].counter_dump[j];
				counter_map[1].counter_dump[j] = 0;
			}
		} else {
			for (j = 0; j < llcc_priv->num_banks; j++) {
				total += counter_map->counter_dump[j];
				counter_map->counter_dump[j] = 0;
			}
		}

		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "0x%016llx\n",
@@ -161,9 +176,10 @@ static ssize_t perfmon_counter_dump_show(struct device *dev,

	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "CYCLE COUNT, ,");
	total = 0;
	counter_map = &llcc_priv->configured[i];
	for (j = 0; j < llcc_priv->num_banks; j++) {
		total += llcc_priv->configured[i].counter_dump[j];
		llcc_priv->configured[i].counter_dump[j] = 0;
		total += counter_map->counter_dump[j];
		counter_map->counter_dump[j] = 0;
	}

	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "0x%016llx\n", total);
@@ -179,19 +195,19 @@ static ssize_t perfmon_configure_store(struct device *dev,
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct event_port_ops *port_ops;
	unsigned int j = 0, k;
	struct llcc_perfmon_counter_map *counter_map;
	unsigned int j = 0, k, end_cntrs;
	unsigned long port_sel, event_sel;
	uint32_t val;
	char *token, *delim = DELIM_CHAR;

	mutex_lock(&llcc_priv->mutex);
	if (llcc_priv->configured_counters) {
	if (llcc_priv->configured_cntrs) {
		pr_err("Counters configured already, remove & try again\n");
		mutex_unlock(&llcc_priv->mutex);
		return -EINVAL;
	}

	llcc_priv->configured_counters = 0;
	token = strsep((char **)&buf, delim);

	while (token != NULL) {
@@ -214,32 +230,35 @@ static ssize_t perfmon_configure_store(struct device *dev,
			continue;
		}

		llcc_priv->configured[j].port_sel = port_sel;
		llcc_priv->configured[j].event_sel = event_sel;
		/* Last perfmon counter for cycle counter */
		end_cntrs = 1;
		if (port_sel == EVENT_PORT_BEAC)
			end_cntrs = llcc_priv->num_mc;

		if (j == (MAX_CNTR - end_cntrs))
			break;

		counter_map = &llcc_priv->configured[j];
		counter_map->port_sel = port_sel;
		counter_map->event_sel = event_sel;
		for (k = 0; k < llcc_priv->num_banks; k++)
			llcc_priv->configured[j].counter_dump[k] = 0;
			counter_map->counter_dump[k] = 0;

		port_ops = llcc_priv->port_ops[port_sel];
		pr_info("counter %d configured for event %ld from port %ld\n",
				j, event_sel, port_sel);
		port_ops->event_config(llcc_priv, event_sel, j++, true);
		if (!(llcc_priv->enables_port & (1 << port_sel)))
			if (port_ops->event_enable)
		port_ops->event_config(llcc_priv, event_sel, &j, true);
		pr_info("counter %2d configured for event %2ld from port %ld\n",
				j++, event_sel, port_sel);
		if (((llcc_priv->enables_port & (1 << port_sel)) == 0) &&
				(port_ops->event_enable))
			port_ops->event_enable(llcc_priv, true);

		llcc_priv->enables_port |= (1 << port_sel);

		/* Last perfmon counter for cycle counter */
		if (llcc_priv->configured_counters++ ==
				(LLCC_PERFMON_COUNTER_MAX - 2))
			break;
	}

	/* configure clock event */
	val = COUNT_CLOCK_EVENT | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), val);

	llcc_priv->configured_counters++;
	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j++), val);
	llcc_priv->configured_cntrs = j;
	mutex_unlock(&llcc_priv->mutex);
	return count;
}
@@ -249,12 +268,13 @@ static ssize_t perfmon_remove_store(struct device *dev,
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct event_port_ops *port_ops;
	unsigned int j = 0, counter_remove = 0;
	struct llcc_perfmon_counter_map *counter_map;
	unsigned int j = 0, end_cntrs;
	unsigned long port_sel, event_sel;
	char *token, *delim = DELIM_CHAR;

	mutex_lock(&llcc_priv->mutex);
	if (!llcc_priv->configured_counters) {
	if (!llcc_priv->configured_cntrs) {
		pr_err("Counters not configured\n");
		mutex_unlock(&llcc_priv->mutex);
		return -EINVAL;
@@ -282,29 +302,32 @@ static ssize_t perfmon_remove_store(struct device *dev,
			continue;
		}

		/* Last perfmon counter for cycle counter */
		end_cntrs = 1;
		if (port_sel == EVENT_PORT_BEAC)
			end_cntrs = llcc_priv->num_mc;

		if (j == (llcc_priv->configured_cntrs - end_cntrs))
			break;

		/* put dummy values */
		llcc_priv->configured[j].port_sel = MAX_NUMBER_OF_PORTS;
		llcc_priv->configured[j].event_sel = 100;
		counter_map = &llcc_priv->configured[j];
		counter_map->port_sel = MAX_NUMBER_OF_PORTS;
		counter_map->event_sel = 0;
		port_ops = llcc_priv->port_ops[port_sel];
		pr_info("removed counter %d for event %ld from port %ld\n",
				j, event_sel, port_sel);

		port_ops->event_config(llcc_priv, event_sel, j++, false);
		if (llcc_priv->enables_port & (1 << port_sel))
			if (port_ops->event_enable)
		port_ops->event_config(llcc_priv, event_sel, &j, false);
		pr_info("removed counter %2d for event %2ld from port %2ld\n",
				j++, event_sel, port_sel);
		if ((llcc_priv->enables_port & (1 << port_sel)) &&
				(port_ops->event_enable))
			port_ops->event_enable(llcc_priv, false);

		llcc_priv->enables_port &= ~(1 << port_sel);

		/* Last perfmon counter for cycle counter */
		if (counter_remove++ == (LLCC_PERFMON_COUNTER_MAX - 2))
			break;
	}

	/* remove clock event */
	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), 0);

	llcc_priv->configured_counters = 0;
	llcc_priv->configured_cntrs = 0;
	mutex_unlock(&llcc_priv->mutex);
	return count;
}
@@ -339,13 +362,12 @@ static ssize_t perfmon_filter_config_store(struct device *dev,
	char *token, *delim = DELIM_CHAR;
	enum filter_type filter = UNKNOWN;

	if (llcc_priv->configured_counters) {
	if (llcc_priv->configured_cntrs) {
		pr_err("remove configured events and try\n");
		return count;
	}

	mutex_lock(&llcc_priv->mutex);

	token = strsep((char **)&buf, delim);
	if (token != NULL)
		filter = find_filter_type(token);
@@ -484,7 +506,7 @@ static ssize_t perfmon_start_store(struct device *dev,

	mutex_lock(&llcc_priv->mutex);
	if (start) {
		if (!llcc_priv->configured_counters) {
		if (!llcc_priv->configured_cntrs) {
			pr_err("start failed. perfmon not configured\n");
			mutex_unlock(&llcc_priv->mutex);
			return -EINVAL;
@@ -505,14 +527,13 @@ static ssize_t perfmon_start_store(struct device *dev,
		if (llcc_priv->expires)
			hrtimer_cancel(&llcc_priv->hrtimer);

		if (!llcc_priv->configured_counters)
		if (!llcc_priv->configured_cntrs)
			pr_err("stop failed. perfmon not configured\n");
	}

	mask_val = PERFMON_MODE_MONITOR_MODE_MASK |
		PERFMON_MODE_MONITOR_EN_MASK;
	llcc_bcast_modify(llcc_priv, PERFMON_MODE, val, mask_val);

	mutex_unlock(&llcc_priv->mutex);
	return count;
}
@@ -555,7 +576,6 @@ static ssize_t perfmon_scid_status_show(struct device *dev,
	for (i = 0; i < SCID_MAX; i++) {
		total = 0;
		offset = TRP_SCID_n_STATUS(i);

		for (j = 0; j < llcc_priv->num_banks; j++) {
			regmap_read(llcc_priv->llcc_map,
					llcc_priv->bank_off[j] + offset, &val);
@@ -604,39 +624,49 @@ static struct attribute_group llcc_perfmon_group = {
	.attrs	= llcc_perfmon_attrs,
};

static void perfmon_counter_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int port, unsigned int event_counter_num)
static void perfmon_cntr_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int port, unsigned int counter_num, bool enable)
{
	uint32_t val;
	uint32_t val = 0;

	if (counter_num >= MAX_CNTR)
		return;

	if (enable)
		val = (port & PERFMON_PORT_SELECT_MASK) |
		((event_counter_num << EVENT_SELECT_SHIFT) &
		PERFMON_EVENT_SELECT_MASK) | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(event_counter_num),
			val);
			((counter_num << EVENT_SELECT_SHIFT) &
			PERFMON_EVENT_SELECT_MASK) | CLEAR_ON_ENABLE |
			CLEAR_ON_DUMP;

	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(counter_num), val);
}

static void feac_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->version == REV_2)
		mask_val = EVENT_SEL_MASK7;

	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (llcc_priv->version == REV_2)
			val = (event_type << EVENT_SEL_SHIFT) &
					EVENT_SEL_MASK7;

		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, FEAC_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, FEAC_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_FEAC, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_FEAC, *counter_num, enable);
}

static void feac_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -748,10 +778,10 @@ static struct event_port_ops feac_port_ops = {
};

static void ferc_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC))
@@ -762,12 +792,11 @@ static void ferc_event_config(struct llcc_perfmon_private *llcc_priv,
		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, FERC_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, FERC_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_FERC, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_FERC, *counter_num, enable);
}

static void ferc_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -810,10 +839,10 @@ static struct event_port_ops ferc_port_ops = {
};

static void fewc_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC))
@@ -824,12 +853,11 @@ static void fewc_event_config(struct llcc_perfmon_private *llcc_priv,
		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, FEWC_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, FEWC_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_FEWC, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_FEWC, *counter_num, enable);
}

static void fewc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
@@ -857,17 +885,18 @@ static struct event_port_ops fewc_port_ops = {
};

static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;
	uint32_t valcfg = 0, mask_valcfg;
	unsigned int mc_cnt, offset;
	struct llcc_perfmon_counter_map *counter_map;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC)) {
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;
		if (llcc_priv->version == REV_0)
		if (llcc_priv->version == REV_2)
			mask_valcfg = BEAC_WR_BEAT_FILTER_SEL_MASK |
				BEAC_WR_BEAT_FILTER_EN_MASK |
				BEAC_RD_BEAT_FILTER_SEL_MASK |
@@ -877,9 +906,8 @@ static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC)) {
			val |= (FILTER_0 << FILTER_SEL_SHIFT) |
				FILTER_EN;
			if (llcc_priv->version == REV_0)
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
			if (llcc_priv->version == REV_2)
				valcfg = (FILTER_0 <<
					BEAC_WR_BEAT_FILTER_SEL_SHIFT) |
					BEAC_WR_BEAT_FILTER_EN |
@@ -887,20 +915,35 @@ static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
					BEAC_RD_BEAT_FILTER_SEL_SHIFT) |
					BEAC_RD_BEAT_FILTER_EN;
		}

		counter_num = event_counter_num;
	}

	for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
		offset = BEAC_PROF_EVENT_n_CFG(event_counter_num) +
		offset = BEAC_PROF_EVENT_n_CFG(*counter_num + mc_cnt) +
			mc_cnt * BEAC_INST_OFF;
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);

		offset = BEAC_PROF_CFG + mc_cnt * BEAC_INST_OFF;
		llcc_bcast_modify(llcc_priv, offset, valcfg, mask_valcfg);

		perfmon_cntr_config(llcc_priv, EVENT_PORT_BEAC, *counter_num,
				enable);
		/* DBX uses 2 counters for BEAC 0 & 1 */
		if (mc_cnt == 1)
			perfmon_cntr_config(llcc_priv, EVENT_PORT_BEAC1,
					*counter_num + mc_cnt, enable);
	}

	perfmon_counter_config(llcc_priv, EVENT_PORT_BEAC, counter_num);
	/* DBX uses 2 counters for BEAC 0 & 1 */
	if (llcc_priv->num_mc > 1) {
		counter_map = &llcc_priv->configured[(*counter_num)++];
		if (enable) {
			counter_map->port_sel = EVENT_PORT_BEAC;
			counter_map->event_sel = event_type;
		} else {
			counter_map->port_sel = MAX_NUMBER_OF_PORTS;
			counter_map->event_sel = 100;
		}
	}
}

static void beac_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -961,10 +1004,10 @@ static struct event_port_ops beac_port_ops = {
};

static void berc_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC))
@@ -974,13 +1017,11 @@ static void berc_event_config(struct llcc_perfmon_private *llcc_priv,
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, BERC_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, BERC_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_BERC, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_BERC, *counter_num, enable);
}

static void berc_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -1023,26 +1064,31 @@ static struct event_port_ops berc_port_ops = {
};

static void trp_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->version == REV_2)
		mask_val = EVENT_SEL_MASK7;

	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP))
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (llcc_priv->version == REV_2)
			val = (event_type << EVENT_SEL_SHIFT) &
					EVENT_SEL_MASK7;

		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_TRP, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_TRP, *counter_num, enable);
}

static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
@@ -1052,11 +1098,18 @@ static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
	uint32_t val = 0, mask_val;

	if (filter == SCID) {
		if (llcc_priv->version == REV_2) {
			if (enable)
				val = (1 << match);

			mask_val = SCID_MULTI_MATCH_MASK;
		} else {
			if (enable)
				val = (match << TRP_SCID_MATCH_SHIFT) |
					(mask << TRP_SCID_MASK_SHIFT);

			mask_val = TRP_SCID_MATCH_MASK | TRP_SCID_MASK_MASK;
		}
	} else if (filter == WAY_ID) {
		if (enable)
			val = (match << TRP_WAY_ID_MATCH_SHIFT) |
@@ -1074,7 +1127,12 @@ static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
		return;
	}

	llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG1, val, mask_val);
	if ((llcc_priv->version == REV_2) && (filter == SCID))
		llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG2, val,
				mask_val);
	else
		llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG1, val,
				mask_val);
}

static struct event_port_ops  trp_port_ops = {
@@ -1083,26 +1141,31 @@ static struct event_port_ops trp_port_ops = {
};

static void drp_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->version == REV_2)
		mask_val = EVENT_SEL_MASK7;

	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP))
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (llcc_priv->version == REV_2)
			val = (event_type << EVENT_SEL_SHIFT) &
					EVENT_SEL_MASK7;

		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, DRP_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, DRP_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_DRP, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_DRP, *counter_num, enable);
}

static void drp_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -1123,10 +1186,10 @@ static struct event_port_ops drp_port_ops = {
};

static void pmgr_event_config(struct llcc_perfmon_private *llcc_priv,
		unsigned int event_type, unsigned int event_counter_num,
		unsigned int event_type, unsigned int *counter_num,
		bool enable)
{
	uint32_t val = 0, mask_val, counter_num = 0;
	uint32_t val = 0, mask_val;

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR))
@@ -1136,13 +1199,11 @@ static void pmgr_event_config(struct llcc_perfmon_private *llcc_priv,
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR))
			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;

		counter_num = event_counter_num;
	}

	llcc_bcast_modify(llcc_priv, PMGR_PROF_EVENT_n_CFG(event_counter_num),
	llcc_bcast_modify(llcc_priv, PMGR_PROF_EVENT_n_CFG(*counter_num),
			val, mask_val);
	perfmon_counter_config(llcc_priv, EVENT_PORT_PMGR, counter_num);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_PMGR, *counter_num, enable);
}

static struct event_port_ops pmgr_port_ops = {
@@ -1191,17 +1252,22 @@ static int llcc_perfmon_probe(struct platform_device *pdev)

	llcc_priv->llcc_map = llcc_driv_data->regmap;
	llcc_priv->llcc_bcast_map = llcc_driv_data->bcast_regmap;

	llcc_bcast_read(llcc_priv, LLCC_COMMON_STATUS0, &val);
	llcc_priv->num_mc = (val & NUM_MC_MASK) >> NUM_MC_SHIFT;
	/* Setting to 1, as some platforms it read as 0 */
	if (llcc_priv->num_mc == 0)
		llcc_priv->num_mc = 1;

	llcc_priv->num_banks = (val & LB_CNT_MASK) >> LB_CNT_SHIFT;
	for (val = 0; val < llcc_priv->num_banks; val++)
		llcc_priv->bank_off[val] = BANK_OFFSET * val;

	llcc_priv->version = REV_0;
	llcc_bcast_read(llcc_priv, LLCC_COMMON_HW_INFO, &val);
	if (val >= LLCC_VERSION)
	if (val == LLCC_VERSION_1)
		llcc_priv->version = REV_1;
	else if (val == LLCC_VERSION_2)
		llcc_priv->version = REV_2;

	result = sysfs_create_group(&pdev->dev.kobj, &llcc_perfmon_group);
	if (result) {
@@ -1222,6 +1288,8 @@ static int llcc_perfmon_probe(struct platform_device *pdev)
	hrtimer_init(&llcc_priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	llcc_priv->hrtimer.function = llcc_perfmon_timer_handler;
	llcc_priv->expires = 0;
	pr_info("Revision %d, has %d memory controllers connected with LLCC\n",
			llcc_priv->version, llcc_priv->num_mc);
	return 0;
}

+19 −14
Original line number Diff line number Diff line
@@ -12,49 +12,50 @@
#define FEAC_PROF_FILTER_0_CFG3		(0x03700C)
#define FEAC_PROF_FILTER_0_CFG5		(0x037014)
#define FEAC_PROF_FILTER_0_CFG6		(0x037018)
#define FEAC_PROF_EVENT_n_CFG(n)	(0x037060 + 4 * n)
#define FEAC_PROF_EVENT_n_CFG(n)	(0x037060 + 4 * (n))
#define FEAC_PROF_CFG			(0x0370A0)

/* FERC */
#define FERC_PROF_FILTER_0_CFG0		(0x03B000)
#define FERC_PROF_EVENT_n_CFG(n)	(0x03B020 + 4 * n)
#define FERC_PROF_EVENT_n_CFG(n)	(0x03B020 + 4 * (n))
#define FERC_PROF_CFG			(0x03B060)

/* FEWC */
#define FEWC_PROF_FILTER_0_CFG0		(0x033000)
#define FEWC_PROF_EVENT_n_CFG(n)	(0x033020 + 4 * n)
#define FEWC_PROF_EVENT_n_CFG(n)	(0x033020 + 4 * (n))

/* BEAC */
#define BEAC_PROF_FILTER_0_CFG5		(0x049014)
#define BEAC_PROF_EVENT_n_CFG(n)	(0x049040 + 4 * n)
#define BEAC_PROF_EVENT_n_CFG(n)	(0x049040 + 4 * (n))
#define BEAC_PROF_CFG			(0x049080)
#define BEAC_INST_OFF			(0x4000)

/* BERC */
#define BERC_PROF_FILTER_0_CFG0		(0x039000)
#define BERC_PROF_EVENT_n_CFG(n)	(0x039020 + 4 * n)
#define BERC_PROF_EVENT_n_CFG(n)	(0x039020 + 4 * (n))
#define BERC_PROF_CFG			(0x039060)

/* TRP */
#define TRP_PROF_FILTER_0_CFG1		(0x024004)
#define TRP_PROF_EVENT_n_CFG(n)		(0x024020 + 4 * n)
#define TRP_SCID_n_STATUS(n)		(0x000004 + 0x1000 * n)
#define TRP_PROF_FILTER_0_CFG2		(0x024008)
#define TRP_PROF_EVENT_n_CFG(n)		(0x024020 + 4 * (n))
#define TRP_SCID_n_STATUS(n)		(0x000004 + 0x1000 * (n))

/* DRP */
#define DRP_PROF_EVENT_n_CFG(n)		(0x044010 + 4 * n)
#define DRP_PROF_EVENT_n_CFG(n)		(0x044010 + 4 * (n))
#define DRP_PROF_CFG			(0x044050)

/* PMGR */
#define PMGR_PROF_EVENT_n_CFG(n)	(0x03F000 + 4 * n)
#define PMGR_PROF_EVENT_n_CFG(n)	(0x03F000 + 4 * (n))

#define PERFMON_COUNTER_n_CONFIG(n)	(0x031020 + 4 * n)
#define PERFMON_COUNTER_n_CONFIG(n)	(0x031020 + 4 * (n))
#define PERFMON_MODE			(0x03100C)
#define PERFMON_DUMP			(0x031010)
#define BROADCAST_COUNTER_n_VALUE(n)	(0x031060 + 4 * n)
#define BROADCAST_COUNTER_n_VALUE(n)	(0x031060 + 4 * (n))

#define LLCC_COUNTER_n_VALUE(n)		(0x031060 + 4 * n)
#define LLCC_COUNTER_n_VALUE(n)		(0x031060 + 4 * (n))

#define EVENT_NUM_MAX			(64)
#define EVENT_NUM_MAX			(128)
#define SCID_MAX			(32)

/* Perfmon */
@@ -122,6 +123,8 @@
#define EVENT_SEL_SHIFT			(0)
#define EVENT_SEL_MASK			GENMASK(EVENT_SEL_SHIFT + 5,\
						EVENT_SEL_SHIFT)
#define EVENT_SEL_MASK7			GENMASK(EVENT_SEL_SHIFT + 6,\
						EVENT_SEL_SHIFT)

#define CACHEALLOC_MASK_SHIFT		(16)
#define CACHEALLOC_MASK_MASK		GENMASK(CACHEALLOC_MASK_SHIFT + 3, \
@@ -263,8 +266,10 @@
					+ 13, \
					TRP_SCID_STATUS_CURRENT_CAP_SHIFT)

#define LLCC_VERSION			(0x01010100)
#define LLCC_VERSION_1			(0x01010200)
#define LLCC_VERSION_2			(0x02000000)
#define REV_0				(0x0)
#define REV_1				(0x1)
#define REV_2				(0x2)
#define BANK_OFFSET			(0x80000)
#endif /* _SOC_QCOM_LLCC_PERFMON_H_ */