
Commit 6048fbb5 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "qcom/l2cache_counters: Enable event creation for any cluster"

parents 4d4f6ede f5343ced
+65 −31
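In short, the change caches each event's owning cluster in event->pmu_private at init time instead of re-deriving it from event->cpu via get_cluster_pmu() in every hot path, and reworks the CPU-hotplug callbacks so that an online managing CPU can take over a cluster whose own CPUs are all offline. A minimal userspace sketch of the caching pattern (toy types only, not the driver's real structures) is shown below:

#include <stdio.h>

/* Toy stand-ins for the driver's cluster_pmu / perf_event structures. */
struct toy_cluster {
	int cluster_id;
};

struct toy_event {
	int cpu;
	void *pmu_private;	/* owning cluster, cached once at init */
};

static struct toy_cluster clusters[2] = { { 0 }, { 1 } };

/* Stand-in for get_cluster_pmu(): map a CPU to its cluster (4 CPUs each). */
static struct toy_cluster *toy_get_cluster(int cpu)
{
	return &clusters[cpu / 4];
}

/* event_init: resolve the cluster once and cache the pointer. */
static void toy_event_init(struct toy_event *ev)
{
	ev->pmu_private = toy_get_cluster(ev->cpu);
}

/* Hot path (start/stop/add/del/update): use the cached pointer directly. */
static void toy_event_start(struct toy_event *ev)
{
	struct toy_cluster *cluster = ev->pmu_private;

	printf("starting event on cluster %d\n", cluster->cluster_id);
}

int main(void)
{
	struct toy_event ev = { .cpu = 5, .pmu_private = NULL };

	toy_event_init(&ev);
	toy_event_start(&ev);	/* prints: starting event on cluster 1 */
	return 0;
}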
@@ -147,14 +147,12 @@ struct l2cache_pmu {
 	struct list_head clusters;
 };
 
-
 static unsigned int which_cluster_tenure = 1;
 static u32 l2_counter_present_mask;
 
 #define to_l2cache_pmu(p)	(container_of(p, struct l2cache_pmu, pmu))
 #define to_cluster_device(d)	container_of(d, struct cluster_pmu, dev)
 
-
 static inline struct cluster_pmu *get_cluster_pmu(
 	struct l2cache_pmu *l2cache_pmu, int cpu)
 {
@@ -408,7 +406,7 @@ static void l2_cache_event_update(struct perf_event *event, u32 ovsr)
 	u64 delta, prev, now;
 	u32 event_idx = hwc->config_base;
 	u32 event_grp;
-	struct cluster_pmu *cluster;
+	struct cluster_pmu *cluster = event->pmu_private;
 
 	prev = local64_read(&hwc->prev_count);
 	if (ovsr) {
@@ -416,7 +414,6 @@ static void l2_cache_event_update(struct perf_event *event, u32 ovsr)
 		goto out;
 	}
 
-	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 	event_idx = (hwc->config_base & REGBIT_MASK) >> REGBIT_SHIFT;
 	event_grp = hwc->config_base & EVENT_GROUP_MASK;
 	do {
@@ -557,6 +554,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	hwc->idx = -1;
 	hwc->config_base = event->attr.config;
 	event->readable_on_cpus = CPU_MASK_ALL;
+	event->pmu_private = cluster;
 
 	/*
 	 * We are overiding event->cpu, as it is possible to enable events,
@@ -568,12 +566,11 @@ static int l2_cache_event_init(struct perf_event *event)
 
 static void l2_cache_event_start(struct perf_event *event, int flags)
 {
-	struct cluster_pmu *cluster;
 	struct hw_perf_event *hwc = &event->hw;
+	struct cluster_pmu *cluster = event->pmu_private;
 	int event_idx;
 
 	hwc->state = 0;
-	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 	event_idx = (hwc->config_base & REGBIT_MASK) >> REGBIT_SHIFT;
 	if ((hwc->config_base & EVENT_GROUP_MASK) == TENURE_CNTRS_GROUP_ID) {
 		cluster_tenure_counter_enable(cluster, event_idx);
@@ -586,11 +583,10 @@ static void l2_cache_event_start(struct perf_event *event, int flags)
 static void l2_cache_event_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cluster_pmu *cluster;
+	struct cluster_pmu *cluster = event->pmu_private;
 	int event_idx;
 	u32 ovsr;
 
-	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 	if (hwc->state & PERF_HES_STOPPED)
 		return;
 
@@ -615,10 +611,9 @@ static void l2_cache_event_stop(struct perf_event *event, int flags)
 static int l2_cache_event_add(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct cluster_pmu *cluster = event->pmu_private;
 	int idx;
-	struct cluster_pmu *cluster;
 
-	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 	idx = l2_cache_get_event_idx(cluster, event);
 	if (idx < 0)
 		return idx;
@@ -640,11 +635,9 @@ static int l2_cache_event_add(struct perf_event *event, int flags)
 static void l2_cache_event_del(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cluster_pmu *cluster;
 	int idx = hwc->idx;
 	unsigned long intr_flag;
-
-	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
+	struct cluster_pmu *cluster = event->pmu_private;
 
 	/*
 	 * We could race here with overflow interrupt of this event.
@@ -913,13 +906,26 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
 	return cluster;
 }
 
+static void clusters_initialization(struct l2cache_pmu *l2cache_pmu,
+						unsigned int cpu)
+{
+	struct cluster_pmu *temp_cluster = NULL;
+
+	list_for_each_entry(temp_cluster, &l2cache_pmu->clusters, next) {
+		cluster_pmu_reset(temp_cluster);
+		enable_irq(temp_cluster->irq);
+		temp_cluster->on_cpu = cpu;
+	}
+}
+
 static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
 {
 	struct cluster_pmu *cluster;
 	struct l2cache_pmu *l2cache_pmu;
+	cpumask_t cluster_online_cpus;
 
 	if (!node)
-		return 0;
+		goto out;
 
 	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
 	cluster = get_cluster_pmu(l2cache_pmu, cpu);
@@ -929,45 +935,59 @@ static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
 		if (!cluster) {
 			/* Only if broken firmware doesn't list every cluster */
 			WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
-			return 0;
+			goto out;
 		}
 	}
 
-	/* If another CPU is managing this cluster, we're done */
-	if (cluster->on_cpu != -1)
-		return 0;
-
 	/*
-	 * All CPUs on this cluster were down, use this one.
-	 * Reset to put it into sane state.
+	 * If another CPU is managing this cluster, whether that cpu is
+	 * from the same cluster.
 	 */
-	cluster->on_cpu = cpu;
-	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
-	cluster_pmu_reset(cluster);
+	if (cluster->on_cpu != -1) {
+		cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
+						get_cpu_mask(cluster->on_cpu));
+		if (cpumask_test_cpu(cluster->on_cpu, &cluster_online_cpus))
+			goto out;
+	} else {
+		clusters_initialization(l2cache_pmu, cpu);
+		cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
+		goto out;
+	}
 
-	enable_irq(cluster->irq);
+	cluster->on_cpu = cpu;
+	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
 
+out:
 	return 0;
 }
 
+static void disable_clusters_interrupt(struct l2cache_pmu *l2cache_pmu)
+{
+	struct cluster_pmu *temp_cluster = NULL;
+
+	list_for_each_entry(temp_cluster, &l2cache_pmu->clusters, next)
+		disable_irq(temp_cluster->irq);
+}
+
 static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
 	struct cluster_pmu *cluster;
 	struct l2cache_pmu *l2cache_pmu;
 	cpumask_t cluster_online_cpus;
 	unsigned int target;
+	struct cluster_pmu *temp_cluster = NULL;
 
 	if (!node)
-		return 0;
+		goto out;
 
 	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
 	cluster = get_cluster_pmu(l2cache_pmu, cpu);
 	if (!cluster)
-		return 0;
+		goto out;
 
 	/* If this CPU is not managing the cluster, we're done */
 	if (cluster->on_cpu != cpu)
-		return 0;
+		goto out;
 
 	/* Give up ownership of cluster */
 	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
@@ -978,14 +998,28 @@ static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 			cpu_online_mask);
 	target = cpumask_any_but(&cluster_online_cpus, cpu);
 	if (target >= nr_cpu_ids) {
-		disable_irq(cluster->irq);
-		return 0;
+		cpumask_and(&cluster_online_cpus, &l2cache_pmu->cpumask,
+			cpu_online_mask);
+		target = cpumask_first(&cluster_online_cpus);
+		if (target >= nr_cpu_ids) {
+			disable_clusters_interrupt(l2cache_pmu);
+			goto out;
+		}
 	}
 
-	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
 	cluster->on_cpu = target;
+	if (cpumask_first(&l2cache_pmu->cpumask) >= nr_cpu_ids) {
+		list_for_each_entry(temp_cluster,
+					&l2cache_pmu->clusters, next) {
+			if (temp_cluster->cluster_id != cluster->cluster_id)
+				temp_cluster->on_cpu = target;
+		}
+	}
+
+	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
 	cpumask_set_cpu(target, &l2cache_pmu->cpumask);
 
+out:
 	return 0;
 }
 
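For clarity, the reworked offline path can be modeled roughly as follows (a standalone sketch under assumed toy CPU and cluster masks, not the driver code): when the managing CPU of a cluster goes offline, the driver first tries another online CPU in the same cluster, then falls back to any online CPU that already manages the PMU (also handing the remaining clusters to that target when the PMU cpumask would otherwise become empty), and only disables the cluster interrupts when no such CPU exists.

#include <stdio.h>

#define NCPUS 8
#define CPUS_PER_CLUSTER 4

/* 1 = online, 0 = offline; toy stand-in for cpu_online_mask. */
static int cpu_online[NCPUS];
/* 1 = CPU currently manages the PMU; toy stand-in for l2cache_pmu->cpumask. */
static int pmu_owner[NCPUS];

/*
 * Pick a new managing CPU when `cpu` goes offline:
 * 1) prefer another online CPU in the same cluster,
 * 2) otherwise fall back to any online CPU already managing the PMU,
 * 3) return -1 if none exists (the cluster interrupts get disabled).
 */
static int pick_target(int cpu)
{
	int first = (cpu / CPUS_PER_CLUSTER) * CPUS_PER_CLUSTER;
	int i;

	for (i = first; i < first + CPUS_PER_CLUSTER; i++)
		if (i != cpu && cpu_online[i])
			return i;

	for (i = 0; i < NCPUS; i++)
		if (i != cpu && cpu_online[i] && pmu_owner[i])
			return i;

	return -1;
}

int main(void)
{
	int i;

	for (i = 0; i < NCPUS; i++)
		cpu_online[i] = 1;

	/* CPU 0 manages cluster 0, CPU 4 manages cluster 1. */
	pmu_owner[0] = pmu_owner[4] = 1;

	/* The rest of cluster 1 is already offline; now CPU 4 leaves too. */
	cpu_online[5] = cpu_online[6] = cpu_online[7] = 0;
	cpu_online[4] = 0;

	printf("cluster 1 hands over to CPU %d\n", pick_target(4));
	return 0;
}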