Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 57006d3e authored by Pawel Moll
Browse files

bus: arm-ccn: Allocate event when it is being added, not initialised



To make events rotation possible, they should be allocated when event
is being ->added(), not during initialisation. This patch moves the
respective code.

Signed-off-by: Pawel Moll <pawel.moll@arm.com>
parent 9ce1aa86
Loading
Loading
Loading
Loading
+67 −47
Original line number Original line Diff line number Diff line
@@ -628,7 +628,65 @@ static int arm_ccn_pmu_type_eq(u32 a, u32 b)
	return 0;
	return 0;
}
}


static void arm_ccn_pmu_event_destroy(struct perf_event *event)
/*
 * Reserve the hardware resources needed by @event: a PMU counter and,
 * for non-cycle events, an event source or watchpoint slot on the
 * owning node/XP component.
 *
 * Per the commit intent, this runs when the event is ->added() (see
 * arm_ccn_pmu_event_add()) rather than at init time, so counters can
 * be rotated between events.
 *
 * Returns 0 on success, -EAGAIN when no free counter/source is left.
 */
static int arm_ccn_pmu_event_alloc(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	struct arm_ccn_component *source;
	int bit;

	/* Unpack node/XP index, component type and event id from config */
	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);

	/* Allocate the cycle counter */
	if (type == CCN_TYPE_CYCLES) {
		/* Single dedicated slot; busy if the bit was already set */
		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
				ccn->dt.pmu_counters_mask))
			return -EAGAIN;

		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;

		return 0;
	}

	/* Allocate an event counter */
	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
			CCN_NUM_PMU_EVENT_COUNTERS);
	if (hw->idx < 0) {
		dev_dbg(ccn->dev, "No more counters available!\n");
		return -EAGAIN;
	}

	/* Pick the component the event counts on: an XP or a node */
	if (type == CCN_TYPE_XP)
		source = &ccn->xp[node_xp];
	else
		source = &ccn->node[node_xp];
	ccn->dt.pmu_counters[hw->idx].source = source;

	/* Allocate an event source or a watchpoint */
	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
				CCN_NUM_XP_WATCHPOINTS);
	else
		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
				CCN_NUM_PMU_EVENTS);
	if (bit < 0) {
		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
				node_xp);
		/* Roll back the counter reserved above before failing */
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
		return -EAGAIN;
	}
	hw->config_base = bit;

	ccn->dt.pmu_counters[hw->idx].event = event;

	return 0;
}

static void arm_ccn_pmu_event_release(struct perf_event *event)
{
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct hw_perf_event *hw = &event->hw;
@@ -657,8 +715,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
	struct arm_ccn *ccn;
	struct arm_ccn *ccn;
	struct hw_perf_event *hw = &event->hw;
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	u32 node_xp, type, event_id;
	int valid, bit;
	int valid;
	struct arm_ccn_component *source;
	int i;
	int i;
	struct perf_event *sibling;
	struct perf_event *sibling;


@@ -666,7 +723,6 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
		return -ENOENT;
		return -ENOENT;


	ccn = pmu_to_arm_ccn(event->pmu);
	ccn = pmu_to_arm_ccn(event->pmu);
	event->destroy = arm_ccn_pmu_event_destroy;


	if (hw->sample_period) {
	if (hw->sample_period) {
		dev_warn(ccn->dev, "Sampling not supported!\n");
		dev_warn(ccn->dev, "Sampling not supported!\n");
@@ -778,49 +834,6 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
				!is_software_event(sibling))
				!is_software_event(sibling))
			return -EINVAL;
			return -EINVAL;


	/* Allocate the cycle counter */
	if (type == CCN_TYPE_CYCLES) {
		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
				ccn->dt.pmu_counters_mask))
			return -EAGAIN;

		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;

		return 0;
	}

	/* Allocate an event counter */
	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
			CCN_NUM_PMU_EVENT_COUNTERS);
	if (hw->idx < 0) {
		dev_warn(ccn->dev, "No more counters available!\n");
		return -EAGAIN;
	}

	if (type == CCN_TYPE_XP)
		source = &ccn->xp[node_xp];
	else
		source = &ccn->node[node_xp];
	ccn->dt.pmu_counters[hw->idx].source = source;

	/* Allocate an event source or a watchpoint */
	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
				CCN_NUM_XP_WATCHPOINTS);
	else
		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
				CCN_NUM_PMU_EVENTS);
	if (bit < 0) {
		dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
				node_xp);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
		return -EAGAIN;
	}
	hw->config_base = bit;

	ccn->dt.pmu_counters[hw->idx].event = event;

	return 0;
	return 0;
}
}


@@ -1087,8 +1100,13 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)


static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
{
	int err;
	struct hw_perf_event *hw = &event->hw;
	struct hw_perf_event *hw = &event->hw;


	err = arm_ccn_pmu_event_alloc(event);
	if (err)
		return err;

	arm_ccn_pmu_event_config(event);
	arm_ccn_pmu_event_config(event);


	hw->state = PERF_HES_STOPPED;
	hw->state = PERF_HES_STOPPED;
@@ -1102,6 +1120,8 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
{
	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);

	arm_ccn_pmu_event_release(event);
}
}


static void arm_ccn_pmu_event_read(struct perf_event *event)
static void arm_ccn_pmu_event_read(struct perf_event *event)