Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dfa7c899 authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

perf counters: expand use of counter->event



Impact: change syscall, cleanup

Make use of the new perf_counters event type.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent eab656ae
Loading
Loading
Loading
Loading
+11 −11
Original line number Original line Diff line number Diff line
@@ -56,9 +56,10 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
/*
/*
 * Setup the hardware configuration for a given hw_event_type
 * Setup the hardware configuration for a given hw_event_type
 */
 */
int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
int hw_perf_counter_init(struct perf_counter *counter)
{
{
	struct hw_perf_counter *hwc = &counter->hw;
	struct hw_perf_counter *hwc = &counter->hw;
	u32 hw_event_type = counter->event.hw_event_type;


	if (unlikely(!perf_counters_initialized))
	if (unlikely(!perf_counters_initialized))
		return -EINVAL;
		return -EINVAL;
@@ -83,7 +84,7 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
	hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
	hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;


	hwc->irq_period = counter->__irq_period;
	hwc->irq_period = counter->event.hw_event_period;
	/*
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * so we install an artificial 1<<31 period regardless of
@@ -95,21 +96,19 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
	hwc->next_count = -((s32) hwc->irq_period);
	hwc->next_count = -((s32) hwc->irq_period);


	/*
	/*
	 * Negative event types mean raw encoded event+umask values:
	 * Raw event type provide the config in the event structure
	 */
	 */
	if (hw_event_type < 0) {
		counter->hw_event_type = -hw_event_type;
		counter->hw_event_type &= ~PERF_COUNT_NMI;
	} else {
	hw_event_type &= ~PERF_COUNT_NMI;
	hw_event_type &= ~PERF_COUNT_NMI;
	if (hw_event_type == PERF_COUNT_RAW) {
		hwc->config |= counter->event.hw_raw_ctrl;
	} else {
		if (hw_event_type >= max_intel_perfmon_events)
		if (hw_event_type >= max_intel_perfmon_events)
			return -EINVAL;
			return -EINVAL;
		/*
		/*
		 * The generic map:
		 * The generic map:
		 */
		 */
		counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
		hwc->config |= intel_perfmon_event_map[hw_event_type];
	}
	}
	hwc->config |= counter->hw_event_type;
	counter->wakeup_pending = 0;
	counter->wakeup_pending = 0;


	return 0;
	return 0;
@@ -373,7 +372,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
				perf_save_and_restart(counter);
				perf_save_and_restart(counter);
			}
			}
		}
		}
		perf_store_irq_data(leader, counter->hw_event_type);
		perf_store_irq_data(leader, counter->event.hw_event_type);
		perf_store_irq_data(leader, atomic64_counter_read(counter));
		perf_store_irq_data(leader, atomic64_counter_read(counter));
	}
	}
}
}
@@ -418,7 +417,8 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
			perf_store_irq_data(counter, instruction_pointer(regs));
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
			break;
		case PERF_RECORD_GROUP:
		case PERF_RECORD_GROUP:
			perf_store_irq_data(counter, counter->hw_event_type);
			perf_store_irq_data(counter,
					    counter->event.hw_event_type);
			perf_store_irq_data(counter,
			perf_store_irq_data(counter,
					    atomic64_counter_read(counter));
					    atomic64_counter_read(counter));
			perf_handle_group(counter, &status, &ack);
			perf_handle_group(counter, &status, &ack);
+1 −3
Original line number Original line Diff line number Diff line
@@ -96,8 +96,7 @@ struct perf_counter {
#else
#else
	atomic_t			count32[2];
	atomic_t			count32[2];
#endif
#endif
	u64				__irq_period;
	struct perf_counter_event	event;

	struct hw_perf_counter		hw;
	struct hw_perf_counter		hw;


	struct perf_counter_context	*ctx;
	struct perf_counter_context	*ctx;
@@ -111,7 +110,6 @@ struct perf_counter {
	int				oncpu;
	int				oncpu;
	int				cpu;
	int				cpu;


	s32				hw_event_type;
	enum perf_record_type		record_type;
	enum perf_record_type		record_type;


	/* read() / irq related data */
	/* read() / irq related data */
+5 −5
Original line number Original line Diff line number Diff line
@@ -37,7 +37,7 @@ static DEFINE_MUTEX(perf_resource_mutex);
 * Architecture provided APIs - weak aliases:
 * Architecture provided APIs - weak aliases:
 */
 */


int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type)
int __weak hw_perf_counter_init(struct perf_counter *counter)
{
{
	return -EINVAL;
	return -EINVAL;
}
}
@@ -707,7 +707,7 @@ static const struct file_operations perf_fops = {
 * Allocate and initialize a counter structure
 * Allocate and initialize a counter structure
 */
 */
static struct perf_counter *
static struct perf_counter *
perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type)
{
{
	struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);


@@ -722,7 +722,7 @@ perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
	counter->usrdata	= &counter->data[1];
	counter->usrdata	= &counter->data[1];
	counter->cpu		= cpu;
	counter->cpu		= cpu;
	counter->record_type	= record_type;
	counter->record_type	= record_type;
	counter->__irq_period	= hw_event_period;
	counter->event		= *event;
	counter->wakeup_pending = 0;
	counter->wakeup_pending = 0;


	return counter;
	return counter;
@@ -750,11 +750,11 @@ sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type,
		return PTR_ERR(ctx);
		return PTR_ERR(ctx);


	ret = -ENOMEM;
	ret = -ENOMEM;
	counter = perf_counter_alloc(event.hw_event_period, cpu, record_type);
	counter = perf_counter_alloc(&event, cpu, record_type);
	if (!counter)
	if (!counter)
		goto err_put_context;
		goto err_put_context;


	ret = hw_perf_counter_init(counter, event.hw_event_type);
	ret = hw_perf_counter_init(counter);
	if (ret)
	if (ret)
		goto err_free_put_context;
		goto err_free_put_context;