Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 15ac9a39 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Remove the sysfs bits



Neither the overcommit nor the reservation sysfs parameter was
actually working; remove them, as they'll only get in the way.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a4eaf7f1
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -808,7 +808,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= perf_max_events)) {
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -879,7 +879,6 @@ void __init init_hw_perf_events(void)

	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;
	perf_max_events = alpha_pmu->num_pmcs;

	perf_pmu_register(&pmu);
}
+1 −8
Original line number Diff line number Diff line
@@ -534,7 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > perf_max_events) {
		if (atomic_read(&active_events) > armpmu.num_events) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}
@@ -2974,14 +2974,12 @@ init_hw_perf_events(void)
			armpmu = &armv6pmu;
			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
					sizeof(armv6_perf_cache_map));
			perf_max_events	= armv6pmu.num_events;
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = &armv6mpcore_pmu;
			memcpy(armpmu_perf_cache_map,
			       armv6mpcore_perf_cache_map,
			       sizeof(armv6mpcore_perf_cache_map));
			perf_max_events = armv6mpcore_pmu.num_events;
			break;
		case 0xC080:	/* Cortex-A8 */
			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2993,7 +2991,6 @@ init_hw_perf_events(void)
			/* Reset PMNC and read the nb of CNTx counters
			    supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		case 0xC090:	/* Cortex-A9 */
			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -3005,7 +3002,6 @@ init_hw_perf_events(void)
			/* Reset PMNC and read the nb of CNTx counters
			    supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		}
	/* Intel CPUs [xscale]. */
@@ -3016,13 +3012,11 @@ init_hw_perf_events(void)
			armpmu = &xscale1pmu;
			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
					sizeof(xscale_perf_cache_map));
			perf_max_events	= xscale1pmu.num_events;
			break;
		case 2:
			armpmu = &xscale2pmu;
			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
					sizeof(xscale_perf_cache_map));
			perf_max_events	= xscale2pmu.num_events;
			break;
		}
	}
@@ -3032,7 +3026,6 @@ init_hw_perf_events(void)
				arm_pmu_names[armpmu->id], armpmu->num_events);
	} else {
		pr_info("no hardware support available\n");
		perf_max_events = -1;
	}

	perf_pmu_register(&pmu);
+3 −6
Original line number Diff line number Diff line
@@ -897,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
	if (!n_ev)
		return 0;

	if (n_ev > perf_max_events)
	if (n_ev > MAX_HWEVENTS)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
@@ -1014,7 +1014,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= perf_max_events)
	if (n0 >= MAX_HWEVENTS)
		goto out;

	cpuc->event[n0] = event;
@@ -1097,7 +1097,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   MAX_HWEVENTS - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
@@ -1309,9 +1309,6 @@ void __init init_hw_perf_events(void)

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  */
	perf_max_events = 2;

	perf_pmu_register(&pmu);
	register_die_notifier(&perf_event_nmi_notifier);
}
+0 −1
Original line number Diff line number Diff line
@@ -1396,7 +1396,6 @@ void __init init_hw_perf_events(void)
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
	perf_max_events = x86_pmu.num_counters;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+0 −6
Original line number Diff line number Diff line
@@ -860,7 +860,6 @@ struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
@@ -883,11 +882,6 @@ struct perf_output_handle {

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern int perf_pmu_register(struct pmu *pmu);
extern void perf_pmu_unregister(struct pmu *pmu);

Loading