Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1aa6cfd3 authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

perf/x86/intel/uncore: Clean up hotplug conversion fallout



The recent conversion to the hotplug state machine kept two mechanisms from
the original code:

 1) The first_init logic which adds the number of online CPUs in a package
    to the refcount. That's wrong because the callbacks are executed for
    all online CPUs.

    Remove it so the refcounting is correct.

 2) The on_each_cpu() call to undo box->init() in the error handling
    path. That's bogus because when the prepare callback fails no box has
    been initialized yet.

    Remove it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Fixes: 1a246b9f ("perf/x86/intel/uncore: Convert to hotplug state machine")
Link: http://lkml.kernel.org/r/20170131230141.298032324@linutronix.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent dd86e373
Loading
Loading
Loading
Loading
+4 −40
Original line number Diff line number Diff line
@@ -764,30 +764,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
	pmu->registered = false;
}

/*
 * Tear down every initialized box of @type that belongs to the physical
 * package @cpu resides on.  A type with no PMUs is silently ignored.
 */
static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int idx, pkg;

	if (!pmu)
		return;

	pkg = topology_physical_package_id(cpu);
	for (idx = 0; idx < type->num_boxes; idx++, pmu++) {
		box = pmu->boxes[pkg];
		if (box)
			uncore_box_exit(box);
	}
}

/*
 * Undo box->init() for all MSR uncore types on the calling CPU's package.
 * Runs via on_each_cpu_mask(), hence the (unused) @dummy argument.
 *
 * Fix: the original advanced @types twice per iteration (once in the
 * for-clause, once via "*types++" in the call), skipping every other
 * uncore type and leaving its boxes un-exited.
 */
static void uncore_exit_boxes(void *dummy)
{
	struct intel_uncore_type **types;

	for (types = uncore_msr_uncores; *types; types++)
		__uncore_exit_boxes(*types, smp_processor_id());
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;
@@ -1078,22 +1054,12 @@ static int uncore_cpu_dying(unsigned int cpu)
	return 0;
}

static int first_init;

static int uncore_cpu_starting(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, ncpus = 1;

	if (first_init) {
		/*
		 * On init we get the number of online cpus in the package
		 * and set refcount for all of them.
		 */
		ncpus = cpumask_weight(topology_core_cpumask(cpu));
	}
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
@@ -1104,7 +1070,7 @@ static int uncore_cpu_starting(unsigned int cpu)
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
			if (atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
@@ -1408,19 +1374,17 @@ static int __init intel_uncore_init(void)
					  "perf/x86/intel/uncore:prepare",
					  uncore_cpu_prepare, NULL);
	}
	first_init = 1;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
			  "perf/x86/uncore:starting",
			  uncore_cpu_starting, uncore_cpu_dying);
	first_init = 0;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
			  "perf/x86/uncore:online",
			  uncore_event_cpu_online, uncore_event_cpu_offline);
	return 0;

err:
	/* Undo box->init_box() */
	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;