Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a6408f6c authored by Linus Torvalds
Browse files

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

     The final removal of the STARTING/DYING infrastructure will happen
     when the merge window closes.

  Another 700 hundred line of unpenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
parents 1a81a8f2 4fae16df
Loading
Loading
Loading
Loading
+18 −30
Original line number Diff line number Diff line
@@ -296,31 +296,24 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
	return IRQ_HANDLED;
}

static int arc_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)

static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		clockevents_config_and_register(evt, arc_timer_freq,
						0, ULONG_MAX);
	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
	enable_percpu_irq(arc_timer_irq, 0);
		break;
	case CPU_DYING:
		disable_percpu_irq(arc_timer_irq);
		break;
	return 0;
}

	return NOTIFY_OK;
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}

static struct notifier_block arc_timer_cpu_nb = {
	.notifier_call = arc_timer_cpu_notify,
};

/*
 * clockevent setup for boot CPU
 */
@@ -329,12 +322,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	ret = register_cpu_notifier(&arc_timer_cpu_nb);
	if (ret) {
		pr_err("Failed to register cpu notifier");
		return ret;
	}

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq");
@@ -347,11 +334,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
		return ret;
	}

	evt->irq = arc_timer_irq;
	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, arc_timer_freq,
					0, ARC_TIMER_MAX);

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
@@ -360,8 +342,14 @@ static int __init arc_clockevent_setup(struct device_node *node)
		return ret;
	}

	enable_percpu_irq(arc_timer_irq, 0);

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"AP_ARC_TIMER_STARTING",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		return ret;
	}
	return 0;
}

+11 −20
Original line number Diff line number Diff line
@@ -310,25 +310,18 @@ static void twd_timer_setup(void)
	enable_percpu_irq(clk->irq, 0);
}

/*
 * CPU hotplug state machine callbacks (CPUHP_AP_ARM_TWD_STARTING),
 * replacing the old CPU_STARTING/CPU_DYING notifier: set up the TWD local
 * timer on the CPU coming online, stop it on the CPU going down.
 * Both run on the CPU being onlined/offlined.
 */
static int twd_timer_starting_cpu(unsigned int cpu)
{
	twd_timer_setup();
	return 0;
}

static int twd_timer_dying_cpu(unsigned int cpu)
{
	twd_timer_stop();
	return 0;
}

static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;
@@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np)
		goto out_free;
	}

	err = register_cpu_notifier(&twd_timer_cpu_nb);
	if (err)
		goto out_irq;
	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
				  "AP_ARM_TWD_STARTING",
				  twd_timer_starting_cpu, twd_timer_dying_cpu);

	twd_get_clock(np);
	if (!of_property_read_bool(np, "always-on"))
@@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np)

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
+6 −13
Original line number Diff line number Diff line
@@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
	.notifier_call = mvebu_hwcc_notifier,
};

/*
 * CPU hotplug state machine callback (CPUHP_AP_ARM_MVEBU_COHERENCY),
 * replacing the old CPU_STARTING notifier: clear the shared-L2 bit on the
 * CPU coming online. Runs on the CPU being brought up; no teardown needed.
 */
static int armada_xp_clear_l2_starting(unsigned int cpu)
{
	armada_xp_clear_shared_l2();
	return 0;
}

static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;
@@ -155,8 +147,9 @@ static void __init armada_370_coherency_init(struct device_node *np)

	of_node_put(cpu_config_np);

	register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);

	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
				  "AP_ARM_MVEBU_COHERENCY",
				  armada_xp_clear_l2_starting, NULL);
exit:
	set_cpu_coherent();
}
+13 −14
Original line number Diff line number Diff line
@@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base)
			      L310_POWER_CTRL);
}

/*
 * CPU hotplug state machine callbacks (CPUHP_AP_ARM_L2X0_STARTING),
 * replacing the old CPU_STARTING/CPU_DYING notifier: enable Full Line of
 * Zeros (plus the related ACTLR bits) on the CPU coming online, disable
 * them on the CPU going down. Both run on the affected CPU.
 */
static int l2c310_starting_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	return 0;
}

static int l2c310_dying_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
	return 0;
}

static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
@@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
				  "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
				  l2c310_dying_cpu);
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
+17 −11
Original line number Diff line number Diff line
@@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a threads VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware. The callbacks below are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
static int vfp_dying_cpu(unsigned int cpu)
{
	vfp_force_reload(cpu, current_thread_info());
	return 0;
}

static int vfp_starting_cpu(unsigned int unused)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
		vfp_current_hw_state[(long)hcpu] = NULL;
	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
	vfp_enable(NULL);
	return NOTIFY_OK;
	return 0;
}

void vfp_kmode_exception(void)
@@ -732,6 +732,10 @@ static int __init vfp_init(void)
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	/*
	 * Enable the access to the VFP on all online CPUs so the
	 * following test on FPSID will succeed.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

@@ -794,7 +798,9 @@ static int __init vfp_init(void)
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
	}

	hotcpu_notifier(vfp_hotplug, 0);
	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
				  "AP_ARM_VFP_STARTING", vfp_starting_cpu,
				  vfp_dying_cpu);

	vfp_vector = vfp_support_entry;

Loading