Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e71c3978 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug updates from Thomas Gleixner:
 "This is the final round of converting the notifier mess to the state
  machine. The removal of the notifiers and the related infrastructure
  will happen around rc1, as there are conversions outstanding in other
  trees.

  The whole exercise removed about 2000 lines of code in total, and in
  the course of the conversion several dozen bugs got fixed. The new
  mechanism allows almost every hotplug step to be tested standalone, so
  usage sites can exercise all transitions extensively.

  There is more room for improvement, like integrating all the
  pointlessly different architecture mechanisms of synchronizing,
  setting cpus online etc into the core code"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
  tracing/rb: Init the CPU mask on allocation
  soc/fsl/qbman: Convert to hotplug state machine
  soc/fsl/qbman: Convert to hotplug state machine
  zram: Convert to hotplug state machine
  KVM/PPC/Book3S HV: Convert to hotplug state machine
  arm64/cpuinfo: Convert to hotplug state machine
  arm64/cpuinfo: Make hotplug notifier symmetric
  mm/compaction: Convert to hotplug state machine
  iommu/vt-d: Convert to hotplug state machine
  mm/zswap: Convert pool to hotplug state machine
  mm/zswap: Convert dst-mem to hotplug state machine
  mm/zsmalloc: Convert to hotplug state machine
  mm/vmstat: Convert to hotplug state machine
  mm/vmstat: Avoid on each online CPU loops
  mm/vmstat: Drop get_online_cpus() from init_cpu_node_state/vmstat_cpu_dead()
  tracing/rb: Convert to hotplug state machine
  oprofile/nmi timer: Convert to hotplug state machine
  net/iucv: Use explicit clean up labels in iucv_init()
  x86/pci/amd-bus: Convert to hotplug state machine
  x86/oprofile/nmi: Convert to hotplug state machine
  ...
parents f797484c b18cc3de
Loading
Loading
Loading
Loading
+20 −14
Original line number Diff line number Diff line
@@ -757,19 +757,18 @@ EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
static int bL_switcher_cpu_pre(unsigned int cpu)
{
	if (bL_switcher_active) {
		int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
		switch (action & 0xf) {
		case CPU_UP_PREPARE:
		case CPU_DOWN_PREPARE:
	int pairing;

	if (!bL_switcher_active)
		return 0;

	pairing = bL_switcher_cpu_pairing[cpu];

	if (pairing == -1)
				return NOTIFY_BAD;
		}
	}
	return NOTIFY_DONE;
		return -EINVAL;
	return 0;
}

static bool no_bL_switcher;
@@ -782,8 +781,15 @@ static int __init bL_switcher_init(void)
	if (!mcpm_is_available())
		return -ENODEV;

	cpu_notifier(bL_switcher_hotplug_callback, 0);

	cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
				  bL_switcher_cpu_pre, NULL);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
					NULL, bL_switcher_cpu_pre);
	if (ret < 0) {
		cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
		pr_err("bL_switcher: Failed to allocate a hotplug state\n");
		return ret;
	}
	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
+22 −25
Original line number Diff line number Diff line
@@ -925,9 +925,9 @@ static bool core_has_os_save_restore(void)
	}
}

static void reset_ctrl_regs(void *unused)
static void reset_ctrl_regs(unsigned int cpu)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	int i, raw_num_brps, err = 0;
	u32 val;

	/*
@@ -1020,25 +1020,20 @@ static void reset_ctrl_regs(void *unused)
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}

static int dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
static int dbg_reset_online(unsigned int cpu)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);

	return NOTIFY_OK;
	local_irq_disable();
	reset_ctrl_regs(cpu);
	local_irq_enable();
	return 0;
}

static struct notifier_block dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

#ifdef CONFIG_CPU_PM
static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
			     void *v)
{
	if (action == CPU_PM_EXIT)
		reset_ctrl_regs(NULL);
		reset_ctrl_regs(smp_processor_id());

	return NOTIFY_OK;
}
@@ -1059,6 +1054,8 @@ static inline void pm_init(void)

static int __init arch_hw_breakpoint_init(void)
{
	int ret;

	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
@@ -1072,25 +1069,28 @@ static int __init arch_hw_breakpoint_init(void)
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	cpu_notifier_register_begin();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	get_online_cpus();
	register_undef_hook(&debug_reg_hook);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 * Register CPU notifier which resets the breakpoint resources. We
	 * assume that a halting debugger will leave the world in a nice state
	 * for us.
	 */
	on_each_cpu(reset_ctrl_regs, NULL, 1);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
				dbg_reset_online, NULL);
	unregister_undef_hook(&debug_reg_hook);
	if (!cpumask_empty(&debug_err_mask)) {
	if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
		core_num_wrps = 0;
		cpu_notifier_register_done();
		if (ret > 0)
			cpuhp_remove_state_nocalls(ret);
		put_online_cpus();
		return 0;
	}

@@ -1108,12 +1108,9 @@ static int __init arch_hw_breakpoint_init(void)
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "breakpoint debug exception");
	put_online_cpus();

	/* Register hotplug and PM notifiers. */
	__register_cpu_notifier(&dbg_reset_nb);

	cpu_notifier_register_done();

	/* Register PM notifiers. */
	pm_init();
	return 0;
}
+9 −27
Original line number Diff line number Diff line
@@ -227,7 +227,7 @@ static struct attribute_group cpuregs_attr_group = {
	.name = "identification"
};

static int cpuid_add_regs(int cpu)
static int cpuid_cpu_online(unsigned int cpu)
{
	int rc;
	struct device *dev;
@@ -248,7 +248,7 @@ static int cpuid_add_regs(int cpu)
	return rc;
}

static int cpuid_remove_regs(int cpu)
static int cpuid_cpu_offline(unsigned int cpu)
{
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
@@ -264,40 +264,22 @@ static int cpuid_remove_regs(int cpu)
	return 0;
}

static int cpuid_callback(struct notifier_block *nb,
			 unsigned long action, void *hcpu)
{
	int rc = 0;
	unsigned long cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = cpuid_add_regs(cpu);
		break;
	case CPU_DEAD:
		rc = cpuid_remove_regs(cpu);
		break;
	}

	return notifier_from_errno(rc);
}

static int __init cpuinfo_regs_init(void)
{
	int cpu;

	cpu_notifier_register_begin();
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

		kobject_init(&info->kobj, &cpuregs_kobj_type);
		if (cpu_online(cpu))
			cpuid_add_regs(cpu);
	}
	__hotcpu_notifier(cpuid_callback, 0);

	cpu_notifier_register_done();
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}
	return 0;
}
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
+17 −57
Original line number Diff line number Diff line
@@ -224,85 +224,45 @@ static struct attribute_group err_inject_attr_group = {
	.name = "err_inject"
};
/* Add/Remove err_inject interface for CPU device */
static int err_inject_add_dev(struct device *sys_dev)
static int err_inject_add_dev(unsigned int cpu)
{
	struct device *sys_dev = get_cpu_device(cpu);

	return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
}

static int err_inject_remove_dev(struct device *sys_dev)
static int err_inject_remove_dev(unsigned int cpu)
{
	struct device *sys_dev = get_cpu_device(cpu);

	sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
	return 0;
}
static int err_inject_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *sys_dev;

	sys_dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err_inject_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		err_inject_remove_dev(sys_dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block err_inject_cpu_notifier =
{
	.notifier_call = err_inject_cpu_callback,
};
static enum cpuhp_state hp_online;

static int __init
err_inject_init(void)
static int __init err_inject_init(void)
{
	int i;

	int ret;
#ifdef ERR_INJ_DEBUG
	printk(KERN_INFO "Enter error injection driver.\n");
#endif

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/err_inj:online",
				err_inject_add_dev, err_inject_remove_dev);
	if (ret >= 0) {
		hp_online = ret;
		ret = 0;
	}

	__register_hotcpu_notifier(&err_inject_cpu_notifier);

	cpu_notifier_register_done();

	return 0;
	return ret;
}

static void __exit
err_inject_exit(void)
static void __exit err_inject_exit(void)
{
	int i;
	struct device *sys_dev;

#ifdef ERR_INJ_DEBUG
	printk(KERN_INFO "Exit error injection driver.\n");
#endif

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		sys_dev = get_cpu_device(i);
		sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
	}

	__unregister_hotcpu_notifier(&err_inject_cpu_notifier);

	cpu_notifier_register_done();
	cpuhp_remove_state(hp_online);
}

module_init(err_inject_init);
+16 −44
Original line number Diff line number Diff line
@@ -932,8 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
	.release	= single_release,
};

static void
create_palinfo_proc_entries(unsigned int cpu)
static int palinfo_add_proc(unsigned int cpu)
{
	pal_func_cpu_u_t f;
	struct proc_dir_entry *cpu_dir;
@@ -943,7 +942,7 @@ create_palinfo_proc_entries(unsigned int cpu)

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);
	if (!cpu_dir)
		return;
		return -EINVAL;

	f.req_cpu = cpu;

@@ -952,42 +951,21 @@ create_palinfo_proc_entries(unsigned int cpu)
		proc_create_data(palinfo_entries[j].name, 0, cpu_dir,
				 &proc_palinfo_fops, (void *)f.value);
	}
	return 0;
}

static void
remove_palinfo_proc_entries(unsigned int hcpu)
static int palinfo_del_proc(unsigned int hcpu)
{
	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */

	sprintf(cpustr, "cpu%d", hcpu);
	remove_proc_subtree(cpustr, palinfo_dir);
	return 0;
}

static int palinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		create_palinfo_proc_entries(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		remove_palinfo_proc_entries(hotcpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata palinfo_cpu_notifier =
{
	.notifier_call = palinfo_cpu_callback,
	.priority = 0,
};
static enum cpuhp_state hp_online;

static int __init
palinfo_init(void)
static int __init palinfo_init(void)
{
	int i = 0;

@@ -996,25 +974,19 @@ palinfo_init(void)
	if (!palinfo_dir)
		return -ENOMEM;

	cpu_notifier_register_begin();

	/* Create palinfo dirs in /proc for all online cpus */
	for_each_online_cpu(i) {
		create_palinfo_proc_entries(i);
	i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
			      palinfo_add_proc, palinfo_del_proc);
	if (i < 0) {
		remove_proc_subtree("pal", NULL);
		return i;
	}

	/* Register for future delivery via notify registration */
	__register_hotcpu_notifier(&palinfo_cpu_notifier);

	cpu_notifier_register_done();

	hp_online = i;
	return 0;
}

static void __exit
palinfo_exit(void)
static void __exit palinfo_exit(void)
{
	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
	cpuhp_remove_state(hp_online);
	remove_proc_subtree("pal", NULL);
}

Loading