
Commit e1c105a9 authored by Boris Ostrovsky, committed by David Vrabel

hotplug: Prevent alloc/free of irq descriptors during cpu up/down (again)



Now that Xen no longer allocates irqs in _cpu_up() we can restore
commit a8994181 ("hotplug: Prevent alloc/free of irq descriptors
during cpu up/down")

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
CC: x86@kernel.org
CC: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 5fc509bc
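
For context, the locking this commit restores can be modelled outside the kernel as a single mutex that descriptor alloc/free and the bringup-time walk all take, so the walk sees a stable descriptor table. The sketch below is a userspace analogy only, not kernel code: desc_table, desc_alloc, desc_free and bringup_walker are invented names for illustration, and the pthread mutex merely stands in for the mutex behind irq_lock_sparse()/irq_unlock_sparse().

/*
 * Userspace model of the pattern (illustrative only; none of these
 * names are kernel APIs).  One mutex serializes descriptor alloc/free
 * against a walker that needs a stable view of the table, the way
 * irq_lock_sparse()/irq_unlock_sparse() bracket the arch bringup.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_DESCS 64

static pthread_mutex_t sparse_lock = PTHREAD_MUTEX_INITIALIZER;
static int *desc_table[NR_DESCS];           /* sparse "descriptor" slots */

static void desc_alloc(int i)
{
	pthread_mutex_lock(&sparse_lock);   /* like irq_lock_sparse()   */
	if (!desc_table[i])
		desc_table[i] = calloc(1, sizeof(int));
	pthread_mutex_unlock(&sparse_lock); /* like irq_unlock_sparse() */
}

static void desc_free(int i)
{
	pthread_mutex_lock(&sparse_lock);
	free(desc_table[i]);
	desc_table[i] = NULL;
	pthread_mutex_unlock(&sparse_lock);
}

/*
 * Stand-in for the bringup path: walk every slot with the table held
 * stable, as the x86 vector-setup walk requires while a CPU comes online.
 */
static void *bringup_walker(void *unused)
{
	int populated = 0;

	(void)unused;
	pthread_mutex_lock(&sparse_lock);   /* irq_lock_sparse()   */
	for (int i = 0; i < NR_DESCS; i++)  /* "arch bringup" walk */
		if (desc_table[i])
			populated++;
	pthread_mutex_unlock(&sparse_lock); /* irq_unlock_sparse() */

	printf("walked %d populated descriptors\n", populated);
	return NULL;
}

int main(void)
{
	pthread_t t;

	for (int i = 0; i < NR_DESCS; i += 2)
		desc_alloc(i);

	pthread_create(&t, NULL, bringup_walker, NULL);
	desc_free(2);                       /* serialized against the walk */
	pthread_join(t, NULL);
	return 0;
}

Build with gcc -pthread. The only point is that the walker and the alloc/free paths contend on the same lock, which is what moving irq_lock_sparse() out of x86's native_cpu_up() and into the generic bringup_cpu() guarantees for every architecture, not just x86.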
arch/x86/kernel/smpboot.c  +0 −11
@@ -1115,17 +1115,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	common_cpu_up(cpu, tidle);
 
-	/*
-	 * We have to walk the irq descriptors to setup the vector
-	 * space for the cpu which comes online.  Prevent irq
-	 * alloc/free across the bringup.
-	 */
-	irq_lock_sparse();
-
 	err = do_boot_cpu(apicid, cpu, tidle);
-
 	if (err) {
-		irq_unlock_sparse();
 		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
 		return -EIO;
 	}
@@ -1143,8 +1134,6 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 		touch_nmi_watchdog();
 	}
 
-	irq_unlock_sparse();
-
 	return 0;
 }
 
kernel/cpu.c  +8 −0
@@ -349,8 +349,16 @@ static int bringup_cpu(unsigned int cpu)
 	struct task_struct *idle = idle_thread_get(cpu);
 	int ret;
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+	irq_unlock_sparse();
 	if (ret) {
 		cpu_notify(CPU_UP_CANCELED, cpu);
 		return ret;