
Commit 13583bf1 authored by Thomas Gleixner

ia64: Use generic idle thread allocation



Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/20120420124557.380965133@linutronix.de
parent 17e32eac
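
Background, for readers of this diff: until now ia64 kept a private idle_thread_array[] and a workqueue helper (do_fork_idle()) to fork and cache one idle task per CPU. Selecting GENERIC_SMP_IDLE_THREAD hands that bookkeeping to the core kernel, which forks the idle tasks once, caches them per CPU, and passes the right one into the architecture's __cpu_up(). Below is a minimal sketch of that generic side, assuming the kernel/smpboot.c helpers of this era (idle_thread_get(), fork_idle(), init_idle()); it illustrates the mechanism and is not code from this commit.

/*
 * Sketch of the per-CPU idle-task cache that GENERIC_SMP_IDLE_THREAD
 * opts into.  Names follow my reading of kernel/smpboot.c at the time
 * of this series and are assumptions, not part of this patch.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct task_struct *, idle_threads);

/* Hand back the cached idle task for @cpu, re-initialized for this bring-up. */
struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

/* Fork the idle task for @cpu once; later onlining reuses it. */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)	/* the boot CPU already runs its idle task */
			idle_init(cpu);
	}
}

With that in place, the arch-private caching deleted below becomes dead weight, which is what the two smpboot.c hunks remove.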
arch/ia64/Kconfig  +1 −0
@@ -33,6 +33,7 @@ config IA64
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
+	select GENERIC_SMP_IDLE_THREAD
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
arch/ia64/kernel/smpboot.c  +3 −58
@@ -74,13 +74,6 @@
 #define bsp_remove_ok	0
 #endif
 
-/*
- * Store all idle threads, this can be reused instead of creating
- * a new thread. Also avoids complicated thread destroy functionality
- * for idle threads.
- */
-struct task_struct *idle_thread_array[NR_CPUS];
-
 /*
  * Global array allocated for NR_CPUS at boot time
  */
@@ -94,13 +87,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 
 #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
 
-#define get_idle_for_cpu(x)		(idle_thread_array[(x)])
-#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
-
 #else
-
-#define get_idle_for_cpu(x)		(NULL)
-#define set_idle_for_cpu(x,p)
 #define set_brendez_area(x)
 #endif
 
@@ -480,54 +467,12 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return NULL;
 }
 
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-void __cpuinit
-do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
 static int __cpuinit
-do_boot_cpu (int sapicid, int cpu)
+do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 {
 	int timeout;
-	struct create_idle c_idle = {
-		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER(c_idle.done),
-	};
-
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
-	c_idle.idle = get_idle_for_cpu(cpu);
-	if (c_idle.idle) {
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-
-	if (IS_ERR(c_idle.idle))
-		panic("failed fork for CPU %d", cpu);
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-
-do_rest:
-	task_for_booting_cpu = c_idle.idle;
 
+	task_for_booting_cpu = idle;
 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
 
 	set_brendez_area(cpu);
@@ -811,7 +756,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Processor goes to start_secondary(), sets online flag */
-	ret = do_boot_cpu(sapicid, cpu);
+	ret = do_boot_cpu(sapicid, cpu, tidle);
 	if (ret < 0)
 		return ret;
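
With the idle task supplied by the caller, do_boot_cpu() no longer forks or waits for anything; it just records the task and kicks the AP. The calling side is sketched below under the assumption that the core hotplug code of this period fetches the cached task and forwards it; it is simplified, locking and error paths are elided, and bringup_cpu_sketch() is a made-up name.

/*
 * Illustrative caller: the generic code looks up the cached idle task
 * and passes it down, so the architecture never forks one itself.
 */
static int bringup_cpu_sketch(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);

	if (IS_ERR(idle))
		return PTR_ERR(idle);

	/* On ia64 this lands in __cpu_up(cpu, tidle) -> do_boot_cpu(),
	 * which now only sets task_for_booting_cpu and sends the wakeup
	 * vector, as the hunks above show. */
	return __cpu_up(cpu, idle);
}

The diff above is just the ia64 half of that conversion: the private cache, the work item, and the fork/panic path all collapse into one extra function parameter.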