Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 781b0f8d authored by Ralf Baechle's avatar Ralf Baechle
Browse files

[MIPS] VSMP: Fix initialization ordering bug.

parent 3ab0f40f
Loading
Loading
Loading
Loading
+83 −69
Original line number Original line Diff line number Diff line
@@ -140,45 +140,29 @@ static struct irqaction irq_call = {
	.name		= "IPI_call"
	.name		= "IPI_call"
};
};


/*
static void __init smp_copy_vpe_config(void)
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondarys
 */
void plat_smp_setup(void)
{
{
	unsigned long val;
	write_vpe_c0_status(
	int i, num;
		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;

	/* disable MT so we can configure */
	dvpe();
	dmt();


	mips_mt_set_cpuoptions();
	/* set config to be the same as vpe0, particularly kseg0 coherency alg */

	write_vpe_c0_config( read_c0_config());
	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);


	val = read_c0_mvpconf0();
	/* make sure there are no software interrupts pending */
	write_vpe_c0_cause(0);


	/* we'll always have more TC's than VPE's, so loop setting everything
	/* Propagate Config7 */
	   to a sensible state */
	write_vpe_c0_config7(read_c0_config7());
	for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
}
		settc(i);


		/* VPE's */
static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
		if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
	unsigned int ncpu)
{
	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
		return ncpu;


			/* deactivate all but vpe0 */
	/* Deactivate all but VPE 0 */
			if (i != 0) {
	if (tc != 0) {
		unsigned long tmp = read_vpe_c0_vpeconf0();
		unsigned long tmp = read_vpe_c0_vpeconf0();


		tmp &= ~VPECONF0_VPA;
		tmp &= ~VPECONF0_VPA;
@@ -188,43 +172,36 @@ void plat_smp_setup(void)
		write_vpe_c0_vpeconf0(tmp);
		write_vpe_c0_vpeconf0(tmp);


		/* Record this as available CPU */
		/* Record this as available CPU */
				cpu_set(i, phys_cpu_present_map);
		cpu_set(tc, phys_cpu_present_map);
				__cpu_number_map[i]	= ++num;
		__cpu_number_map[tc]	= ++ncpu;
				__cpu_logical_map[num]	= i;
		__cpu_logical_map[ncpu]	= tc;
	}
	}


			/* disable multi-threading with TC's */
	/* Disable multi-threading with TC's */
	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);


			if (i != 0) {
	if (tc != 0)
				write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
		smp_copy_vpe_config();

				/* set config to be the same as vpe0, particularly kseg0 coherency alg */
				write_vpe_c0_config( read_c0_config());

				/* make sure there are no software interrupts pending */
				write_vpe_c0_cause(0);

				/* Propagate Config7 */
				write_vpe_c0_config7(read_c0_config7());
			}


	return ncpu;
}
}


		/* TC's */
static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)

{
		if (i != 0) {
	unsigned long tmp;
	unsigned long tmp;


	if (!tc)
		return;

	/* bind a TC to each VPE, May as well put all excess TC's
	/* bind a TC to each VPE, May as well put all excess TC's
	   on the last VPE */
	   on the last VPE */
			if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
	if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
				write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
		write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
	else {
	else {
				write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
		write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);


		/* and set XTC */
		/* and set XTC */
				write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
	}
	}


	tmp = read_tc_c0_tcstatus();
	tmp = read_tc_c0_tcstatus();
@@ -236,6 +213,43 @@ void plat_smp_setup(void)


	write_tc_c0_tchalt(TCHALT_H);
	write_tc_c0_tchalt(TCHALT_H);
}
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondarys
 */
void __init plat_smp_setup(void)
{
	unsigned int mvpconf0, ntc, tc, ncpu = 0;

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;

	/* disable MT so we can configure */
	dvpe();
	dmt();

	mips_mt_set_cpuoptions();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

	/* we'll always have more TC's than VPE's, so loop setting everything
	   to a sensible state */
	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);

		/* TCs first so XTC/TCBind are set before the VPE is wired up */
		smp_tc_init(tc, mvpconf0);
		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
	}

	/* Release config state */
	/* NOTE(review): line restored from context hidden at a diff hunk
	   boundary — verify against the tree. */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/* We'll wait until starting the secondaries before starting MVPE */

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}


void __init plat_prepare_cpus(unsigned int max_cpus)
void __init plat_prepare_cpus(unsigned int max_cpus)