Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87353d8a authored by Ralf Baechle's avatar Ralf Baechle
Browse files

[MIPS] SMP: Call platform methods via ops structure.

parent 19388fb0
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -1441,6 +1441,7 @@ config MIPS_MT_SMP
	select SMP
	select SYS_SUPPORTS_SCHED_SMT if SMP
	select SYS_SUPPORTS_SMP
	select SMP_UP
	help
	  This is a kernel model which is also known as VSMP or lately
	  has been marketed as SMVP.
@@ -1457,6 +1458,7 @@ config MIPS_MT_SMTC
	select NR_CPUS_DEFAULT_8
	select SMP
	select SYS_SUPPORTS_SMP
	select SMP_UP
	help
	  This is a kernel model which is known as SMTC or lately has been
	  marketed as SMVP.
@@ -1735,6 +1737,9 @@ config SMP

	  If you don't know what to do here, say N.

config SMP_UP
	bool

config SYS_SUPPORTS_SMP
	bool

+8 −0
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@

#include <asm/bootinfo.h>
#include <asm/sgialib.h>
#include <asm/smp-ops.h>

#undef DEBUG_PROM_INIT

@@ -48,4 +49,11 @@ void __init prom_init(void)
	ArcRead(0, &c, 1, &cnt);
	ArcEnterInteractiveMode();
#endif
#ifdef CONFIG_SGI_IP27
	{
		extern struct plat_smp_ops ip27_smp_ops;

		register_smp_ops(&ip27_smp_ops);
	}
#endif
}
+0 −1
Original line number Diff line number Diff line
@@ -17,7 +17,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
+1 −2
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
	arch_mem_init(cmdline_p);

	resource_init();
#ifdef CONFIG_SMP
	plat_smp_setup();
#endif
}

static int __init fpu_disable(char *s)
+106 −87
Original line number Diff line number Diff line
@@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
	write_tc_c0_tchalt(TCHALT_H);
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries
 */
void __init plat_smp_setup(void)
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
	unsigned int mvpconf0, ntc, tc, ncpu = 0;
	unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;
	int i;
	unsigned long flags;
	int vpflags;

	/* disable MT so we can configure */
	dvpe();
	dmt();
	local_irq_save(flags);

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);
	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */

	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
	switch (action) {
	case SMP_CALL_FUNCTION:
		i = C_SW1;
		break;

	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	smp_num_siblings = nvpe;
	case SMP_RESCHEDULE_YOURSELF:
	default:
		i = C_SW0;
		break;
	}

	/* we'll always have more TC's than VPE's, so loop setting everything
	   to a sensible state */
	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);
	/* 1:1 mapping of vpe and tc... */
	settc(cpu);
	write_vpe_c0_cause(read_vpe_c0_cause() | i);
	evpe(vpflags);

		smp_tc_init(tc, mvpconf0);
		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
	local_irq_restore(flags);
}

	/* Release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/* We'll wait until starting the secondaries before starting MVPE */
static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
	for_each_cpu_mask(i, mask)
		vsmp_send_ipi_single(i, action);
}

void __init plat_prepare_cpus(unsigned int max_cpus)
static void __cpuinit vsmp_init_secondary(void)
{
	mips_mt_set_cpuoptions();
	/* Enable per-cpu interrupts */

	/* set up ipi interrupts */
	if (cpu_has_vint) {
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
	/* This is Malta specific: IPI, performance and timer interrupts */
	write_c0_status((read_c0_status() & ~ST0_IM ) |
	                (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}

	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
static void __cpuinit vsmp_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));

	setup_irq(cpu_ipi_resched_irq, &irq_resched);
	setup_irq(cpu_ipi_call_irq, &irq_call);
#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
	local_irq_enable();
}

/*
 * plat_smp_ops .cpus_done hook: invoked after all secondary CPUs are
 * online.  Intentionally empty — VSMP needs no post-bringup work here.
 */
static void vsmp_cpus_done(void)
{
}

/*
@@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 * (unsigned long)idle->thread_info the gp
 * assumes a 1:1 mapping of TC => VPE
 */
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
	struct thread_info *gp = task_thread_info(idle);
	dvpe();
@@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
	evpe(EVPE_ENABLE);
}

void __cpuinit prom_init_secondary(void)
{
	/* Enable per-cpu interrupts */

	/* This is Malta specific: IPI, performance and timer interrupts */
	write_c0_status((read_c0_status() & ~ST0_IM ) |
	                (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}

void __cpuinit prom_smp_finish(void)
/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries
 */
static void __init vsmp_smp_setup(void)
{
	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
	unsigned int mvpconf0, ntc, tc, ncpu = 0;
	unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;

	local_irq_enable();
}
	/* disable MT so we can configure */
	dvpe();
	dmt();

void prom_cpus_done(void)
{
	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	smp_num_siblings = nvpe;

	/* we'll always have more TC's than VPE's, so loop setting everything
	   to a sensible state */
	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);

		smp_tc_init(tc, mvpconf0);
		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
	}

void core_send_ipi(int cpu, unsigned int action)
{
	int i;
	unsigned long flags;
	int vpflags;
	/* Release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	local_irq_save(flags);
	/* We'll wait until starting the secondaries before starting MVPE */

	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}

	switch (action) {
	case SMP_CALL_FUNCTION:
		i = C_SW1;
		break;
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
	mips_mt_set_cpuoptions();

	case SMP_RESCHEDULE_YOURSELF:
	default:
		i = C_SW0;
		break;
	/* set up ipi interrupts */
	if (cpu_has_vint) {
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
	}

	/* 1:1 mapping of vpe and tc... */
	settc(cpu);
	write_vpe_c0_cause(read_vpe_c0_cause() | i);
	evpe(vpflags);
	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;

	local_irq_restore(flags);
	setup_irq(cpu_ipi_resched_irq, &irq_resched);
	setup_irq(cpu_ipi_call_irq, &irq_call);

	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}

/*
 * Platform SMP method table for VSMP (MIPS MT): bundles the vsmp_*
 * callbacks above so the generic MIPS SMP code can invoke them via
 * the plat_smp_ops indirection.  NOTE(review): registration appears
 * to happen elsewhere (cf. register_smp_ops) — confirm against callers.
 */
struct plat_smp_ops vsmp_smp_ops = {
	.send_ipi_single	= vsmp_send_ipi_single,
	.send_ipi_mask		= vsmp_send_ipi_mask,
	.init_secondary		= vsmp_init_secondary,
	.smp_finish		= vsmp_smp_finish,
	.cpus_done		= vsmp_cpus_done,
	.boot_secondary		= vsmp_boot_secondary,
	.smp_setup		= vsmp_smp_setup,
	.prepare_cpus		= vsmp_prepare_cpus,
};
Loading