arch/s390/kernel/smp.c  +100 −161

@@ -1,7 +1,7 @@
 /*
  * arch/s390/kernel/smp.c
  *
- *  Copyright (C) IBM Corp. 1999,2006
+ *  Copyright IBM Corp. 1999,2007
  *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *             Martin Schwidefsky (schwidefsky@de.ibm.com)
  *             Heiko Carstens (heiko.carstens@de.ibm.com)
@@ -43,16 +43,17 @@
 #include <asm/timer.h>
 #include <asm/lowcore.h>
 
+extern volatile int __cpu_logical_map[];
+
 /*
  * An array with a pointer the lowcore of every CPU.
  */
 struct _lowcore *lowcore_ptr[NR_CPUS];
+EXPORT_SYMBOL(lowcore_ptr);
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
@@ -152,8 +153,8 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
  *
  * Run a function on all other CPUs.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 		      int wait)
@@ -179,8 +180,8 @@ EXPORT_SYMBOL(smp_call_function);
  *
  * Run a function on one processor.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
 			 int wait, int cpu)
@@ -263,7 +264,6 @@ void smp_send_stop(void)
 /*
  * Reboot, halt and power_off routines for SMP.
  */
-
 void machine_restart_smp(char *__unused)
 {
 	smp_send_stop();
@@ -360,7 +360,8 @@ struct ec_creg_mask_parms {
 /*
  * callback for setting/clearing control bits
  */
-static void smp_ctl_bit_callback(void *info) {
+static void smp_ctl_bit_callback(void *info)
+{
 	struct ec_creg_mask_parms *pp = info;
 	unsigned long cregs[16];
 	int i;
@@ -383,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
 	parms.orvals[cr] = 1 << bit;
 	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_set_bit);
 
 /*
  * Clear a bit in a control register of all cpus
@@ -396,6 +398,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 	parms.andvals[cr] = ~(1L << bit);
 	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
@@ -460,8 +463,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
  * Lets check how many CPUs we have.
  */
-static unsigned int
-__init smp_count_cpus(void)
+static unsigned int __init smp_count_cpus(void)
 {
 	unsigned int cpu, num_cpus;
 	__u16 boot_cpu_addr;
@@ -477,8 +479,7 @@
 		if ((__u16) cpu == boot_cpu_addr)
 			continue;
 		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) ==
-		    sigp_not_operational)
+		if (signal_processor(1, sigp_sense) == sigp_not_operational)
 			continue;
 		num_cpus++;
 	}
@@ -531,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
 	current_set[cpu] = p;
 }
 
-/* Reserving and releasing of CPUs */
-
-static DEFINE_SPINLOCK(smp_reserve_lock);
-static int smp_cpu_reserved[NR_CPUS];
-
-int
-smp_get_cpu(cpumask_t cpu_mask)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	/* Try to find an already reserved cpu. */
-	for_each_cpu_mask(cpu, cpu_mask) {
-		if (smp_cpu_reserved[cpu] != 0) {
-			smp_cpu_reserved[cpu]++;
-			/* Found one. */
-			goto out;
-		}
-	}
-	/* Reserve a new cpu from cpu_mask. */
-	for_each_cpu_mask(cpu, cpu_mask) {
-		if (cpu_online(cpu)) {
-			smp_cpu_reserved[cpu]++;
-			goto out;
-		}
-	}
-	cpu = -ENODEV;
-out:
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
-	return cpu;
-}
-
-void
-smp_put_cpu(int cpu)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	smp_cpu_reserved[cpu]--;
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
-}
-
-static int
-cpu_stopped(int cpu)
+static int cpu_stopped(int cpu)
 {
 	__u32 status;
 
 	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
 		if (status & 0x40)
 			return 1;
 	}
@@ -589,8 +547,7 @@ cpu_stopped(int cpu)
 
 /* Upping and downing of CPUs */
 
-int
-__cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
@@ -619,7 +576,7 @@ __cpu_up(unsigned int cpu)
 	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
-		task_stack_page(idle) + (THREAD_SIZE);
+		task_stack_page(idle) + THREAD_SIZE;
 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
 				     - sizeof(struct pt_regs)
 				     - sizeof(struct stack_frame));
@@ -682,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
 }
 early_param("possible_cpus", setup_possible_cpus);
 
-int
-__cpu_disable(void)
+int __cpu_disable(void)
 {
-	unsigned long flags;
 	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	if (smp_cpu_reserved[cpu] != 0) {
-		spin_unlock_irqrestore(&smp_reserve_lock, flags);
-		return -EBUSY;
-	}
 	cpu_clear(cpu, cpu_online_map);
 
 	/* Disable pfault pseudo page faults on this cpu. */
@@ -712,16 +662,15 @@
 			1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
 	/* disable most machine checks */
 	cr_parms.orvals[14] = 0;
-	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
+				 1 << 25 | 1 << 24);
 
 	smp_ctl_bit_callback(&cr_parms);
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
 	return 0;
 }
 
-void
-__cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
@@ -729,8 +678,7 @@ __cpu_die(unsigned int cpu)
 	printk("Processor %d spun down\n", cpu);
 }
 
-void
-cpu_die(void)
+void cpu_die(void)
 {
 	idle_task_exit();
 	signal_processor(smp_processor_id(), sigp_stop);
@@ -764,20 +712,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 			__get_free_pages(GFP_KERNEL | GFP_DMA,
 					 sizeof(void*) == 8 ? 1 : 0);
 		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-		if (lowcore_ptr[i] == NULL || stack == 0ULL)
+		if (!lowcore_ptr[i] || !stack)
 			panic("smp_boot_cpus failed to allocate memory\n");
 		*(lowcore_ptr[i]) = S390_lowcore;
-		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
+		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
 		stack = __get_free_pages(GFP_KERNEL, 0);
-		if (stack == 0ULL)
+		if (!stack)
 			panic("smp_boot_cpus failed to allocate memory\n");
-		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
 #ifndef CONFIG_64BIT
 		if (MACHINE_HAS_IEEE) {
 			lowcore_ptr[i]->extended_save_area_addr =
 				(__u32) __get_free_pages(GFP_KERNEL, 0);
-			if (lowcore_ptr[i]->extended_save_area_addr == 0)
+			if (!lowcore_ptr[i]->extended_save_area_addr)
 				panic("smp_boot_cpus failed to "
 				      "allocate memory\n");
 		}
@@ -875,13 +823,4 @@ static int __init topology_init(void)
 	}
 	return 0;
 }
-
 subsys_initcall(topology_init);
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(smp_ctl_set_bit);
-EXPORT_SYMBOL(smp_ctl_clear_bit);
-EXPORT_SYMBOL(smp_get_cpu);
-EXPORT_SYMBOL(smp_put_cpu);

include/asm-s390/smp.h  +0 −5

@@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
 
 #define raw_smp_processor_id()	(S390_lowcore.cpu_data.cpu_nr)
 
-extern int smp_get_cpu(cpumask_t cpu_map);
-extern void smp_put_cpu(int cpu);
-
 static inline __u16 hard_smp_processor_id(void)
 {
 	__u16 cpu_address;
@@ -114,8 +111,6 @@ static inline void smp_send_stop(void)
 }
 
 #define smp_cpu_not_running(cpu)	1
-#define smp_get_cpu(cpu)	({ 0; })
-#define smp_put_cpu(cpu)	({ 0; })
 #define smp_setup_cpu_possible_map()	do { } while (0)
 #endif
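One practical effect of this patch is that smp_ctl_set_bit() and smp_ctl_clear_bit() are now exported next to their definitions, so modules can set or clear a bit in a control register on every online CPU. Below is a minimal sketch of such a caller; the module name and the choice of control register 0, bit 4 are hypothetical placeholders for illustration only (a real caller would use a bit defined by the s390 architecture), and the declarations are assumed to come from asm/smp.h.

/*
 * Hypothetical module exercising the exported control-register helpers.
 * The register/bit pair below is a placeholder, not taken from this patch.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <asm/smp.h>	/* assumed to declare smp_ctl_set_bit()/smp_ctl_clear_bit() */

static int __init ctl_bit_demo_init(void)
{
	/* Set bit 4 of control register 0 on all online CPUs. */
	smp_ctl_set_bit(0, 4);
	return 0;
}

static void __exit ctl_bit_demo_exit(void)
{
	/* Clear the same bit again when the module is unloaded. */
	smp_ctl_clear_bit(0, 4);
}

module_init(ctl_bit_demo_init);
module_exit(ctl_bit_demo_exit);
MODULE_LICENSE("GPL");

Both helpers funnel through smp_ctl_bit_callback() via on_each_cpu(), which is why the patch can drop the separate EXPORT_SYMBOL block at the bottom of smp.c without changing behavior.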