This patch routes every arm and arm64 IPI through a new smp_cross_call_common() wrapper that records, in a per-cpu pending_ipi flag, which CPUs have an IPI in flight; on arm64, handle_IPI() clears the flag once the IPI has been serviced.

arch/arm/kernel/smp.c (+29 −8)

@@ -466,6 +466,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 
 static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+static void smp_cross_call_common(const struct cpumask *cpumask,
+                                  unsigned int func)
+{
+        unsigned int cpu;
+
+        for_each_cpu(cpu, cpumask)
+                per_cpu(pending_ipi, cpu) = true;
+
+        __smp_cross_call(cpumask, func);
+}
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
@@ -518,31 +529,32 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-        smp_cross_call(mask, IPI_CALL_FUNC);
+        smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
-        smp_cross_call(mask, IPI_WAKEUP);
+        smp_cross_call_common(mask, IPI_WAKEUP);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+        smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
         if (arch_irq_work_has_interrupt())
-                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+                smp_cross_call_common(cpumask_of(smp_processor_id()),
+                                      IPI_IRQ_WORK);
 }
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
-        smp_cross_call(mask, IPI_TIMER);
+        smp_cross_call_common(mask, IPI_TIMER);
 }
 #endif
 
@@ -664,7 +676,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+        smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -675,7 +687,7 @@ void smp_send_stop(void)
         cpumask_copy(&mask, cpu_online_mask);
         cpumask_clear_cpu(smp_processor_id(), &mask);
         if (!cpumask_empty(&mask))
-                smp_cross_call(&mask, IPI_CPU_STOP);
+                smp_cross_call_common(&mask, IPI_CPU_STOP);
 
         /* Wait up to one second for other CPUs to stop */
         timeout = USEC_PER_SEC;
@@ -748,7 +760,16 @@ core_initcall(register_cpufreq_notifier);
 
 static void raise_nmi(cpumask_t *mask)
 {
-        smp_cross_call(mask, IPI_CPU_BACKTRACE);
+        /*
+         * Generate the backtrace directly if we are running in a calling
+         * context that is not preemptible by the backtrace IPI. Note
+         * that nmi_cpu_backtrace() automatically removes the current cpu
+         * from mask.
+         */
+        if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+                nmi_cpu_backtrace(NULL);
+
+        smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)

On the arm side, raise_nmi() now dumps the current CPU's backtrace directly when that CPU is in the target mask with IRQs disabled, since in that context it could never take the backtrace IPI it is about to send itself; note also that the single-function-call sender switches from IPI_CALL_FUNC to IPI_CALL_FUNC_SINGLE.

arch/arm64/kernel/smp.c (+19 −8)

On arm64, the __smp_cross_call declaration moves up so the new wrapper can use it, and handle_IPI() clears pending_ipi once the IPI has been handled.

@@ -591,6 +591,18 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
 #else
 #define acpi_table_parse_madt(...)        do { } while (0)
 #endif
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+
+void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
+{
+        unsigned int cpu;
+
+        for_each_cpu(cpu, cpumask)
+                per_cpu(pending_ipi, cpu) = true;
+
+        __smp_cross_call(cpumask, func);
+}
 
 /*
  * Enumerate the possible CPU set from the device tree and build the
@@ -735,8 +747,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         }
 }
 
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
         __smp_cross_call = fn;
@@ -785,18 +795,18 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-        smp_cross_call(mask, IPI_CALL_FUNC);
+        smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+        smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
-        smp_cross_call(mask, IPI_WAKEUP);
+        smp_cross_call_common(mask, IPI_WAKEUP);
 }
 #endif
 
@@ -897,19 +907,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         if ((unsigned)ipinr < NR_IPI)
                 trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 
+        per_cpu(pending_ipi, cpu) = false;
         set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
         BUG_ON(cpu_is_offline(cpu));
-        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+        smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
-        smp_cross_call(mask, IPI_TIMER);
+        smp_cross_call_common(mask, IPI_TIMER);
 }
 #endif
@@ -926,7 +937,7 @@ void smp_send_stop(void)
                 if (system_state == SYSTEM_BOOTING ||
                     system_state == SYSTEM_RUNNING)
                         pr_crit("SMP: stopping secondary CPUs\n");
-                smp_cross_call(&mask, IPI_CPU_STOP);
+                smp_cross_call_common(&mask, IPI_CPU_STOP);
         }
 
         /* Wait up to one second for other CPUs to stop */
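Nothing in the diffs above consumes pending_ipi yet. As a rough illustration of the intended use, here is a minimal sketch of a cpuidle-style consumer that refuses a deep idle state while an IPI is already heading for the CPU; my_lpm_enter() and enter_deep_idle() are hypothetical names, and only the per-cpu accessors and the pending_ipi variable from the patch are real:

/*
 * Hypothetical consumer of pending_ipi -- not part of this patch.
 * A low-power-mode driver could bail out of a deep idle state when an
 * IPI is already in flight, since the imminent wakeup would waste the
 * deep state's entry/exit latency.
 */
#include <linux/percpu.h>
#include <linux/errno.h>

DECLARE_PER_CPU(bool, pending_ipi);        /* defined in the smp.c diffs above */

static int my_lpm_enter(unsigned int cpu, int target_state)
{
        if (per_cpu(pending_ipi, cpu))
                return -EBUSY;        /* caller falls back to a shallow state */

        return enter_deep_idle(target_state);        /* hypothetical entry helper */
}

The check is only advisory: the flag can be set between the test and the idle entry, so the chosen state must still wake promptly when the IPI arrives.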
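A second, equally hypothetical use is diagnostic: because the flag is a plain per-cpu bool, a debug path can cheaply report which CPUs still have an IPI outstanding. dump_pending_ipis() below is made up and assumes visibility of the pending_ipi variable:

static void dump_pending_ipis(void)
{
        unsigned int cpu;

        /* Report every online CPU whose pending_ipi flag is still set. */
        for_each_online_cpu(cpu)
                if (per_cpu(pending_ipi, cpu))
                        pr_info("CPU%u: IPI pending\n", cpu);
}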
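The raise_nmi() change rides underneath the generic backtrace machinery, so existing callers pick it up unchanged. A sketch of the call path on the arm side, using the real helper from <linux/nmi.h>; report_stall() is a made-up caller:

#include <linux/nmi.h>

/*
 * From a stall/watchdog path: ask every CPU, including this one, to
 * dump its backtrace. This funnels into arch_trigger_cpumask_backtrace()
 * and then raise_nmi(), where the patched code dumps the current CPU
 * directly (it cannot take the IPI while IRQs are disabled) and sends
 * IPI_CPU_BACKTRACE to the rest via smp_cross_call_common().
 */
static void report_stall(void)
{
        trigger_all_cpu_backtrace();
}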