arch/mips/kernel/gdb-stub.c +2 −2

@@ -769,7 +769,7 @@ void handle_exception(struct gdb_regs *regs)
 	/*
 	 * acquire the CPU spinlocks
 	 */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
 			panic("kgdb: couldn't get cpulock %d\n", i);

@@ -1044,7 +1044,7 @@ void handle_exception(struct gdb_regs *regs)
 exit_kgdb_exception:
 	/* release locks so other CPUs can go */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		__raw_spin_unlock(&kgdb_cpulock[i]);
 	spin_unlock(&kgdb_lock);
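The bug this conversion fixes is the same in every file: num_online_cpus() returns a count of online CPUs, not an upper bound on their ids. With CPU hotplug (or otherwise sparse numbering) a loop over 0..num_online_cpus()-1 can poke an offline CPU and silently skip an online one. Below is a minimal userspace sketch of the failure mode, assuming CPUs 0 and 2 are online; the mask and helpers are illustrative stand-ins for the kernel's cpumask API, not kernel code:

/*
 * Sketch: why indexing CPUs 0..num_online_cpus()-1 breaks on a
 * sparse online map.  With CPUs 0 and 2 online (CPU 1 offline),
 * the old loop visits CPUs 0 and 1 - touching an offline CPU and
 * missing CPU 2.
 */
#include <stdio.h>

#define NR_CPUS 4
static const unsigned long online_map = 0x5;	/* CPUs 0 and 2 online */

static int cpu_online(int cpu)
{
	return (online_map >> cpu) & 1;
}

static int num_online(void)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		n += cpu_online(cpu);
	return n;
}

int main(void)
{
	int i;

	/* Old pattern: iterates the wrong set when the map is sparse. */
	for (i = 0; i < num_online(); i++)
		printf("old loop visits CPU %d (online=%d)\n", i, cpu_online(i));

	/* New pattern: what for_each_online_cpu(i) amounts to. */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			printf("new loop visits CPU %d\n", i);

	return 0;
}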
arch/mips/kernel/smp.c +21 −12

@@ -375,10 +375,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_cpu_mask(cpu, mask)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_mm(mm);

@@ -411,10 +414,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 		fd.addr2 = end;
 		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_cpu_mask(cpu, mask)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();

@@ -453,10 +459,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		fd.addr1 = page;
 		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, vma->vm_mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_cpu_mask(cpu, mask)
+			if (cpu_context(cpu, vma->vm_mm))
+				cpu_context(cpu, vma->vm_mm) = 0;
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
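The smp.c hunks need slightly more than a one-line swap, because the old loop also skipped the calling CPU. The replacement copies cpu_online_map, clears the current CPU with cpu_clear(), and walks what remains with for_each_cpu_mask(); the extra if (cpu_context(cpu, mm)) test presumably just avoids redundant stores. A userspace sketch of the copy-and-clear pattern, with the mask value and this_cpu as illustrative assumptions:

/*
 * Sketch of the flush_tlb_mm() pattern above: copy the online mask,
 * clear the current CPU, then walk only the remaining CPUs.
 */
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	unsigned long online_map = 0xd;		/* CPUs 0, 2, 3 online */
	int this_cpu = 2;			/* pretend we run on CPU 2 */
	unsigned long mask = online_map;	/* cpumask_t mask = cpu_online_map; */
	int cpu;

	mask &= ~(1UL << this_cpu);		/* cpu_clear(smp_processor_id(), mask); */

	/* for_each_cpu_mask(cpu, mask): every online CPU except ourselves. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if ((mask >> cpu) & 1)
			printf("invalidate ASID on CPU %d\n", cpu);

	return 0;
}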
arch/mips/kernel/smtc.c +2 −2

@@ -1264,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	if (cpu_has_vtag_icache)
 		flush_icache_all();
 	/* Traverse all online CPUs (hack requires contigous range) */
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		/*
 		 * We don't need to worry about our own CPU, nor those of
 		 * CPUs who don't share our TLB.

@@ -1293,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	/*
 	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
 	 */
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		if ((smtc_status & SMTC_TLB_SHARED) ||
 		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
 			cpu_context(i, mm) = asid_cache(i) = asid;
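The SMTC hunks are the same mechanical conversion, but inside a filter: only CPUs that share the caller's TLB are touched, either because SMTC_TLB_SHARED is set globally or because they sit on the same VPE. A userspace sketch of that selection logic; the vpe_id table, flag value, and caller CPU are illustrative assumptions:

#include <stdio.h>

#define NR_CPUS		4
#define SMTC_TLB_SHARED	0x1	/* illustrative flag value */

static const unsigned long online_map = 0xf;		/* all four CPUs online */
static const int vpe_id[NR_CPUS] = { 0, 0, 1, 1 };	/* two TCs per VPE */
static const int smtc_status = 0;			/* TLB not globally shared */

int main(void)
{
	int cpu = 1;	/* pretend the caller runs on CPU 1 (VPE 0) */
	int i;

	/* for_each_online_cpu(i), restricted to CPUs sharing our TLB */
	for (i = 0; i < NR_CPUS; i++) {
		if (!((online_map >> i) & 1))
			continue;
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    vpe_id[i] == vpe_id[cpu])
			printf("update ASID on CPU %d\n", i);
	}
	return 0;
}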
include/asm-mips/mmu_context.h +2 −2

@@ -120,7 +120,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;

-	for (i = 0; i < num_online_cpus(); i++)
+	for_each_online_cpu(i)
 		cpu_context(i, mm) = 0;

 	return 0;

@@ -284,7 +284,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 		int i;

 		/* SMTC shares the TLB (and ASIDs) across VPEs */
-		for (i = 0; i < num_online_cpus(); i++) {
+		for_each_online_cpu(i) {
 			if((smtc_status & SMTC_TLB_SHARED) ||
 			   (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
 				cpu_context(i, mm) = 0;
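All four files end up on the same iterator. In kernels of this vintage, for_each_online_cpu() is a thin wrapper over the online mask, roughly (paraphrased from include/linux/cpumask.h; the exact spelling varies by release):

#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)

It therefore visits exactly the set bits of cpu_online_map, whatever ids those happen to be, which is the point of the conversion: num_online_cpus() only says how many CPUs are online, not which indices they occupy.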