arch/mips/au1000/common/irq.c  +1 −3

@@ -65,8 +65,6 @@
 #define EXT_INTC1_REQ1 5 /* IP 5 */
 #define MIPS_TIMER_IP  7 /* IP 7 */
 
-extern void mips_timer_interrupt(void);
-
 void (*board_init_irq)(void);
 
 static DEFINE_SPINLOCK(irq_lock);
@@ -635,7 +633,7 @@ asmlinkage void plat_irq_dispatch(void)
 	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
 
 	if (pending & CAUSEF_IP7)
-		mips_timer_interrupt();
+		ll_timer_interrupt(63);
 	else if (pending & CAUSEF_IP2)
 		intc0_req0_irqdispatch();
 	else if (pending & CAUSEF_IP3)
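Note: the new call routes the Au1000 timer tick into the shared ll_timer_interrupt() handler (63 is the IRQ number the old board-private handler hardcoded). For reference, the dispatch test above services a line only when it is both raised in c0_cause and unmasked in c0_status. Below is a standalone sketch of that test; the bit positions follow the usual MIPS32 ST0_IM/CAUSEF_* definitions, but the register values are invented for the demo:

#include <stdio.h>

#define ST0_IM     0xff00u        /* c0_status interrupt mask field, bits 8..15 */
#define CAUSEF_IP2 (1u << 10)     /* first hardware interrupt line */
#define CAUSEF_IP7 (1u << 15)     /* count/compare timer interrupt */

int main(void)
{
	unsigned int status = ST0_IM;                   /* pretend: all lines unmasked */
	unsigned int cause  = CAUSEF_IP7 | CAUSEF_IP2;  /* pretend: timer and hw0 raised */
	unsigned int pending = status & cause & ST0_IM;

	/* Highest-priority line first, as in plat_irq_dispatch(). */
	if (pending & CAUSEF_IP7)
		printf("dispatch ll_timer_interrupt(63)\n");
	else if (pending & CAUSEF_IP2)
		printf("dispatch intc0_req0_irqdispatch()\n");
	return 0;
}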
arch/mips/au1000/common/time.c  +0 −40

@@ -64,48 +64,8 @@ static unsigned long last_pc0, last_match20;
 
 static DEFINE_SPINLOCK(time_lock);
 
-static inline void ack_r4ktimer(unsigned long newval)
-{
-	write_c0_compare(newval);
-}
-
-/*
- * There are a lot of conceptually broken versions of the MIPS timer interrupt
- * handler floating around.  This one is rather different, but the algorithm
- * is provably more robust.
- */
 unsigned long wtimer;
 
-void mips_timer_interrupt(void)
-{
-	int irq = 63;
-
-	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
-
-	if (r4k_offset == 0)
-		goto null;
-
-	do {
-		kstat_this_cpu.irqs[irq]++;
-		do_timer(1);
-#ifndef CONFIG_SMP
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-		r4k_cur += r4k_offset;
-		ack_r4ktimer(r4k_cur);
-	} while (((unsigned long)read_c0_count()
-		 - r4k_cur) < 0x7fffffff);
-
-	irq_exit();
-	return;
-
-null:
-	ack_r4ktimer(0);
-	irq_exit();
-}
-
 #ifdef CONFIG_PM
 irqreturn_t counter0_irq(int irq, void *dev_id)
 {
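The deleted handler's loop condition is worth a second look before it disappears: it reprograms c0_compare repeatedly so no tick is lost when handling overruns a timer period, and it detects "the counter has passed the compare value" with an unsigned subtraction that stays correct across 32-bit wraparound. A minimal userspace sketch of that comparison (the helper name and test values are mine, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* True if 'count' has reached or passed 'target', allowing for wrap. */
static int counter_passed(uint32_t count, uint32_t target)
{
	return (uint32_t)(count - target) < 0x7fffffffu;
}

int main(void)
{
	/* Plain case: counter well past the programmed compare value. */
	printf("%d\n", counter_passed(1000, 900));        /* prints 1 */
	/* Not yet reached: the next tick is still in the future. */
	printf("%d\n", counter_passed(900, 1000));        /* prints 0 */
	/* Wrapped case: counter rolled over 2^32 yet is logically ahead. */
	printf("%d\n", counter_passed(5, 0xfffffff0u));   /* prints 1 */
	return 0;
}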
arch/mips/kernel/smtc.c  +1 −1

@@ -867,7 +867,7 @@ void ipi_decode(struct smtc_ipi *pipi)
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 		clock_hang_reported[dest_copy] = 0;
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-		local_timer_interrupt(0, NULL);
+		local_timer_interrupt(0);
 		irq_exit();
 		break;
 	case LINUX_SMP_IPI:
arch/mips/kernel/time.c  +69 −24

@@ -144,7 +144,7 @@ void local_timer_interrupt(int irq, void *dev_id)
  * High-level timer interrupt service routines.  This function
  * is set as irqaction->handler and is invoked through do_IRQ.
  */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 	write_seqlock(&xtime_lock);
@@ -174,9 +174,10 @@ int null_perf_irq(void)
 	return 0;
 }
 
+EXPORT_SYMBOL(null_perf_irq);
+
 int (*perf_irq)(void) = null_perf_irq;
 
-EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
 /*
@@ -208,35 +209,79 @@ static inline int handle_perf_irq (int r2)
 		!r2;
 }
 
-asmlinkage void ll_timer_interrupt(int irq)
+void ll_timer_interrupt(int irq, void *dev_id)
 {
-	int r2 = cpu_has_mips_r2;
+	int cpu = smp_processor_id();
 
-	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 *  In an SMTC system, one Count/Compare set exists per VPE.
+	 *  Which TC within a VPE gets the interrupt is essentially
+	 *  random - we only know that it shouldn't be one with
+	 *  IXMT set. Whichever TC gets the interrupt needs to
+	 *  send special interprocessor interrupts to the other
+	 *  TCs to make sure that they schedule, etc.
+	 *
+	 *  That code is specific to the SMTC kernel, not to
+	 *  the a particular platform, so it's invoked from
+	 *  the general MIPS timer_interrupt routine.
+	 */
+
+	/*
+	 * We could be here due to timer interrupt,
+	 * perf counter overflow, or both.
+	 */
+	(void) handle_perf_irq(1);
+
+	if (read_c0_cause() & (1 << 30)) {
+		/*
+		 * There are things we only want to do once per tick
+		 * in an "MP" system.   One TC of each VPE will take
+		 * the actual timer interrupt.  The others will get
+		 * timer broadcast IPIs. We use whoever it is that takes
+		 * the tick on VPE 0 to run the full timer_interrupt().
+		 */
+		if (cpu_data[cpu].vpe_id == 0) {
+			timer_interrupt(irq, NULL);
+		} else {
+			write_c0_compare(read_c0_count() +
+					 (mips_hpt_frequency/HZ));
+			local_timer_interrupt(irq, dev_id);
+		}
+		smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+	}
+#else /* CONFIG_MIPS_MT_SMTC */
+	int r2 = cpu_has_mips_r2;
 
 	if (handle_perf_irq(r2))
-		goto out;
+		return;
 
 	if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-		goto out;
+		return;
 
-	timer_interrupt(irq, NULL);
-
-out:
-	irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
-	irq_enter();
-	if (smp_processor_id() != 0)
-		kstat_this_cpu.irqs[irq]++;
-
-	/* we keep interrupt disabled all the time */
-	local_timer_interrupt(irq, NULL);
-
-	irq_exit();
+	if (cpu == 0) {
+		/*
+		 * CPU 0 handles the global timer interrupt job and process
+		 * accounting resets count/compare registers to trigger next
+		 * timer int.
+		 */
+		timer_interrupt(irq, NULL);
+	} else {
+		/* Everyone else needs to reset the timer int here as
+		   ll_local_timer_interrupt doesn't */
+		/*
+		 * FIXME: need to cope with counter underflow.
+		 * More support needs to be added to kernel/time for
+		 * counter/timer interrupts on multiple CPU's
+		 */
+		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+
+		/*
+		 * Other CPUs should do profiling and process accounting
+		 */
+		local_timer_interrupt(irq, dev_id);
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
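The two early returns in the non-SMTC path encode the perf/timer disambiguation rules: handle_perf_irq() suppresses the tick only when the perf counter shares the timer line (cp0_perfcount_irq < 0), perf actually consumed the interrupt, and a pre-R2 CPU gives no way to tell whether the timer fired too; on R2 cores the Cause.TI bit (bit 30) is authoritative. A userspace sketch of that decision follows; the function and flag names are illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#define CAUSEF_TI (1u << 30)  /* MIPS32 R2 "timer interrupt" cause bit */

static bool should_run_timer_tick(bool r2, bool shared_line,
				  bool perf_handled, unsigned int cause)
{
	/* handle_perf_irq(): skip the timer check only if the line is
	 * shared, perf consumed it, and we cannot tell (!r2) whether
	 * the timer also fired. */
	if (shared_line && perf_handled && !r2)
		return false;
	/* On R2, trust Cause.TI: no TI bit means no timer work to do. */
	if (r2 && !(cause & CAUSEF_TI))
		return false;
	return true;
}

int main(void)
{
	/* Pre-R2, shared line, perf consumed it: must NOT assume a tick. */
	printf("%d\n", should_run_timer_tick(false, true, true, 0));        /* 0 */
	/* R2 with TI set: run the tick even though perf also fired. */
	printf("%d\n", should_run_timer_tick(true, true, true, CAUSEF_TI)); /* 1 */
	return 0;
}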
arch/mips/mips-boards/generic/time.c  +5 −107

@@ -67,108 +67,6 @@ static void mips_perf_dispatch(void)
 	do_IRQ(cp0_perfcount_irq);
 }
 
-/*
- * Redeclare until I get around mopping the timer code insanity on MIPS.
- */
-extern int null_perf_irq(void);
-
-extern int (*perf_irq)(void);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq (int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
-{
-	int cpu = smp_processor_id();
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 *  In an SMTC system, one Count/Compare set exists per VPE.
-	 *  Which TC within a VPE gets the interrupt is essentially
-	 *  random - we only know that it shouldn't be one with
-	 *  IXMT set. Whichever TC gets the interrupt needs to
-	 *  send special interprocessor interrupts to the other
-	 *  TCs to make sure that they schedule, etc.
-	 *
-	 *  That code is specific to the SMTC kernel, not to
-	 *  the a particular platform, so it's invoked from
-	 *  the general MIPS timer_interrupt routine.
-	 */
-
-	/*
-	 * We could be here due to timer interrupt,
-	 * perf counter overflow, or both.
-	 */
-	(void) handle_perf_irq(1);
-
-	if (read_c0_cause() & (1 << 30)) {
-		/*
-		 * There are things we only want to do once per tick
-		 * in an "MP" system.   One TC of each VPE will take
-		 * the actual timer interrupt.  The others will get
-		 * timer broadcast IPIs. We use whoever it is that takes
-		 * the tick on VPE 0 to run the full timer_interrupt().
-		 */
-		if (cpu_data[cpu].vpe_id == 0) {
-			timer_interrupt(irq, NULL);
-		} else {
-			write_c0_compare(read_c0_count() +
-					 (mips_hpt_frequency/HZ));
-			local_timer_interrupt(irq, dev_id);
-		}
-		smtc_timer_broadcast();
-	}
-#else /* CONFIG_MIPS_MT_SMTC */
-	int r2 = cpu_has_mips_r2;
-
-	if (handle_perf_irq(r2))
-		goto out;
-
-	if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-		goto out;
-
-	if (cpu == 0) {
-		/*
-		 * CPU 0 handles the global timer interrupt job and process
-		 * accounting resets count/compare registers to trigger next
-		 * timer int.
-		 */
-		timer_interrupt(irq, NULL);
-	} else {
-		/* Everyone else needs to reset the timer int here as
-		   ll_local_timer_interrupt doesn't */
-		/*
-		 * FIXME: need to cope with counter underflow.
-		 * More support needs to be added to kernel/time for
-		 * counter/timer interrupts on multiple CPU's
-		 */
-		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
-
-		/*
-		 * Other CPUs should do profiling and process accounting
-		 */
-		local_timer_interrupt(irq, dev_id);
-	}
-
-out:
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-	return IRQ_HANDLED;
-}
-
 /*
  * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect
  */
@@ -246,7 +144,7 @@ void __init plat_time_init(void)
 	mips_scroll_message();
 }
 
-irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
 {
 	return perf_irq();
 }
@@ -257,8 +155,10 @@ static struct irqaction perf_irqaction = {
 	.name = "performance",
 };
 
-void __init plat_perf_setup(struct irqaction *irq)
+void __init plat_perf_setup(void)
 {
+	struct irqaction *irq = &perf_irqaction;
+
 	cp0_perfcount_irq = -1;
 
 #ifdef MSC01E_INT_BASE
@@ -297,8 +197,6 @@ void __init plat_timer_setup(struct irqaction *irq)
 		mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
 	}
 
-	/* we are using the cpu counter for timer interrupts */
-	irq->handler = mips_timer_interrupt;	/* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
 	setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
 #else
@@ -308,5 +206,5 @@ void __init plat_timer_setup(struct irqaction *irq)
 	set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
-	plat_perf_setup(&perf_irqaction);
+	plat_perf_setup();
 }
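Both copies of the consolidated handler rearm the per-CPU timer with write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ)): the next interrupt is scheduled one jiffy's worth of counter ticks ahead of "now", modulo 2^32. A quick sketch with invented numbers (a hypothetical 100 MHz counter and HZ=1000; the patch itself fixes no particular frequency):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mips_hpt_frequency = 100000000u; /* assumed: 100 MHz counter */
	uint32_t hz = 1000u;                      /* assumed: 1000 ticks per second */
	uint32_t count = 4294900000u;             /* pretend c0_count read, near wrap */

	uint32_t step = mips_hpt_frequency / hz;  /* counter ticks per jiffy */
	uint32_t next_compare = count + step;     /* wraps mod 2^32, like the hardware */

	printf("step         = %u\n", step);         /* 100000 */
	printf("next_compare = %u\n", next_compare); /* 32704: wrapped past 2^32 */
	return 0;
}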