arch/ia64/configs/sn2_defconfig +1 −1

@@ -113,7 +113,7 @@ CONFIG_IOSAPIC=y
 CONFIG_IA64_SGI_SN_XP=m
 CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
-CONFIG_NR_CPUS=512
+CONFIG_NR_CPUS=1024
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_SCHED_SMT=y
 CONFIG_PREEMPT=y
arch/ia64/kernel/time.c +29 −0

@@ -249,3 +249,32 @@ time_init (void)
 	 */
 	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
+
+#define SMALLUSECS 100
+
+void
+udelay (unsigned long usecs)
+{
+	unsigned long start;
+	unsigned long cycles;
+	unsigned long smallusecs;
+
+	/*
+	 * Execute the non-preemptible delay loop (because the ITC might
+	 * not be synchronized between CPUS) in relatively short time
+	 * chunks, allowing preemption between the chunks.
+	 */
+	while (usecs > 0) {
+		smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
+		preempt_disable();
+		cycles = smallusecs*local_cpu_data->cyc_per_usec;
+		start = ia64_get_itc();
+
+		while (ia64_get_itc() - start < cycles)
+			cpu_relax();
+
+		preempt_enable();
+		usecs -= smallusecs;
+	}
+}
+EXPORT_SYMBOL(udelay);
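The new udelay() spins on the per-CPU ITC only in chunks of at most SMALLUSECS (100) microseconds, because the ITC may not be synchronized between CPUs, and re-enables preemption between chunks so a long delay on a CONFIG_PREEMPT kernel does not keep the scheduler locked out. A minimal sketch of a caller that benefits; the device, register name, and ready bit below are hypothetical, not part of this patch:

/* Hypothetical polling loop illustrating the preempt-friendly udelay(). */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_READY_BIT	0x1	/* hypothetical status bit */

static int example_wait_ready(void __iomem *status_reg)
{
	int tries;

	for (tries = 0; tries < 1000; tries++) {
		if (readl(status_reg) & EXAMPLE_READY_BIT)
			return 0;
		/*
		 * On ia64 this 500 us wait now runs as five 100 us
		 * non-preemptible chunks, so other tasks can still be
		 * scheduled while we poll.
		 */
		udelay(500);
	}
	return -ETIMEDOUT;
}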
arch/ia64/kernel/uncached.c +3 −3

@@ -53,7 +53,7 @@ static void uncached_ipi_visibility(void *data)
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
 		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, get_cpu());
+		       "CPU %i\n", status, raw_smp_processor_id());
 }

@@ -63,7 +63,7 @@ static void uncached_ipi_mc_drain(void *data)
 	status = ia64_pal_mc_drain();
 	if (status)
 		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, get_cpu());
+		       "CPU %i\n", status, raw_smp_processor_id());
 }

@@ -105,7 +105,7 @@ uncached_get_new_chunk(struct gen_pool *poolp)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
-		status, get_cpu());
+		status, raw_smp_processor_id());
 	if (!status) {
 		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
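These three call sites only report which CPU ran the code. get_cpu() disables preemption and must be balanced by put_cpu(), which these paths never issued, so each message left the preempt count raised; raw_smp_processor_id() reads the CPU number without touching the preempt count. A small sketch of the difference, not taken from this patch (the function name is hypothetical):

#include <linux/kernel.h>
#include <linux/smp.h>

static void example_cpu_id_usage(void)
{
	int cpu;

	/* get_cpu() pins the task to this CPU until put_cpu(). */
	cpu = get_cpu();
	printk(KERN_DEBUG "pinned to CPU %i\n", cpu);
	put_cpu();

	/*
	 * For a purely informational message the pairing is unnecessary;
	 * raw_smp_processor_id() just reads the current CPU number and
	 * leaves the preempt count alone.
	 */
	printk(KERN_DEBUG "running on CPU %i\n", raw_smp_processor_id());
}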
arch/ia64/kernel/vmlinux.lds.S +3 −0

@@ -177,6 +177,9 @@ SECTIONS
  }
  . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose kernel data */
+  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
+	{ *(.data.read_mostly) }
+
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
	{ *(.data.cacheline_aligned) }
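The new .data.read_mostly output section, placed ahead of .data.cacheline_aligned, gathers variables that are read often but written rarely, so they do not share cache lines with frequently dirtied data. A minimal sketch of how a variable lands there, assuming the generic __read_mostly annotation from <linux/cache.h> (the variable name is made up):

#include <linux/cache.h>

/*
 * On architectures that define __read_mostly as a section attribute,
 * this flag is emitted into .data.read_mostly, which the linker script
 * change above collects into its own output section.
 */
static int example_fast_path_enabled __read_mostly = 1;	/* hypothetical */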
arch/ia64/sn/kernel/sn2/sn2_smp.c +1 −1

@@ -202,7 +202,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		      unsigned long end, unsigned long nbits)
 {
 	int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
-	int mymm = (mm == current->active_mm);
+	int mymm = (mm == current->active_mm && current->mm);
 	volatile unsigned long *ptc0, *ptc1;
 	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
 	short nasids[MAX_NUMNODES], nix;
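The stricter mymm test matters because a kernel thread can run with a borrowed ("lazy") mm: its current->active_mm points at the previous user task's address space while its own current->mm is NULL. Requiring current->mm as well means the purge is only treated as a flush of the caller's own address space when the caller actually owns one. A hedged illustration of the distinction; the helper below is hypothetical, not part of the patch:

#include <linux/sched.h>

/* Hypothetical helper restating the test added in sn2_global_tlb_purge(). */
static int purging_own_mm(struct mm_struct *mm)
{
	/*
	 * current->mm is NULL for kernel threads even though
	 * current->active_mm may point at a borrowed user mm,
	 * so both conditions are needed.
	 */
	return mm == current->active_mm && current->mm != NULL;
}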