arch/ia64/hp/sim/boot/boot_head.S  +25 −6

@@ -4,6 +4,7 @@
  */

 #include <asm/asmmacro.h>
+#include <asm/pal.h>

 	.bss
 	.align 16
@@ -49,7 +50,11 @@ GLOBAL_ENTRY(jmp_to_kernel)
 	br.sptk.few b7
 END(jmp_to_kernel)

+/*
+ * r28 contains the index of the PAL function
+ * r29--31 the args
+ * Return values in ret0--3 (r8--11)
+ */
 GLOBAL_ENTRY(pal_emulator_static)
 	mov r8=-1
 	mov r9=256
@@ -62,7 +67,7 @@ GLOBAL_ENTRY(pal_emulator_static)
 	cmp.gtu p6,p7=r9,r28
 (p6)	br.cond.sptk.few stacked
 	;;
-static:	cmp.eq p6,p7=6,r28			/* PAL_PTCE_INFO */
+static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	;;
 	mov r8=0				/* status = 0 */
@@ -70,21 +75,21 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
 	movl r10=0x0000000200000003		/* count[0], count[1] */
 	movl r11=0x1000000000002000		/* stride[0], stride[1] */
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=14,r28			/* PAL_FREQ_RATIOS */
+1:	cmp.eq p6,p7=PAL_FREQ_RATIOS,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0				/* status = 0 */
 	movl r9 =0x100000064			/* proc_ratio (1/100) */
 	movl r10=0x100000100			/* bus_ratio<<32 (1/256) */
 	movl r11=0x100000064			/* itc_ratio<<32 (1/100) */
 	;;
-1:	cmp.eq p6,p7=19,r28			/* PAL_RSE_INFO */
+1:	cmp.eq p6,p7=PAL_RSE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0				/* status = 0 */
 	mov r9=96				/* num phys stacked */
 	mov r10=0				/* hints */
 	mov r11=0
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=1,r28			/* PAL_CACHE_FLUSH */
+1:	cmp.eq p6,p7=PAL_CACHE_FLUSH,r28	/* PAL_CACHE_FLUSH */
 (p7)	br.cond.sptk.few 1f
 	mov r9=ar.lc
 	movl r8=524288				/* flush 512k million cache lines (16MB) */
@@ -102,7 +107,7 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
 	mov ar.lc=r9
 	mov r8=r0
 	;;
-1:	cmp.eq p6,p7=15,r28			/* PAL_PERF_MON_INFO */
+1:	cmp.eq p6,p7=PAL_PERF_MON_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0				/* status = 0 */
 	movl r9 =0x08122f04			/* generic=4 width=47 retired=8 cycles=18 */
@@ -138,6 +143,20 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
 	st8 [r29]=r0,16				/* clear remaining bits */
 	st8 [r18]=r0,16				/* clear remaining bits */
 	;;
+1:	cmp.eq p6,p7=PAL_VM_SUMMARY,r28
+(p7)	br.cond.sptk.few 1f
+	mov r8=0				/* status = 0 */
+	movl r9=0x2044040020F1865		/* num_tc_levels=2, num_unique_tcs=4 */
+						/* max_itr_entry=64, max_dtr_entry=64 */
+						/* hash_tag_id=2, max_pkr=15 */
+						/* key_size=24, phys_add_size=50, vw=1 */
+	movl r10=0x183C				/* rid_size=24, impl_va_msb=60 */
+	;;
+1:	cmp.eq p6,p7=PAL_MEM_ATTRIB,r28
+(p7)	br.cond.sptk.few 1f
+	mov r8=0				/* status = 0 */
+	mov r9=0x80|0x01			/* NatPage|WB */
+	;;
 1:	br.cond.sptk.few rp
 stacked:
 	br.ret.sptk.few rp
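For reference, the hard-wired PAL_VM_SUMMARY return values added above can be decoded field by field. The sketch below is illustrative only, not kernel code; it assumes the bitfield layout of pal_vm_info_1_u/pal_vm_info_2_u from include/asm-ia64/pal.h (low-order bits first: vw:1, phys_add_size:7, key_size:8, max_pkr:8, hash_tag_id:8, max_dtr_entry:8, max_itr_entry:8, num_unique_tcs:8, num_tc_levels:8, and impl_va_msb:8, rid_size:8) and reproduces the values noted in the diff's comments.

/* decode_vm_summary.c -- illustrative only; field layout assumed from asm/pal.h */
#include <stdio.h>

int main(void)
{
	unsigned long long r9  = 0x2044040020F1865ULL;	/* emulator's ret1 (r9)  */
	unsigned long long r10 = 0x183CULL;		/* emulator's ret2 (r10) */

	printf("vw             = %llu\n",  r9        & 0x01);	/* 1  */
	printf("phys_add_size  = %llu\n", (r9 >>  1) & 0x7f);	/* 50 */
	printf("key_size       = %llu\n", (r9 >>  8) & 0xff);	/* 24 */
	printf("max_pkr        = %llu\n", (r9 >> 16) & 0xff);	/* 15 */
	printf("hash_tag_id    = %llu\n", (r9 >> 24) & 0xff);	/* 2  */
	printf("max_dtr_entry  = %llu\n", (r9 >> 32) & 0xff);	/* 64 */
	printf("max_itr_entry  = %llu\n", (r9 >> 40) & 0xff);	/* 64 */
	printf("num_unique_tcs = %llu\n", (r9 >> 48) & 0xff);	/* 4  */
	printf("num_tc_levels  = %llu\n", (r9 >> 56) & 0xff);	/* 2  */

	printf("impl_va_msb    = %llu\n",  r10       & 0xff);	/* 60 */
	printf("rid_size       = %llu\n", (r10 >> 8) & 0xff);	/* 24 */
	return 0;
}

These are exactly the fields that /proc/pal's vm_info() prints, which is why the palinfo.c half of the patch below matters for the simulator.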
arch/ia64/kernel/palinfo.c  +60 −55

@@ -307,9 +307,7 @@ vm_info(char *page)
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
-		return 0;
-	}
-
+	} else {

 	p += sprintf(p,
 		     "Physical Address Space         : %d bits\n"
@@ -319,13 +317,14 @@ vm_info(char *page)
 		     "Hash Tag ID                    : 0x%x\n"
 		     "Size of RR.rid                 : %d\n",
 		     vm_info_1.pal_vm_info_1_s.phys_add_size,
-		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1,
-		     vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id,
+		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
+		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
+		     vm_info_1.pal_vm_info_1_s.key_size,
+		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
 		     vm_info_2.pal_vm_info_2_s.rid_size);
+	}

-	if (ia64_pal_mem_attrib(&attrib) != 0)
-		return 0;
-
+	if (ia64_pal_mem_attrib(&attrib) == 0) {
 	p += sprintf(p, "Supported memory attributes    : ");
 	sep = "";
 	for (i = 0; i < 8; i++) {
@@ -335,11 +334,11 @@ vm_info(char *page)
 		}
 	}
 	p += sprintf(p, "\n");
+	}

 	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
-		return 0;
-	}
-
+	} else {

 	p += sprintf(p,
 		     "\nTLB walker                     : %simplemented\n"
@@ -356,19 +355,18 @@ vm_info(char *page)
 		p += sprintf(p, "\nTLB purgeable page sizes       : ");
 		p = bitvector_process(p, vw_pages);
 	}
+	}

 	if ((status=ia64_get_ptce(&ptce)) != 0) {
 		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
-		return 0;
-	}
-
+	} else {

 	p += sprintf(p,
 		     "\nPurge base address             : 0x%016lx\n"
 		     "Purge outer loop count         : %d\n"
 		     "Purge inner loop count         : %d\n"
 		     "Purge outer loop stride        : %d\n"
 		     "Purge inner loop stride        : %d\n",
-		     ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]);
+		     ptce.base, ptce.count[0], ptce.count[1],
+		     ptce.stride[0], ptce.stride[1]);

 	p += sprintf(p,
 		     "TC Levels                      : %d\n"
@@ -392,19 +390,26 @@ vm_info(char *page)
 			     "\tAssociativity                 : %d\n"
 			     "\tNumber of entries             : %d\n"
 			     "\tFlags                         : ",
-			     cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets,
-			     tc_info.tc_associativity, tc_info.tc_num_entries);
-
-			if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
-			if (tc_info.tc_unified) p += sprintf(p, "Unified ");
-			if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction");
+			     cache_types[j+tc_info.tc_unified], i+1,
+			     tc_info.tc_num_sets,
+			     tc_info.tc_associativity,
+			     tc_info.tc_num_entries);
+
+			if (tc_info.tc_pf)
+				p += sprintf(p, "PreferredPageSizeOptimized ");
+			if (tc_info.tc_unified)
+				p += sprintf(p, "Unified ");
+			if (tc_info.tc_reduce_tr)
+				p += sprintf(p, "TCReduction");

 			p += sprintf(p, "\n\tSupported page sizes: ");

 			p = bitvector_process(p, tc_pages);

 			/* when unified date (j=2) is enough */
-			if (tc_info.tc_unified) break;
+			if (tc_info.tc_unified)
+				break;
 		}
 	}
+	}

 	p += sprintf(p, "\n");
@@ -440,14 +445,14 @@ register_info(char *page)
 		p += sprintf(p, "\n");
 	}

-	if (ia64_pal_rse_info(&phys_stacked, &hints) != 0)
-		return 0;
+	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {

 	p += sprintf(p,
 		     "RSE stacked physical registers   : %ld\n"
 		     "RSE load/store hints             : %ld (%s)\n",
 		     phys_stacked, hints.ph_data,
 		     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
+	}

 	if (ia64_pal_debug_info(&iregs, &dregs))
 		return 0;
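The palinfo.c change is structural: each early "return 0" on a failing PAL call becomes an else/guarded block, so a single call the simulator does not emulate no longer suppresses the entire /proc/pal report. A minimal user-space sketch of the before/after control flow; the fake_* stubs are hypothetical stand-ins for the PAL wrappers, not the kernel API.

/* report_pattern.c -- illustrative only */
#include <stdio.h>

static long fake_pal_vm_summary(void) { return -1; }	/* pretend: not emulated */
static long fake_pal_mem_attrib(void) { return 0; }	/* pretend: succeeds */

int main(void)
{
	long status;

	/*
	 * Old pattern: "if (call != 0) { print error; return 0; }" -- the
	 * first failure aborted vm_info() and nothing at all was reported.
	 * New pattern: report the failure, keep going, and print only the
	 * sections whose PAL call succeeded.
	 */
	if ((status = fake_pal_vm_summary()) != 0)
		fprintf(stderr, "ia64_pal_vm_summary=%ld\n", status);
	else
		printf("Physical Address Space         : ...\n");

	if (fake_pal_mem_attrib() == 0)
		printf("Supported memory attributes    : WB\n");

	return 0;
}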
arch/ia64/mm/init.c  +12 −3

@@ -382,13 +382,22 @@ ia64_mmu_init (void *my_cpu_data)
 	if (impl_va_bits < 51 || impl_va_bits > 61)
 		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

+	/*
+	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
+	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
+	 * the test makes sure that our mapped space doesn't overlap the
+	 * unimplemented hole in the middle of the region.
+	 */
+	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
+	    (mapped_space_bits > impl_va_bits - 1))
+		panic("Cannot build a big enough virtual-linear page table"
+		      " to cover mapped address space.\n"
+		      " Try using a smaller page size.\n");
+
 	/* place the VMLPT at the end of each page-table mapped region: */
 	pta = POW2(61) - POW2(vmlpt_bits);

-	if (POW2(mapped_space_bits) >= pta)
-		panic("mm/init: overlap between virtually mapped linear page table and "
-		      "mapped kernel space!");

 	/*
 	 * Set the (virtually mapped linear) page table address. Bit
 	 * 8 selects between the short and long format, bits 2-7 the
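The replacement check tests VMLPT capacity directly instead of comparing against the PTA placement. A worked example follows; it assumes the definitions used earlier in ia64_mmu_init() (not shown in this hunk): pte_bits = 3 (8-byte PTEs), mapped_space_bits = 3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT, and vmlpt_bits = impl_va_bits - PAGE_SHIFT + pte_bits.

/* vmlpt_check.c -- worked example of the new test, assumptions as above */
#include <stdio.h>

static int vmlpt_fits(unsigned page_shift, unsigned impl_va_bits)
{
	const unsigned pte_bits = 3;
	unsigned mapped_space_bits = 3 * (page_shift - pte_bits) + page_shift;
	unsigned vmlpt_bits = impl_va_bits - page_shift + pte_bits;

	/* same condition as the added panic() check, inverted */
	return !((mapped_space_bits - page_shift > vmlpt_bits - pte_bits) ||
		 (mapped_space_bits > impl_va_bits - 1));
}

int main(void)
{
	printf("16KB pages, impl_va_bits=61: %s\n", vmlpt_fits(14, 61) ? "ok" : "panic");
	printf("64KB pages, impl_va_bits=61: %s\n", vmlpt_fits(16, 61) ? "ok" : "panic");
	printf("64KB pages, impl_va_bits=51: %s\n", vmlpt_fits(16, 51) ? "ok" : "panic");
	return 0;
}

With the simulator's impl_va_msb = 60 (impl_va_bits = 61), as hard-wired in boot_head.S above, both 16KB and 64KB pages pass; a CPU implementing only 51 virtual-address bits would trip the check with 64KB pages, which is the case the panic message addresses.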