arch/x86/include/asm/elf.h (+1 −1)

@@ -256,7 +256,7 @@ extern int force_personality32;
    instruction set this CPU supports.  This could be done in user space,
    but it's not easy, and we've already done it here.  */

-#define ELF_HWCAP		(boot_cpu_data.x86_capability[0])
+#define ELF_HWCAP		(boot_cpu_data.x86_capability[CPUID_1_EDX])

 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
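Note on the ELF_HWCAP hunk above: the magic index 0 becomes a named capability-word index, so the macro still reads the feature word that CPUID leaf 1 returns in EDX, just with the intent spelled out. A minimal sketch of the idea, assuming an abridged enum (only CPUID_1_EDX mirrors the real x86 cpufeature headers; the *_SKETCH names are illustrative, not from the patch):

/* Sketch only: abridged stand-in for the x86 cpufeature headers. */
enum cpuid_leafs_sketch {
	CPUID_1_EDX = 0,	/* capability word filled from CPUID(1).EDX */
	/* ... the real enum continues with further capability words ... */
	NCAPINTS_SKETCH
};

struct cpuinfo_sketch {
	unsigned int x86_capability[NCAPINTS_SKETCH];
};

/* Compiles to the same array access as the old [0], but the index now
 * documents which CPUID output the word came from. */
#define ELF_HWCAP_SKETCH(c)	((c).x86_capability[CPUID_1_EDX])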
arch/x86/kernel/cpu/amd.c (+10 −13)

@@ -117,7 +117,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 	void (*f_vide)(void);
 	u64 d, d2;

-	printk(KERN_INFO "AMD K6 stepping B detected - ");
+	pr_info("AMD K6 stepping B detected - ");

 	/*
 	 * It looks like AMD fixed the 2.6.2 bug and improved indirect

@@ -133,10 +133,9 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 		d = d2-d;

 		if (d > 20*K6_BUG_LOOP)
-			printk(KERN_CONT
-			       "system stability may be impaired when more than 32 MB are used.\n");
+			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 		else
-			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
+			pr_cont("probably OK (after B9730xxxx).\n");
 	}

 	/* K6 with old style WHCR */

@@ -154,7 +153,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 			wbinvd();
 			wrmsr(MSR_K6_WHCR, l, h);
 			local_irq_restore(flags);
-			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
 		}
 		return;

@@ -175,7 +174,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 			wbinvd();
 			wrmsr(MSR_K6_WHCR, l, h);
 			local_irq_restore(flags);
-			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
 		}

@@ -202,7 +201,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
	 */
 	if (c->x86_model >= 6 && c->x86_model <= 10) {
 		if (!cpu_has(c, X86_FEATURE_XMM)) {
-			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+			pr_info("Enabling disabled K7/SSE Support.\n");
 			msr_clear_bit(MSR_K7_HWCR, 15);
 			set_cpu_cap(c, X86_FEATURE_XMM);
 		}

@@ -216,8 +215,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
 		rdmsr(MSR_K7_CLK_CTL, l, h);
 		if ((l & 0xfff00000) != 0x20000000) {
-			printk(KERN_INFO
-			       "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
 			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 		}

@@ -485,7 +483,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 		unsigned long pfn = tseg >> PAGE_SHIFT;

-		printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+		pr_debug("tseg: %010llx\n", tseg);
 		if (pfn_range_is_mapped(pfn, pfn + 1))
 			set_memory_4k((unsigned long)__va(tseg), 1);
 	}

@@ -500,8 +498,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		rdmsrl(MSR_K7_HWCR, val);
 		if (!(val & BIT(24)))
-			printk(KERN_WARNING FW_BUG "TSC doesn't count "
-			       "with P0 frequency!\n");
+			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 	}
 }
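For reference on the conversion pattern in this file: the pr_*() helpers are thin printk() wrappers with the log level folded in, roughly as sketched below (simplified from include/linux/printk.h; the real header has more levels plus _once/_ratelimited variants):

/* Simplified sketch of the helpers this patch switches to. */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt		/* a file may define this first to add a prefix */
#endif

#define pr_info(fmt, ...)	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...)	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_cont(fmt, ...)	printk(KERN_CONT fmt, ##__VA_ARGS__)

So printk(KERN_INFO ...) → pr_info(...) is mechanical, and pr_cont() continues the previous line, which is why the "AMD K6 stepping B detected - " message keeps its trailing space and no newline. One caveat worth noting: pr_debug() is not a plain wrapper — it compiles to nothing unless DEBUG is defined or dynamic debug enables it — so the tseg message in bsp_init_amd() may no longer land in the log by default the way printk(KERN_DEBUG ...) did.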
arch/x86/kernel/cpu/bugs_64.c (+1 −1)

@@ -15,7 +15,7 @@ void __init check_bugs(void)
 {
 	identify_boot_cpu();
 #if !defined(CONFIG_SMP)
-	printk(KERN_INFO "CPU: ");
+	pr_info("CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	alternative_instructions();
arch/x86/kernel/cpu/centaur.c (+5 −5)

@@ -29,7 +29,7 @@ static void init_c3(struct cpuinfo_x86 *c)
 		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= ACE_FCR;		/* enable ACE unit */
 		wrmsr(MSR_VIA_FCR, lo, hi);
-		printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+		pr_info("CPU: Enabled ACE h/w crypto\n");
 	}

 	/* enable RNG unit, if present and disabled */

@@ -37,7 +37,7 @@ static void init_c3(struct cpuinfo_x86 *c)
 		rdmsr(MSR_VIA_RNG, lo, hi);
 		lo |= RNG_ENABLE;	/* enable RNG unit */
 		wrmsr(MSR_VIA_RNG, lo, hi);
-		printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+		pr_info("CPU: Enabled h/w RNG\n");
 	}

 	/* store Centaur Extended Feature Flags as

@@ -130,7 +130,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
 			name = "C6";
 			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
 			fcr_clr = DPDC;
-			printk(KERN_NOTICE "Disabling bugged TSC.\n");
+			pr_notice("Disabling bugged TSC.\n");
 			clear_cpu_cap(c, X86_FEATURE_TSC);
 			break;
 		case 8:

@@ -163,11 +163,11 @@ static void init_centaur(struct cpuinfo_x86 *c)
 		newlo = (lo|fcr_set) & (~fcr_clr);

 		if (newlo != lo) {
-			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+			pr_info("Centaur FCR was 0x%X now 0x%X\n",
				lo, newlo);
 			wrmsr(MSR_IDT_FCR1, newlo, hi);
 		} else {
-			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+			pr_info("Centaur FCR is 0x%X\n", lo);
 		}
 		/* Emulate MTRRs using Centaur's MCR. */
 		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
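The init_c3() hunks above all sit inside the same enable-a-unit shape; a minimal sketch of that read-modify-write MSR pattern follows, with a hypothetical function name and enable bit (rdmsr(), wrmsr() and pr_info() are the only real kernel interfaces used):

/* Sketch: the rdmsr/modify/wrmsr pattern surrounding the converted messages.
 * enable_unit_sketch() and enable_bit are illustrative, not from the patch. */
static void enable_unit_sketch(unsigned int msr, unsigned int enable_bit)
{
	unsigned int lo, hi;

	rdmsr(msr, lo, hi);		/* macro: fills the low/high 32-bit halves */
	if (!(lo & enable_bit)) {
		lo |= enable_bit;	/* set the enable bit in the low half */
		wrmsr(msr, lo, hi);
		pr_info("CPU: enabled unit via MSR 0x%x\n", msr);
	}
}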
arch/x86/kernel/cpu/common.c (+20 −22)

@@ -228,7 +228,7 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 	lo |= 0x200000;
 	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

-	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	pr_notice("CPU serial number disabled.\n");
 	clear_cpu_cap(c, X86_FEATURE_PN);

 	/* Disabling the serial number may affect the cpuid level */

@@ -329,8 +329,7 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 		if (!warn)
 			continue;

-		printk(KERN_WARNING
-		       "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
+		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
 	}
 }

@@ -510,7 +509,7 @@ void detect_ht(struct cpuinfo_x86 *c)
 	smp_num_siblings = (ebx & 0xff0000) >> 16;

 	if (smp_num_siblings == 1) {
-		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
+		pr_info_once("CPU0: Hyper-Threading is disabled\n");
 		goto out;
 	}

@@ -531,9 +530,9 @@ void detect_ht(struct cpuinfo_x86 *c)
 out:
 	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		pr_info("CPU: Physical Processor ID: %d\n",
			c->phys_proc_id);
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		pr_info("CPU: Processor Core ID: %d\n",
			c->cpu_core_id);
 		printed = 1;
 	}

@@ -559,8 +558,7 @@ static void get_cpu_vendor(struct cpuinfo_x86 *c)
 		}
 	}

-	printk_once(KERN_ERR
-		    "CPU: vendor_id '%s' unknown, using generic init.\n" \
+	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

 	c->x86_vendor = X86_VENDOR_UNKNOWN;

@@ -760,7 +758,7 @@ void __init early_cpu_init(void)
 	int count = 0;

 #ifdef CONFIG_PROCESSOR_SELECT
-	printk(KERN_INFO "KERNEL supported cpus:\n");
+	pr_info("KERNEL supported cpus:\n");
 #endif

 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {

@@ -778,7 +776,7 @@ void __init early_cpu_init(void)
 			for (j = 0; j < 2; j++) {
 				if (!cpudev->c_ident[j])
 					continue;
-				printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
+				pr_info(" %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
 			}
 		}

@@ -1061,7 +1059,7 @@ static void __print_cpu_msr(void)
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_safe(index, &val))
 				continue;
-			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+			pr_info(" MSR%08x: %016llx\n", index, val);
 		}
 	}
 }

@@ -1100,19 +1098,19 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 	}

 	if (vendor && !strstr(c->x86_model_id, vendor))
-		printk(KERN_CONT "%s ", vendor);
+		pr_cont("%s ", vendor);

 	if (c->x86_model_id[0])
-		printk(KERN_CONT "%s", c->x86_model_id);
+		pr_cont("%s", c->x86_model_id);
 	else
-		printk(KERN_CONT "%d86", c->x86);
+		pr_cont("%d86", c->x86);

-	printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
+		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
 	else
-		printk(KERN_CONT ")\n");
+		pr_cont(")\n");

 	print_cpu_msr(c);
 }

@@ -1438,7 +1436,7 @@ void cpu_init(void)
 	show_ucode_info_early();

-	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+	pr_info("Initializing CPU#%d\n", cpu);

 	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    cpu_has_tsc ||
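Usage note on the print_cpu_info() hunk: the converted pr_cont() calls assemble one console line that was opened elsewhere without a trailing newline (for example the pr_info("CPU: ") in bugs_64.c above). A hedged sketch with made-up values, wrapped in an illustrative function:

/* Illustrative only: how one "CPU: ..." console line is assembled. */
static void print_cpu_line_example(void)
{
	pr_info("CPU: ");				/* opener, no trailing newline */
	pr_cont("%s", "Example x86 Model");		/* model string (made up) */
	pr_cont(" (family: 0x%x, model: 0x%x", 0x6, 0x3a);
	pr_cont(", stepping: 0x%x)\n", 0x9);		/* the newline closes the line */
}

The printk_once(KERN_INFO ...) and printk_once(KERN_ERR ...) sites map onto the pr_info_once()/pr_err_once() variants in the same way.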