Documentation/kernel-parameters.txt  +3 −0

@@ -1885,6 +1885,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			arch_perfmon: [X86] Force use of architectural
 				perfmon on Intel CPUs instead of the
 				CPU specific event set.
+			timer: [X86] Force use of architectural NMI
+				timer mode (see also oprofile.timer
+				for generic hr timer mode)
 
 	oops=panic	Always panic on oopses. Default is to just kill
 			the process, but there is a small probability of
arch/Kconfig  +4 −0

@@ -30,6 +30,10 @@ config OPROFILE_EVENT_MULTIPLEX
 config HAVE_OPROFILE
 	bool
 
+config OPROFILE_NMI_TIMER
+	def_bool y
+	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+
 config KPROBES
 	bool "Kprobes"
 	depends on MODULES
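CONFIG_OPROFILE_NMI_TIMER enables a generic NMI timer driver built on top of perf events, which is why it depends on PERF_EVENTS and HAVE_PERF_EVENTS_NMI. As a rough sketch of the idea (simplified from the generic nmi_timer_int.c driver; the sample period and the start helper here are illustrative assumptions, not the exact driver code):

#include <linux/err.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>

/* A pinned hardware cycle counter whose overflow interrupt arrives
 * as an NMI on architectures with HAVE_PERF_EVENTS_NMI. */
static struct perf_event_attr nmi_timer_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.sample_period	= 100000000,	/* illustrative: ~10 Hz on a 1 GHz CPU */
	.pinned		= 1,
};

/* Overflow handler: each "tick" logs one oprofile sample. */
static void nmi_timer_callback(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	event->hw.interrupts = 0;	/* don't let perf throttle the event */
	oprofile_add_sample(regs, 0);
}

/* Arm the counter on one CPU; the real driver does this for every
 * CPU and also handles CPU hotplug (helper name is made up). */
static int nmi_timer_start_cpu(int cpu)
{
	struct perf_event *event;

	event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu,
						 NULL, nmi_timer_callback,
						 NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);
	return 0;
}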
arch/x86/oprofile/Makefile  +1 −2

@@ -4,9 +4,8 @@
 DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprof.o cpu_buffer.o buffer_sync.o \
 		event_buffer.o oprofile_files.o \
 		oprofilefs.o oprofile_stats.o \
-		timer_int.o )
+		timer_int.o nmi_timer_int.o )
 
 oprofile-y			:= $(DRIVER_OBJS) init.o backtrace.o
 oprofile-$(CONFIG_X86_LOCAL_APIC)	+= nmi_int.o op_model_amd.o \
 					   op_model_ppro.o op_model_p4.o
-oprofile-$(CONFIG_X86_IO_APIC)		+= nmi_timer_int.o
arch/x86/oprofile/init.c  +7 −18

@@ -16,34 +16,23 @@
  * with the NMI mode driver.
  */
 
 #ifdef CONFIG_X86_LOCAL_APIC
 extern int op_nmi_init(struct oprofile_operations *ops);
-extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
-extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+#else
+static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
+static void op_nmi_exit(void) { }
 #endif
 
+extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	int ret;
-
-	ret = -ENODEV;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	ret = op_nmi_init(ops);
-#endif
-#ifdef CONFIG_X86_IO_APIC
-	if (ret < 0)
-		ret = op_nmi_timer_init(ops);
-#endif
 	ops->backtrace = x86_backtrace;
-
-	return ret;
+	return op_nmi_init(ops);
 }
 
 void oprofile_arch_exit(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
 	op_nmi_exit();
-#endif
 }
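With the arch hook reduced to the perfctr NMI driver, the fallback ordering no longer lives in x86 code but in the generic layer. A simplified sketch of the resulting init order (assuming the generic oprof.c logic; not the literal code):

static int __init oprofile_init(void)
{
	int err;

	/* Try the architecture's native driver first; on x86 this
	 * is now only the perfctr-based NMI driver (op_nmi_init). */
	err = oprofile_arch_init(&oprofile_ops);

	/* Fall back to the generic perf-event NMI timer, and from
	 * there to the plain hrtimer mode. */
	if (err && op_nmi_timer_init(&oprofile_ops))
		err = oprofile_timer_init(&oprofile_ops);
	if (err)
		return err;

	return oprofilefs_register();
}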
arch/x86/oprofile/nmi_int.c  +21 −6

@@ -595,24 +595,36 @@ static int __init p4_init(char **cpu_type)
 	return 0;
 }
 
-static int force_arch_perfmon;
-static int force_cpu_type(const char *str, struct kernel_param *kp)
+enum __force_cpu_type {
+	reserved = 0,		/* do not force */
+	timer,
+	arch_perfmon,
+};
+
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
 {
-	if (!strcmp(str, "arch_perfmon")) {
-		force_arch_perfmon = 1;
+	if (!strcmp(str, "timer")) {
+		force_cpu_type = timer;
+		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
+	} else if (!strcmp(str, "arch_perfmon")) {
+		force_cpu_type = arch_perfmon;
 		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+	} else {
+		force_cpu_type = 0;
 	}
 
 	return 0;
 }
-module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
 
 static int __init ppro_init(char **cpu_type)
 {
 	__u8 cpu_model = boot_cpu_data.x86_model;
 	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */
 
-	if (force_arch_perfmon && cpu_has_arch_perfmon)
+	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
 		return 0;

@@ -679,6 +691,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	if (!cpu_has_apic)
 		return -ENODEV;
 
+	if (force_cpu_type == timer)
+		return -ENODEV;
+
 	switch (vendor) {
 	case X86_VENDOR_AMD:
 		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
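With the new timer case, booting with oprofile.cpu_type=timer makes op_nmi_init() return -ENODEV before any CPU model setup, so the generic NMI timer driver takes over (oprofile.timer=1 still forces the plain hrtimer mode, per the documentation hunk above). The module_param_call() wiring is the standard kernel pattern for string-parsing parameters; for readers unfamiliar with it, a minimal standalone sketch of the same pattern, with made-up module and parameter names and the kernel_param setter signature used in this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static int mode;	/* 0 = auto (default), 1 = forced */

/* Custom setter: parse the string ourselves instead of relying on
 * one of the stock module_param() types. */
static int set_mode(const char *val, struct kernel_param *kp)
{
	if (!strcmp(val, "forced")) {
		mode = 1;
		printk(KERN_INFO "demo: forcing mode\n");
	} else {
		mode = 0;
	}

	return 0;
}
/* No getter, perm 0: settable only at boot/load time, not via sysfs. */
module_param_call(mode, set_mode, NULL, NULL, 0);

MODULE_LICENSE("GPL");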