arch/x86/include/asm/paravirt.h  +5 −5

@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
-static inline void arch_local_irq_restore(unsigned long f)
+static inline notrace void arch_local_irq_restore(unsigned long f)
 {
 	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long f;
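The paravirt hunk above marks the IRQ-flag accessors notrace so the function tracer never instruments them; these helpers are used on the tracing path itself, so tracing them can recurse. A minimal standalone sketch of the annotation follows. Only the notrace definition matches the kernel's (from include/linux/compiler.h); the helper name and body are illustrative, not the paravirt implementation.

/* In the kernel, notrace expands to the attribute below, which keeps
 * gcc's -pg/mcount profiling call out of the function, so ftrace has
 * nothing to hook. */
#define notrace __attribute__((no_instrument_function))

/* Hypothetical helper showing the pattern: IRQ-flag accessors relied
 * on by the tracer itself must not be traced, or the tracer would
 * call back into itself. */
static inline notrace unsigned long demo_irq_save(void)
{
	unsigned long flags;

	/* Read EFLAGS, then disable interrupts (illustrative only). */
	asm volatile("pushf ; pop %0 ; cli" : "=rm" (flags) : : "memory");
	return flags;
}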
kernel/module.c  +12 −0

@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
 			   mod->num_trace_events, GFP_KERNEL);
 #endif
+#ifdef CONFIG_TRACING
+	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
+					 sizeof(*mod->trace_bprintk_fmt_start),
+					 &mod->num_trace_bprintk_fmt);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_bprintk_fmt_start,
+			   sizeof(*mod->trace_bprintk_fmt_start) *
+			   mod->num_trace_bprintk_fmt, GFP_KERNEL);
+#endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
 	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
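The module.c hunk records where the module's __trace_printk_fmt section lives and then registers it with kmemleak, because the section holds pointers to objects allocated by the trace code and skipping it makes those allocations look leaked. A minimal sketch of the kmemleak side, with illustrative names; it assumes the table sits in memory kmemleak already tracks (module memory is registered at load time), since kmemleak_scan_area() adds a scan area inside an already-tracked object.

#include <linux/gfp.h>
#include <linux/kmemleak.h>

/*
 * Hypothetical helper: 'table' is an array of pointers living in
 * tracked memory that kmemleak does not scan by default.  Registering
 * it as a scan area makes kmemleak follow the pointers, so the objects
 * they reference stop showing up as false-positive leaks.
 */
static void demo_mark_pointer_table(void **table, unsigned int count)
{
	kmemleak_scan_area(table, sizeof(*table) * count, GFP_KERNEL);
}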
kernel/trace/trace.c  +19 −0

@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
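The trace.c change adds a per-CPU flag so that if capturing a user stack trace ends up triggering another trace event, the nested call on that CPU bails out instead of recursing. A minimal standalone sketch of the same per-CPU recursion-guard pattern, with illustrative names and a stubbed body:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_in_progress);

/* Illustrative stand-in for work that might re-enter demo_guarded(). */
static void demo_do_work(void) { }

static void demo_guarded(void)
{
	/*
	 * Disable preemption so we stay on one CPU while the per-CPU
	 * flag is set, then refuse to nest on this CPU.
	 */
	preempt_disable();
	if (__this_cpu_read(demo_in_progress))
		goto out;

	__this_cpu_inc(demo_in_progress);

	demo_do_work();		/* may call back into demo_guarded() */

	__this_cpu_dec(demo_in_progress);
out:
	preempt_enable();
}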