Commit diffstat: arch/x86/mm/fault.c (+4 −0), arch/x86/mm/kmemcheck/kmemcheck.c (+2 −0), kernel/trace/trace_functions_graph.c (+3 −2). Full hunks follow below.
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -251,6 +251,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
+	WARN_ON_ONCE(in_nmi());
+
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
@@ -369,6 +371,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
+	WARN_ON_ONCE(in_nmi());
+
 	/*
 	 * Copy kernel mappings over when needed. This can also
 	 * happen within a race in page table update. In the later
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -631,6 +631,8 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
 	if (!pte)
 		return false;
 
+	WARN_ON_ONCE(in_nmi());
+
 	if (error_code & 2)
 		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
 	else
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -665,8 +665,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
 	/* Print nsecs (we don't want to exceed 7 numbers) */
 	if (len < 7) {
-		snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len),
-			 "%03lu", nsecs_rem);
+		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
+
+		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 		ret = trace_seq_printf(s, ".%s", nsecs_str);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;