Documentation/sysctl/kernel.txt  +35 −0

@@ -62,6 +62,7 @@ show up in /proc/sys/kernel:
 - panic_on_warn
 - perf_cpu_time_max_percent
 - perf_event_paranoid
+- perf_event_max_stack
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -88,6 +89,7 @@ show up in /proc/sys/kernel:
 - sysctl_writes_strict
 - tainted
 - threads-max
+- unprivileged_bpf_disabled
 - unknown_nmi_panic
 - watchdog
 - watchdog_thresh
@@ -665,6 +667,19 @@ CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 1 otherwise.
 
 ==============================================================
 
+perf_event_max_stack:
+
+Controls maximum number of stack frames to copy for
+(attr.sample_type & PERF_SAMPLE_CALLCHAIN) configured events,
+for instance, when using 'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 127.
+
+==============================================================
+
 pid_max:
 
 PID allocation wrap value.  When the kernel's next PID value
@@ -944,6 +959,26 @@ available RAM pages threads-max is reduced accordingly.
 
 ==============================================================
 
+unprivileged_bpf_disabled:
+
+Writing 1 to this entry will disable unprivileged calls to bpf();
+once disabled, calling bpf() without CAP_SYS_ADMIN will return
+-EPERM. Once set to 1, this can't be cleared from the running kernel
+anymore.
+
+Writing 2 to this entry will also disable unprivileged calls to bpf(),
+however, an admin can still change this setting later on, if needed,
+by writing 0 or 1 to this entry.
+
+If BPF_UNPRIV_DEFAULT_OFF is enabled in the kernel config, then this
+entry will default to 2 instead of 0.
+
+  0 - Unprivileged calls to bpf() are enabled
+  1 - Unprivileged calls to bpf() are disabled without recovery
+  2 - Unprivileged calls to bpf() are disabled
+
+==============================================================
+
 unknown_nmi_panic:
 
 The value in this file affects behavior of handling NMI. When the
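As a quick illustration of the unprivileged_bpf_disabled behaviour documented above (not part of the patch): once the sysctl reads 1 or 2, a process without CAP_SYS_ADMIN gets -EPERM from bpf(2). A minimal userspace sketch, with error handling trimmed; the map parameters are arbitrary:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(int);
	attr.value_size  = sizeof(long);
	attr.max_entries = 1;

	/* Older libcs have no bpf() wrapper, so go through syscall(2). */
	if (syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0 &&
	    errno == EPERM)
		printf("bpf() denied: unprivileged_bpf_disabled is in effect\n");
	return 0;
}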
Documentation/sysctl/net.txt  +19 −0

@@ -43,6 +43,25 @@ Values :
 	1 - enable the JIT
 	2 - enable the JIT and ask the compiler to emit traces on kernel log.
 
+bpf_jit_harden
+--------------
+
+This enables hardening for the Berkeley Packet Filter Just in Time
+compiler. Supported are eBPF JIT backends. Enabling hardening trades
+off performance, but can mitigate JIT spraying.
+Values :
+	0 - disable JIT hardening (default value)
+	1 - enable JIT hardening for unprivileged users only
+	2 - enable JIT hardening for all users
+
+bpf_jit_limit
+-------------
+
+This enforces a global limit for memory allocations to the BPF JIT
+compiler in order to reject unprivileged JIT requests once it has
+been surpassed. bpf_jit_limit contains the value of the global limit
+in bytes.
+
 dev_weight
 --------------
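Since the two new knobs are ordinary procfs files, they can be exercised without special tooling. An illustrative sketch (not part of the patch), assuming a standard /proc mount and CAP_SYS_ADMIN for the write:

#include <stdio.h>

int main(void)
{
	FILE *f;
	long limit;

	/* Harden the JIT for unprivileged users only. */
	f = fopen("/proc/sys/net/core/bpf_jit_harden", "w");
	if (f) {
		fputs("1", f);
		fclose(f);
	}

	/* Report the global JIT allocation limit in bytes. */
	f = fopen("/proc/sys/net/core/bpf_jit_limit", "r");
	if (f) {
		if (fscanf(f, "%ld", &limit) == 1)
			printf("bpf_jit_limit = %ld bytes\n", limit);
		fclose(f);
	}
	return 0;
}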
arch/alpha/include/uapi/asm/socket.h  +3 −0

@@ -92,4 +92,7 @@
 #define SO_ATTACH_BPF		50
 #define SO_DETACH_BPF		SO_DETACH_FILTER
 
+#define SO_ATTACH_REUSEPORT_CBPF	51
+#define SO_ATTACH_REUSEPORT_EBPF	52
+
 #endif /* _UAPI_ASM_SOCKET_H */
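For context, the two new socket options attach a selection program to an SO_REUSEPORT group: the program's return value indexes the socket in the group that receives the packet, replacing the default hash-based pick. A usage sketch (not part of the patch; error handling trimmed, and note that some kernels require the socket to be bound before the program can be attached):

#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/filter.h>

static int make_reuseport_sock(uint16_t port)
{
	/* Classic BPF: unconditionally return 0, i.e. always pick the
	 * first socket in the reuseport group. */
	struct sock_filter prog[] = {
		{ BPF_RET | BPF_K, 0, 0, 0 },
	};
	struct sock_fprog fprog = { .len = 1, .filter = prog };
	struct sockaddr_in addr = {
		.sin_family      = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port        = htons(port),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	/* SO_ATTACH_REUSEPORT_EBPF takes an eBPF program fd instead
	 * of a classic sock_fprog. */
	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
		   &fprog, sizeof(fprog));
	return fd;
}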
arch/arc/kernel/perf_event.c  +3 −3

@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
-	struct perf_callchain_entry *entry = ctrl->perf_stuff;
+	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
 
 	perf_callchain_store(entry, addr);
 	if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
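The type change above is the arch-side fallout of the perf_event_max_stack series: callchain writers no longer receive the raw perf_callchain_entry but a context that wraps it and carries the per-event stack-depth limit, so perf_callchain_store() can stop once max_stack frames are recorded. For reference, the context is defined in <linux/perf_event.h> roughly as follows (the exact field set varies across kernel versions):

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;		/* buffer being filled */
	u32				max_stack;	/* cf. perf_event_max_stack */
	u32				nr;		/* frames stored so far */
	short				contexts;
	bool				contexts_maxed;
};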
arch/arm/kernel/hw_breakpoint.c  +8 −17

@@ -631,7 +631,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	if (!bp->overflow_handler) {
+	if (is_default_overflow_handler(bp)) {
 		/*
 		 * Mismatch breakpoints are required for single-stepping
 		 * breakpoints.
@@ -688,12 +688,6 @@ static void disable_single_step(struct perf_event *bp)
 	arch_install_hw_breakpoint(bp);
 }
 
-static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
-				       struct arch_hw_breakpoint *info)
-{
-	return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
-}
-
 static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 			       struct pt_regs *regs)
 {
@@ -760,19 +754,16 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 		 * can't help us with this.
 		 */
-		if (watchpoint_fault_on_uaccess(regs, info))
-			goto step;
-
+		enable_single_step(wp, instruction_pointer(regs));
+
 		perf_bp_event(wp, regs);
 
 		/*
-		 * Defer stepping to the overflow handler if one is installed.
-		 * Otherwise, insert a temporary mismatch breakpoint so that
-		 * we can single-step over the watchpoint trigger.
+		 * If no overflow handler is present, insert a temporary
+		 * mismatch breakpoint so we can single-step over the
+		 * watchpoint trigger.
 		 */
-		if (wp->overflow_handler)
-			goto unlock;
-step:
-		enable_single_step(wp, instruction_pointer(regs));
+		if (is_default_overflow_handler(wp))
+			enable_single_step(wp, instruction_pointer(regs));
 
 unlock:
 		rcu_read_unlock();
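The motivation for is_default_overflow_handler() in the hunks above: now that other kernel code (e.g. BPF programs attached to perf events) can install an overflow handler, every event ends up with a non-NULL ->overflow_handler, so a test like !bp->overflow_handler no longer means "no custom handler installed". The helper compares against the kernel's default handler instead; its shape in <linux/perf_event.h> is approximately:

static inline bool is_default_overflow_handler(struct perf_event *event)
{
	return event->overflow_handler == perf_event_output;
}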