arch/arm64/kernel/cpu-reset.h +1 −1

```diff
@@ -16,7 +16,7 @@ void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
 		       unsigned long arg0, unsigned long arg1,
 		       unsigned long arg2);
 
-static inline void __noreturn cpu_soft_restart(unsigned long entry,
+static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
 					       unsigned long arg0,
 					       unsigned long arg1,
 					       unsigned long arg2)
```
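The `__nocfi` annotation is needed here because `cpu_soft_restart()` makes an indirect call through the *physical* alias of `__cpu_soft_restart`, an address Clang's CFI metadata does not cover, so an instrumented call site would always fail at reboot/kexec time. For context, the function body (not part of this diff) looks roughly like this in mainline arm64:

```c
/* Context from mainline arm64 (approximate, not part of this diff). */
static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
						       unsigned long arg0,
						       unsigned long arg1,
						       unsigned long arg2)
{
	typeof(__cpu_soft_restart) *restart;

	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
		is_hyp_mode_available();

	/*
	 * The call target is the physical alias of __cpu_soft_restart;
	 * CFI cannot match it against any known function type, hence
	 * __nocfi on this caller.
	 */
	restart = (void *)__pa_symbol(__cpu_soft_restart);

	cpu_install_idmap();
	restart(el2_switch, entry, arg0, arg1, arg2);
	unreachable();
}
```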
arch/arm64/net/bpf_jit_comp.c +3 −7

```diff
@@ -982,12 +982,8 @@ bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
 {
 	const uintptr_t func = (const uintptr_t)prog->bpf_func;
 
-	/*
-	 * bpf_func must be correctly aligned and within the correct region.
-	 */
-	if (unlikely(!IS_ALIGNED(func, sizeof(u32))))
-		return false;
-
-	return (func >= BPF_JIT_REGION_START && func < BPF_JIT_REGION_END);
+	/* bpf_func must be correctly aligned and within the BPF JIT region */
+	return (func >= BPF_JIT_REGION_START && func < BPF_JIT_REGION_END &&
+		IS_ALIGNED(func, sizeof(u32)));
 }
 #endif
```
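JITed BPF programs carry no Clang CFI type metadata, so before `prog->bpf_func` is called indirectly, the arm64 code falls back to a range-and-alignment sanity check; folding the alignment test into the single `return` keeps the fast path branch-free. A minimal userspace sketch of the same predicate, where the region bounds are placeholders rather than the kernel's values:

```c
/* Standalone sketch of the check (illustrative, not kernel code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((uint64_t)(a) - 1)) == 0)
#define JIT_REGION_START  0x100000000ULL /* placeholder bound */
#define JIT_REGION_END    0x180000000ULL /* placeholder bound */

static bool jit_check_func(uint64_t func)
{
	/* Valid targets lie inside the JIT region, at 4-byte
	 * (AArch64 instruction) alignment. */
	return func >= JIT_REGION_START && func < JIT_REGION_END &&
	       IS_ALIGNED(func, sizeof(uint32_t));
}

int main(void)
{
	printf("%d\n", jit_check_func(0x100000040ULL)); /* 1: in range, aligned */
	printf("%d\n", jit_check_func(0x100000042ULL)); /* 0: misaligned */
	printf("%d\n", jit_check_func(0x200000000ULL)); /* 0: out of range */
	return 0;
}
```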
include/linux/cfi.h +8 −2

```diff
@@ -1,3 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
 #ifndef _LINUX_CFI_H
 #define _LINUX_CFI_H
@@ -6,13 +12,13 @@
 #ifdef CONFIG_CFI_CLANG
 #ifdef CONFIG_MODULES
 
-typedef void (*cfi_check_fn)(uint64_t, void *, void *);
+typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag);
 
 /* Compiler-generated function in each module, and the kernel */
 #define CFI_CHECK_FN __cfi_check
 #define CFI_CHECK_FN_NAME __stringify(CFI_CHECK_FN)
 
-extern void CFI_CHECK_FN(uint64_t, void *, void *);
+extern void CFI_CHECK_FN(uint64_t id, void *ptr, void *diag);
 
 #ifdef CONFIG_CFI_CLANG_SHADOW
 extern void cfi_module_add(struct module *mod, unsigned long min_addr,
```
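Naming the parameters documents the contract: Clang emits one `__cfi_check(id, ptr, diag)` per LTO unit, where `id` identifies the function type expected at the call site, `ptr` is the would-be target, and `diag` carries diagnostic state. A self-contained sketch of how a slowpath dispatches to such a checker; the helpers here are stand-ins, not the kernel's exact code:

```c
/* Illustrative userspace sketch of slowpath dispatch. */
#include <stdint.h>
#include <stdio.h>

typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag);

/* Stand-in for the compiler-generated __cfi_check of the core image. */
static void builtin_check(uint64_t id, void *ptr, void *diag)
{
	(void)diag;
	printf("core check: id=%llu ptr=%p\n", (unsigned long long)id, ptr);
}

/* Stand-in for find_cfi_check(): resolve the owning image's checker. */
static cfi_check_fn lookup_check_fn(void *ptr)
{
	(void)ptr;
	return builtin_check; /* pretend the core image owns ptr */
}

static void slowpath(uint64_t id, void *ptr, void *diag)
{
	/* The checker validates that ptr is a plausible call target for
	 * the type identified by id, reporting through diag on failure. */
	lookup_check_fn(ptr)(id, ptr, diag);
}

int main(void)
{
	int dummy;

	slowpath(42, &dummy, NULL);
	return 0;
}
```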
include/linux/compiler-clang.h +1 −1

```diff
@@ -49,7 +49,7 @@
 	__attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
 
 #ifdef CONFIG_CFI_CLANG
-#define __nocfi __attribute__((no_sanitize("cfi")))
+#define __nocfi __attribute__((__no_sanitize__("cfi")))
 #endif
 
 #ifdef CONFIG_LTO_CLANG
```
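Switching to the reserved `__no_sanitize__` spelling hardens the macro against identifier collisions: `no_sanitize` lives in the user namespace and could legally be defined as a macro by unrelated code, while double-underscore names are reserved for the implementation. A contrived illustration (compile with clang):

```c
/* A hostile or merely unlucky macro somewhere in the include chain. */
#define no_sanitize(x) banana

/* Breaks: would expand to __attribute__((banana)) and fail to build. */
/* #define __nocfi __attribute__((no_sanitize("cfi"))) */

/* Safe: __no_sanitize__ cannot be shadowed by a conforming macro. */
#define __nocfi __attribute__((__no_sanitize__("cfi")))

__nocfi static void unchecked_fn(void) { }

int main(void)
{
	unchecked_fn();
	return 0;
}
```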
kernel/cfi.c +45 −34

```diff
@@ -1,16 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * CFI (Control Flow Integrity) error and slowpath handling
+ * Clang Control Flow Integrity (CFI) error and slowpath handling.
  *
  * Copyright (C) 2017 Google, Inc.
+ * Copyright (C) 2019 Google LLC
  */
 
 #include <linux/gfp.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/printk.h>
 #include <linux/ratelimit.h>
 #include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <asm/bug.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
@@ -25,12 +26,10 @@
 
 static inline void handle_cfi_failure(void *ptr)
 {
-#ifdef CONFIG_CFI_PERMISSIVE
-	WARN_RATELIMIT(1, "CFI failure (target: %pF):\n", ptr);
-#else
-	pr_err("CFI failure (target: %pF):\n", ptr);
-	BUG();
-#endif
+	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
+		WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
+	else
+		panic("CFI failure (target: %pS)\n", ptr);
 }
 
 #ifdef CONFIG_MODULES
@@ -44,7 +43,7 @@ struct shadow_range {
 	unsigned long max_page;
 };
 
-#define SHADOW_ORDER	1
+#define SHADOW_ORDER	2
 #define SHADOW_PAGES	(1 << SHADOW_ORDER)
 #define SHADOW_SIZE \
 	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
@@ -57,8 +56,8 @@ struct cfi_shadow {
 	u16 shadow[SHADOW_SIZE];
 };
 
-static DEFINE_SPINLOCK(shadow_update_lock);
-static struct cfi_shadow __rcu *cfi_shadow __read_mostly = NULL;
+static DEFINE_MUTEX(shadow_update_lock);
+static struct cfi_shadow __rcu *cfi_shadow __read_mostly;
 
 static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
 {
@@ -79,7 +78,8 @@ static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
 static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
 	int index)
 {
-	BUG_ON(index < 0 || index >= SHADOW_SIZE);
+	if (unlikely(index < 0 || index >= SHADOW_SIZE))
+		return 0;
 
 	if (unlikely(s->shadow[index] == SHADOW_INVALID))
 		return 0;
@@ -90,7 +90,8 @@ static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
 static inline unsigned long shadow_to_page(const struct cfi_shadow *s,
 	int index)
 {
-	BUG_ON(index < 0 || index >= SHADOW_SIZE);
+	if (unlikely(index < 0 || index >= SHADOW_SIZE))
+		return 0;
 
 	return (s->r.min_page + index) << PAGE_SHIFT;
 }
@@ -138,7 +139,8 @@ static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
 	unsigned long check = (unsigned long)mod->cfi_check;
 	int check_index = ptr_to_shadow(s, check);
 
-	BUG_ON((check & PAGE_MASK) != check); /* Must be page aligned */
+	if (unlikely((check & PAGE_MASK) != check))
+		return; /* Must be page aligned */
 
 	if (check_index < 0)
 		return; /* Module not addressable with shadow */
@@ -151,9 +153,10 @@ static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
 	/* For each page, store the check function index in the shadow */
 	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
 		int index = ptr_to_shadow(s, ptr);
+
 		if (index >= 0) {
-			/* Assume a page only contains code for one module */
-			BUG_ON(s->shadow[index] != SHADOW_INVALID);
+			/* Each page must only contain one module */
+			WARN_ON(s->shadow[index] != SHADOW_INVALID);
 			s->shadow[index] = (u16)check_index;
 		}
 	}
@@ -172,6 +175,7 @@ static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod)
 
 	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
 		int index = ptr_to_shadow(s, ptr);
+
 		if (index >= 0)
 			s->shadow[index] = SHADOW_INVALID;
 	}
@@ -186,14 +190,12 @@ static void update_shadow(struct module *mod, unsigned long min_addr,
 	struct cfi_shadow *next = (struct cfi_shadow *)
 		__get_free_pages(GFP_KERNEL, SHADOW_ORDER);
 
-	BUG_ON(!next);
-
 	next->r.mod_min_addr = min_addr;
 	next->r.mod_max_addr = max_addr;
 	next->r.min_page = min_addr >> PAGE_SHIFT;
 	next->r.max_page = max_addr >> PAGE_SHIFT;
 
-	spin_lock(&shadow_update_lock);
+	mutex_lock(&shadow_update_lock);
 	prev = rcu_dereference_protected(cfi_shadow, 1);
 	prepare_next_shadow(prev, next);
 
@@ -201,7 +203,7 @@ static void update_shadow(struct module *mod, unsigned long min_addr,
 	set_memory_ro((unsigned long)next, SHADOW_PAGES);
 	rcu_assign_pointer(cfi_shadow, next);
 
-	spin_unlock(&shadow_update_lock);
+	mutex_unlock(&shadow_update_lock);
 	synchronize_rcu();
 
 	if (prev) {
@@ -245,33 +247,36 @@ static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
 
 static inline cfi_check_fn find_module_cfi_check(void *ptr)
 {
+	cfi_check_fn f = CFI_CHECK_FN;
 	struct module *mod;
 
 	preempt_disable();
 	mod = __module_address((unsigned long)ptr);
-	preempt_enable();
 
 	if (mod)
-		return mod->cfi_check;
+		f = mod->cfi_check;
 
-	return CFI_CHECK_FN;
+	preempt_enable();
+	return f;
 }
 
 static inline cfi_check_fn find_cfi_check(void *ptr)
 {
-#ifdef CONFIG_CFI_CLANG_SHADOW
+	bool rcu;
 	cfi_check_fn f;
 
-	if (!rcu_access_pointer(cfi_shadow))
-		return CFI_CHECK_FN; /* No loaded modules */
+	rcu = rcu_is_watching();
+	if (!rcu)
+		rcu_nmi_enter();
 
+#ifdef CONFIG_CFI_CLANG_SHADOW
 	/* Look up the __cfi_check function to use */
-	rcu_read_lock();
-	f = ptr_to_check_fn(rcu_dereference(cfi_shadow), (unsigned long)ptr);
-	rcu_read_unlock();
+	rcu_read_lock_sched();
+	f = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), (unsigned long)ptr);
+	rcu_read_unlock_sched();
 
 	if (f)
-		return f;
+		goto out;
 
 	/*
 	 * Fall back to find_module_cfi_check, which works also for a larger
@@ -279,7 +284,13 @@ static inline cfi_check_fn find_cfi_check(void *ptr)
 	 */
 #endif /* CONFIG_CFI_CLANG_SHADOW */
 
-	return find_module_cfi_check(ptr);
+	f = find_module_cfi_check(ptr);
+
+out:
+	if (!rcu)
+		rcu_nmi_exit();
+
+	return f;
 }
 
 void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
```
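The heart of the module support is the shadow table: one `u16` per page of the module area, holding the index of the page that contains the owning module's `__cfi_check`, so the slowpath can resolve a checker without walking module lists. A userspace sketch of the index math follows; the page size and table size are assumptions, and the names only mirror `kernel/cfi.c`:

```c
/* Illustrative shadow-lookup math, assuming 4 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define SHADOW_INVALID 0xFFFFu
#define SHADOW_SIZE    8192           /* entries; the kernel derives this */

struct cfi_shadow {
	unsigned long min_page;        /* first page covered by the table */
	uint16_t shadow[SHADOW_SIZE];  /* page index -> check-fn page index */
};

/* Map an address to its shadow slot, or -1 if outside the window. */
static int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
	unsigned long page = ptr >> PAGE_SHIFT;

	if (page < s->min_page || page - s->min_page >= SHADOW_SIZE)
		return -1;
	return (int)(page - s->min_page);
}

/* Recover the page that holds the owning module's __cfi_check. */
static unsigned long shadow_to_check_page(const struct cfi_shadow *s, int idx)
{
	if (idx < 0 || idx >= SHADOW_SIZE || s->shadow[idx] == SHADOW_INVALID)
		return 0;
	return (s->min_page + s->shadow[idx]) << PAGE_SHIFT;
}

int main(void)
{
	static struct cfi_shadow s = { .min_page = 0x40000 };
	int i;

	for (i = 0; i < SHADOW_SIZE; i++)
		s.shadow[i] = SHADOW_INVALID;

	/* Pretend a module's pages 0x40010..0x40013 resolve to a
	 * __cfi_check located in page 0x40010 (stored as an index). */
	for (i = 0x10; i <= 0x13; i++)
		s.shadow[i] = 0x10;

	printf("%#lx\n", shadow_to_check_page(&s,
		ptr_to_shadow(&s, 0x40012345UL))); /* prints 0x40010000 */
	return 0;
}
```

Storing a 16-bit page index rather than a pointer is what lets the whole table fit in a few pages and leaves `0xFFFF` free as the `SHADOW_INVALID` sentinel; raising `SHADOW_ORDER` from 1 to 2 simply doubles the address range the shadow can describe.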