Makefile  +1 −1

 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 147
+SUBLEVEL = 148
 EXTRAVERSION =
 NAME = Petit Gorille
arch/arm/mm/fault.c  +2 −2

@@ -215,7 +215,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
 	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
 
-	if (fsr & FSR_WRITE)
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		mask = VM_WRITE;
 	if (fsr & FSR_LNX_PF)
 		mask = VM_EXEC;
@@ -285,7 +285,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
-	if (fsr & FSR_WRITE)
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		flags |= FAULT_FLAG_WRITE;
 
 	/*
arch/arm/mm/fault.h  +1 −0

@@ -6,6 +6,7 @@
  * Fault status register encodings. We steal bit 31 for our own purposes.
  */
 #define FSR_LNX_PF		(1 << 31)
+#define FSR_CM			(1 << 13)
 #define FSR_WRITE		(1 << 11)
 #define FSR_FS4			(1 << 10)
 #define FSR_FS3_0		(15)
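Note: on ARMv7 the DFSR's CM flag (bit 13) is set when a data abort was raised by a cache maintenance operation, and for such aborts the write bit (FSR_WRITE, bit 11) is reported even though nothing is actually written. The two fault.c hunks above therefore stop treating cache-maintenance aborts as write faults, so neither the VMA permission mask nor the fault flags demand write permission for them. A minimal sketch of the combined predicate, relying on the FSR_WRITE/FSR_CM definitions above and using a hypothetical helper name that is not part of the patch:

/* Hypothetical helper: a fault counts as a write only if the write bit is
 * set and the abort was not raised by a cache maintenance operation. */
static inline bool is_write_fault(unsigned int fsr)
{
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}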
arch/arm/mm/mmu.c  +16 −0

@@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
 	 */
 	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
 
+	/*
+	 * The first usable region must be PMD aligned. Mark its start
+	 * as MEMBLOCK_NOMAP if it isn't
+	 */
+	for_each_memblock(memory, reg) {
+		if (!memblock_is_nomap(reg)) {
+			if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+				phys_addr_t len;
+
+				len = round_up(reg->base, PMD_SIZE) - reg->base;
+				memblock_mark_nomap(reg->base, len);
+			}
+			break;
+		}
+	}
+
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
 		phys_addr_t block_end = reg->base + reg->size;
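Note: as the added comment says, the first usable memory region must start on a PMD boundary, so the new loop marks any unaligned head of the first non-NOMAP memblock as MEMBLOCK_NOMAP before the existing bounds loop runs. A standalone sketch of the arithmetic, assuming a 2 MiB PMD_SIZE (typical for 32-bit ARM) and a hypothetical first bank at 0x80100000; neither value is taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's round_up() for power-of-two alignments. */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t pmd_size = 2 * 1024 * 1024;	/* assumed: 2 MiB PMD_SIZE */
	uint64_t base = 0x80100000ULL;		/* hypothetical first bank */
	uint64_t len = ROUND_UP(base, pmd_size) - base;

	/* The patch would mark [base, base + len) as MEMBLOCK_NOMAP, so
	 * the mapped region starts at the aligned address base + len. */
	printf("nomap head: base=%#llx len=%#llx\n",
	       (unsigned long long)base, (unsigned long long)len);
	return 0;
}

With these assumed values len prints as 0x100000, i.e. the first 1 MiB of the bank is excluded and the mapping begins at 0x80200000.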
arch/arm64/include/asm/cmpxchg.h  +3 −3

@@ -73,7 +73,7 @@ __XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
 #undef __XCHG_CASE
 
 #define __XCHG_GEN(sfx)						\
-static inline unsigned long __xchg##sfx(unsigned long x,	\
+static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
 					volatile void *ptr,	\
 					int size)		\
 {								\
@@ -115,7 +115,7 @@ __XCHG_GEN(_mb)
 #define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
 
 #define __CMPXCHG_GEN(sfx)					\
-static inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
+static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
 					   unsigned long old,	\
 					   unsigned long new,	\
 					   int size)		\
@@ -248,7 +248,7 @@ __CMPWAIT_CASE( ,  , 8);
 #undef __CMPWAIT_CASE
 
 #define __CMPWAIT_GEN(sfx)					\
-static inline void __cmpwait##sfx(volatile void *ptr,		\
+static __always_inline void __cmpwait##sfx(volatile void *ptr,	\
 					 unsigned long val,	\
 					 int size)		\
 {								\
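Note: these macros expand to helpers that dispatch on a compile-time `size` argument; marking them __always_inline forces the call to be inlined so `size` folds to a constant and the unused cases are discarded, presumably to keep the header building when the compiler would otherwise decline to inline plain `inline` functions. A minimal userspace sketch of that pattern, with illustrative names (sketch_xchg and __bad_xchg are not from this header, which uses its own case helpers):

#include <stdint.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

/* Deliberately undefined: linking fails if a call to it survives, which
 * only happens when `size` is not folded to a constant at compile time. */
extern unsigned long __bad_xchg(void);

static __always_inline unsigned long
sketch_xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return __atomic_exchange_n((volatile uint32_t *)ptr,
					   (uint32_t)x, __ATOMIC_SEQ_CST);
	case 8:
		return __atomic_exchange_n((volatile uint64_t *)ptr,
					   (uint64_t)x, __ATOMIC_SEQ_CST);
	default:
		return __bad_xchg();
	}
}

With forced inlining, a call such as sketch_xchg(v, &word, sizeof(word)) folds the switch away and the __bad_xchg() reference never reaches the linker; if the compiler instead emitted an out-of-line copy with a non-constant `size`, the dead branch and its undefined symbol would survive.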