arch/arm/mm/mmu.c (+1 −0)

@@ -1612,6 +1612,7 @@ static void __init map_lowmem(void)
 		vm->flags |= VM_ARM_MTYPE(type);
 		vm->caller = map_lowmem;
 		add_static_vm_early(svm++);
+		mark_vmalloc_reserved_area(vm->addr, vm->size);
 	}
 }
include/linux/vmalloc.h (+6 −0)

@@ -148,6 +148,12 @@ extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 extern __init int vm_area_check_early(struct vm_struct *vm);
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
+#else
+static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
+{ };
+#endif
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
mm/vmalloc.c (+21 −21)

@@ -283,33 +283,33 @@ static unsigned long cached_align;
 static unsigned long vmap_area_pcpu_hole;
 
 #ifdef CONFIG_ENABLE_VMALLOC_SAVING
-int is_vmalloc_addr(const void *x)
-{
-	struct vmap_area *va;
-	int ret = 0;
-
-	spin_lock(&vmap_area_lock);
-	list_for_each_entry(va, &vmap_area_list, list) {
-		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
-			continue;
-
-		if (!(va->flags & VM_VM_AREA))
-			continue;
-
-		if (va->vm == NULL)
-			continue;
-
-		if (va->vm->flags & VM_LOWMEM)
-			continue;
-
-		if ((unsigned long)x >= va->va_start &&
-				(unsigned long)x < va->va_end) {
-			ret = 1;
-			break;
-		}
-	}
-	spin_unlock(&vmap_area_lock);
-	return ret;
-}
+#define POSSIBLE_VMALLOC_START	PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE	((VMALLOC_END - PAGE_OFFSET) >> \
+					PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr)	((addr - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i)	(PAGE_OFFSET + i * PAGE_SIZE)
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+	unsigned long addr = (unsigned long)x;
+
+	bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+}
+
+int is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+		return 0;
+
+	if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+		return 0;
+
+	return 1;
+}
 #else
 int is_vmalloc_addr(const void *x)
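For illustration, a minimal userspace sketch of the same bitmap technique follows. The PAGE_OFFSET/VMALLOC_END values are made-up toy constants, and set_page_bit()/test_page_bit()/mark_reserved()/is_vmalloc_addr_toy() are hypothetical hand-rolled stand-ins for the kernel's DECLARE_BITMAP()/bitmap_set()/test_bit() and the functions in the patch; it is not kernel code, only a demonstration that a one-bit-per-page map answers the "is this a vmalloc address?" question in constant time.

/* toy_vmalloc_bitmap.c — illustrative sketch only, not the kernel implementation */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xc0000000UL	/* assumed toy value */
#define VMALLOC_END	0xff000000UL	/* assumed toy value */

#define VMALLOC_BITMAP_SIZE	((VMALLOC_END - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_TO_BIT(addr)	(((addr) - PAGE_OFFSET) >> PAGE_SHIFT)
#define BITS_PER_LONG_		(8 * sizeof(unsigned long))

/* one bit per page in [PAGE_OFFSET, VMALLOC_END); stand-in for DECLARE_BITMAP() */
static unsigned long possible_areas[(VMALLOC_BITMAP_SIZE + BITS_PER_LONG_ - 1) /
				    BITS_PER_LONG_];

static void set_page_bit(unsigned long nr)
{
	possible_areas[nr / BITS_PER_LONG_] |= 1UL << (nr % BITS_PER_LONG_);
}

static int test_page_bit(unsigned long nr)
{
	return (possible_areas[nr / BITS_PER_LONG_] >> (nr % BITS_PER_LONG_)) & 1;
}

/* counterpart of mark_vmalloc_reserved_area(): flag each reserved page */
static void mark_reserved(unsigned long addr, unsigned long size)
{
	unsigned long n;

	for (n = 0; n < (size >> PAGE_SHIFT); n++)
		set_page_bit(VMALLOC_TO_BIT(addr) + n);
}

/* counterpart of the new is_vmalloc_addr(): range check plus O(1) bit test */
static int is_vmalloc_addr_toy(unsigned long addr)
{
	if (addr < PAGE_OFFSET || addr >= VMALLOC_END)
		return 0;
	if (test_page_bit(VMALLOC_TO_BIT(addr)))
		return 0;	/* bit set: lowmem is mapped here, not vmalloc */
	return 1;
}

int main(void)
{
	/* pretend map_lowmem() placed a 16 MB static section at PAGE_OFFSET */
	mark_reserved(PAGE_OFFSET, 16UL << 20);

	printf("%d\n", is_vmalloc_addr_toy(PAGE_OFFSET));		  /* 0: reserved */
	printf("%d\n", is_vmalloc_addr_toy(PAGE_OFFSET + (32UL << 20))); /* 1: free */
	printf("%d\n", is_vmalloc_addr_toy(0x1000));			  /* 0: below range */
	return 0;
}

The trade-off visible in the diff: the old is_vmalloc_addr() took vmap_area_lock and walked the whole vmap_area_list on every call, while the new one costs one bit per page of the PAGE_OFFSET–VMALLOC_END window and does a range check plus a single test_bit(). Since the bits are only ever set at early boot from map_lowmem(), the lookup path can also drop the lock.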