arch/arm/mm/ioremap.c (+2 −1)

@@ -91,6 +91,7 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
arch/arm/mm/mmu.c (+38 −2)

@@ -1374,12 +1374,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
 	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;

@@ -1428,6 +1437,33 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+	}
 }
 
 #ifdef CONFIG_ARM_LPAE
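The second loop records every mapped lowmem bank as a static vm area: vm->addr is rounded down to a page boundary, and vm->size is padded by the offset that rounding discarded before being rounded up, so the tracked range still covers the whole bank. Below is a minimal stand-alone sketch of that rounding arithmetic; PAGE_SIZE and the sample addresses are assumptions for illustration, not values from the patch.

/* Stand-alone sketch of the vm->addr / vm->size rounding used above.
 * PAGE_SIZE and the sample region are assumptions for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* hypothetical virtual start and length of one lowmem bank */
	unsigned long vaddr  = 0xc0000234UL;
	unsigned long length = 0x10000UL;

	/* round the base down to a page boundary ... */
	unsigned long addr = vaddr & PAGE_MASK;
	/* ... and grow the size by the discarded offset, then round up,
	 * so [addr, addr + size) still covers [vaddr, vaddr + length)
	 */
	unsigned long size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));

	printf("addr = 0x%lx, size = 0x%lx\n", addr, size);
	return 0;
}

With the sample values this prints addr = 0xc0000000, size = 0x11000, i.e. the tracked area is a page-aligned superset of the original bank.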
include/linux/vmalloc.h (+3 −0)

@@ -18,6 +18,8 @@ struct vm_area_struct;	/* vma defining user mapping in mm_types.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*

@@ -158,6 +160,7 @@
 extern long vwrite(char *buf, char *addr, unsigned long count);
 
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
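VM_LOWMEM takes the next free generic flag bit (0x00000100), while bits [20..32] stay reserved for arch ioremap internals, which is where map_lowmem() above ORs in the memory type via VM_ARM_MTYPE(). The sketch below only illustrates that kind of flag composition; HYP_MTYPE_SHIFT, hyp_mtype() and the sample type value are hypothetical stand-ins, not the kernel's VM_ARM_MTYPE() definition.

/* Sketch of composing and testing a flag word as map_lowmem() does.
 * HYP_MTYPE_SHIFT and hyp_mtype() are illustrative stand-ins only.
 */
#include <stdio.h>

#define VM_LOWMEM		0x00000100	/* as added above */
#define HYP_MTYPE_SHIFT		20		/* assumed: arch bits start at 20 */
#define hyp_mtype(t)		((unsigned long)(t) << HYP_MTYPE_SHIFT)
#define hyp_mtype_of(flags)	(((flags) >> HYP_MTYPE_SHIFT) & 0xf)

int main(void)
{
	unsigned int type = 9;			/* hypothetical memory-type index */
	unsigned long flags;

	flags  = VM_LOWMEM;			/* generic flag in the low bits */
	flags |= hyp_mtype(type);		/* arch memory type in the high bits */

	if (flags & VM_LOWMEM)
		printf("lowmem tracking entry, mtype = %lu\n", hyp_mtype_of(flags));
	return 0;
}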
mm/vmalloc.c (+27 −0)

@@ -1123,6 +1123,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
 
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
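vm_area_check_early() walks the early vmlist and returns 1 as soon as an existing area intersects [vm->addr, vm->addr + vm->size): two half-open ranges overlap exactly when each one starts before the other ends. A small user-space sketch of the same interval test on a singly linked list follows; the struct and the sample areas are stand-ins, not the kernel's types.

/* Sketch of the overlap test used by vm_area_check_early().
 * The struct and the sample areas are illustrative stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct area {
	unsigned long addr;
	unsigned long size;
	struct area *next;
};

/* Return 1 if [addr, addr + size) intersects any area on the list. */
static int area_overlaps(const struct area *list, unsigned long addr,
			 unsigned long size)
{
	const struct area *tmp;

	for (tmp = list; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr >= addr) {
			/* existing area starts inside the new one? */
			if (tmp->addr < addr + size)
				return 1;
		} else {
			/* existing area ends after the new one starts? */
			if (tmp->addr + tmp->size > addr)
				return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct area b = { 0xc0100000UL, 0x100000UL, NULL };
	struct area a = { 0xc0000000UL, 0x100000UL, &b };

	printf("%d\n", area_overlaps(&a, 0xc0080000UL, 0x100000UL)); /* 1: straddles a and b */
	printf("%d\n", area_overlaps(&a, 0xc0200000UL, 0x100000UL)); /* 0: disjoint */
	return 0;
}

This is the property add_static_vm_early() relies on in ioremap.c above: a lowmem region that map_lowmem() has already registered is detected and not added to the list a second time.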