Loading arch/arm64/include/asm/memory.h +3 −0 Original line number Diff line number Diff line Loading @@ -189,6 +189,9 @@ extern u64 kimage_vaddr; /* the offset between the kernel virtual and physical mappings */ extern u64 kimage_voffset; /* physical memory limit imposed by the bootloader */ extern phys_addr_t bootloader_memory_limit; static inline unsigned long kaslr_offset(void) { return kimage_vaddr - KIMAGE_VADDR; Loading arch/arm64/mm/init.c +7 −0 Original line number Diff line number Diff line Loading @@ -257,6 +257,7 @@ int pfn_valid(unsigned long pfn) EXPORT_SYMBOL(pfn_valid); static phys_addr_t memory_limit = PHYS_ADDR_MAX; phys_addr_t bootloader_memory_limit; /* * Limit the memory size that was specified via FDT. Loading Loading @@ -349,6 +350,12 @@ void __init arm64_memblock_init(void) memblock_remove(0, memstart_addr); } /* * Save bootloader imposed memory limit before we overwrite * memblock. */ bootloader_memory_limit = memblock_end_of_DRAM(); /* * Apply the memory limit if it was set. 
Since the kernel may be loaded * high up in memory, add back the kernel region that must be accessible Loading arch/arm64/mm/mmu.c +12 −3 Original line number Diff line number Diff line Loading @@ -1116,6 +1116,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; int ret = 0; do { next = pmd_addr_end(addr, end); Loading @@ -1133,15 +1134,23 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, void *p = NULL; p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (!p) return -ENOMEM; if (!p) { #ifdef CONFIG_MEMORY_HOTPLUG vmemmap_free(start, end, altmap); #endif ret = -ENOMEM; break; } pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); } else vmemmap_verify((pte_t *)pmdp, node, addr, next); } while (addr = next, addr != end); return 0; if (ret) return vmemmap_populate_basepages(start, end, node); else return ret; } #endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ void vmemmap_free(unsigned long start, unsigned long end, Loading Loading
arch/arm64/include/asm/memory.h +3 −0 Original line number Diff line number Diff line Loading @@ -189,6 +189,9 @@ extern u64 kimage_vaddr; /* the offset between the kernel virtual and physical mappings */ extern u64 kimage_voffset; /* physical memory limit imposed by the bootloader */ extern phys_addr_t bootloader_memory_limit; static inline unsigned long kaslr_offset(void) { return kimage_vaddr - KIMAGE_VADDR; Loading
arch/arm64/mm/init.c +7 −0 Original line number Diff line number Diff line Loading @@ -257,6 +257,7 @@ int pfn_valid(unsigned long pfn) EXPORT_SYMBOL(pfn_valid); static phys_addr_t memory_limit = PHYS_ADDR_MAX; phys_addr_t bootloader_memory_limit; /* * Limit the memory size that was specified via FDT. Loading Loading @@ -349,6 +350,12 @@ void __init arm64_memblock_init(void) memblock_remove(0, memstart_addr); } /* * Save bootloader imposed memory limit before we overwrite * memblock. */ bootloader_memory_limit = memblock_end_of_DRAM(); /* * Apply the memory limit if it was set. Since the kernel may be loaded * high up in memory, add back the kernel region that must be accessible Loading
arch/arm64/mm/mmu.c +12 −3 Original line number Diff line number Diff line Loading @@ -1116,6 +1116,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; int ret = 0; do { next = pmd_addr_end(addr, end); Loading @@ -1133,15 +1134,23 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, void *p = NULL; p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (!p) return -ENOMEM; if (!p) { #ifdef CONFIG_MEMORY_HOTPLUG vmemmap_free(start, end, altmap); #endif ret = -ENOMEM; break; } pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); } else vmemmap_verify((pte_t *)pmdp, node, addr, next); } while (addr = next, addr != end); return 0; if (ret) return vmemmap_populate_basepages(start, end, node); else return ret; } #endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ void vmemmap_free(unsigned long start, unsigned long end, Loading