arch/arm/mm/dma-mapping.c (+15 −2)

```diff
@@ -437,6 +437,15 @@ void __init dma_contiguous_remap(void)
 		struct map_desc map;
 		unsigned long addr;
 
+		/*
+		 * Make start and end PMD_SIZE aligned, observing memory
+		 * boundaries
+		 */
+		if (memblock_is_memory(start & PMD_MASK))
+			start = start & PMD_MASK;
+		if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+			end = ALIGN(end, PMD_SIZE);
+
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
@@ -457,8 +466,12 @@ void __init dma_contiguous_remap(void)
 		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-		     addr += PMD_SIZE)
-			pmd_clear(pmd_off_k(addr));
+		     addr += PMD_SIZE) {
+			pmd_t *pmd;
+			pmd = pmd_off_k(addr);
+			if (pmd_bad(*pmd))
+				pmd_clear(pmd);
+		}
 
 		flush_tlb_kernel_range(__phys_to_virt(start),
 				       __phys_to_virt(end));
```
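For context, the new alignment step rounds `start` down and `end` up to a PMD (section) boundary, but only when the rounded address still falls inside memblock-managed memory, so the remap never widens past the edge of RAM. Below is a minimal userspace sketch of that arithmetic only; `PMD_SHIFT` fixed at 21 (a 2 MiB section on a 4 KiB-page kernel), the `ALIGN()` macro copied with kernel semantics, and `memblock_is_memory()` stubbed with a made-up RAM range are all assumptions for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed: a 2 MiB PMD section, as on a 4 KiB-page kernel. */
#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Stub for the real memblock_is_memory(): pretend RAM is 2 GiB at 1 GiB. */
static bool memblock_is_memory(uint64_t pa)
{
	return pa >= 0x40000000ULL && pa < 0xC0000000ULL;
}

int main(void)
{
	uint64_t start = 0x40100000ULL;	/* not section-aligned */
	uint64_t end   = 0x404FF000ULL;

	/* Round down/up to PMD_SIZE, but only while staying inside memory. */
	if (memblock_is_memory(start & PMD_MASK))
		start &= PMD_MASK;
	if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
		end = ALIGN(end, PMD_SIZE);

	/* Prints: aligned range: 0x40000000 - 0x40600000 */
	printf("aligned range: 0x%llx - 0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
```

The `pmd_bad()` guard in the second hunk means the loop now clears only entries that are not valid page tables, instead of unconditionally wiping every PMD slot in the (now wider) range.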
arch/arm64/mm/mmu.c (+17 −2)

```diff
@@ -457,12 +457,27 @@ void __init remap_as_pages(unsigned long start, unsigned long size)
 	unsigned long addr;
 	unsigned long end = start + size;
 
+	/*
+	 * Make start and end PMD_SIZE aligned, observing memory
+	 * boundaries
+	 */
+	if (memblock_is_memory(start & PMD_MASK))
+		start = start & PMD_MASK;
+	if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+		end = ALIGN(end, PMD_SIZE);
+
+	size = end - start;
+
 	/*
 	 * Clear previous low-memory mapping
 	 */
 	for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-	     addr += PMD_SIZE)
-		pmd_clear(pmd_off_k(addr));
+	     addr += PMD_SIZE) {
+		pmd_t *pmd;
+		pmd = pmd_off_k(addr);
+		if (pmd_bad(*pmd) || pmd_sect(*pmd))
+			pmd_clear(pmd);
+	}
 
 	create_mapping(start, __phys_to_virt(start), size, PAGE_KERNEL, true);
 }
```
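The arm64 guard is stricter: `pmd_clear()` may only hit block (section) or bad entries, because clearing a table entry would drop a live pointer to a last-level page table. A standalone sketch of that guarded loop follows; the descriptor encoding (low bits 0b01 for a block/section, 0b11 for a table) follows the ARMv8 descriptor format, while the `pmd_t` typedef, the simplified `pmd_bad()`, and the toy PMD array are stand-ins invented for this example:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pmd_t;	/* stand-in for the kernel type */

/* Low two descriptor bits, per the ARMv8 translation-table format. */
#define PMD_TYPE_MASK	0x3ULL
#define PMD_TYPE_SECT	0x1ULL
#define PMD_TYPE_TABLE	0x3ULL

static bool pmd_sect(pmd_t pmd)
{
	return (pmd & PMD_TYPE_MASK) == PMD_TYPE_SECT;
}

/* Simplified: anything non-empty that is not a table counts as bad. */
static bool pmd_bad(pmd_t pmd)
{
	return pmd && (pmd & PMD_TYPE_MASK) != PMD_TYPE_TABLE;
}

int main(void)
{
	/* Toy PMD entries: table, 2 MiB block, none, 2 MiB block. */
	pmd_t pmds[4] = {
		0x81234003ULL,	/* table: points at a PTE page, must survive */
		0x80200001ULL,	/* block mapping: cleared */
		0x0ULL,		/* none: left alone */
		0x80400001ULL,	/* block mapping: cleared */
	};

	for (int i = 0; i < 4; i++) {
		/* Same condition as the patch; assignment models pmd_clear(). */
		if (pmd_bad(pmds[i]) || pmd_sect(pmds[i]))
			pmds[i] = 0;
		printf("pmd[%d] = %#llx\n", i, (unsigned long long)pmds[i]);
	}
	return 0;
}
```

Running it leaves only the table entry intact, which mirrors what the patched loop must preserve before `create_mapping()` rebuilds the range with `PAGE_KERNEL` page mappings.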