Loading Makefile +1 −1 Original line number Diff line number Diff line VERSION = 4 PATCHLEVEL = 9 SUBLEVEL = 31 SUBLEVEL = 32 EXTRAVERSION = NAME = Roaring Lionus Loading arch/arm/kernel/setup.c +8 −2 Original line number Diff line number Diff line Loading @@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup); extern void init_default_cache_policy(unsigned long); extern void paging_init(const struct machine_desc *desc); extern void early_paging_init(const struct machine_desc *); extern void sanity_check_meminfo(void); extern void adjust_lowmem_bounds(void); extern enum reboot_mode reboot_mode; extern void setup_dma_zone(const struct machine_desc *desc); Loading Loading @@ -1104,8 +1104,14 @@ void __init setup_arch(char **cmdline_p) setup_dma_zone(mdesc); xen_early_init(); efi_init(); sanity_check_meminfo(); /* * Make sure the calculation for lowmem/highmem is set appropriately * before reserving/allocating any memory */ adjust_lowmem_bounds(); arm_memblock_init(mdesc); /* Memory may have been removed so recalculate the bounds. 
*/ adjust_lowmem_bounds(); early_ioremap_reset(); Loading arch/arm/kvm/init.S +2 −3 Original line number Diff line number Diff line Loading @@ -95,7 +95,6 @@ __do_hyp_init: @ - Write permission implies XN: disabled @ - Instruction cache: enabled @ - Data/Unified cache: enabled @ - Memory alignment checks: enabled @ - MMU: enabled (this code must be run from an identity mapping) mrc p15, 4, r0, c1, c0, 0 @ HSCR ldr r2, =HSCTLR_MASK Loading @@ -103,8 +102,8 @@ __do_hyp_init: mrc p15, 0, r1, c1, c0, 0 @ SCTLR ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) and r1, r1, r2 ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) ARM( ldr r2, =(HSCTLR_M) ) THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) orr r1, r1, r2 orr r0, r0, r1 mcr p15, 4, r0, c1, c0, 0 @ HSCR Loading arch/arm/kvm/mmu.c +3 −0 Original line number Diff line number Diff line Loading @@ -872,6 +872,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pmd_t *pmd; pud = stage2_get_pud(kvm, cache, addr); if (!pud) return NULL; if (stage2_pud_none(*pud)) { if (!cache) return NULL; Loading arch/arm/mm/mmu.c +27 −42 Original line number Diff line number Diff line Loading @@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc); phys_addr_t arm_lowmem_limit __initdata = 0; void __init sanity_check_meminfo(void) void __init adjust_lowmem_bounds(void) { phys_addr_t memblock_limit = 0; int highmem = 0; u64 vmalloc_limit; struct memblock_region *reg; bool should_use_highmem = false; phys_addr_t lowmem_limit = 0; /* * Let's use our own (unoptimized) equivalent of __pa() that is Loading @@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void) for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; phys_addr_t size_limit = reg->size; if (reg->base >= vmalloc_limit) highmem = 1; else size_limit = vmalloc_limit - reg->base; if (!IS_ENABLED(CONFIG_HIGHMEM) || 
cache_is_vipt_aliasing()) { if (highmem) { pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", &block_start, &block_end); memblock_remove(reg->base, reg->size); should_use_highmem = true; continue; } if (reg->size > size_limit) { phys_addr_t overlap_size = reg->size - size_limit; pr_notice("Truncating RAM at %pa-%pa", &block_start, &block_end); block_end = vmalloc_limit; pr_cont(" to -%pa", &block_end); memblock_remove(vmalloc_limit, overlap_size); should_use_highmem = true; } } if (!highmem) { if (block_end > arm_lowmem_limit) { if (reg->size > size_limit) arm_lowmem_limit = vmalloc_limit; else arm_lowmem_limit = block_end; } if (reg->base < vmalloc_limit) { if (block_end > lowmem_limit) /* * Compare as u64 to ensure vmalloc_limit does * not get truncated. block_end should always * fit in phys_addr_t so there should be no * issue with assignment. */ lowmem_limit = min_t(u64, vmalloc_limit, block_end); /* * Find the first non-pmd-aligned page, and point Loading @@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void) if (!IS_ALIGNED(block_start, PMD_SIZE)) memblock_limit = block_start; else if (!IS_ALIGNED(block_end, PMD_SIZE)) memblock_limit = arm_lowmem_limit; memblock_limit = lowmem_limit; } } } if (should_use_highmem) pr_notice("Consider using a HIGHMEM enabled kernel.\n"); arm_lowmem_limit = lowmem_limit; high_memory = __va(arm_lowmem_limit - 1) + 1; Loading @@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void) if (!memblock_limit) memblock_limit = arm_lowmem_limit; if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { if (memblock_end_of_DRAM() > arm_lowmem_limit) { phys_addr_t end = memblock_end_of_DRAM(); pr_notice("Ignoring RAM at %pa-%pa\n", &memblock_limit, &end); pr_notice("Consider using a HIGHMEM enabled kernel.\n"); memblock_remove(memblock_limit, end - memblock_limit); } } memblock_set_current_limit(memblock_limit); } Loading Loading
Makefile +1 −1 Original line number Diff line number Diff line VERSION = 4 PATCHLEVEL = 9 SUBLEVEL = 31 SUBLEVEL = 32 EXTRAVERSION = NAME = Roaring Lionus Loading
arch/arm/kernel/setup.c +8 −2 Original line number Diff line number Diff line Loading @@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup); extern void init_default_cache_policy(unsigned long); extern void paging_init(const struct machine_desc *desc); extern void early_paging_init(const struct machine_desc *); extern void sanity_check_meminfo(void); extern void adjust_lowmem_bounds(void); extern enum reboot_mode reboot_mode; extern void setup_dma_zone(const struct machine_desc *desc); Loading Loading @@ -1104,8 +1104,14 @@ void __init setup_arch(char **cmdline_p) setup_dma_zone(mdesc); xen_early_init(); efi_init(); sanity_check_meminfo(); /* * Make sure the calculation for lowmem/highmem is set appropriately * before reserving/allocating any memory */ adjust_lowmem_bounds(); arm_memblock_init(mdesc); /* Memory may have been removed so recalculate the bounds. */ adjust_lowmem_bounds(); early_ioremap_reset(); Loading
arch/arm/kvm/init.S +2 −3 Original line number Diff line number Diff line Loading @@ -95,7 +95,6 @@ __do_hyp_init: @ - Write permission implies XN: disabled @ - Instruction cache: enabled @ - Data/Unified cache: enabled @ - Memory alignment checks: enabled @ - MMU: enabled (this code must be run from an identity mapping) mrc p15, 4, r0, c1, c0, 0 @ HSCR ldr r2, =HSCTLR_MASK Loading @@ -103,8 +102,8 @@ __do_hyp_init: mrc p15, 0, r1, c1, c0, 0 @ SCTLR ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) and r1, r1, r2 ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) ARM( ldr r2, =(HSCTLR_M) ) THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) orr r1, r1, r2 orr r0, r0, r1 mcr p15, 4, r0, c1, c0, 0 @ HSCR Loading
arch/arm/kvm/mmu.c +3 −0 Original line number Diff line number Diff line Loading @@ -872,6 +872,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pmd_t *pmd; pud = stage2_get_pud(kvm, cache, addr); if (!pud) return NULL; if (stage2_pud_none(*pud)) { if (!cache) return NULL; Loading
arch/arm/mm/mmu.c +27 −42 Original line number Diff line number Diff line Loading @@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc); phys_addr_t arm_lowmem_limit __initdata = 0; void __init sanity_check_meminfo(void) void __init adjust_lowmem_bounds(void) { phys_addr_t memblock_limit = 0; int highmem = 0; u64 vmalloc_limit; struct memblock_region *reg; bool should_use_highmem = false; phys_addr_t lowmem_limit = 0; /* * Let's use our own (unoptimized) equivalent of __pa() that is Loading @@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void) for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; phys_addr_t size_limit = reg->size; if (reg->base >= vmalloc_limit) highmem = 1; else size_limit = vmalloc_limit - reg->base; if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { if (highmem) { pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", &block_start, &block_end); memblock_remove(reg->base, reg->size); should_use_highmem = true; continue; } if (reg->size > size_limit) { phys_addr_t overlap_size = reg->size - size_limit; pr_notice("Truncating RAM at %pa-%pa", &block_start, &block_end); block_end = vmalloc_limit; pr_cont(" to -%pa", &block_end); memblock_remove(vmalloc_limit, overlap_size); should_use_highmem = true; } } if (!highmem) { if (block_end > arm_lowmem_limit) { if (reg->size > size_limit) arm_lowmem_limit = vmalloc_limit; else arm_lowmem_limit = block_end; } if (reg->base < vmalloc_limit) { if (block_end > lowmem_limit) /* * Compare as u64 to ensure vmalloc_limit does * not get truncated. block_end should always * fit in phys_addr_t so there should be no * issue with assignment. 
*/ lowmem_limit = min_t(u64, vmalloc_limit, block_end); /* * Find the first non-pmd-aligned page, and point Loading @@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void) if (!IS_ALIGNED(block_start, PMD_SIZE)) memblock_limit = block_start; else if (!IS_ALIGNED(block_end, PMD_SIZE)) memblock_limit = arm_lowmem_limit; memblock_limit = lowmem_limit; } } } if (should_use_highmem) pr_notice("Consider using a HIGHMEM enabled kernel.\n"); arm_lowmem_limit = lowmem_limit; high_memory = __va(arm_lowmem_limit - 1) + 1; Loading @@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void) if (!memblock_limit) memblock_limit = arm_lowmem_limit; if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { if (memblock_end_of_DRAM() > arm_lowmem_limit) { phys_addr_t end = memblock_end_of_DRAM(); pr_notice("Ignoring RAM at %pa-%pa\n", &memblock_limit, &end); pr_notice("Consider using a HIGHMEM enabled kernel.\n"); memblock_remove(memblock_limit, end - memblock_limit); } } memblock_set_current_limit(memblock_limit); } Loading