Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dae8c235 authored by Kefeng Wang, committed by Will Deacon
Browse files

arm64: mm: drop fixup_init() and mm.h



There is only fixup_init() in mm.h, and it is only called
in free_initmem(), so move the code from fixup_init() into
free_initmem(), then drop fixup_init() and mm.h.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 282b8796
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -25,8 +25,6 @@
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
+7 −3
Original line number Diff line number Diff line
@@ -35,6 +35,7 @@
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -48,8 +49,6 @@
#include <asm/tlb.h>
#include <asm/alternative.h>

#include "mm.h"

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
@@ -486,7 +485,12 @@ void free_initmem(void)
{
	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
			   0, "unused kernel");
	fixup_init();
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

arch/arm64/mm/mm.h

deleted 100644 → 0
+0 −2
Original line number Diff line number Diff line

void fixup_init(void);
+0 −12
Original line number Diff line number Diff line
@@ -43,8 +43,6 @@
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
@@ -400,16 +398,6 @@ void mark_rodata_ro(void)
			    section_size, PAGE_KERNEL_RO);
}

/*
 * Tear down the mapping of the __init region after early boot.
 * Per the commit message, this is only called from free_initmem();
 * this commit inlines the body there and deletes this function.
 */
void fixup_init(void)
{
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma)
{
+0 −2
Original line number Diff line number Diff line
@@ -26,8 +26,6 @@
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

static struct kmem_cache *pgd_cache;

pgd_t *pgd_alloc(struct mm_struct *mm)