Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c7e9f686 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm64: Memory hotplug support for arm64 platform"

parents 18f7e708 ba895c1d
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -645,6 +645,10 @@ config HOTPLUG_CPU
	  Say Y here to experiment with turning CPUs off and on.  CPUs
	  can be controlled through /sys/devices/system/cpu.

# Advertise memory hotplug support to the generic mm code (mm/Kconfig).
# NOTE(review): hotplug is disabled together with NUMA here - presumably
# the node-aware paths are not handled by this port; confirm before
# relaxing the dependency.
config ARCH_ENABLE_MEMORY_HOTPLUG
	depends on !NUMA
	def_bool y

# The GPIO number here must be sorted by descending number. In case of
# a multiplatform kernel, we just want the highest value required by the
# selected platforms.
@@ -762,6 +766,10 @@ config ARM64_DMA_IOMMU_ALIGNMENT

endif

# Enables the manual memory probe interface; only meaningful when
# MEMORY_HOTPLUG is available (see Documentation/memory-hotplug.txt).
config ARCH_MEMORY_PROBE
	def_bool y
	depends on MEMORY_HOTPLUG

config SECCOMP
	bool "Enable seccomp to safely compute untrusted bytecode"
	---help---
+3 −0
Original line number Diff line number Diff line
@@ -87,6 +87,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);
#ifdef CONFIG_MEMORY_HOTPLUG
extern void hotplug_paging(phys_addr_t start, phys_addr_t size);
#endif

#endif	/* !__ASSEMBLY__ */
#endif
+82 −0
Original line number Diff line number Diff line
@@ -726,3 +726,85 @@ static int __init register_mem_limit_dumper(void)
	return 0;
}
__initcall(register_mem_limit_dumper);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * arch_add_memory - arm64 arch hook for the memory hotplug core.
 *
 * Makes the physical range [start, start + size) usable: builds linear-map
 * page tables for it via hotplug_paging() and then hands the range to the
 * generic __add_pages().
 *
 * @nid:           node the memory is added to
 * @start:         physical start address of the hot-added range
 * @size:          size of the range in bytes
 * @want_memblock: forwarded to __add_pages(); whether to create a
 *                 memory_block device for the range
 *
 * Returns 0 on success or a negative errno.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
	int ret;

	/* The range must be representable in the sparsemem model. */
	if (end_pfn > max_sparsemem_pfn) {
		pr_err("end_pfn too big\n");
		return -EINVAL;
	}

	/* Build kernel page tables covering the new range. */
	hotplug_paging(start, size);

	/*
	 * Mark the first page in the range as unusable. This is needed
	 * because __add_section (within __add_pages) wants pfn_valid
	 * of it to be false, and in arm64 pfn_valid is implemented by
	 * just checking at the nomap flag for existing blocks.
	 *
	 * A small trick here is that __add_section() requires only
	 * phys_start_pfn (that is the first pfn of a section) to be
	 * invalid. Regardless of whether it was assumed (by the function
	 * author) that all pfns within a section are either all valid
	 * or all invalid, it allows to avoid looping twice (once here,
	 * second when memblock_clear_nomap() is called) through all
	 * pfns of the section and modify only one pfn. Thanks to that,
	 * further, in __add_zone() only this very first pfn is skipped
	 * and corresponding page is not flagged reserved. Therefore it
	 * is enough to correct this setup only for it.
	 *
	 * When arch_add_memory() returns the walk_memory_range() function
	 * is called and passed with online_memory_block() callback,
	 * which execution finally reaches the memory_block_action()
	 * function, where also only the first pfn of a memory block is
	 * checked to be reserved. Above, it was first pfn of a section,
	 * here it is a block but
	 * (drivers/base/memory.c):
	 *     sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	 * (include/linux/memory.h):
	 *     #define MIN_MEMORY_BLOCK_SIZE     (1UL << SECTION_SIZE_BITS)
	 * so we can consider block and section equivalently
	 */
	memblock_mark_nomap(start, PAGE_SIZE);

	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);

	/*
	 * Make the pages usable after they have been added.
	 * This will make pfn_valid return true
	 */
	memblock_clear_nomap(start, PAGE_SIZE);

	/*
	 * This is a hack to avoid having to mix arch specific code
	 * into arch independent code. SetPageReserved is supposed
	 * to be called by __add_zone (within __add_section, within
	 * __add_pages). However, when it is called there, it assumes that
	 * pfn_valid returns true.  For the way pfn_valid is implemented
	 * in arm64 (a check on the nomap flag), the only way to make
	 * this evaluate true inside __add_zone is to clear the nomap
	 * flags of blocks in architecture independent code.
	 *
	 * To avoid this, we set the Reserved flag here after we cleared
	 * the nomap flag in the line above.
	 */
	SetPageReserved(pfn_to_page(start_pfn));

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() ret=%d\n",
			__func__, ret);

	return ret;
}
#endif
+34 −0
Original line number Diff line number Diff line
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/*
 * Based on arch/arm/mm/mmu.c
 *
@@ -196,6 +197,7 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pr_debug("Allocating PTE at %pK\n", __va(pte_phys));
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));
@@ -269,6 +271,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pr_debug("Allocating PMD at %pK\n", __va(pmd_phys));
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pud_bad(*pud));
@@ -313,6 +316,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		pr_debug("Allocating PUD at %pK\n", __va(pud_phys));
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));
@@ -688,6 +692,36 @@ void __init paging_init(void)
		      SWAPPER_DIR_SIZE - PAGE_SIZE);
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * hotplug_paging() is used by memory hotplug to build new page tables
 * for hot added memory.
 *
 * The new mapping is built in a scratch copy of swapper_pg_dir rather
 * than in the live tables: the copy is populated, briefly made active
 * via TTBR1 while its contents are written back into swapper_pg_dir,
 * and then freed.  NOTE(review): presumably this round-trip exists
 * because the active translation tables must not be modified in place;
 * confirm against cpu_replace_ttbr1()'s requirements.  The statement
 * order below is critical - do not reorder.
 */
void hotplug_paging(phys_addr_t start, phys_addr_t size)
{

	struct page *pg;
	/* Allocate one page for a temporary PGD and map it via fixmap. */
	phys_addr_t pgd_phys = pgd_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	/* Seed the scratch PGD with the current kernel tables. */
	memcpy(pgd, swapper_pg_dir, PAGE_SIZE);

	/* Add a linear-map entry for the hot-added physical range. */
	__create_pgd_mapping(pgd, start, __phys_to_virt(start), size,
		PAGE_KERNEL, pgd_pgtable_alloc, !debug_pagealloc_enabled());

	/*
	 * Run on the scratch tables while copying the updated entries
	 * back into swapper_pg_dir, then switch back to swapper.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	pgd_clear_fixmap();

	/* The scratch PGD page is no longer referenced; release it. */
	pg = phys_to_page(pgd_phys);
	pgtable_page_dtor(pg);
	__free_pages(pg, 0);
}

#endif

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */