Loading arch/arm64/Kconfig +1 −3 Original line number Diff line number Diff line Loading @@ -732,9 +732,7 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu. config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y config ARCH_ENABLE_MEMORY_HOTREMOVE depends on !NUMA def_bool y # The GPIO number here must be sorted by descending number. In case of Loading arch/arm64/include/asm/mmu.h +3 −0 Original line number Diff line number Diff line Loading @@ -94,6 +94,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, pgprot_t prot, bool page_mappings_only); extern void *fixmap_remap_fdt(phys_addr_t dt_phys); extern void mark_linear_text_alias_ro(void); #ifdef CONFIG_MEMORY_HOTPLUG extern void hotplug_paging(phys_addr_t start, phys_addr_t size); #endif #endif /* !__ASSEMBLY__ */ #endif arch/arm64/mm/init.c +60 −17 Original line number Diff line number Diff line Loading @@ -698,35 +698,78 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) pg_data_t *pgdat; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long end_pfn = start_pfn + nr_pages; unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); unsigned long pfn; int ret; if (end_pfn > max_sparsemem_pfn) { pr_err("end_pfn too big"); return -1; } hotplug_paging(start, size); /* * Mark the first page in the range as unusable. This is needed * because __add_section (within __add_pages) wants pfn_valid * of it to be false, and in arm64 pfn_valid is implemented by * just checking the nomap flag for existing blocks. * * A small trick here is that __add_section() requires only * phys_start_pfn (that is the first pfn of a section) to be * invalid. 
Regardless of whether it was assumed (by the function * author) that all pfns within a section are either all valid * or all invalid, it allows to avoid looping twice (once here, * second when memblock_clear_nomap() is called) through all * pfns of the section and modify only one pfn. Thanks to that, * further, in __add_zone() only this very first pfn is skipped * and corresponding page is not flagged reserved. Therefore it * is enough to correct this setup only for it. * * When arch_add_memory() returns the walk_memory_range() function * is called and passed with online_memory_block() callback, * which execution finally reaches the memory_block_action() * function, where also only the first pfn of a memory block is * checked to be reserved. Above, it was first pfn of a section, * here it is a block but * (drivers/base/memory.c): * sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; * (include/linux/memory.h): * #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) * so we can consider block and section equivalently */ memblock_mark_nomap(start, 1<<PAGE_SHIFT); pgdat = NODE_DATA(nid); ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); if (ret) pr_warn("%s: Problem encountered in __add_pages() ret=%d\n", __func__, ret); return ret; } /* * Make the pages usable after they have been added. * This will make pfn_valid return true */ memblock_clear_nomap(start, 1<<PAGE_SHIFT); #ifdef CONFIG_MEMORY_HOTREMOVE int arch_remove_memory(u64 start, u64 size) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; struct zone *zone; int ret; /* * This is a hack to avoid having to mix arch specific code * into arch independent code. SetPageReserved is supposed * to be called by __add_zone (within __add_section, within * __add_pages). However, when it is called there, it assumes that * pfn_valid returns true. 
For the way pfn_valid is implemented * in arm64 (a check on the nomap flag), the only way to make * this evaluate true inside __add_zone is to clear the nomap * flags of blocks in architecture independent code. * * To avoid this, we set the Reserved flag here after we cleared * the nomap flag in the line above. */ SetPageReserved(pfn_to_page(start_pfn)); zone = page_zone(pfn_to_page(start_pfn)); ret = __remove_pages(zone, start_pfn, nr_pages); if (ret) pr_warn("%s: Problem encountered in __remove_pages() ret=%d\n", pr_warn("%s: Problem encountered in __add_pages() ret=%d\n", __func__, ret); return ret; } #endif #endif arch/arm64/mm/mmu.c +31 −0 Original line number Diff line number Diff line #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* * Based on arch/arm/mm/mmu.c * Loading Loading @@ -697,6 +698,36 @@ void __init paging_init(void) - PAGE_SIZE); } #ifdef CONFIG_MEMORY_HOTPLUG /* * hotplug_paging() is used by memory hotplug to build new page tables * for hot added memory. */ void hotplug_paging(phys_addr_t start, phys_addr_t size) { struct page *pg; phys_addr_t pgd_phys = pgd_pgtable_alloc(); pgd_t *pgd = pgd_set_fixmap(pgd_phys); memcpy(pgd, swapper_pg_dir, PAGE_SIZE); __create_pgd_mapping(pgd, start, __phys_to_virt(start), size, PAGE_KERNEL, pgd_pgtable_alloc, !debug_pagealloc_enabled()); cpu_replace_ttbr1(__va(pgd_phys)); memcpy(swapper_pg_dir, pgd, PAGE_SIZE); cpu_replace_ttbr1(swapper_pg_dir); pgd_clear_fixmap(); pg = phys_to_page(pgd_phys); pgtable_page_dtor(pg); __free_pages(pg, 0); } #endif /* * Check whether a kernel address is valid (derived from arch/x86/). */ Loading Loading
arch/arm64/Kconfig +1 −3 Original line number Diff line number Diff line Loading @@ -732,9 +732,7 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu. config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y config ARCH_ENABLE_MEMORY_HOTREMOVE depends on !NUMA def_bool y # The GPIO number here must be sorted by descending number. In case of Loading
arch/arm64/include/asm/mmu.h +3 −0 Original line number Diff line number Diff line Loading @@ -94,6 +94,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, pgprot_t prot, bool page_mappings_only); extern void *fixmap_remap_fdt(phys_addr_t dt_phys); extern void mark_linear_text_alias_ro(void); #ifdef CONFIG_MEMORY_HOTPLUG extern void hotplug_paging(phys_addr_t start, phys_addr_t size); #endif #endif /* !__ASSEMBLY__ */ #endif
arch/arm64/mm/init.c +60 −17 Original line number Diff line number Diff line Loading @@ -698,35 +698,78 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) pg_data_t *pgdat; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long end_pfn = start_pfn + nr_pages; unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); unsigned long pfn; int ret; if (end_pfn > max_sparsemem_pfn) { pr_err("end_pfn too big"); return -1; } hotplug_paging(start, size); /* * Mark the first page in the range as unusable. This is needed * because __add_section (within __add_pages) wants pfn_valid * of it to be false, and in arm64 pfn_valid is implemented by * just checking the nomap flag for existing blocks. * * A small trick here is that __add_section() requires only * phys_start_pfn (that is the first pfn of a section) to be * invalid. Regardless of whether it was assumed (by the function * author) that all pfns within a section are either all valid * or all invalid, it allows to avoid looping twice (once here, * second when memblock_clear_nomap() is called) through all * pfns of the section and modify only one pfn. Thanks to that, * further, in __add_zone() only this very first pfn is skipped * and corresponding page is not flagged reserved. Therefore it * is enough to correct this setup only for it. * * When arch_add_memory() returns the walk_memory_range() function * is called and passed with online_memory_block() callback, * which execution finally reaches the memory_block_action() * function, where also only the first pfn of a memory block is * checked to be reserved. 
Above, it was first pfn of a section, * here it is a block but * (drivers/base/memory.c): * sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; * (include/linux/memory.h): * #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) * so we can consider block and section equivalently */ memblock_mark_nomap(start, 1<<PAGE_SHIFT); pgdat = NODE_DATA(nid); ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); if (ret) pr_warn("%s: Problem encountered in __add_pages() ret=%d\n", __func__, ret); return ret; } /* * Make the pages usable after they have been added. * This will make pfn_valid return true */ memblock_clear_nomap(start, 1<<PAGE_SHIFT); #ifdef CONFIG_MEMORY_HOTREMOVE int arch_remove_memory(u64 start, u64 size) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; struct zone *zone; int ret; /* * This is a hack to avoid having to mix arch specific code * into arch independent code. SetPageReserved is supposed * to be called by __add_zone (within __add_section, within * __add_pages). However, when it is called there, it assumes that * pfn_valid returns true. For the way pfn_valid is implemented * in arm64 (a check on the nomap flag), the only way to make * this evaluate true inside __add_zone is to clear the nomap * flags of blocks in architecture independent code. * * To avoid this, we set the Reserved flag here after we cleared * the nomap flag in the line above. */ SetPageReserved(pfn_to_page(start_pfn)); zone = page_zone(pfn_to_page(start_pfn)); ret = __remove_pages(zone, start_pfn, nr_pages); if (ret) pr_warn("%s: Problem encountered in __remove_pages() ret=%d\n", pr_warn("%s: Problem encountered in __add_pages() ret=%d\n", __func__, ret); return ret; } #endif #endif
arch/arm64/mm/mmu.c +31 −0 Original line number Diff line number Diff line #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* * Based on arch/arm/mm/mmu.c * Loading Loading @@ -697,6 +698,36 @@ void __init paging_init(void) - PAGE_SIZE); } #ifdef CONFIG_MEMORY_HOTPLUG /* * hotplug_paging() is used by memory hotplug to build new page tables * for hot added memory. */ void hotplug_paging(phys_addr_t start, phys_addr_t size) { struct page *pg; phys_addr_t pgd_phys = pgd_pgtable_alloc(); pgd_t *pgd = pgd_set_fixmap(pgd_phys); memcpy(pgd, swapper_pg_dir, PAGE_SIZE); __create_pgd_mapping(pgd, start, __phys_to_virt(start), size, PAGE_KERNEL, pgd_pgtable_alloc, !debug_pagealloc_enabled()); cpu_replace_ttbr1(__va(pgd_phys)); memcpy(swapper_pg_dir, pgd, PAGE_SIZE); cpu_replace_ttbr1(swapper_pg_dir); pgd_clear_fixmap(); pg = phys_to_page(pgd_phys); pgtable_page_dtor(pg); __free_pages(pg, 0); } #endif /* * Check whether a kernel address is valid (derived from arch/x86/). */ Loading