Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 69ec31a0 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm64: Add fallback option during memory hot add"

parents 1acaa06d d4dd236a
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -189,6 +189,9 @@ extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

/* physical memory limit imposed by the bootloader */
extern phys_addr_t bootloader_memory_limit;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
+7 −0
Original line number Diff line number Diff line
@@ -257,6 +257,7 @@ int pfn_valid(unsigned long pfn)
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;
phys_addr_t bootloader_memory_limit;

/*
 * Limit the memory size that was specified via FDT.
@@ -349,6 +350,12 @@ void __init arm64_memblock_init(void)
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Save the bootloader-imposed memory limit before we overwrite
	 * memblock.
	 */
	bootloader_memory_limit = memblock_end_of_DRAM();

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
+12 −3
Original line number Diff line number Diff line
@@ -1116,6 +1116,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	int ret = 0;

	do {
		next = pmd_addr_end(addr, end);
@@ -1133,15 +1134,23 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;
			if (!p) {
#ifdef CONFIG_MEMORY_HOTPLUG
				vmemmap_free(start, end, altmap);
#endif
				ret = -ENOMEM;
				break;
			}

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
	if (ret)
		return vmemmap_populate_basepages(start, end, node);
	else
		return ret;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,