
Commit 42f47e27 authored by Tang Chen, committed by Linus Torvalds

page_alloc: make movablemem_map have higher priority



If kernelcore or movablecore is specified at the same time as
movablemem_map, movablemem_map has the higher priority and is
satisfied first.  This patch makes find_zone_movable_pfns_for_nodes()
calculate zone_movable_pfn[] under the per-node limits stored in
zone_movable_limit[].
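
To make the priority rule concrete, here is a minimal user-space sketch, not
kernel code: it models only the clamping that zone_movable_limit[] (filled
from movablemem_map) applies to the kernelcore calculation. The node layouts,
the "half the range" kernelcore stand-in, and every value below are
hypothetical.

/*
 * Sketch only: zone_movable_limit[] caps the range the kernelcore
 * pass may claim on each node.  All values are made up.
 */
#include <stdio.h>

#define NR_NODES 2

/* Hypothetical per-node physical ranges, in pfns. */
static unsigned long node_start[NR_NODES] = { 0x00000, 0x80000 };
static unsigned long node_end[NR_NODES]   = { 0x80000, 0x100000 };

/* Limits derived from movablemem_map; 0 means "no limit on this node". */
static unsigned long zone_movable_limit[NR_NODES] = { 0x40000, 0 };

static unsigned long zone_movable_pfn[NR_NODES];

int main(void)
{
	for (int nid = 0; nid < NR_NODES; nid++) {
		unsigned long start = node_start[nid];
		unsigned long end = node_end[nid];

		/* movablemem_map wins: clamp the kernelcore search range. */
		if (zone_movable_limit[nid] && end > zone_movable_limit[nid])
			end = zone_movable_limit[nid];

		if (start >= end)
			/* No room left for kernelcore on this node. */
			zone_movable_pfn[nid] = zone_movable_limit[nid];
		else
			/* Stand-in: kernelcore consumes half the usable range. */
			zone_movable_pfn[nid] = start + (end - start) / 2;

		printf("node %d: ZONE_MOVABLE begins at pfn 0x%lx\n",
		       nid, zone_movable_pfn[nid]);
	}
	return 0;
}

Compiled with a plain cc, node 0's kernelcore search is capped at its
hypothetical limit of pfn 0x40000, so its ZONE_MOVABLE can begin no later
than that, while node 1 has no limit and is governed by kernelcore alone.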

Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Tested-by: Lin Feng <linfeng@cn.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6981ec31
mm/page_alloc.c +25 −3
@@ -4905,9 +4905,17 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		required_kernelcore = max(required_kernelcore, corepages);
 	}
 
-	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
-	if (!required_kernelcore)
+	/*
+	 * If neither kernelcore/movablecore nor movablemem_map is specified,
+	 * there is no ZONE_MOVABLE. But if movablemem_map is specified, the
+	 * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
+	 */
+	if (!required_kernelcore) {
+		if (movablemem_map.nr_map)
+			memcpy(zone_movable_pfn, zone_movable_limit,
+				sizeof(zone_movable_pfn));
 		goto out;
+	}
 
 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
@@ -4937,10 +4945,24 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 			unsigned long size_pages;
 
+			/*
+			 * Find more memory for kernelcore in
+			 * [zone_movable_pfn[nid], zone_movable_limit[nid]).
+			 */
 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
 			if (start_pfn >= end_pfn)
 				continue;
 
+			if (zone_movable_limit[nid]) {
+				end_pfn = min(end_pfn, zone_movable_limit[nid]);
+				/* No range left for kernelcore in this node */
+				if (start_pfn >= end_pfn) {
+					zone_movable_pfn[nid] =
+							zone_movable_limit[nid];
+					break;
+				}
+			}
+
 			/* Account for what is only usable for kernelcore */
 			if (start_pfn < usable_startpfn) {
 				unsigned long kernel_pages;
@@ -5000,12 +5022,12 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	if (usable_nodes && required_kernelcore > usable_nodes)
 		goto restart;
 
+out:
 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
 	for (nid = 0; nid < MAX_NUMNODES; nid++)
 		zone_movable_pfn[nid] =
 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 
-out:
 	/* restore the node_state */
 	node_states[N_MEMORY] = saved_node_state;
 }
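
One detail worth noting: the "out:" label moves above the alignment loop. On
the new early-exit path, where movablemem_map alone fills zone_movable_pfn[]
from zone_movable_limit[], those start pfns must still be rounded up to a
MAX_ORDER_NR_PAGES boundary, so the alignment now has to run on the goto path
as well. As a stand-alone illustration of that rounding, assuming 1024 pages
for MAX_ORDER_NR_PAGES (the usual value with MAX_ORDER = 11 and 4 KB pages;
an assumption for this example, not part of the patch):

/* Stand-alone model of the roundup() used above. */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* assumed, not from the patch */

static unsigned long roundup(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	unsigned long pfn = 0x40001;	/* just past a 4 MB boundary */

	/* Prints: 0x40001 -> 0x40400 */
	printf("0x%lx -> 0x%lx\n", pfn, roundup(pfn, MAX_ORDER_NR_PAGES));
	return 0;
}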