
Commit 4b0ef1fe authored by Lai Jiangshan, committed by Linus Torvalds

page_alloc: use N_MEMORY instead of N_HIGH_MEMORY and change the node_states initialization



N_HIGH_MEMORY stands for the nodes that have normal or high memory.
N_MEMORY stands for the nodes that have any memory.

The code here needs to handle the nodes which have memory, so we should
use N_MEMORY instead.

Since we introduced N_MEMORY, also update the initialization of node_states.
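
For context, the aliasing this series sets up in include/linux/nodemask.h is what makes the substitution safe; a simplified sketch of the header (not a verbatim quote) is shown below. When CONFIG_HIGHMEM or CONFIG_MOVABLE_NODE is off, the newer states collapse onto the older ones:

enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
#ifdef CONFIG_MOVABLE_NODE
	N_MEMORY,		/* The node has memory (regular, high, movable) */
#else
	N_MEMORY = N_HIGH_MEMORY,
#endif
	N_CPU,			/* The node has one or more cpus */
	NR_NODE_STATES
};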

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Lin Feng <linfeng@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 48fb2e24
arch/x86/mm/init_64.c: +3 −1
@@ -630,6 +630,8 @@ void __init paging_init(void)
 	 *	 numa support is not compiled in, and later node_set_state
 	 *	 will not set it back.
 	 */
-	node_clear_state(0, N_NORMAL_MEMORY);
+	node_clear_state(0, N_MEMORY);
+	if (N_MEMORY != N_NORMAL_MEMORY)
+		node_clear_state(0, N_NORMAL_MEMORY);
 
 	zone_sizes_init();
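
Read together with the aliasing sketched in the commit message, the new paging_init() lines behave as follows (the comment here is added explanation, not part of the patch):

/*
 * Added explanation (not in the patch): node 0 is now cleared from the
 * widest state, N_MEMORY.  On configurations where N_MEMORY aliases
 * N_NORMAL_MEMORY (no CONFIG_HIGHMEM, no CONFIG_MOVABLE_NODE), the
 * guard below is compile-time false and a single clear suffices,
 * exactly as before the patch.  When CONFIG_MOVABLE_NODE makes
 * N_MEMORY a distinct bit, N_NORMAL_MEMORY must still be cleared
 * explicitly.
 */
node_clear_state(0, N_MEMORY);
if (N_MEMORY != N_NORMAL_MEMORY)
	node_clear_state(0, N_NORMAL_MEMORY);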
mm/page_alloc.c: +22 −18
@@ -1695,7 +1695,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
- * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
+ * tasks mems_allowed, or node_states[N_MEMORY].)
  *
  * If the zonelist cache is not available for this zonelist, does
  * nothing and returns NULL.
@@ -1724,7 +1724,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 	}
 
 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
 					&cpuset_current_mems_allowed :
-					&node_states[N_HIGH_MEMORY];
+					&node_states[N_MEMORY];
 	return allowednodes;
 }

@@ -3238,7 +3238,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		return node;
 	}
 
-	for_each_node_state(n, N_HIGH_MEMORY) {
+	for_each_node_state(n, N_MEMORY) {
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
@@ -3380,7 +3380,7 @@ static int default_zonelist_order(void)
 	 * local memory, NODE_ORDER may be suitable.
         */
 	average_size = total_size /
-				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
+				(nodes_weight(node_states[N_MEMORY]) + 1);
 	for_each_online_node(nid) {
 		low_kmem_size = 0;
 		total_size = 0;
@@ -4731,7 +4731,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
- * Populate N_HIGH_MEMORY for calculating usable_nodes.
+ * Populate N_MEMORY for calculating usable_nodes.
  */
 static unsigned long __init early_calculate_totalpages(void)
 {
@@ -4744,7 +4744,7 @@ static unsigned long __init early_calculate_totalpages(void)
 
 		totalpages += pages;
 		if (pages)
-			node_set_state(nid, N_HIGH_MEMORY);
+			node_set_state(nid, N_MEMORY);
 	}
  	return totalpages;
 }
@@ -4761,9 +4761,9 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	unsigned long usable_startpfn;
 	unsigned long kernelcore_node, kernelcore_remaining;
 	/* save the state before borrow the nodemask */
-	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
+	nodemask_t saved_node_state = node_states[N_MEMORY];
 	unsigned long totalpages = early_calculate_totalpages();
-	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
 
 	/*
 	 * If movablecore was specified, calculate what size of
@@ -4798,7 +4798,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 restart:
 	/* Spread kernelcore memory as evenly as possible throughout nodes */
 	kernelcore_node = required_kernelcore / usable_nodes;
-	for_each_node_state(nid, N_HIGH_MEMORY) {
+	for_each_node_state(nid, N_MEMORY) {
 		unsigned long start_pfn, end_pfn;
 
 		/*
@@ -4890,23 +4890,27 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 
 out:
 	/* restore the node_state */
-	node_states[N_HIGH_MEMORY] = saved_node_state;
+	node_states[N_MEMORY] = saved_node_state;
 }
 
-/* Any regular memory on that node ? */
-static void __init check_for_regular_memory(pg_data_t *pgdat)
+/* Any regular or high memory on that node ? */
+static void check_for_memory(pg_data_t *pgdat, int nid)
 {
-#ifdef CONFIG_HIGHMEM
 	enum zone_type zone_type;
 
-	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
+	if (N_MEMORY == N_NORMAL_MEMORY)
+		return;
+
+	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
 		struct zone *zone = &pgdat->node_zones[zone_type];
 		if (zone->present_pages) {
-			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+			node_set_state(nid, N_HIGH_MEMORY);
+			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
+			    zone_type <= ZONE_NORMAL)
+				node_set_state(nid, N_NORMAL_MEMORY);
 			break;
 		}
 	}
-#endif
 }
 
 /**
@@ -4989,8 +4993,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
 		/* Any memory on that node */
 		if (pgdat->node_present_pages)
-			node_set_state(nid, N_HIGH_MEMORY);
-		check_for_regular_memory(pgdat);
+			node_set_state(nid, N_MEMORY);
+		check_for_memory(pgdat, nid);
 	}
 }
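
For readability, here is check_for_memory() as it reads after this patch, reassembled from the hunk above; the comments marked "added" are explanatory annotations, not part of the kernel source:

/* Any regular or high memory on that node ? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	/*
	 * Added: when N_MEMORY aliases N_NORMAL_MEMORY, the single
	 * node_set_state(nid, N_MEMORY) in free_area_init_nodes() has
	 * already set every state this function could set.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		return;

	/* Added: find the lowest populated zone below ZONE_MOVABLE. */
	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (zone->present_pages) {
			/* Added: any populated zone here implies high-or-normal memory. */
			node_set_state(nid, N_HIGH_MEMORY);
			/* Added: a zone at or below ZONE_NORMAL implies regular memory. */
			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
			    zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}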