Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1b79acc9 authored by KOSAKI Motohiro's avatar KOSAKI Motohiro Committed by Linus Torvalds
Browse files

mm, mem-hotplug: recalculate lowmem_reserve when memory hotplug occurs



Currently, memory hotplug calls setup_per_zone_wmarks() and
calculate_zone_inactive_ratio(), but doesn't call
setup_per_zone_lowmem_reserve().

It means the number of reserved pages isn't updated even if memory
hotplug occurs.  This patch fixes it.

Signed-off-by: default avatarKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: default avatarMel Gorman <mel@csn.ul.ie>
Reviewed-by: default avatarMinchan Kim <minchan.kim@gmail.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 839a4fcc
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -1381,7 +1381,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern void setup_per_zone_wmarks(void);
extern void calculate_zone_inactive_ratio(struct zone *zone);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void show_mem(unsigned int flags);
+5 −4
Original line number Original line Diff line number Diff line
@@ -459,8 +459,9 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
		zone_pcp_update(zone);
		zone_pcp_update(zone);


	mutex_unlock(&zonelists_mutex);
	mutex_unlock(&zonelists_mutex);
	setup_per_zone_wmarks();

	calculate_zone_inactive_ratio(zone);
	init_per_zone_wmark_min();

	if (onlined_pages) {
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -893,8 +894,8 @@ static int __ref offline_pages(unsigned long start_pfn,
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	totalram_pages -= offlined_pages;


	setup_per_zone_wmarks();
	init_per_zone_wmark_min();
	calculate_zone_inactive_ratio(zone);

	if (!node_present_pages(node)) {
	if (!node_present_pages(node)) {
		node_clear_state(node, N_HIGH_MEMORY);
		node_clear_state(node, N_HIGH_MEMORY);
		kswapd_stop(node);
		kswapd_stop(node);
+2 −2
Original line number Original line Diff line number Diff line
@@ -5094,7 +5094,7 @@ void setup_per_zone_wmarks(void)
 *    1TB     101        10GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 *   10TB     320        32GB
 */
 */
void __meminit calculate_zone_inactive_ratio(struct zone *zone)
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
{
	unsigned int gb, ratio;
	unsigned int gb, ratio;


@@ -5140,7 +5140,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
 * 8192MB:	11584k
 * 8192MB:	11584k
 * 16384MB:	16384k
 * 16384MB:	16384k
 */
 */
static int __init init_per_zone_wmark_min(void)
int __meminit init_per_zone_wmark_min(void)
{
{
	unsigned long lowmem_kbytes;
	unsigned long lowmem_kbytes;