Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9f8f2172 authored by Christoph Lameter's avatar Christoph Lameter Committed by Linus Torvalds
Browse files

Page allocator: clean up pcp draining functions



- Add comments explaining how drain_pages() works.

- Eliminate useless functions

- Rename drain_all_local_pages to drain_all_pages(). It does drain
  all pages, not only those of the local processor.

- Eliminate useless interrupt off / on sequences. drain_pages()
  disables interrupts on its own. The execution thread is
  pinned to processor by the caller. So there is no need to
  disable interrupts.

- Put drain_all_pages() declaration in gfp.h and remove the
  declarations from suspend.h and from mm/memory_hotplug.c

- Make software suspend call drain_all_pages(). Draining only the
  processor-local pages may not be the right approach if
  software suspend wants to support SMP. If it calls drain_all_pages()
  then we can make drain_pages() static.

[akpm@linux-foundation.org: fix build]
Signed-off-by: default avatarChristoph Lameter <clameter@sgi.com>
Acked-by: default avatarMel Gorman <mel@csn.ul.ie>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Daniel Walker <dwalker@mvista.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent e2848a0e
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -228,5 +228,7 @@ extern void FASTCALL(free_cold_page(struct page *page));

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

#endif /* __LINUX_GFP_H */
+0 −1
Original line number Diff line number Diff line
@@ -130,7 +130,6 @@ struct pbe {
};

/* mm/page_alloc.c */
extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);

/**
+2 −2
Original line number Diff line number Diff line
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)

	printk(KERN_INFO "PM: Creating hibernation image: \n");

	drain_local_pages();
	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
+2 −4
Original line number Diff line number Diff line
@@ -481,8 +481,6 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
	return offlined;
}

extern void drain_all_local_pages(void);

int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
@@ -540,7 +538,7 @@ repeat:
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_local_pages();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -563,7 +561,7 @@ repeat:
	flush_scheduled_work();
	yield();
	/* drain pcp pages , this is synchrouns. */
	drain_all_local_pages();
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
+42 −37
Original line number Diff line number Diff line
@@ -890,7 +890,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
}
#endif

static void __drain_pages(unsigned int cpu)
/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
@@ -915,6 +922,22 @@ static void __drain_pages(unsigned int cpu)
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 *
 * The arg parameter is unused; it exists only so this function matches
 * the callback signature expected by on_each_cpu() (see drain_all_pages()).
 * Direct callers pass NULL.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * Uses on_each_cpu() to run drain_local_pages() on every online CPU,
 * waiting for completion (final argument 1) before returning.
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 0, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
@@ -951,37 +974,6 @@ void mark_free_pages(struct zone *zone)
}
#endif /* CONFIG_PM */

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}

void smp_drain_local_pages(void *arg)
{
	drain_local_pages();
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);

	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
}

/*
 * Free a 0-order page
 */
@@ -1569,7 +1561,7 @@ nofail_alloc:
	cond_resched();

	if (order != 0)
		drain_all_local_pages();
		drain_all_pages();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
@@ -3978,10 +3970,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		local_irq_disable();
		__drain_pages(cpu);
		drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processors event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);
		local_irq_enable();

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
@@ -4480,7 +4485,7 @@ int set_migratetype_isolate(struct page *page)
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_local_pages();
		drain_all_pages();
	return ret;
}