
Commit c06b1fca authored by Andrew Morton, committed by Linus Torvalds

mm/page_alloc.c: don't cache `current' in a local



It's old-fashioned and unneeded.

akpm:/usr/src/25> size mm/page_alloc.o
   text    data     bss     dec     hex filename
  39884 1241317   18808 1300009  13d629 mm/page_alloc.o (before)
  39838 1241317   18808 1299963  13d5fb mm/page_alloc.o (after)

Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fd4a4663
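For context on why the locals are unneeded: in the kernel, `current' is a macro that expands to a pointer to the running task's struct task_struct (on most architectures a cheap per-CPU or register read), so stashing it in a local variable buys nothing and only adds a name to track. Below is a minimal userspace sketch of the same refactor; `current', `PF_MEMALLOC' and the task struct are hypothetical stand-ins for the kernel's definitions, not the real ones.

#include <stdio.h>

#define PF_MEMALLOC 0x00000800          /* representative flag bit (stand-in) */

struct task_struct { unsigned int flags; };

static struct task_struct stand_in_task;   /* hypothetical stand-in task */
#define current (&stand_in_task)           /* the kernel's current is a macro too */

/* Old style: cache current in a local before flipping the flag. */
static void reclaim_old_style(void)
{
	struct task_struct *p = current;

	p->flags |= PF_MEMALLOC;
	/* ... direct reclaim work ... */
	p->flags &= ~PF_MEMALLOC;
}

/* New style, as in this commit: dereference current directly each time. */
static void reclaim_new_style(void)
{
	current->flags |= PF_MEMALLOC;
	/* ... direct reclaim work ... */
	current->flags &= ~PF_MEMALLOC;
}

int main(void)
{
	reclaim_old_style();
	reclaim_new_style();
	printf("flags = %#x\n", current->flags);
	return 0;
}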
mm/page_alloc.c  +10 −14
@@ -1809,15 +1809,14 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	bool sync_migration)
 {
 	struct page *page;
-	struct task_struct *tsk = current;
 
 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;
 
-	tsk->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
-	tsk->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
 		/* Page migration frees to the PCP lists but we want merging */
@@ -1869,23 +1868,22 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;
 
 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 
 	cond_resched();

@@ -1950,7 +1948,6 @@ void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;

@@ -1977,12 +1974,12 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
@@ -2001,7 +1998,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
 	bool sync_migration = false;
 
 	/*
@@ -2060,7 +2056,7 @@ rebalance:
 		goto nopage;
 
 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
@@ -2153,7 +2149,7 @@ nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}