Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c6543459 authored by Rik van Riel, committed by Linus Torvalds
Browse files

mm: remove __GFP_NO_KSWAPD



When transparent huge pages were introduced, memory compaction and swap
storms were an issue, and the kernel had to be careful to not make THP
allocations cause pageout or compaction.

Now that we have working compaction deferral, kswapd is smart enough to
invoke compaction, and the quadratic behaviour around isolate_free_pages
has been fixed, so it should be safe to remove __GFP_NO_KSWAPD.

[minchan@kernel.org: Comment fix]
[mgorman@suse.de: Avoid direct reclaim for deferred compaction]
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 075663d1
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -1056,8 +1056,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 * ask the memory allocator to avoid re-trying.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1071,8 +1070,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		       __GFP_NORETRY | __GFP_NO_KSWAPD;
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

+1 −4
Original line number Diff line number Diff line
@@ -35,7 +35,6 @@ struct vm_area_struct;
#else
#define ___GFP_NOTRACK		0
#endif
#define ___GFP_NO_KSWAPD	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u

@@ -90,7 +89,6 @@ struct vm_area_struct;
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */

#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

@@ -120,8 +118,7 @@ struct vm_area_struct;
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
			 __GFP_NO_KSWAPD)
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
+0 −1
Original line number Diff line number Diff line
@@ -36,7 +36,6 @@
	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
	) : "GFP_NOWAIT"
+3 −4
Original line number Diff line number Diff line
@@ -2362,7 +2362,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
		goto nopage;

restart:
	if (!(gfp_mask & __GFP_NO_KSWAPD))
	wake_all_kswapd(order, zonelist, high_zoneidx,
					zone_idx(preferred_zone));

@@ -2441,7 +2440,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	 * system then fail the allocation instead of entering direct reclaim.
	 */
	if ((deferred_compaction || contended_compaction) &&
						(gfp_mask & __GFP_NO_KSWAPD))
	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
		goto nopage;

	/* Try direct reclaim and then allocating */