Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fbc355c1 authored by Mark Salyzyn, committed by Martin Liu
Browse files

Revert "BACKPORT: mm: move zone watermark accesses behind an accessor"



This reverts commit acfb1c60.

Reason for revert: revert customized code
Bug: 140544941
Test: boot
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Martin Liu <liumartin@google.com>
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I4988ddf1fc24579d4fc478de1de6e45b870f7bcd
parent 35be952a
Loading
Loading
Loading
Loading
+4 −5
Original line number Diff line number Diff line
@@ -274,10 +274,9 @@ enum zone_watermarks {
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH])
#define wmark_pages(z, i) (z->_watermark[i])
#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
@@ -368,7 +367,7 @@ struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

+1 −1
Original line number Diff line number Diff line
@@ -1431,7 +1431,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
+6 −6
Original line number Diff line number Diff line
@@ -3538,7 +3538,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
			}
		}

		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		if (!zone_watermark_fast(zone, order, mark,
				       ac_classzone_idx(ac), alloc_flags)) {
			int ret;
@@ -4972,7 +4972,7 @@ long si_mem_available(void)
		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);
		wmark_low += zone->watermark[WMARK_LOW];

	/*
	 * Estimate the amount of memory available for userspace allocations,
@@ -7546,13 +7546,13 @@ static void __setup_per_zone_wmarks(void)

			min_pages = zone->managed_pages / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = min;
			zone->watermark[WMARK_MIN] = min;
		}

		/*
@@ -7564,9 +7564,9 @@ static void __setup_per_zone_wmarks(void)
			    mult_frac(zone->managed_pages,
				      watermark_scale_factor, 10000));

		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) +
		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
					low + min;
		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) +
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
					low + min * 2;

		spin_unlock_irqrestore(&zone->lock, flags);