Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92973153 authored by Rik van Riel's avatar Rik van Riel Committed by Bhargav Upperla
Browse files

add extra free kbytes tunable



Add a userspace visible knob to tell the VM to keep an extra amount
of memory free, by increasing the gap between each zone's min and
low watermarks.

This is useful for realtime applications that call system
calls and have a bound on the number of allocations that happen
in any short time period.  In this application, extra_free_kbytes
would be left at an amount equal to or larger than the
maximum number of allocations that happen in any burst.

It may also be useful to reduce the memory use of virtual
machines (temporarily?), in a way that does not cause memory
fragmentation like ballooning does.

[ccross]
Revived for use on old kernels where no other solution exists.
The tunable will be removed on kernels that do better at avoiding
direct reclaim.

Change-Id: I765a42be8e964bfd3e2886d1ca85a29d60c3bb3e
Signed-off-by: default avatarRik van Riel <riel@redhat.com>
Signed-off-by: default avatarColin Cross <ccross@android.com>
Git-commit: 92189d47f66c67e5fd92eafaa287e153197a454f
Git-repo: https://android.googlesource.com/kernel/common/


[bhargavuln@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: default avatarBhargav Upperla <bhargavuln@codeaurora.org>
parent 76ccdc74
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@ Currently, these files are in /proc/sys/vm:
- dirty_writeback_centisecs
- drop_caches
- extfrag_threshold
- extra_free_kbytes
- hugepages_treat_as_movable
- hugetlb_shm_group
- laptop_mode
@@ -198,6 +199,21 @@ fragmentation index is <= extfrag_threshold. The default value is 500.

==============================================================

extra_free_kbytes

This parameter tells the VM to keep extra free memory between the threshold
where background reclaim (kswapd) kicks in, and the threshold where direct
reclaim (by allocating processes) kicks in.

This is useful for workloads that require low latency memory allocations
and have a bounded burstiness in memory allocations, for example a
realtime application that receives and transmits network traffic
(causing in-kernel memory allocations) with a maximum total message burst
size of 200MB may need 200MB of extra free memory to avoid direct reclaim
related latencies.

==============================================================

hugepages_treat_as_movable

This parameter is only useful when kernelcore= is specified at boot time to
+9 −0
Original line number Diff line number Diff line
@@ -105,6 +105,7 @@ extern char core_pattern[];
extern unsigned int core_pipe_limit;
#endif
extern int pid_max;
extern int extra_free_kbytes;
extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
@@ -1294,6 +1295,14 @@ static struct ctl_table vm_table[] = {
		.proc_handler	= min_free_kbytes_sysctl_handler,
		.extra1		= &zero,
	},
	{
		.procname	= "extra_free_kbytes",
		.data		= &extra_free_kbytes,
		.maxlen		= sizeof(extra_free_kbytes),
		.mode		= 0644,
		.proc_handler	= min_free_kbytes_sysctl_handler,
		.extra1		= &zero,
	},
	{
		.procname	= "min_free_order_shift",
		.data		= &min_free_order_shift,
+25 −7
Original line number Diff line number Diff line
@@ -203,9 +203,21 @@ static char * const zone_names[MAX_NR_ZONES] = {
	 "Movable",
};

/*
 * Try to keep at least this much lowmem free.  Do not allow normal
 * allocations below this point, only high priority ones. Automatically
 * tuned according to the amount of memory in the system.
 */
int min_free_kbytes = 1024;
int min_free_order_shift = 1;

/*
 * Extra memory for the system to try freeing. Used to temporarily
 * free memory, to make space for new workloads. Anyone can allocate
 * down to the min watermarks controlled by min_free_kbytes above.
 */
int extra_free_kbytes;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
@@ -5380,6 +5392,7 @@ static void setup_per_zone_lowmem_reserve(void)
static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;
@@ -5391,11 +5404,14 @@ static void __setup_per_zone_wmarks(void)
	}

	for_each_zone(zone) {
		u64 tmp;
		u64 min, low;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->managed_pages;
		do_div(tmp, lowmem_pages);
		min = (u64)pages_min * zone->present_pages;
		do_div(min, lowmem_pages);
		low = (u64)pages_low * zone->present_pages;
		do_div(low, vm_total_pages);

		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -5416,11 +5432,13 @@ static void __setup_per_zone_wmarks(void)
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
			zone->watermark[WMARK_MIN] = min;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
					low + (min >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
					low + (min >> 1);

		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
@@ -5533,7 +5551,7 @@ module_init(init_per_zone_wmark_min)
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 *	or extra_free_kbytes changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
	void __user *buffer, size_t *length, loff_t *ppos)