Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b2f1f26a authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "ion: invalidate the pool pointers after free"

parents 9a50beb0 c29ede69
Loading
Loading
Loading
Loading
+22 −0
Original line number Diff line number Diff line
@@ -65,6 +65,7 @@ Currently, these files are in /proc/sys/vm:
- vfs_cache_pressure
- watermark_scale_factor
- zone_reclaim_mode
- want_old_faultaround_pte

==============================================================

@@ -932,4 +933,25 @@ Allowing regular swap effectively restricts allocations to the local
node unless explicitly overridden by memory policies or cpuset
configurations.

=============================================================

want_old_faultaround_pte:

By default faultaround code produces young pte. When want_old_faultaround_pte is
set to 1, faultaround produces old ptes.

During sparse file access faultaround gets more pages mapped and when all of
them are young (default), under memory pressure, this makes vmscan swap out anon
pages instead, or drop other page cache pages which would otherwise stay
resident. Setting want_old_faultaround_pte to 1 avoids this.

Making the faultaround ptes old can result in performance regression on some
architectures. This is due to cycles spent in micro-faults which would take a
page walk to set the young bit in the pte. One such known test that shows a
regression on x86 is unixbench shell8. Set want_old_faultaround_pte to 1 on
architectures which do not show this regression, or if the workload shows an
overall performance benefit with old faultaround ptes.

The default value is 1.

============ End of Document =================================
+4 −2
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
@@ -587,8 +587,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
		if (pools[i]) {
			ion_page_pool_destroy(pools[i]);
			pools[i] = NULL;
		}
}

/**
+3 −0
Original line number Diff line number Diff line
@@ -294,6 +294,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */
/* Speculative fault, not holding mmap_sem */
#define FAULT_FLAG_SPECULATIVE	0x200
#define FAULT_FLAG_PREFAULT_OLD 0x400   /* Make faultaround ptes old */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
@@ -2746,5 +2747,7 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif

extern int want_old_faultaround_pte;

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
+9 −0
Original line number Diff line number Diff line
@@ -1481,6 +1481,15 @@ static struct ctl_table vm_table[] = {
		.extra1		= &zero,
		.extra2		= &one_hundred,
	},
	{
		.procname       = "want_old_faultaround_pte",
		.data           = &want_old_faultaround_pte,
		.maxlen         = sizeof(want_old_faultaround_pte),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = &zero,
		.extra2         = &one,
	},
#ifdef CONFIG_HUGETLB_PAGE
	{
		.procname	= "nr_hugepages",
+10 −0
Original line number Diff line number Diff line
@@ -48,6 +48,8 @@

#include <asm/mman.h>

int want_old_faultaround_pte = 1;

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
@@ -2565,6 +2567,14 @@ void filemap_map_pages(struct vm_fault *vmf,
		if (vmf->pte)
			vmf->pte += iter.index - last_pgoff;
		last_pgoff = iter.index;

		if (want_old_faultaround_pte) {
			if (iter.index == vmf->pgoff)
				vmf->flags &= ~FAULT_FLAG_PREFAULT_OLD;
			else
				vmf->flags |= FAULT_FLAG_PREFAULT_OLD;
		}

		if (alloc_set_pte(vmf, NULL, page))
			goto unlock;
		unlock_page(page);
Loading