Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b85526df authored by Will McVicker, committed by Suren Baghdasaryan
Browse files

ANDROID: GKI: mm: add struct/enum fields for SPECULATIVE_PAGE_FAULTS



Add the necessary struct/enum attributes from the SPECULATIVE_PAGE_FAULTS
feature as padding to reduce the ABI diff between vendors and ACK. This
allows vendors to pick the SPECULATIVE_PAGE_FAULTS feature and still be
ABI compatible with ACK-4.19.

Bug: 149848888
Bug: 151963988
Test: build
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
[surenb: Squashed the following commits and kept only struct/enum changes
	396b9808 mm: provide speculative fault infrastructure
	47e3eb15 mm: cache some VMA fields in the vm_fault structure
	3f31f748 mm: protect mm_rb tree with a rwlock
	88a78dc2 mm: VMA sequence count
	0947fbfe mm: allow vmas with vm_ops to be speculatively handled ]
Signed-off-by: Will McVicker <willmcvicker@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Merged-In: I1aa561649a836c753b9e52710c3bc1f6eee1dd78
Change-Id: I1aa561649a836c753b9e52710c3bc1f6eee1dd78
parent 6b142446
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -357,6 +357,8 @@ struct vm_fault {
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	unsigned int sequence;		/* Speculative Page Fault field */
	pmd_t orig_pmd;			/* Speculative Page Fault field */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
@@ -387,6 +389,8 @@ struct vm_fault {
					 * page table to avoid allocation from
					 * atomic context.
					 */
	unsigned long vma_flags;	/* Speculative Page Fault field */
	pgprot_t vma_page_prot;		/* Speculative Page Fault field */
};

/* page entry size for vm->huge_fault() */
+5 −0
Original line number Diff line number Diff line
@@ -335,6 +335,9 @@ struct vm_area_struct {
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;

	seqcount_t vm_sequence;		/* Speculative page fault field */
	atomic_t vm_ref_count;		/* Speculative page fault field */
} __randomize_layout;

struct core_thread {
@@ -354,6 +357,8 @@ struct mm_struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;                   /* per-thread vmacache */

		rwlock_t mm_rb_lock;	/* Speculative page fault field */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
+2 −0
Original line number Diff line number Diff line
@@ -110,6 +110,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		SWAP_RA,
		SWAP_RA_HIT,
#endif
		SPECULATIVE_PGFAULT_ANON,	/* Speculative page fault field */
		SPECULATIVE_PGFAULT_FILE,	/* Speculative page fault field */
		NR_VM_EVENT_ITEMS
};

+2 −0
Original line number Diff line number Diff line
@@ -1291,6 +1291,8 @@ const char * const vmstat_text[] = {
	"swap_ra",
	"swap_ra_hit",
#endif
	"speculative_pgfault_anon",
	"speculative_pgfault_file",
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */