Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6462ba75 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: adding speculative page fault failure trace events"

parents 28a395c7 9a54b720
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -8,7 +8,7 @@

/*
 * Test whether @vma maps hugetlb pages.
 *
 * vm_flags is read with READ_ONCE() so this helper is safe to call from
 * the speculative page fault path, where the VMA may be modified
 * concurrently (the caller does not hold mmap_sem).
 */
static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return !!(READ_ONCE(vma->vm_flags) & VM_HUGETLB);
}

#else
+32 −0
Original line number Diff line number Diff line
@@ -292,6 +292,8 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */
/* Speculative fault, not holding mmap_sem */
#define FAULT_FLAG_SPECULATIVE	0x200

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
@@ -320,6 +322,10 @@ struct vm_fault {
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	unsigned int sequence;
	pmd_t orig_pmd;			/* value of PMD at the time of fault */
#endif
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
@@ -1251,6 +1257,7 @@ static inline void INIT_VMA(struct vm_area_struct *vma)
	INIT_LIST_HEAD(&vma->anon_vma_chain);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	seqcount_init(&vma->vm_sequence);
	atomic_set(&vma->vm_ref_count, 1);
#endif
}

@@ -1400,6 +1407,31 @@ int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags);

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
extern int __handle_speculative_fault(struct mm_struct *mm,
				      unsigned long address,
				      unsigned int flags);
static inline int handle_speculative_fault(struct mm_struct *mm,
					   unsigned long address,
					   unsigned int flags)
{
	/*
	 * Speculative handling is only attempted for multi-threaded
	 * user-space faults; everything else falls back to the regular,
	 * mmap_sem-protected path via VM_FAULT_RETRY.
	 */
	bool multithreaded_user_fault = (flags & FAULT_FLAG_USER) &&
					atomic_read(&mm->mm_users) != 1;

	if (!multithreaded_user_fault)
		return VM_FAULT_RETRY;

	return __handle_speculative_fault(mm, address, flags);
}
#else
static inline int handle_speculative_fault(struct mm_struct *mm,
					   unsigned long address,
					   unsigned int flags)
{
	/*
	 * CONFIG_SPECULATIVE_PAGE_FAULT is disabled: always return
	 * VM_FAULT_RETRY so the caller takes the classic fault path.
	 */
	return VM_FAULT_RETRY;
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
+4 −0
Original line number Diff line number Diff line
@@ -346,6 +346,7 @@ struct vm_area_struct {
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	seqcount_t vm_sequence;
	atomic_t vm_ref_count;		/* see vma_get(), vma_put() */
#endif
} __randomize_layout;

@@ -364,6 +365,9 @@ struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	rwlock_t mm_rb_lock;
#endif
	u32 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
+2 −2
Original line number Diff line number Diff line
@@ -452,8 +452,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	/*
	 * vm_start and vm_pgoff are read with READ_ONCE() so the index
	 * computation is safe on the speculative fault path, where the
	 * VMA may change under us. (The unannotated diff had left both
	 * the old and new statements in place, adding vm_pgoff twice.)
	 */
	pgoff = (address - READ_ONCE(vma->vm_start)) >> PAGE_SHIFT;
	pgoff += READ_ONCE(vma->vm_pgoff);
	return pgoff;
}

+10 −2
Original line number Diff line number Diff line
@@ -174,8 +174,16 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
			      unsigned long address, bool compound);
/*
 * Add an anonymous rmap for a freshly allocated page.
 *
 * Thin wrapper around __page_add_new_anon_rmap() that first checks that
 * @address actually lies inside @vma's [vm_start, vm_end) range.
 * (The stale non-static prototype of this function, left over from the
 * unannotated diff, is dropped: it conflicted with this static inline
 * definition of the same name.)
 */
static inline void page_add_new_anon_rmap(struct page *page,
					  struct vm_area_struct *vma,
					  unsigned long address, bool compound)
{
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__page_add_new_anon_rmap(page, vma, address, compound);
}

void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

Loading