
Commit 1091e4b2 authored by Vinayak Menon, committed by Gerrit - the friendly Code Review server

ANDROID: mm: use raw seqcount variants in vm_write_*



write_seqcount_begin expects to be called from a non-preemptible
context, so that a reader spinning on an odd sequence value is never
left waiting on a preempted writer. But the readers of vm_sequence
never retry, so its writers need not disable preemption. Use the
non-lockdep variants, as lockdep checks are now built into
write_seqcount_begin.
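
For context, a minimal userspace sketch (illustrative only; the
struct and helpers below are made up, not kernel code) of the two
reader patterns this message contrasts. A classic seqlock reader
spins while the sequence is odd, so a writer preempted inside its
write section can leave readers spinning. A vm_sequence-style reader
samples the sequence once and gives up instead of retrying (compare
vma_has_changed() and handle_speculative_fault()), which is what
makes the raw, non-preemption-checking write variants safe:

#include <stdatomic.h>
#include <stdbool.h>

struct vma_stub {
	atomic_uint vm_sequence;	/* even = idle, odd = write in progress */
	unsigned long vm_start;
};

/* Classic seqlock reader: retries, so it can spin on a preempted writer. */
static unsigned long read_retrying(struct vma_stub *v)
{
	unsigned int seq;
	unsigned long val;

	do {
		while ((seq = atomic_load(&v->vm_sequence)) & 1)
			;	/* spins while a write is in flight */
		val = v->vm_start;
	} while (atomic_load(&v->vm_sequence) != seq);

	return val;
}

/* vm_sequence-style reader: samples once and fails instead of spinning. */
static bool read_once(struct vma_stub *v, unsigned long *out)
{
	unsigned int seq = atomic_load(&v->vm_sequence);

	if (seq & 1)
		return false;	/* write in progress: caller falls back */
	*out = v->vm_start;
	return atomic_load(&v->vm_sequence) == seq;
}

Because read_once() never waits for the count to become even, nothing
can spin across a preempted writer, and the write side needs neither
preemption disabling nor the lockdep preemption check.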

Bug: 161210518
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Change-Id: If4f0cddd7f0a79136495060d4acc1702abb46817
Git-commit: c9201630e8a53106406516ba40862e637c324480
Git-repo: https://android.googlesource.com/kernel/common/


Signed-off-by: Srinivasarao Pathipati <quic_spathi@quicinc.com>
parent 4bd53b87
include/linux/mm.h +5 −24
@@ -1498,22 +1498,13 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline void vm_write_begin(struct vm_area_struct *vma)
{
-	write_seqcount_begin(&vma->vm_sequence);
-}
-static inline void vm_write_begin_nested(struct vm_area_struct *vma,
-					 int subclass)
-{
-	write_seqcount_begin_nested(&vma->vm_sequence, subclass);
-}
-static inline void vm_write_end(struct vm_area_struct *vma)
-{
-	write_seqcount_end(&vma->vm_sequence);
-}
-static inline void vm_raw_write_begin(struct vm_area_struct *vma)
-{
	/*
	 * The reads never spins and preemption
	 * disablement is not required.
	 */
	raw_write_seqcount_begin(&vma->vm_sequence);
}
-static inline void vm_raw_write_end(struct vm_area_struct *vma)
+static inline void vm_write_end(struct vm_area_struct *vma)
{
	raw_write_seqcount_end(&vma->vm_sequence);
}
@@ -1521,19 +1512,9 @@ static inline void vm_raw_write_end(struct vm_area_struct *vma)
static inline void vm_write_begin(struct vm_area_struct *vma)
{
}
-static inline void vm_write_begin_nested(struct vm_area_struct *vma,
-					 int subclass)
-{
-}
static inline void vm_write_end(struct vm_area_struct *vma)
{
}
-static inline void vm_raw_write_begin(struct vm_area_struct *vma)
-{
-}
-static inline void vm_raw_write_end(struct vm_area_struct *vma)
-{
-}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

extern void truncate_pagecache(struct inode *inode, loff_t new);
mm/mmap.c +9 −29
@@ -735,29 +735,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	long adjust_next = 0;
	int remove_next = 0;

-	/*
-	 * Why using vm_raw_write*() functions here to avoid lockdep's warning ?
-	 *
-	 * Locked is complaining about a theoretical lock dependency, involving
-	 * 3 locks:
-	 *   mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
-	 *
-	 * Here are the major path leading to this dependency :
-	 *  1. __vma_adjust() mmap_sem  -> vm_sequence -> i_mmap_rwsem
-	 *  2. move_vmap() mmap_sem -> vm_sequence -> fs_reclaim
-	 *  3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
-	 *  4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
-	 *
-	 * So there is no way to solve this easily, especially because in
-	 * unmap_mapping_range() the i_mmap_rwsem is grab while the impacted
-	 * VMAs are not yet known.
-	 * However, the way the vm_seq is used is guarantying that we will
-	 * never block on it since we just check for its value and never wait
-	 * for it to move, see vma_has_changed() and handle_speculative_fault().
-	 */
-	vm_raw_write_begin(vma);
+	vm_write_begin(vma);
	if (next)
-		vm_raw_write_begin(next);
+		vm_write_begin(next);

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL, *importer = NULL;
@@ -841,8 +821,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
			error = anon_vma_clone(importer, exporter);
			if (error) {
				if (next && next != vma)
-					vm_raw_write_end(next);
-				vm_raw_write_end(vma);
+					vm_write_end(next);
+				vm_write_end(vma);
				return error;
			}
		}
@@ -971,7 +951,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
-		vm_raw_write_end(next);
+		vm_write_end(next);
		put_vma(next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
@@ -987,7 +967,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
			 */
			next = vma->vm_next;
			if (next)
-				vm_raw_write_begin(next);
+				vm_write_begin(next);
		} else {
			/*
			 * For the scope of the comment "next" and
@@ -1035,9 +1015,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
		uprobe_mmap(insert);

	if (next && next != vma)
-		vm_raw_write_end(next);
+		vm_write_end(next);
	if (!keep_locked)
-		vm_raw_write_end(vma);
+		vm_write_end(vma);

	validate_mm(mm);

@@ -3348,7 +3328,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		 * that we protect it right now, and let the caller unprotect
		 * it once the move is done.
		 */
-		vm_raw_write_begin(new_vma);
+		vm_write_begin(new_vma);
		vma_link(mm, new_vma, prev, rb_link, rb_parent);
		*need_rmap_locks = false;
	}
mm/mremap.c +4 −4
@@ -300,7 +300,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
	 * to be mapped in our back while we are copying the PTEs.
	 */
	if (vma != new_vma)
-		vm_raw_write_begin(vma);
+		vm_write_begin(vma);

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
@@ -319,7 +319,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		if (vma != new_vma)
-			vm_raw_write_end(vma);
+			vm_write_end(vma);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
@@ -329,9 +329,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
		if (vma != new_vma)
-			vm_raw_write_end(vma);
+			vm_write_end(vma);
	}
-	vm_raw_write_end(new_vma);
+	vm_write_end(new_vma);

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {