Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 918e556e authored by David Howells, committed by Linus Torvalds
Browse files

NOMMU: Lock i_mmap_mutex for access to the VMA prio list



Lock i_mmap_mutex for access to the VMA prio list to prevent concurrent
access.  Currently, certain parts of the mmap handling are protected by
the region mutex, but not all.

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 37e79cbf
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
@@ -2052,6 +2056,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2064,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
@@ -2086,6 +2092,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}