Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9b679320 authored by Peter Zijlstra, committed by Linus Torvalds
Browse files

mm/memory-failure.c: fix spinlock vs mutex order



We cannot take a mutex while holding a spinlock, so flip the order and
fix the locking documentation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent aa2c96d6
Loading
Loading
Loading
Loading
+6 −15
Original line number Diff line number Diff line
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
	struct task_struct *tsk;
	struct anon_vma *av;

	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
		goto out;
		return;

	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;

@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	page_unlock_anon_vma(av);
out:
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma(av);
}

/*
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	/*
	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could be all
	 * done without taking tasklist_lock and looping over all tasks.
	 */

	read_lock(&tasklist_lock);
	mutex_lock(&mapping->i_mmap_mutex);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);
	read_unlock(&tasklist_lock);
	mutex_unlock(&mapping->i_mmap_mutex);
}

/*
+2 −3
Original line number Diff line number Diff line
@@ -38,9 +38,8 @@
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_wb_list_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *   anon_vma->mutex      (memory_failure, collect_procs_anon)
 *     pte map lock
 */