Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 28697355 authored by Mike Waychison, committed by Linus Torvalds
Browse files

mm: remove __invalidate_mapping_pages variant



Remove __invalidate_mapping_pages atomic variant now that its sole caller
can sleep (fixed in eccb95ce ("vfs: fix
lock inversion in drop_pagecache_sb()")).

This fixes softlockups that can occur while in the drop_caches path.

Signed-off-by: Mike Waychison <mikew@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 82553a93
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@ static void drop_pagecache_sb(struct super_block *sb)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		__invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_lock);
+0 −3
Original line number Diff line number Diff line
@@ -2036,9 +2036,6 @@ extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int);
#endif
extern int invalidate_inodes(struct super_block *);
unsigned long __invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end,
					bool be_atomic);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

+16 −23
Original line number Diff line number Diff line
@@ -267,8 +267,21 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
}
EXPORT_SYMBOL(truncate_inode_pages);

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
@@ -309,30 +322,10 @@ unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
		cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);

/*