Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 55fa6091 authored by Dave Chinner, committed by Al Viro
Browse files

fs: move i_sb_list out from under inode_lock



Protect the per-sb inode list with a new global lock
inode_sb_list_lock and use it to protect the list manipulations and
traversals. This lock replaces the inode_lock as the inodes on the
list can be validity checked while holding the inode->i_lock and
hence the inode_lock is no longer needed to protect the list.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent f283c86a
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
@@ -16,7 +17,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&inode_lock);
	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
@@ -26,13 +27,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_lock);
		spin_unlock(&inode_sb_list_lock);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_lock);
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_lock);
	spin_unlock(&inode_sb_list_lock);
	iput(toput_inode);
}

+11 −10
Original line number Diff line number Diff line
@@ -1123,7 +1123,7 @@ static void wait_sb_inodes(struct super_block *sb)
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);
	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
@@ -1143,14 +1143,15 @@ static void wait_sb_inodes(struct super_block *sb)
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
@@ -1159,9 +1160,9 @@ static void wait_sb_inodes(struct super_block *sb)

		cond_resched();

		spin_lock(&inode_lock);
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_lock);
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

+23 −20
Original line number Diff line number Diff line
@@ -34,10 +34,15 @@
 *   inode->i_state, inode->i_hash, __iget()
 * inode_lru_lock protects:
 *   inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 *
 * Lock ordering:
 * inode_lock
 *   inode->i_lock
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode_lru_lock
 */

@@ -99,6 +104,8 @@ static struct hlist_head *inode_hashtable __read_mostly;
 */
DEFINE_SPINLOCK(inode_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * iprune_sem provides exclusion between the icache shrinking and the
 * umount path.
@@ -378,26 +385,23 @@ static void inode_lru_list_del(struct inode *inode)
	spin_unlock(&inode_lru_lock);
}

static inline void __inode_sb_list_add(struct inode *inode)
{
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_lock);
	__inode_sb_list_add(inode);
	spin_unlock(&inode_lock);
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void __inode_sb_list_del(struct inode *inode)
static inline void inode_sb_list_del(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_del_init(&inode->i_sb_list);
	spin_unlock(&inode_sb_list_lock);
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -481,9 +485,10 @@ static void evict(struct inode *inode)

	spin_lock(&inode_lock);
	list_del_init(&inode->i_wb_list);
	__inode_sb_list_del(inode);
	spin_unlock(&inode_lock);

	inode_sb_list_del(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
@@ -539,7 +544,7 @@ void evict_inodes(struct super_block *sb)
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_lock);
	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;
@@ -555,7 +560,7 @@ void evict_inodes(struct super_block *sb)
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_lock);
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

@@ -584,7 +589,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_lock);
	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
@@ -607,7 +612,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_lock);
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

@@ -867,16 +872,14 @@ struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_lock);
	spin_lock_prefetch(&inode_sb_list_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		__inode_sb_list_add(inode);
		spin_unlock(&inode_lock);
		inode_sb_list_add(inode);
	}
	return inode;
}
@@ -945,7 +948,7 @@ static struct inode *get_new_inode(struct super_block *sb,
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			__inode_sb_list_add(inode);
			inode_sb_list_add(inode);
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
@@ -994,7 +997,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			__inode_sb_list_add(inode);
			inode_sb_list_add(inode);
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
+2 −0
Original line number Diff line number Diff line
@@ -125,6 +125,8 @@ extern long do_handle_open(int mountdirfd,
/*
 * inode.c
 */
extern spinlock_t inode_sb_list_lock;

extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *, bool);
+10 −10
Original line number Diff line number Diff line
@@ -29,6 +29,8 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include "../internal.h"

/*
 * Recalculate the mask of events relevant to a given inode locked.
 */
@@ -237,15 +239,14 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	spin_lock(&inode_lock);
	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inode *need_iput_tmp;

@@ -293,12 +294,11 @@ void fsnotify_unmount_inodes(struct list_head *list)
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * We can safely drop inode_sb_list_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 * will be added since the umount has begun.
		 */
		spin_unlock(&inode_lock);
		spin_unlock(&inode_sb_list_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);
@@ -310,7 +310,7 @@ void fsnotify_unmount_inodes(struct list_head *list)

		iput(inode);

		spin_lock(&inode_lock);
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_lock);
	spin_unlock(&inode_sb_list_lock);
}
Loading