
Commit 67a23c49 authored by Dave Chinner, committed by Al Viro

fs: rename inode_lock to inode_hash_lock



All that remains of the inode_lock is protecting the inode hash list
manipulation and traversals. Rename the inode_lock to
inode_hash_lock to reflect its actual function.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a66979ab
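
After this change the hash insert/remove paths nest the renamed lock outside inode->i_lock, as the __insert_inode_hash()/remove_inode_hash() hunks below show. A minimal sketch of that pattern, assuming the inode_hash_lock declaration this patch adds to fs/inode.c (the helper name is hypothetical; the real code is __insert_inode_hash()):

/*
 * Minimal sketch only: assumes the inode_hash_lock spinlock added by this
 * patch; hash_insert_sketch() is a hypothetical name for illustration.
 */
static void hash_insert_sketch(struct inode *inode, struct hlist_head *b)
{
	spin_lock(&inode_hash_lock);	/* serialises inode_hashtable */
	spin_lock(&inode->i_lock);	/* serialises inode->i_hash */
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}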
+61 −50
@@ -39,10 +39,10 @@
 *   sb->s_inodes, inode->i_sb_list
 * inode_wb_list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
- * inode_lock
- *   inode->i_lock
 *
 * inode_sb_list_lock
 *   inode->i_lock
@@ -50,6 +50,13 @@
 *
 * inode_wb_list_lock
 *   inode->i_lock
+ *
+ * inode_hash_lock
+ *   inode_sb_list_lock
+ *   inode->i_lock
+ *
+ * iunique_lock
+ *   inode_hash_lock
 */
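
The ordering table above is exercised later in this patch: iunique() keeps its function-local iunique_lock as the outer lock and test_inode_iunique() nests inode_hash_lock inside it. A condensed, hypothetical replica of that pair, kept only to show the nesting and assuming the hash()/inode_hashtable/inode_hash_lock definitions in fs/inode.c:

/* Hypothetical condensation of test_inode_iunique(): walks the hash under
 * inode_hash_lock, which nests inside the caller's iunique-style lock. */
static int sketch_ino_busy(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;
	int busy = 0;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			busy = 1;
			break;
		}
	}
	spin_unlock(&inode_hash_lock);
	return busy;
}

/* Hypothetical condensation of iunique(): the local lock is taken first,
 * inode_hash_lock is acquired inside it via sketch_ino_busy(). */
static unsigned long sketch_pick_ino(struct super_block *sb)
{
	static DEFINE_SPINLOCK(sketch_iunique_lock);
	static unsigned long counter;
	unsigned long res;

	spin_lock(&sketch_iunique_lock);	/* outer lock, per the table above */
	do {
		res = counter++;
	} while (sketch_ino_busy(sb, res));	/* takes inode_hash_lock inside */
	spin_unlock(&sketch_iunique_lock);
	return res;
}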

/*
@@ -85,6 +92,8 @@

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Each inode can be on two separate lists. One is
@@ -100,15 +109,6 @@ static unsigned int i_hash_shift __read_mostly;

static LIST_HEAD(inode_lru);
static DEFINE_SPINLOCK(inode_lru_lock);
-static struct hlist_head *inode_hashtable __read_mostly;
-
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
@@ -433,11 +433,11 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

@@ -449,11 +449,11 @@ EXPORT_SYMBOL(__insert_inode_hash);
 */
void remove_inode_hash(struct inode *inode)
{
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);

@@ -778,11 +778,15 @@ static struct inode *find_inode(struct super_block *sb,

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_sb != sb)
+		spin_lock(&inode->i_lock);
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
			continue;
-		if (!test(inode, data))
+		}
+		if (!test(inode, data)) {
+			spin_unlock(&inode->i_lock);
			continue;
-		spin_lock(&inode->i_lock);
+		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
@@ -806,11 +810,15 @@ static struct inode *find_inode_fast(struct super_block *sb,

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_ino != ino)
+		spin_lock(&inode->i_lock);
+		if (inode->i_ino != ino) {
+			spin_unlock(&inode->i_lock);
			continue;
-		if (inode->i_sb != sb)
+		}
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
			continue;
-		spin_lock(&inode->i_lock);
+		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
@@ -924,7 +932,7 @@ void unlock_new_inode(struct inode *inode)
EXPORT_SYMBOL(unlock_new_inode);

/*
- * This is called without the inode lock held.. Be careful.
+ * This is called without the inode hash lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
@@ -941,7 +949,7 @@ static struct inode *get_new_inode(struct super_block *sb,
	if (inode) {
		struct inode *old;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
@@ -953,7 +961,7 @@ static struct inode *get_new_inode(struct super_block *sb,
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
@@ -966,7 +974,7 @@ static struct inode *get_new_inode(struct super_block *sb,
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
@@ -974,7 +982,7 @@ static struct inode *get_new_inode(struct super_block *sb,
	return inode;

set_failed:
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
@@ -992,7 +1000,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
	if (inode) {
		struct inode *old;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
@@ -1002,7 +1010,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
@@ -1015,7 +1023,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
@@ -1036,10 +1044,14 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
	struct hlist_node *node;
	struct inode *inode;

+	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
-		if (inode->i_ino == ino && inode->i_sb == sb)
+		if (inode->i_ino == ino && inode->i_sb == sb) {
+			spin_unlock(&inode_hash_lock);
			return 0;
+		}
	}
+	spin_unlock(&inode_hash_lock);

	return 1;
}
@@ -1069,7 +1081,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
	static unsigned int counter;
	ino_t res;

-	spin_lock(&inode_lock);
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
@@ -1077,7 +1088,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
-	spin_unlock(&inode_lock);

	return res;
}
@@ -1119,7 +1129,7 @@ EXPORT_SYMBOL(igrab);
 *
 * Otherwise NULL is returned.
 *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
 */
static struct inode *ifind(struct super_block *sb,
		struct hlist_head *head, int (*test)(struct inode *, void *),
@@ -1127,15 +1137,15 @@ static struct inode *ifind(struct super_block *sb,
{
	struct inode *inode;

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
		if (likely(wait))
			wait_on_inode(inode);
		return inode;
	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
	return NULL;
}

@@ -1159,14 +1169,14 @@ static struct inode *ifind_fast(struct super_block *sb,
{
	struct inode *inode;

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
		wait_on_inode(inode);
		return inode;
	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
	return NULL;
}

@@ -1189,7 +1199,7 @@ static struct inode *ifind_fast(struct super_block *sb,
 *
 * Otherwise NULL is returned.
 *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
@@ -1217,7 +1227,7 @@ EXPORT_SYMBOL(ilookup5_nowait);
 *
 * Otherwise NULL is returned.
 *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
@@ -1268,7 +1278,8 @@ EXPORT_SYMBOL(ilookup);
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
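
In practical terms, the note above means a filesystem's @test and @set callbacks for iget5_locked() (and the @test callback of ilookup5()/ilookup5_nowait()) now run under inode_hash_lock and the inode's i_lock, so they must not sleep. A hedged example of a caller that respects this constraint; the "myfs" names are illustrative and not part of this patch:

#include <linux/fs.h>

struct myfs_key {
	unsigned long objid;
};

/* Runs under inode_hash_lock and inode->i_lock: comparison only, no sleeping. */
static int myfs_test(struct inode *inode, void *data)
{
	struct myfs_key *key = data;

	return inode->i_ino == key->objid;
}

/* Also runs under the locks: only non-blocking initialisation here. */
static int myfs_set(struct inode *inode, void *data)
{
	struct myfs_key *key = data;

	inode->i_ino = key->objid;
	return 0;
}

static struct inode *myfs_iget(struct super_block *sb, unsigned long objid)
{
	struct myfs_key key = { .objid = objid };
	struct inode *inode;

	inode = iget5_locked(sb, objid, myfs_test, myfs_set, &key);
	if (!inode)
		return NULL;
	if (inode->i_state & I_NEW) {
		/* Sleeping work (reading the inode from disk, etc.) belongs
		 * here, after the hash lock has been dropped. */
		unlock_new_inode(inode);
	}
	return inode;
}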
@@ -1328,7 +1339,7 @@ int insert_inode_locked(struct inode *inode)
	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
@@ -1346,12 +1357,12 @@ int insert_inode_locked(struct inode *inode)
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
@@ -1372,7 +1383,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		struct hlist_node *node;
		struct inode *old = NULL;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
@@ -1390,12 +1401,12 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
@@ -1674,10 +1685,10 @@ static void __wait_on_freeing_inode(struct inode *inode)
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
+0 −1
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */

#include <asm/atomic.h>

+0 −1
@@ -91,7 +91,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
-#include <linux/writeback.h> /* for inode_lock */

#include <asm/atomic.h>

+0 −1
@@ -23,7 +23,6 @@
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */

#include <asm/atomic.h>

+2 −2
@@ -54,7 +54,7 @@
 *
 * Return 1 if the attributes match and 0 if not.
 *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
 * allowed to sleep.
 */
int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
@@ -98,7 +98,7 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
 *
 * Return 0 on success and -errno on error.
 *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
 * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
 */
static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
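
The NTFS note above states the general rule for any callback that runs under a spinlock: it cannot sleep, so any allocation it makes must use GFP_ATOMIC rather than GFP_KERNEL. A hypothetical sketch of such a callback (the helper name, the i_private stash and the 64-byte size are illustrative only, not from this patch):

#include <linux/fs.h>
#include <linux/slab.h>

/* Runs under inode->i_lock (and inode_hash_lock), so it must not sleep. */
static int myfs_set_locked(struct inode *vi, void *data)
{
	void *priv;

	priv = kmalloc(64, GFP_ATOMIC);	/* GFP_KERNEL could sleep here */
	if (!priv)
		return -ENOMEM;
	vi->i_private = priv;		/* freed later, e.g. when the inode is destroyed */
	return 0;
}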