Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eaff8079 authored by Christoph Hellwig, committed by Al Viro
Browse files

kill I_LOCK



After I_SYNC was split from I_LOCK, the leftover is always used together with
I_NEW and thus superfluous.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 7a0ad10c
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -125,7 +125,7 @@ static struct inode *gfs2_iget_skip(struct super_block *sb,
 * directory entry when gfs2_inode_lookup() is invoked. Part of the code
 * directory entry when gfs2_inode_lookup() is invoked. Part of the code
 * segment inside gfs2_inode_lookup code needs to get moved around.
 * segment inside gfs2_inode_lookup code needs to get moved around.
 *
 *
 * Clean up I_LOCK and I_NEW as well.
 * Clears I_NEW as well.
 **/
 **/


void gfs2_set_iop(struct inode *inode)
void gfs2_set_iop(struct inode *inode)
+13 −13
Original line number Original line Diff line number Diff line
@@ -113,7 +113,7 @@ static void wake_up_inode(struct inode *inode)
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	 */
	smp_mb();
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LOCK);
	wake_up_bit(&inode->i_state, __I_NEW);
}
}


/**
/**
@@ -690,17 +690,17 @@ void unlock_new_inode(struct inode *inode)
	}
	}
#endif
#endif
	/*
	/*
	 * This is special!  We do not need the spinlock when clearing I_LOCK,
	 * This is special!  We do not need the spinlock when clearing I_NEW,
	 * because we're guaranteed that nobody else tries to do anything about
	 * because we're guaranteed that nobody else tries to do anything about
	 * the state of the inode when it is locked, as we just created it (so
	 * the state of the inode when it is locked, as we just created it (so
	 * there can be no old holders that haven't tested I_LOCK).
	 * there can be no old holders that haven't tested I_NEW).
	 * However we must emit the memory barrier so that other CPUs reliably
	 * However we must emit the memory barrier so that other CPUs reliably
	 * see the clearing of I_LOCK after the other inode initialisation has
	 * see the clearing of I_NEW after the other inode initialisation has
	 * completed.
	 * completed.
	 */
	 */
	smp_mb();
	smp_mb();
	WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~(I_LOCK|I_NEW);
	inode->i_state &= ~I_NEW;
	wake_up_inode(inode);
	wake_up_inode(inode);
}
}
EXPORT_SYMBOL(unlock_new_inode);
EXPORT_SYMBOL(unlock_new_inode);
@@ -731,7 +731,7 @@ static struct inode *get_new_inode(struct super_block *sb,
				goto set_failed;
				goto set_failed;


			__inode_add_to_lists(sb, head, inode);
			__inode_add_to_lists(sb, head, inode);
			inode->i_state = I_LOCK|I_NEW;
			inode->i_state = I_NEW;
			spin_unlock(&inode_lock);
			spin_unlock(&inode_lock);


			/* Return the locked inode with I_NEW set, the
			/* Return the locked inode with I_NEW set, the
@@ -778,7 +778,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
		if (!old) {
		if (!old) {
			inode->i_ino = ino;
			inode->i_ino = ino;
			__inode_add_to_lists(sb, head, inode);
			__inode_add_to_lists(sb, head, inode);
			inode->i_state = I_LOCK|I_NEW;
			inode->i_state = I_NEW;
			spin_unlock(&inode_lock);
			spin_unlock(&inode_lock);


			/* Return the locked inode with I_NEW set, the
			/* Return the locked inode with I_NEW set, the
@@ -1083,7 +1083,7 @@ int insert_inode_locked(struct inode *inode)
	ino_t ino = inode->i_ino;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct hlist_head *head = inode_hashtable + hash(sb, ino);


	inode->i_state |= I_LOCK|I_NEW;
	inode->i_state |= I_NEW;
	while (1) {
	while (1) {
		struct hlist_node *node;
		struct hlist_node *node;
		struct inode *old = NULL;
		struct inode *old = NULL;
@@ -1120,7 +1120,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
	struct super_block *sb = inode->i_sb;
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);


	inode->i_state |= I_LOCK|I_NEW;
	inode->i_state |= I_NEW;


	while (1) {
	while (1) {
		struct hlist_node *node;
		struct hlist_node *node;
@@ -1510,7 +1510,7 @@ EXPORT_SYMBOL(inode_wait);
 * until the deletion _might_ have completed.  Callers are responsible
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 * to recheck inode state.
 *
 *
 * It doesn't matter if I_LOCK is not set initially, a call to
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_inode() after removing from the hash list will DTRT.
 * wake_up_inode() after removing from the hash list will DTRT.
 *
 *
 * This is called with inode_lock held.
 * This is called with inode_lock held.
@@ -1518,8 +1518,8 @@ EXPORT_SYMBOL(inode_wait);
static void __wait_on_freeing_inode(struct inode *inode)
static void __wait_on_freeing_inode(struct inode *inode)
{
{
	wait_queue_head_t *wq;
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode_lock);
	spin_unlock(&inode_lock);
	schedule();
	schedule();
+1 −1
Original line number Original line Diff line number Diff line
@@ -1292,7 +1292,7 @@ int txCommit(tid_t tid, /* transaction identifier */
		 */
		 */
		/*
		/*
		 * I believe this code is no longer needed.  Splitting I_LOCK
		 * I believe this code is no longer needed.  Splitting I_LOCK
		 * into two bits, I_LOCK and I_SYNC should prevent this
		 * into two bits, I_NEW and I_SYNC should prevent this
		 * deadlock as well.  But since I don't have a JFS testload
		 * deadlock as well.  But since I don't have a JFS testload
		 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
		 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
		 * Joern
		 * Joern
+3 −3
Original line number Original line Diff line number Diff line
@@ -530,7 +530,7 @@ static int ntfs_is_extended_system_file(ntfs_attr_search_ctx *ctx)
 * the ntfs inode.
 * the ntfs inode.
 *
 *
 * Q: What locks are held when the function is called?
 * Q: What locks are held when the function is called?
 * A: i_state has I_LOCK set, hence the inode is locked, also
 * A: i_state has I_NEW set, hence the inode is locked, also
 *    i_count is set to 1, so it is not going to go away
 *    i_count is set to 1, so it is not going to go away
 *    i_flags is set to 0 and we have no business touching it.  Only an ioctl()
 *    i_flags is set to 0 and we have no business touching it.  Only an ioctl()
 *    is allowed to write to them. We should of course be honouring them but
 *    is allowed to write to them. We should of course be honouring them but
@@ -1207,7 +1207,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
 * necessary fields in @vi as well as initializing the ntfs inode.
 * necessary fields in @vi as well as initializing the ntfs inode.
 *
 *
 * Q: What locks are held when the function is called?
 * Q: What locks are held when the function is called?
 * A: i_state has I_LOCK set, hence the inode is locked, also
 * A: i_state has I_NEW set, hence the inode is locked, also
 *    i_count is set to 1, so it is not going to go away
 *    i_count is set to 1, so it is not going to go away
 *
 *
 * Return 0 on success and -errno on error.  In the error case, the inode will
 * Return 0 on success and -errno on error.  In the error case, the inode will
@@ -1474,7 +1474,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
 * normal directory inodes.
 * normal directory inodes.
 *
 *
 * Q: What locks are held when the function is called?
 * Q: What locks are held when the function is called?
 * A: i_state has I_LOCK set, hence the inode is locked, also
 * A: i_state has I_NEW set, hence the inode is locked, also
 *    i_count is set to 1, so it is not going to go away
 *    i_count is set to 1, so it is not going to go away
 *
 *
 * Return 0 on success and -errno on error.  In the error case, the inode will
 * Return 0 on success and -errno on error.  In the error case, the inode will
+1 −1
Original line number Original line Diff line number Diff line
@@ -45,7 +45,7 @@
 *
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
 * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
 * set as well. However, UBIFS disables readahead.
 * set as well. However, UBIFS disables readahead.
 */
 */


Loading