
Commit 6a94cb73 authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (184 commits)
  [XFS] Fix race in xfs_write() between direct and buffered I/O with DMAPI
  [XFS] handle unaligned data in xfs_bmbt_disk_get_all
  [XFS] avoid memory allocations in xfs_fs_vcmn_err
  [XFS] Fix speculative allocation beyond eof
  [XFS] Remove XFS_BUF_SHUT() and friends
  [XFS] Use the incore inode size in xfs_file_readdir()
  [XFS] set b_error from bio error in xfs_buf_bio_end_io
  [XFS] use inode_change_ok for setattr permission checking
  [XFS] add a FMODE flag to make XFS invisible I/O less hacky
  [XFS] resync headers with libxfs
  [XFS] simplify projid check in xfs_rename
  [XFS] replace b_fspriv with b_mount
  [XFS] Remove unused tracing code
  [XFS] Remove unnecessary assertion
  [XFS] Remove unused variable in ktrace_free()
  [XFS] Check return value of xfs_buf_get_noaddr()
  [XFS] Fix hang after disallowed rename across directory quota domains
  [XFS] Fix compile with CONFIG_COMPAT enabled
  move inode tracing out of xfs_vnode.
  move vn_iowait / vn_iowake into xfs_aops.c
  ...
parents f57fa1d6 0a8c5395
Documentation/filesystems/xfs.txt +0 −4
@@ -229,10 +229,6 @@ The following sysctls are available for the XFS filesystem:
 	ISGID bit is cleared if the irix_sgid_inherit compatibility sysctl
 	is set.
 
-  fs.xfs.restrict_chown		(Min: 0  Default: 1  Max: 1)
-	Controls whether unprivileged users can use chown to "give away"
-	a file to another user.
-
   fs.xfs.inherit_sync		(Min: 0  Default: 1  Max: 1)
 	Setting this to "1" will cause the "sync" flag set
 	by the xfs_io(8) chattr command on a directory to be
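
These sysctls are exposed as files under /proc/sys/fs/xfs/. As an aside, here is a minimal userspace sketch (not part of this commit) that sets inherit_sync by writing the corresponding procfs file, equivalent to running sysctl fs.xfs.inherit_sync=1:

#include <stdio.h>

int main(void)
{
	/* sysctl names map to procfs paths: fs.xfs.inherit_sync lives at
	 * /proc/sys/fs/xfs/inherit_sync; valid values per the table are 0..1 */
	FILE *f = fopen("/proc/sys/fs/xfs/inherit_sync", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1\n");
	return fclose(f) ? 1 : 0;
}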
fs/inode.c +126 −83
@@ -108,19 +108,20 @@ static void wake_up_inode(struct inode *inode)
 	wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb		- superblock inode belongs to.
+ * @inode	- inode to initialise
+ *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-	struct inode *inode;
-
-	if (sb->s_op->alloc_inode)
-		inode = sb->s_op->alloc_inode(sb);
-	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
-
-	if (inode) {
+
 	struct address_space * const mapping = &inode->i_data;
 
 	inode->i_sb = sb;
@@ -183,9 +184,24 @@ static struct inode *alloc_inode(struct super_block *sb)
 	}
 	inode->i_private = NULL;
 	inode->i_mapping = mapping;
-	}
+
 	return inode;
 }
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+	if (inode)
+		return inode_init_always(sb, inode);
+	return NULL;
+}
 
 void destroy_inode(struct inode *inode)
 {
@@ -196,6 +212,7 @@ void destroy_inode(struct inode *inode)
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
+EXPORT_SYMBOL(destroy_inode);
 
 
 /*
@@ -534,6 +551,49 @@ static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head
 	return node ? inode : NULL;
 }
 
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+	return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+			struct inode *inode)
+{
+	inodes_stat.nr_inodes++;
+	list_add(&inode->i_list, &inode_in_use);
+	list_add(&inode->i_sb_list, &sb->s_inodes);
+	if (head)
+		hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb		- superblock inode belongs to.
+ * @inode	- inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the in use
+ * list, the owning superblock and the inode hash. This needs to be done under
+ * the inode_lock, so export a function to do this rather than the inode lock
+ * itself. We calculate the hash list to add to here so it is all internal
+ * which requires the caller to have already set up the inode number in the
+ * inode to add.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+	spin_lock(&inode_lock);
+	__inode_add_to_lists(sb, head, inode);
+	spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
+
 /**
  *	new_inode 	- obtain an inode
  *	@sb: superblock
@@ -561,9 +621,7 @@ struct inode *new_inode(struct super_block *sb)
 	inode = alloc_inode(sb);
 	if (inode) {
 		spin_lock(&inode_lock);
-		inodes_stat.nr_inodes++;
-		list_add(&inode->i_list, &inode_in_use);
-		list_add(&inode->i_sb_list, &sb->s_inodes);
+		__inode_add_to_lists(sb, NULL, inode);
 		inode->i_ino = ++last_ino;
 		inode->i_state = 0;
 		spin_unlock(&inode_lock);
@@ -622,10 +680,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
 			if (set(inode, data))
 				goto set_failed;
 
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
 
@@ -671,10 +726,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
 			inode->i_ino = ino;
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
 
@@ -698,16 +750,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 	return inode;
 }
 
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-	unsigned long tmp;
-
-	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-			L1_CACHE_BYTES;
-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-	return tmp & I_HASHMASK;
-}
-
 /**
  *	iunique - get a unique inode number
  *	@sb: superblock
@@ -1292,6 +1334,7 @@ int inode_wait(void *word)
 	schedule();
 	return 0;
 }
+EXPORT_SYMBOL(inode_wait);
 
 /*
  * If we try to find an inode in the inode hash while it is being
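
The motivation for the two new exports is visible in the XFS changes that follow: a filesystem that embeds the VFS inode inside its own in-core inode can now reuse the generic field initialisation (inode_init_always) and the inode_lock-protected accounting (inode_add_to_lists) instead of duplicating them. A sketch of such a caller, kernel context assumed; the myfs_* names are hypothetical, not from this commit:

struct myfs_inode {
	unsigned long	myfs_flags;	/* fs-private state */
	struct inode	vfs_inode;	/* embedded VFS inode */
};

static struct inode *
myfs_setup_inode(struct super_block *sb, struct myfs_inode *mip,
		 unsigned long ino)
{
	struct inode *inode;

	/* generic per-inode field initialisation; returns NULL on
	 * failure, just as the slab path in alloc_inode() would */
	inode = inode_init_always(sb, &mip->vfs_inode);
	if (!inode)
		return NULL;

	/* i_ino must be set first: inode_add_to_lists() hashes on it */
	inode->i_ino = ino;
	inode_add_to_lists(sb, inode);
	return inode;
}

Note that new_inode(), get_new_inode() and get_new_inode_fast() above are rewritten in terms of the same __inode_add_to_lists() helper, so the internal and exported paths share one accounting implementation.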
+3 −3
@@ -85,13 +85,13 @@ xfs-y += xfs_alloc.o \
 				   xfs_trans_inode.o \
 				   xfs_trans_item.o \
 				   xfs_utils.o \
-				   xfs_vfsops.o \
 				   xfs_vnodeops.o \
 				   xfs_rw.o \
 				   xfs_dmops.o \
 				   xfs_qmops.o
 
-xfs-$(CONFIG_XFS_TRACE)		+= xfs_dir2_trace.o
+xfs-$(CONFIG_XFS_TRACE)		+= xfs_btree_trace.o \
+				   xfs_dir2_trace.o
 
 # Objects in linux/
 xfs-y				+= $(addprefix $(XFS_LINUX)/, \
@@ -106,7 +106,7 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
 				   xfs_iops.o \
 				   xfs_lrw.o \
 				   xfs_super.o \
-				   xfs_vnode.o \
+				   xfs_sync.o \
 				   xfs_xattr.o)
 
 # Objects in support/
fs/xfs/linux-2.6/sv.h +4 −18
@@ -32,23 +32,15 @@ typedef struct sv_s {
 	wait_queue_head_t waiters;
 } sv_t;
 
-#define SV_FIFO		0x0		/* sv_t is FIFO type */
-#define SV_LIFO		0x2		/* sv_t is LIFO type */
-#define SV_PRIO		0x4		/* sv_t is PRIO type */
-#define SV_KEYED	0x6		/* sv_t is KEYED type */
-#define SV_DEFAULT      SV_FIFO
-
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
-			     unsigned long timeout)
+static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
 	add_wait_queue_exclusive(&sv->waiters, &wait);
-	__set_current_state(state);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
 	spin_unlock(lock);
 
-	schedule_timeout(timeout);
+	schedule();
 
 	remove_wait_queue(&sv->waiters, &wait);
 }
@@ -58,13 +50,7 @@ static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
 #define sv_destroy(sv) \
 	/*NOTHING*/
 #define sv_wait(sv, pri, lock, s) \
-	_sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
-#define sv_wait_sig(sv, pri, lock, s)   \
-	_sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
-#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
-	_sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
-#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
-	_sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
+	_sv_wait(sv, lock)
 #define sv_signal(sv) \
 	wake_up(&(sv)->waiters)
 #define sv_broadcast(sv) \
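
With every variant now funnelled into a single uninterruptible, untimed sleep, the usage contract of _sv_wait() is: the caller holds the spinlock, the task is queued on sv->waiters before the lock is dropped, and the macro returns with the lock released. A hedged sketch of that pattern with invented names (done_sv, done_lock, wait_for_done, mark_done are illustrative only):

static sv_t		done_sv;	/* assume initialised elsewhere */
static spinlock_t	done_lock;
static int		done;

static void wait_for_done(void)
{
	spin_lock(&done_lock);
	while (!done) {
		/* the pri/state arguments are ignored now; _sv_wait()
		 * returns with done_lock dropped, so re-take it before
		 * re-checking the condition */
		sv_wait(&done_sv, 0, &done_lock, 0);
		spin_lock(&done_lock);
	}
	spin_unlock(&done_lock);
}

static void mark_done(void)
{
	spin_lock(&done_lock);
	done = 1;
	spin_unlock(&done_lock);
	sv_signal(&done_sv);	/* wakes one exclusive waiter */
}

Because _sv_wait() queues the task and marks it TASK_UNINTERRUPTIBLE before releasing the lock, a mark_done() that runs in the window before schedule() cannot lose the wakeup: schedule() returns immediately for an already-woken task.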
fs/xfs/linux-2.6/xfs_aops.c +52 −14
@@ -42,6 +42,40 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
+
+/*
+ * Prime number of hash buckets since address is used as the key.
+ */
+#define NVSYNC		37
+#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
+static wait_queue_head_t xfs_ioend_wq[NVSYNC];
+
+void __init
+xfs_ioend_init(void)
+{
+	int i;
+
+	for (i = 0; i < NVSYNC; i++)
+		init_waitqueue_head(&xfs_ioend_wq[i]);
+}
+
+void
+xfs_ioend_wait(
+	xfs_inode_t	*ip)
+{
+	wait_queue_head_t *wq = to_ioend_wq(ip);
+
+	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
+}
+
+STATIC void
+xfs_ioend_wake(
+	xfs_inode_t	*ip)
+{
+	if (atomic_dec_and_test(&ip->i_iocount))
+		wake_up(to_ioend_wq(ip));
+}
+
 STATIC void
 xfs_count_page_state(
 	struct page		*page,
@@ -146,16 +180,25 @@ xfs_destroy_ioend(
 	xfs_ioend_t		*ioend)
 {
 	struct buffer_head	*bh, *next;
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 
 	for (bh = ioend->io_buffer_head; bh; bh = next) {
 		next = bh->b_private;
 		bh->b_end_io(bh, !ioend->io_error);
 	}
-	if (unlikely(ioend->io_error)) {
-		vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
-			   __FILE__,__LINE__);
+
+	/*
+	 * Volume managers supporting multiple paths can send back ENODEV
+	 * when the final path disappears.  In this case continuing to fill
+	 * the page cache with dirty data which cannot be written out is
+	 * evil, so prevent that.
+	 */
+	if (unlikely(ioend->io_error == -ENODEV)) {
+		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
+				      __FILE__, __LINE__);
 	}
-	vn_iowake(XFS_I(ioend->io_inode));
+
+	xfs_ioend_wake(ip);
 	mempool_free(ioend, xfs_ioend_pool);
 }

@@ -191,7 +234,7 @@ xfs_setfilesize(
 		ip->i_d.di_size = isize;
 		ip->i_update_core = 1;
 		ip->i_update_size = 1;
-		mark_inode_dirty_sync(ioend->io_inode);
+		xfs_mark_inode_dirty_sync(ip);
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -317,14 +360,9 @@ xfs_map_blocks(
 	xfs_iomap_t		*mapp,
 	int			flags)
 {
-	xfs_inode_t		*ip = XFS_I(inode);
-	int			error, nmaps = 1;
+	int			nmaps = 1;
 
-	error = xfs_iomap(ip, offset, count,
-				flags, mapp, &nmaps);
-	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
-		xfs_iflags_set(ip, XFS_IMODIFIED);
-	return -error;
+	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
 STATIC_INLINE int
@@ -512,7 +550,7 @@ xfs_cancel_ioend(
 			unlock_buffer(bh);
 		} while ((bh = next_bh) != NULL);
 
-		vn_iowake(XFS_I(ioend->io_inode));
+		xfs_ioend_wake(XFS_I(ioend->io_inode));
 		mempool_free(ioend, xfs_ioend_pool);
 	} while ((ioend = next) != NULL);
 }
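
The NVSYNC machinery added above replaces the old per-inode vn_iowait()/vn_iowake() with a fixed table of 37 wait queues indexed by hashing the inode's address. Unrelated inodes may share a bucket, so a waiter can see spurious wakeups; that is safe because wait_event() re-tests its condition on each wakeup. The same pattern in isolation, with illustrative my_* names not taken from this commit (assume the queues are initialised with init_waitqueue_head() at startup, as xfs_ioend_init() does):

#include <linux/wait.h>	/* kernel context assumed */

struct my_obj {
	atomic_t	count;	/* e.g. outstanding I/Os against the object */
};

#define MY_NSYNC	37	/* prime, since the object address is the key */
static wait_queue_head_t my_wq[MY_NSYNC];
#define my_obj_wq(p)	(&my_wq[((unsigned long)(p)) % MY_NSYNC])

static void my_wait_idle(struct my_obj *obj)
{
	/* sleep until count reaches zero; a wakeup meant for another
	 * object sharing this bucket only causes a harmless re-test */
	wait_event(*my_obj_wq(obj), atomic_read(&obj->count) == 0);
}

static void my_obj_put(struct my_obj *obj)
{
	if (atomic_dec_and_test(&obj->count))
		wake_up(my_obj_wq(obj));
}

The pay-off is memory: no wait_queue_head_t per object, at the cost of occasional false sharing within a bucket, which the condition re-test absorbs.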