Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 958b7f37 authored by Linus Torvalds
Browse files
* git://oss.sgi.com:8090/xfs/xfs-2.6: (33 commits)
  [XFS] Don't use kmap in xfs_iozero.
  [XFS] Remove a bunch of unused functions from XFS.
  [XFS] Remove unused arguments from the XFS_BTREE_*_ADDR macros.
  [XFS] Remove unused header files for MAC and CAP checking functionality.
  [XFS] Make freeze code a little cleaner.
  [XFS] Remove unused argument to xfs_bmap_finish
  [XFS] Clean up use of VFS attr flags
  [XFS] Remove useless memory barrier
  [XFS] XFS sysctl cleanups
  [XFS] Fix assertion in xfs_attr_shortform_remove().
  [XFS] Fix callers of xfs_iozero() to zero the correct range.
  [XFS] Ensure a frozen filesystem has a clean log before writing the dummy
  [XFS] Fix sub-block zeroing for buffered writes into unwritten extents.
  [XFS] Re-initialize the per-cpu superblock counters after recovery.
  [XFS] Fix block reservation changes for non-SMP systems.
  [XFS] Fix block reservation mechanism.
  [XFS] Make growfs work for amounts greater than 2TB
  [XFS] Fix inode log item use-after-free on forced shutdown
  [XFS] Fix attr2 corruption with btree data extents
  [XFS] Workaround log space issue by increasing XFS_TRANS_PUSH_AIL_RESTARTS
  ...
parents d6879837 e7ff6aed
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -31,15 +31,13 @@ typedef struct {
	do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
#define mrlock_init(mrp, t,n,s)	mrinit(mrp, n)
#define mrfree(mrp)		do { } while (0)
#define mraccess(mrp)		mraccessf(mrp, 0)
#define mrupdate(mrp)		mrupdatef(mrp, 0)

static inline void mraccessf(mrlock_t *mrp, int flags)
static inline void mraccess(mrlock_t *mrp)
{
	down_read(&mrp->mr_lock);
}

static inline void mrupdatef(mrlock_t *mrp, int flags)
static inline void mrupdate(mrlock_t *mrp)
{
	down_write(&mrp->mr_lock);
	mrp->mr_writer = 1;
+10 −5
Original line number Diff line number Diff line
@@ -249,7 +249,7 @@ xfs_map_blocks(
	return -error;
}

STATIC inline int
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
@@ -1283,13 +1283,18 @@ __xfs_get_blocks(
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are
	 * now coming back to use it then we will need to flag it as new
	 * even if it has a disk address.
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer gets
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
+72 −70
Original line number Diff line number Diff line
@@ -34,13 +34,13 @@
#include <linux/backing-dev.h>
#include <linux/freezer.h>

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
static kmem_zone_t *xfs_buf_zone;
static kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
@@ -139,7 +139,7 @@ page_region_mask(
	return mask;
}

STATIC inline void
STATIC_INLINE void
set_page_region(
	struct page	*page,
	size_t		offset,
@@ -151,7 +151,7 @@ set_page_region(
		SetPageUptodate(page);
}

STATIC inline int
STATIC_INLINE int
test_page_region(
	struct page	*page,
	size_t		offset,
@@ -171,9 +171,9 @@ typedef struct a_list {
	struct a_list	*next;
} a_list_t;

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);
static a_list_t		*as_free_head;
static int		as_list_len;
static DEFINE_SPINLOCK(as_lock);

/*
 *	Try to batch vunmaps because they are costly.
@@ -1085,7 +1085,7 @@ xfs_buf_iostart(
	return status;
}

STATIC __inline__ int
STATIC_INLINE int
_xfs_buf_iolocked(
	xfs_buf_t		*bp)
{
@@ -1095,7 +1095,7 @@ _xfs_buf_iolocked(
	return 0;
}

STATIC __inline__ void
STATIC_INLINE void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
@@ -1426,8 +1426,8 @@ xfs_free_bufhash(
/*
 *	buftarg list for delwrite queue processing
 */
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
@@ -1679,44 +1679,32 @@ xfsbufd_wakeup(
	return 0;
}

/*
 * Move as many buffers as specified to the supplied list
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfsbufd(
	void			*data)
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	struct list_head	tmp;
	unsigned long		age;
	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int			count;

	current->flags |= PF_MEMALLOC;

	INIT_LIST_HEAD(&tmp);
	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));
	int		skipped = 0;
	int		force;

		count = 0;
		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
				if (!test_bit(XBT_FORCE_FLUSH,
						&target->bt_flags) &&
				    time_before(jiffies,
						bp->b_queuetime + age)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}
@@ -1724,18 +1712,49 @@ xfsbufd(
			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
				list_move_tail(&bp->b_list, &tmp);
				count++;
			}
			list_move_tail(&bp->b_list, list);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;

}

STATIC int
xfsbufd(
	void		*data)
{
	struct list_head tmp;
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
	int		count;
	xfs_buf_t	*bp;

	current->flags |= PF_MEMALLOC;

	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		xfs_buf_delwri_split(target, &tmp,
				xfs_buf_age_centisecs * msecs_to_jiffies(10));

		count = 0;
		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
			count++;
		}

		if (as_list_len > 0)
@@ -1743,7 +1762,6 @@ xfsbufd(
		if (count)
			blk_run_address_space(target->bt_mapping);

		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	} while (!kthread_should_stop());

	return 0;
@@ -1762,34 +1780,18 @@ xfs_flush_buftarg(
	struct list_head tmp;
	xfs_buf_t	*bp, *n;
	int		pincount = 0;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	INIT_LIST_HEAD(&tmp);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_target == target);
		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
		if (xfs_buf_ispin(bp)) {
			pincount++;
			continue;
		}

		list_move_tail(&bp->b_list, &tmp);
	}
	spin_unlock(dwlk);
	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		xfs_buf_lock(bp);
		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
		bp->b_flags |= XBF_WRITE;
		ASSERT(target == bp->b_target);
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
+2 −2
Original line number Diff line number Diff line
@@ -69,8 +69,8 @@ typedef enum {
} xfs_buf_flags_t;

typedef enum {
	XBT_FORCE_SLEEP = (0 << 1),
	XBT_FORCE_FLUSH = (1 << 1),
	XBT_FORCE_SLEEP = 0,
	XBT_FORCE_FLUSH = 1,
} xfs_buftarg_flags_t;

typedef struct xfs_bufhash {
+1 −1
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@
#include "xfs_mount.h"
#include "xfs_export.h"

STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
static struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };

/*
 * XFS encodes and decodes the fileid portion of NFS filehandles
Loading