Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d896c78 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (37 commits)
  [XFS] Fix lockdep annotations for xfs_lock_inodes
  [LIB]: export radix_tree_preload()
  [XFS] Fix XFS_IOC_FSBULKSTAT{,_SINGLE} & XFS_IOC_FSINUMBERS in compat mode
  [XFS] Compat ioctl handler for handle operations
  [XFS] Compat ioctl handler for XFS_IOC_FSGEOMETRY_V1.
  [XFS] Clean up function name handling in tracing code
  [XFS] Quota inode has no parent.
  [XFS] Concurrent Multi-File Data Streams
  [XFS] Use uninitialized_var macro to stop warning about rtx
  [XFS] XFS should not be looking at filp reference counts
  [XFS] Use is_power_of_2 instead of open coding checks
  [XFS] Reduce shouting by removing unnecessary macros from dir2 code.
  [XFS] Simplify XFS min/max macros.
  [XFS] Kill off xfs_count_bits
  [XFS] Cancel transactions on xfs_itruncate_start error.
  [XFS] Use do_div() on 64 bit types.
  [XFS] Fix remount,readonly path to flush everything correctly.
  [XFS] Cleanup inode extent size hint extraction
  [XFS] Prevent ENOSPC from aborting transactions that need to succeed
  [XFS] Prevent deadlock when flushing inodes on unmount
  ...
parents 2a9915c8 0f1145cc
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -64,6 +64,7 @@ xfs-y += xfs_alloc.o \
				   xfs_dir2_sf.o \
				   xfs_error.o \
				   xfs_extfree_item.o \
				   xfs_filestream.o \
				   xfs_fsops.o \
				   xfs_ialloc.o \
				   xfs_ialloc_btree.o \
@@ -77,6 +78,7 @@ xfs-y += xfs_alloc.o \
				   xfs_log.o \
				   xfs_log_recover.o \
				   xfs_mount.o \
				   xfs_mru_cache.o \
				   xfs_rename.o \
				   xfs_trans.o \
				   xfs_trans_ail.o \
+0 −19
Original line number Diff line number Diff line
@@ -100,25 +100,6 @@ kmem_zone_destroy(kmem_zone_t *zone)
extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);

/*
 * Low memory cache shrinkers
 */

typedef struct shrinker *kmem_shaker_t;
typedef int (*kmem_shake_func_t)(int, gfp_t);

/*
 * Register @sfunc as a low-memory cache shrinker callback, using the
 * default seek cost.  Returns the shrinker handle needed for later
 * deregistration, or NULL on failure.
 */
static inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
	kmem_shaker_t shaker;

	shaker = set_shrinker(DEFAULT_SEEKS, sfunc);
	return shaker;
}

/*
 * Remove a shrinker previously installed via kmem_shake_register().
 * Thin wrapper over the kernel shrinker API; @shrinker must be a
 * handle returned by a successful registration.
 */
static inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
	remove_shrinker(shrinker);
}

static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
+34 −9
Original line number Diff line number Diff line
@@ -108,14 +108,19 @@ xfs_page_trace(

/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.
 * the final hold on this ioend. If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
@@ -156,6 +161,8 @@ xfs_setfilesize(
	xfs_fsize_t		bsize;

	ip = xfs_vtoi(ioend->io_vnode);
	if (!ip)
		return;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);
@@ -334,7 +341,7 @@ xfs_end_bio(
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
	xfs_finish_ioend(ioend, 0);
	return 0;
}

@@ -470,7 +477,7 @@ xfs_submit_ioend(
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

@@ -1003,6 +1010,8 @@ xfs_page_state_convert(
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
@@ -1021,6 +1030,15 @@ xfs_page_state_convert(
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
@@ -1040,7 +1058,7 @@ xfs_page_state_convert(
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							!iomap_valid);
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
@@ -1416,6 +1434,13 @@ xfs_end_io_direct(
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
@@ -1423,9 +1448,9 @@ xfs_end_io_direct(
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend);
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend);
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts it's life in unwritten
@@ -1434,7 +1459,7 @@ xfs_end_io_direct(
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend);
		xfs_finish_ioend(ioend, 0);
	}

	/*
+27 −32
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@
#include <linux/freezer.h>

static kmem_zone_t *xfs_buf_zone;
static kmem_shaker_t xfs_buf_shake;
static struct shrinker *xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
@@ -314,7 +314,7 @@ xfs_buf_free(

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
@@ -323,18 +323,11 @@ xfs_buf_free(
		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			if (bp->b_flags & _XBF_PAGE_CACHE)
				ASSERT(!PagePrivate(page));
			page_cache_release(page);
		}
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		 /*
		  * XXX(hch): bp->b_count_desired might be incorrect (see
		  * xfs_buf_associate_memory for details), but fortunately
		  * the Linux version of kmem_free ignores the len argument..
		  */
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
@@ -764,43 +757,44 @@ xfs_buf_get_noaddr(
	size_t			len,
	xfs_buftarg_t		*target)
{
	size_t			malloc_len = len;
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;
	void			*data;
	int			error;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

 try_again:
	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	if (unlikely(data == NULL))
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->bt_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);
		malloc_len <<= 1;
		goto try_again;
	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(GFP_KERNEL);
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = xfs_buf_associate_memory(bp, data, len);
	if (error)
	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		printk(KERN_WARNING "%s: failed to map pages\n",
				__FUNCTION__);
		goto fail_free_mem;
	bp->b_flags |= _XBF_KMEM_ALLOC;
	}

	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", data);
	XB_TRACE(bp, "no_daddr", len);
	return bp;

 fail_free_mem:
	kmem_free(data, malloc_len);
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free(bp);
	xfs_buf_deallocate(bp);
 fail:
	return NULL;
}
@@ -1453,6 +1447,7 @@ xfs_free_buftarg(
	int			external)
{
	xfs_flush_buftarg(btp, 1);
	xfs_blkdev_issue_flush(btp);
	if (external)
		xfs_blkdev_put(btp->bt_bdev);
	xfs_free_bufhash(btp);
@@ -1837,7 +1832,7 @@ xfs_buf_init(void)
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
	xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
	if (!xfs_buf_shake)
		goto out_destroy_xfsdatad_workqueue;

@@ -1859,7 +1854,7 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
	kmem_shake_deregister(xfs_buf_shake);
	remove_shrinker(xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
+1 −1
Original line number Diff line number Diff line
@@ -63,7 +63,7 @@ typedef enum {

	/* flags used only internally */
	_XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache		   */
	_XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()		   */
	_XBF_PAGES = (1 << 18),	    /* backed by refcounted pages	   */
	_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue	   */
	_XBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue		   */
} xfs_buf_flags_t;
Loading