
Commit 3155fe6d authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (23 commits)
  xfs: don't name variables "panic"
  xfs: factor agf counter updates into a helper
  xfs: clean up the xfs_alloc_compute_aligned calling convention
  xfs: kill support/debug.[ch]
  xfs: Convert remaining cmn_err() callers to new API
  xfs: convert the quota debug prints to new API
  xfs: rename xfs_cmn_err_fsblock_zero()
  xfs: convert xfs_fs_cmn_err to new error logging API
  xfs: kill xfs_fs_mount_cmn_err() macro
  xfs: kill xfs_fs_repair_cmn_err() macro
  xfs: convert xfs_cmn_err to xfs_alert_tag
  xfs: Convert xlog_warn to new logging interface
  xfs: Convert linux-2.6/ files to new logging interface
  xfs: introduce new logging API.
  xfs: zero proper structure size for geometry calls
  xfs: enable delaylog by default
  xfs: more sensible inode refcounting for ialloc
  xfs: stop using xfs_trans_iget in the RT allocator
  xfs: check if device support discard in xfs_ioc_trim()
  xfs: prevent leaking uninitialized stack memory in FSGEOMETRY_V1
  ...
parents da48524e 0c9ba973
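
The bulk of this pull is the conversion of raw printk()/cmn_err() call sites to the new xfs_err()/xfs_warn()/xfs_alert() helpers visible in the hunks below: each takes a struct xfs_mount pointer (or NULL when no mount is in scope) followed by a printf-style format, so the "XFS" prefix and the log level no longer have to be open-coded at every call site. The user-space sketch below only illustrates that calling convention as it appears in the diffs; it is not the kernel implementation (which lives in the xfs_message.[ch] files added by this series and routes through printk(), also printing the mount's device name), and the helper bodies here are assumptions for illustration.

/*
 * Sketch of the calling convention of the new XFS logging helpers, inferred
 * from the converted call sites in the diffs below.  Illustration only; the
 * real helpers are added in xfs_message.[ch] by this series.
 */
#include <stdarg.h>
#include <stdio.h>

struct xfs_mount;	/* opaque stand-in; callers pass ip->i_mount, btp->bt_mount, or NULL */

static void xfs_printk(const char *level, const struct xfs_mount *mp,
		       const char *fmt, va_list args)
{
	/* One place adds the "XFS" prefix, instead of every caller
	 * open-coding "XFS: ..." as the old printk() sites did. */
	fprintf(stderr, "%sXFS%s: ", level, mp ? " (fs)" : "");
	vfprintf(stderr, fmt, args);
	fputc('\n', stderr);
}

void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	xfs_printk("<1>", mp, fmt, args);	/* <1> == KERN_ALERT */
	va_end(args);
}

void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	xfs_printk("<3>", mp, fmt, args);	/* <3> == KERN_ERR */
	va_end(args);
}

void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	xfs_printk("<4>", mp, fmt, args);	/* <4> == KERN_WARNING */
	va_end(args);
}

int main(void)
{
	/* Mirrors the converted kmem.c call site, where no mount is in
	 * scope; 0x250 stands in for the allocation mode flags. */
	xfs_err(NULL, "possible memory allocation deadlock in %s (mode:0x%x)",
		__func__, 0x250);
	return 0;
}
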
+0 −7
@@ -791,10 +791,3 @@ mount option. Fundamentally, there is no reason why the log manager would not
 be able to swap methods automatically and transparently depending on load
 characteristics, but this should not be necessary if delayed logging works as
 designed.
-
-Roadmap:
-
-2.6.39 Switch default mount option to use delayed logging
-	=> should be roughly 12 months after initial merge
-	=> enough time to shake out remaining problems before next round of
-	   enterprise distro kernel rebases
+2 −3
@@ -102,11 +102,10 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
 				   xfs_globals.o \
 				   xfs_ioctl.o \
 				   xfs_iops.o \
+				   xfs_message.o \
 				   xfs_super.o \
 				   xfs_sync.o \
 				   xfs_xattr.o)
 
 # Objects in support/
-xfs-y				+= $(addprefix support/, \
-				   debug.o \
-				   uuid.o)
+xfs-y				+= support/uuid.o
+5 −4
@@ -23,6 +23,7 @@
 #include <linux/backing-dev.h>
 #include "time.h"
 #include "kmem.h"
+#include "xfs_message.h"
 
 /*
  * Greedy allocation.  May fail and may return vmalloced memory.
@@ -56,8 +57,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
 			return ptr;
 		if (!(++retries % 100))
-			printk(KERN_ERR "XFS: possible memory allocation "
-					"deadlock in %s (mode:0x%x)\n",
+			xfs_err(NULL,
+		"possible memory allocation deadlock in %s (mode:0x%x)",
 					__func__, lflags);
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (1);
@@ -112,8 +113,8 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
 			return ptr;
 		if (!(++retries % 100))
-			printk(KERN_ERR "XFS: possible memory allocation "
-					"deadlock in %s (mode:0x%x)\n",
+			xfs_err(NULL,
+		"possible memory allocation deadlock in %s (mode:0x%x)",
 					__func__, lflags);
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (1);
+3 −3
@@ -854,7 +854,7 @@ xfs_aops_discard_page(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		goto out_invalidate;
 
-	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+	xfs_alert(ip->i_mount,
 		"page discard on page %p, inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
 
@@ -872,7 +872,7 @@ xfs_aops_discard_page(
 		if (error) {
 			/* something screwed, just bail */
 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+				xfs_alert(ip->i_mount,
 			"page discard unable to remove delalloc mapping.");
 			}
 			break;
@@ -1411,7 +1411,7 @@ xfs_vm_write_failed(
 		if (error) {
 			/* something screwed, just bail */
 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+				xfs_alert(ip->i_mount,
 			"xfs_vm_write_failed: unable to clean up ino %lld",
 						ip->i_ino);
 			}
+8 −9
@@ -401,9 +401,8 @@ _xfs_buf_lookup_pages(
 			 * handle buffer allocation failures we can't do much.
 			 */
 			if (!(++retries % 100))
-				printk(KERN_ERR
-					"XFS: possible memory allocation "
-					"deadlock in %s (mode:0x%x)\n",
+				xfs_err(NULL,
+		"possible memory allocation deadlock in %s (mode:0x%x)",
 					__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
@@ -615,8 +614,8 @@ xfs_buf_get(
 	if (!(bp->b_flags & XBF_MAPPED)) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
-			printk(KERN_WARNING "%s: failed to map pages\n",
-					__func__);
+			xfs_warn(target->bt_mount,
+				"%s: failed to map pages\n", __func__);
 			goto no_buffer;
 		}
 	}
@@ -850,8 +849,8 @@ xfs_buf_get_uncached(
 
 	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
 	if (unlikely(error)) {
-		printk(KERN_WARNING "%s: failed to map pages\n",
-				__func__);
+		xfs_warn(target->bt_mount,
+			"%s: failed to map pages\n", __func__);
 		goto fail_free_mem;
 	}
 
@@ -1617,8 +1616,8 @@ xfs_setsize_buftarg_flags(
 	btp->bt_smask = sectorsize - 1;
 
 	if (set_blocksize(btp->bt_bdev, sectorsize)) {
-		printk(KERN_WARNING
-			"XFS: Cannot set_blocksize to %u on device %s\n",
+		xfs_warn(btp->bt_mount,
+			"Cannot set_blocksize to %u on device %s\n",
 			sectorsize, XFS_BUFTARG_NAME(btp));
 		return EINVAL;
 	}