
Commit da89bd21 authored by Linus Torvalds

Merge tag 'for-linus-v3.11-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs update from Ben Myers:
 "This includes several bugfixes, part of the work for project quotas
  and group quotas to be used together, performance improvements for
  inode creation/deletion, buffer readahead, and bulkstat,
  implementation of the inode change count, an inode create transaction,
  and the removal of a bunch of dead code.

  There are also some duplicate commits that you already have from the
  3.10-rc series.

   - part of the work to allow project quotas and group quotas to be
     used together
   - inode change count
   - inode create transaction
   - block queue plugging in buffer readahead and bulkstat (see the
     plugging sketch after the commit list)
   - ordered log vector support
   - removal of dead code in and around xfs_sync_inode_grab,
     xfs_ialloc_get_rec, XFS_MOUNT_RETERR, XFS_ALLOCFREE_LOG_RES,
     XFS_DIROP_LOG_RES, xfs_chash, ctl_table, and
     xfs_growfs_data_private
   - don't stay silent if sunit/swidth cannot be changed via mount
   - fix a leak of remote symlink blocks into the filesystem when xattrs
     are used on symlinks
   - fix for fiemap to return the FIEMAP_EXTENT_UNKNOWN flag on
     delayed-allocation extents
   - part of a fix for xfs_fsr
   - disable speculative preallocation with small files
   - performance improvements for inode creates and deletes"

* tag 'for-linus-v3.11-rc1' of git://oss.sgi.com/xfs/xfs: (61 commits)
  xfs: Remove incore use of XFS_OQUOTA_ENFD and XFS_OQUOTA_CHKD
  xfs: Change xfs_dquot_acct to be a 2-dimensional array
  xfs: Code cleanup and removal of some typedef usage
  xfs: Replace macro XFS_DQ_TO_QIP with a function
  xfs: Replace macro XFS_DQUOT_TREE with a function
  xfs: Define a new function xfs_is_quota_inode()
  xfs: implement inode change count
  xfs: Use inode create transaction
  xfs: Inode create item recovery
  xfs: Inode create transaction reservations
  xfs: Inode create log items
  xfs: Introduce an ordered buffer item
  xfs: Introduce ordered log vector support
  xfs: xfs_ifree doesn't need to modify the inode buffer
  xfs: don't do IO when creating a new inode
  xfs: don't use speculative prealloc for small files
  xfs: plug directory buffer readahead
  xfs: add plugging for bulkstat readahead
  xfs: Remove dead function prototype xfs_sync_inode_grab()
  xfs: Remove the left function variable from xfs_ialloc_get_rec()
  ...
parents be0c5d8c 83e782e1
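
The two plugging commits in the list above ("xfs: plug directory buffer
readahead" and "xfs: add plugging for bulkstat readahead") batch buffer
readahead submissions behind a block-layer plug so the block layer can
merge them before dispatch. A minimal sketch of that pattern, assuming
<linux/blkdev.h>; the function name and loop body are illustrative
placeholders, not code from this merge:

	static void xfs_example_readahead_batch(int count)
	{
		struct blk_plug	plug;
		int		i;

		blk_start_plug(&plug);	/* queue requests in a per-task plug */
		for (i = 0; i < count; i++) {
			/* issue one buffer readahead here, e.g. via
			 * xfs_buf_readahead(); the I/O accumulates in the
			 * plug instead of dispatching piecemeal */
		}
		blk_finish_plug(&plug);	/* submit the merged batch at once */
	}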
fs/xfs/Makefile +1 −0
@@ -71,6 +71,7 @@ xfs-y += xfs_alloc.o \
 				   xfs_dir2_sf.o \
 				   xfs_ialloc.o \
 				   xfs_ialloc_btree.o \
+				   xfs_icreate_item.o \
 				   xfs_inode.o \
 				   xfs_log_recover.o \
 				   xfs_mount.o \
fs/xfs/xfs_alloc.c +18 −6
@@ -175,6 +175,7 @@ xfs_alloc_compute_diff(
 	xfs_agblock_t	wantbno,	/* target starting block */
 	xfs_extlen_t	wantlen,	/* target length */
 	xfs_extlen_t	alignment,	/* target alignment */
+	char		userdata,	/* are we allocating data? */
 	xfs_agblock_t	freebno,	/* freespace's starting block */
 	xfs_extlen_t	freelen,	/* freespace's length */
 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
@@ -189,7 +190,14 @@ xfs_alloc_compute_diff(
 	ASSERT(freelen >= wantlen);
 	freeend = freebno + freelen;
 	wantend = wantbno + wantlen;
-	if (freebno >= wantbno) {
+	/*
+	 * We want to allocate from the start of a free extent if it is past
+	 * the desired block or if we are allocating user data and the free
+	 * extent is before desired block. The second case is there to allow
+	 * for contiguous allocation from the remaining free space if the file
+	 * grows in the short term.
+	 */
+	if (freebno >= wantbno || (userdata && freeend < wantend)) {
 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
 			newbno1 = NULLAGBLOCK;
 	} else if (freeend >= wantend && alignment > 1) {
@@ -805,7 +813,8 @@ xfs_alloc_find_best_extent(
 			xfs_alloc_fix_len(args);
 
 			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
-						       args->alignment, *sbnoa,
+						       args->alignment,
+						       args->userdata, *sbnoa,
 						       *slena, &new);
 
 			/*
@@ -976,7 +985,8 @@ xfs_alloc_ag_vextent_near(
 			if (args->len < blen)
 				continue;
 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
-				args->alignment, ltbnoa, ltlena, &ltnew);
+				args->alignment, args->userdata, ltbnoa,
+				ltlena, &ltnew);
 			if (ltnew != NULLAGBLOCK &&
 			    (args->len > blen || ltdiff < bdiff)) {
 				bdiff = ltdiff;
@@ -1128,7 +1138,8 @@ xfs_alloc_ag_vextent_near(
 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
 			xfs_alloc_fix_len(args);
 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
-				args->alignment, ltbnoa, ltlena, &ltnew);
+				args->alignment, args->userdata, ltbnoa,
+				ltlena, &ltnew);
 
 			error = xfs_alloc_find_best_extent(args,
 						&bno_cur_lt, &bno_cur_gt,
@@ -1144,7 +1155,8 @@ xfs_alloc_ag_vextent_near(
 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
 			xfs_alloc_fix_len(args);
 			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
-				args->alignment, gtbnoa, gtlena, &gtnew);
+				args->alignment, args->userdata, gtbnoa,
+				gtlena, &gtnew);
 
 			error = xfs_alloc_find_best_extent(args,
 						&bno_cur_gt, &bno_cur_lt,
@@ -1203,7 +1215,7 @@ xfs_alloc_ag_vextent_near(
 	}
 	rlen = args->len;
 	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
-				     ltbnoa, ltlena, &ltnew);
+				     args->userdata, ltbnoa, ltlena, &ltnew);
 	ASSERT(ltnew >= ltbno);
 	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
 	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
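
The new userdata case above is the behavioural heart of this hunk. A
standalone model of just that decision, in plain C (the helper name and
typedef are illustrative, not the kernel's):

	#include <stdbool.h>

	typedef unsigned int agblock_t;	/* stand-in for xfs_agblock_t */

	/*
	 * Allocate from the start of the free extent when it begins at or
	 * past the target block, or when user data is being placed and the
	 * free extent ends before the target range would, leaving the tail
	 * of the extent free for contiguous appends if the file grows.
	 */
	static bool alloc_from_extent_start(agblock_t wantbno, agblock_t wantlen,
					    agblock_t freebno, agblock_t freelen,
					    bool userdata)
	{
		agblock_t freeend = freebno + freelen;
		agblock_t wantend = wantbno + wantlen;

		return freebno >= wantbno || (userdata && freeend < wantend);
	}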
fs/xfs/xfs_bmap_btree.h +2 −0
@@ -196,6 +196,8 @@ typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
 #define XFS_BMDR_SPACE_CALC(nrecs) \
 	(int)(sizeof(xfs_bmdr_block_t) + \
 	       ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
+#define XFS_BMAP_BMDR_SPACE(bb) \
+	(XFS_BMDR_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs)))
 
 /*
  * Maximum number of bmap btree levels.
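
For scale: assuming the usual on-disk sizes (a 4-byte xfs_bmdr_block_t
header plus an 8-byte big-endian key and an 8-byte pointer per record),
XFS_BMDR_SPACE_CALC(nrecs) works out to 4 + 16 * nrecs bytes, so a root
with 9 records needs 4 + 144 = 148 bytes. The new XFS_BMAP_BMDR_SPACE(bb)
wrapper simply feeds the live record count from the block header
(bb_numrecs) into that calculation.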
fs/xfs/xfs_buf_item.c +58 −29
@@ -140,6 +140,16 @@ xfs_buf_item_size(

 	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
 
+	if (bip->bli_flags & XFS_BLI_ORDERED) {
+		/*
+		 * The buffer has been logged just to order it.
+		 * It is not being included in the transaction
+		 * commit, so no vectors are used at all.
+		 */
+		trace_xfs_buf_item_size_ordered(bip);
+		return XFS_LOG_VEC_ORDERED;
+	}
+
 	/*
 	 * the vector count is based on the number of buffer vectors we have
 	 * dirty bits in. This will only be greater than one when we have a
@@ -212,6 +222,7 @@ xfs_buf_item_format_segment(
 		goto out;
 	}
 
+
 	/*
 	 * Fill in an iovec for each set of contiguous chunks.
 	 */
@@ -299,18 +310,36 @@ xfs_buf_item_format(

 	/*
 	 * If it is an inode buffer, transfer the in-memory state to the
-	 * format flags and clear the in-memory state. We do not transfer
+	 * format flags and clear the in-memory state.
+	 *
+	 * For buffer based inode allocation, we do not transfer
 	 * this state if the inode buffer allocation has not yet been committed
 	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
 	 * correct replay of the inode allocation.
+	 *
+	 * For icreate item based inode allocation, the buffers aren't written
+	 * to the journal during allocation, and hence we should always tag the
+	 * buffer as an inode buffer so that the correct unlinked list replay
+	 * occurs during recovery.
 	 */
 	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
-		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
+		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
+		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
 		      xfs_log_item_in_current_chkpt(lip)))
 			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
 		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
 	}
 
+	if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
+							XFS_BLI_ORDERED) {
+		/*
+		 * The buffer has been logged just to order it.  It is not being
+		 * included in the transaction commit, so don't format it.
+		 */
+		trace_xfs_buf_item_format_ordered(bip);
+		return;
+	}
+
 	for (i = 0; i < bip->bli_format_count; i++) {
 		vecp = xfs_buf_item_format_segment(bip, vecp, offset,
 						&bip->bli_formats[i]);
@@ -340,6 +369,7 @@ xfs_buf_item_pin(

 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+	       (bip->bli_flags & XFS_BLI_ORDERED) ||
 	       (bip->bli_flags & XFS_BLI_STALE));
 
 	trace_xfs_buf_item_pin(bip);
@@ -512,8 +542,9 @@ xfs_buf_item_unlock(
 {
 	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
 	struct xfs_buf		*bp = bip->bli_buf;
-	int			aborted, clean, i;
-	uint			hold;
+	bool			clean;
+	bool			aborted;
+	int			flags;
 
 	/* Clear the buffer's association with this transaction. */
 	bp->b_transp = NULL;
@@ -524,23 +555,21 @@ xfs_buf_item_unlock(
 	 * (cancelled) buffers at unpin time, but we'll never go through the
 	 * pin/unpin cycle if we abort inside commit.
 	 */
-	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
-
+	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;
 	/*
-	 * Before possibly freeing the buf item, determine if we should
-	 * release the buffer at the end of this routine.
+	 * Before possibly freeing the buf item, copy the per-transaction state
+	 * so we can reference it safely later after clearing it from the
+	 * buffer log item.
 	 */
-	hold = bip->bli_flags & XFS_BLI_HOLD;
-
-	/* Clear the per transaction state. */
-	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);
+	flags = bip->bli_flags;
+	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
 
 	/*
 	 * If the buf item is marked stale, then don't do anything.  We'll
 	 * unlock the buffer and free the buf item when the buffer is unpinned
 	 * for the last time.
 	 */
-	if (bip->bli_flags & XFS_BLI_STALE) {
+	if (flags & XFS_BLI_STALE) {
 		trace_xfs_buf_item_unlock_stale(bip);
 		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 		if (!aborted) {
@@ -557,15 +586,21 @@ xfs_buf_item_unlock(
 	 * be the only reference to the buf item, so we free it anyway
 	 * regardless of whether it is dirty or not. A dirty abort implies a
 	 * shutdown, anyway.
+	 *
+	 * Ordered buffers are dirty but may have no recorded changes, so ensure
+	 * we only release clean items here.
 	 */
-	clean = 1;
+	clean = (flags & XFS_BLI_DIRTY) ? false : true;
+	if (clean) {
+		int i;
 		for (i = 0; i < bip->bli_format_count; i++) {
 			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
 				     bip->bli_formats[i].blf_map_size)) {
-			clean = 0;
+				clean = false;
 				break;
 			}
 		}
+	}
 	if (clean)
 		xfs_buf_item_relse(bp);
 	else if (aborted) {
@@ -576,7 +611,7 @@ xfs_buf_item_unlock(
 	} else
 		atomic_dec(&bip->bli_refcount);
 
-	if (!hold)
+	if (!(flags & XFS_BLI_HOLD))
 		xfs_buf_relse(bp);
 }

@@ -841,12 +876,6 @@ xfs_buf_item_log(
 	uint			end;
 	struct xfs_buf		*bp = bip->bli_buf;
 
-	/*
-	 * Mark the item as having some dirty data for
-	 * quick reference in xfs_buf_item_dirty.
-	 */
-	bip->bli_flags |= XFS_BLI_DIRTY;
-
 	/*
 	 * walk each buffer segment and mark them dirty appropriately.
 	 */
@@ -873,7 +902,7 @@ xfs_buf_item_log(


 /*
- * Return 1 if the buffer has some data that has been logged (at any
+ * Return 1 if the buffer has been logged or ordered in a transaction (at any
  * point, not just the current transaction) and 0 if not.
  */
 uint
@@ -907,11 +936,11 @@ void
 xfs_buf_item_relse(
 	xfs_buf_t	*bp)
 {
-	xfs_buf_log_item_t	*bip;
+	xfs_buf_log_item_t	*bip = bp->b_fspriv;
 
 	trace_xfs_buf_item_relse(bp, _RET_IP_);
+	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
 
-	bip = bp->b_fspriv;
 	bp->b_fspriv = bip->bli_item.li_bio_list;
 	if (bp->b_fspriv == NULL)
 		bp->b_iodone = NULL;
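
Taken together, the xfs_buf_item_size()/xfs_buf_item_format() changes
above give ordered buffers a consistent life cycle: they pin and unpin
like logged buffers but contribute no log vectors and are skipped at
format time. A condensed standalone model of that dispatch (flag values,
names, and the sentinel are illustrative, not the kernel's):

	#include <stdbool.h>

	#define BLI_STALE	0x04	/* illustrative flag values */
	#define BLI_ORDERED	0x80

	#define LOG_VEC_ORDERED	(-1)	/* sentinel: track in CIL, log no data */

	struct buf_item {
		unsigned int	flags;
		int		nvecs;	/* iovecs covering dirty regions */
	};

	/* size hook: an ordered item reserves ordering only, never iovecs */
	static int item_size(const struct buf_item *bip)
	{
		if (bip->flags & BLI_ORDERED)
			return LOG_VEC_ORDERED;
		return bip->nvecs;
	}

	/* format hook: ordered-but-not-stale items write nothing to the log */
	static bool item_needs_format(const struct buf_item *bip)
	{
		return (bip->flags & (BLI_ORDERED | BLI_STALE)) != BLI_ORDERED;
	}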
fs/xfs/xfs_buf_item.h +3 −1
@@ -120,6 +120,7 @@ xfs_blft_from_flags(struct xfs_buf_log_format *blf)
 #define	XFS_BLI_INODE_ALLOC_BUF	0x10
 #define XFS_BLI_STALE_INODE	0x20
 #define	XFS_BLI_INODE_BUF	0x40
+#define	XFS_BLI_ORDERED		0x80
 
 #define XFS_BLI_FLAGS \
 	{ XFS_BLI_HOLD,		"HOLD" }, \
@@ -128,7 +129,8 @@ xfs_blft_from_flags(struct xfs_buf_log_format *blf)
 	{ XFS_BLI_LOGGED,	"LOGGED" }, \
 	{ XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \
 	{ XFS_BLI_STALE_INODE,	"STALE_INODE" }, \
-	{ XFS_BLI_INODE_BUF,	"INODE_BUF" }
+	{ XFS_BLI_INODE_BUF,	"INODE_BUF" }, \
+	{ XFS_BLI_ORDERED,	"ORDERED" }
 
 
 #ifdef __KERNEL__
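
The name table above exists for symbolic tracepoint output; with the new
entry, trace events that decode bli_flags print "ORDERED" rather than a
raw 0x80. The usual consumer is the generic tracing helper, roughly
(placement illustrative):

	__print_flags(bip->bli_flags, "|", XFS_BLI_FLAGS)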