
Commit ffb8fb54 authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: fix attr2 vs large data fork assert
  xfs: force buffer writeback before blocking on the ilock in inode reclaim
  xfs: validate acl count
parents 7ed89aed 4c393a60
+2 −0
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
 	int count, i;
 
 	count = be32_to_cpu(aclp->acl_cnt);
+	if (count > XFS_ACL_MAX_ENTRIES)
+		return ERR_PTR(-EFSCORRUPTED);
 
 	acl = posix_acl_alloc(count, GFP_KERNEL);
 	if (!acl)
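
For context on the acl count fix above: the pattern is to bound-check a count read from on-disk metadata against a fixed maximum before using it to size an allocation, and to treat an out-of-range value as corruption rather than trusting it. Below is a minimal user-space sketch of that pattern; the names (DEMO_MAX_ENTRIES, demo_entry, demo_entries_from_disk) are hypothetical stand-ins, not the XFS APIs.

#include <stdint.h>
#include <stdlib.h>
#include <arpa/inet.h>		/* ntohl(), standing in for be32_to_cpu() */

#define DEMO_MAX_ENTRIES 25	/* hypothetical cap, analogous to XFS_ACL_MAX_ENTRIES */

struct demo_entry {
	uint32_t tag;
	uint32_t id;
	uint32_t perm;
};

/*
 * Convert an on-disk, big-endian entry count into an allocated array,
 * rejecting implausible counts instead of passing them to the allocator.
 */
static struct demo_entry *demo_entries_from_disk(uint32_t disk_count_be,
						 uint32_t *count_out)
{
	uint32_t count = ntohl(disk_count_be);

	if (count > DEMO_MAX_ENTRIES)
		return NULL;	/* corrupt metadata: refuse to allocate */

	*count_out = count;
	return calloc(count, sizeof(struct demo_entry));
}
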
+39 −25
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 /*
  * Query whether the requested number of additional bytes of extended
  * attribute space will be able to fit inline.
+ *
  * Returns zero if not, else the di_forkoff fork offset to be used in the
  * literal area for attribute data once the new bytes have been added.
  *
@@ -136,11 +137,26 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 		return (offset >= minforkoff) ? minforkoff : 0;
 	}
 
-	if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
-		if (bytes <= XFS_IFORK_ASIZE(dp))
-			return dp->i_d.di_forkoff;
+	/*
+	 * If the requested numbers of bytes is smaller or equal to the
+	 * current attribute fork size we can always proceed.
+	 *
+	 * Note that if_bytes in the data fork might actually be larger than
+	 * the current data fork size is due to delalloc extents. In that
+	 * case either the extent count will go down when they are converted
+	 * to real extents, or the delalloc conversion will take care of the
+	 * literal area rebalancing.
+	 */
+	if (bytes <= XFS_IFORK_ASIZE(dp))
+		return dp->i_d.di_forkoff;
+
+	/*
+	 * For attr2 we can try to move the forkoff if there is space in the
+	 * literal area, but for the old format we are done if there is no
+	 * space in the fixed attribute fork.
+	 */
+	if (!(mp->m_flags & XFS_MOUNT_ATTR2))
 		return 0;
-	}
 
 	dsize = dp->i_df.if_bytes;
 
@@ -157,10 +173,9 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 		    xfs_default_attroffset(dp))
 			dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
 		break;
-
 	case XFS_DINODE_FMT_BTREE:
 		/*
-		 * If have data btree then keep forkoff if we have one,
+		 * If we have a data btree then keep forkoff if we have one,
 		 * otherwise we are adding a new attr, so then we set
 		 * minforkoff to where the btree root can finish so we have
 		 * plenty of room for attrs
@@ -168,9 +183,8 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 		if (dp->i_d.di_forkoff) {
 			if (offset < dp->i_d.di_forkoff)
 				return 0;
-			else
-				return dp->i_d.di_forkoff;
-		} else
-			dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+			return dp->i_d.di_forkoff;
+		}
+		dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
 		break;
 	}
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 	maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
 	maxforkoff = maxforkoff >> 3;	/* rounded down */
 
-	if (offset >= minforkoff && offset < maxforkoff)
-		return offset;
 	if (offset >= maxforkoff)
 		return maxforkoff;
+	if (offset >= minforkoff)
+		return offset;
 	return 0;
 }
 
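To summarize the restructured check above: after this change the fit test decides in a fixed order before doing any forkoff arithmetic, namely (1) a request that fits the existing attribute fork always succeeds, (2) without attr2 there is nothing more to try, and (3) only attr2 goes on to compute a new fork offset within the literal area. The sketch below is a hypothetical restatement of that order, not the actual XFS function or types.

/* Hypothetical, simplified restatement of the decision order; not XFS code. */
enum fit_result {
	FIT_USE_CURRENT_FORKOFF,	/* request fits the existing attr fork */
	FIT_NO_ROOM,			/* attr1 format and it does not fit */
	FIT_TRY_MOVE_FORKOFF		/* attr2: compute min/max forkoff next */
};

static enum fit_result demo_bytesfit(int bytes, int cur_attr_fork_size,
				     int attr2_enabled)
{
	/* Requests no larger than the current attribute fork always succeed. */
	if (bytes <= cur_attr_fork_size)
		return FIT_USE_CURRENT_FORKOFF;

	/* The old attr1 format cannot move the fork offset: give up here. */
	if (!attr2_enabled)
		return FIT_NO_ROOM;

	/* attr2 may still move di_forkoff within the inode literal area. */
	return FIT_TRY_MOVE_FORKOFF;
}
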
+21 −0
@@ -2835,6 +2835,27 @@ xfs_iflush_int(
 	return XFS_ERROR(EFSCORRUPTED);
 }
 
+void
+xfs_promote_inode(
+	struct xfs_inode	*ip)
+{
+	struct xfs_buf		*bp;
+
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+	bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+			ip->i_imap.im_len, XBF_TRYLOCK);
+	if (!bp)
+		return;
+
+	if (XFS_BUF_ISDELAYWRITE(bp)) {
+		xfs_buf_delwri_promote(bp);
+		wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+	}
+
+	xfs_buf_relse(bp);
+}
+
 /*
  * Return a pointer to the extent record at file index idx.
  */
+1 −0
@@ -498,6 +498,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void		xfs_iext_realloc(xfs_inode_t *, int, int);
 void		xfs_iunpin_wait(xfs_inode_t *);
 int		xfs_iflush(xfs_inode_t *, uint);
+void		xfs_promote_inode(struct xfs_inode *);
 void		xfs_lock_inodes(xfs_inode_t **, int, uint);
 void		xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
+11 −0
@@ -770,6 +770,17 @@ xfs_reclaim_inode(
 	if (!xfs_iflock_nowait(ip)) {
 		if (!(sync_mode & SYNC_WAIT))
 			goto out;
+
+		/*
+		 * If we only have a single dirty inode in a cluster there is
+		 * a fair chance that the AIL push may have pushed it into
+		 * the buffer, but xfsbufd won't touch it until 30 seconds
+		 * from now, and thus we will lock up here.
+		 *
+		 * Promote the inode buffer to the front of the delwri list
+		 * and wake up xfsbufd now.
+		 */
+		xfs_promote_inode(ip);
 		xfs_iflock(ip);
 	}
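
The xfs_sync.c hunk above has a simple shape: take the flush lock without blocking if possible, and only when a synchronous reclaim has to wait, first kick background writeback of the backing buffer (the xfs_promote_inode() call plus the xfsbufd wakeup added earlier) so the subsequent blocking wait is short instead of up to 30 seconds. The following is a rough user-space analogue of that try-then-kick-then-block shape, with hypothetical names and pthreads standing in for the kernel primitives.

#include <pthread.h>

struct demo_item {
	pthread_mutex_t	flush_lock;	/* analogue of the inode flush lock */
	pthread_cond_t	*writer_wakeup;	/* analogue of waking up xfsbufd */
	pthread_mutex_t	*writer_lock;	/* protects the writer's condition */
};

/* blocking == 0: back off if busy; blocking == 1: kick the writer, then wait. */
static int demo_reclaim(struct demo_item *it, int blocking)
{
	if (pthread_mutex_trylock(&it->flush_lock) != 0) {
		if (!blocking)
			return -1;	/* try this item again later */

		/*
		 * Ask the background writer to service this item now rather
		 * than on its periodic timer, then block until it is free.
		 */
		pthread_mutex_lock(it->writer_lock);
		pthread_cond_signal(it->writer_wakeup);
		pthread_mutex_unlock(it->writer_lock);

		pthread_mutex_lock(&it->flush_lock);
	}

	/* ... flush and free the item here ... */

	pthread_mutex_unlock(&it->flush_lock);
	return 0;
}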