fs/xfs/libxfs/xfs_format.h +6 −2

@@ -1495,9 +1495,13 @@ struct xfs_acl {
 						sizeof(struct xfs_acl_entry) \
 		: 25)
 
-#define XFS_ACL_MAX_SIZE(mp) \
+#define XFS_ACL_SIZE(cnt) \
 	(sizeof(struct xfs_acl) + \
-	 sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
+	 sizeof(struct xfs_acl_entry) * cnt)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+	XFS_ACL_SIZE(XFS_ACL_MAX_ENTRIES((mp)))
+
 
 /* On-disk XFS extended attribute names */
 #define SGI_ACL_FILE	"SGI_ACL_FILE"
fs/xfs/xfs_acl.c +8 −5

@@ -37,16 +37,19 @@
 
 STATIC struct posix_acl *
 xfs_acl_from_disk(
-	struct xfs_acl	*aclp,
-	int		max_entries)
+	const struct xfs_acl	*aclp,
+	int			len,
+	int			max_entries)
 {
 	struct posix_acl_entry *acl_e;
 	struct posix_acl *acl;
-	struct xfs_acl_entry *ace;
+	const struct xfs_acl_entry *ace;
 	unsigned int count, i;
 
+	if (len < sizeof(*aclp))
+		return ERR_PTR(-EFSCORRUPTED);
 	count = be32_to_cpu(aclp->acl_cnt);
-	if (count > max_entries)
+	if (count > max_entries || XFS_ACL_SIZE(count) != len)
 		return ERR_PTR(-EFSCORRUPTED);
 
 	acl = posix_acl_alloc(count, GFP_KERNEL);

@@ -163,7 +166,7 @@ xfs_get_acl(struct inode *inode, int type)
 		goto out;
 	}
 
-	acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
+	acl = xfs_acl_from_disk(xfs_acl, len, XFS_ACL_MAX_ENTRIES(ip->i_mount));
 	if (IS_ERR(acl))
 		goto out;
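Taken together with the new XFS_ACL_SIZE() macro, the checks in xfs_acl_from_disk() reject any attribute buffer that is too short to hold the acl_cnt header, or whose stored count does not account for exactly len bytes, returning -EFSCORRUPTED instead of walking past the end of the buffer. The sketch below shows the same validation in isolation; it is not the kernel code, and the 4-byte header and 12-byte entry sizes are assumptions about the usual on-disk layout rather than values taken from this diff.

/*
 * Standalone sketch of the validation added above, for illustration only.
 * ACL_HDR_SIZE and ACL_ENTRY_SIZE stand in for sizeof(struct xfs_acl) and
 * sizeof(struct xfs_acl_entry); the values are assumed, not taken from
 * the patch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ACL_HDR_SIZE	((size_t)4)	/* big-endian acl_cnt header */
#define ACL_ENTRY_SIZE	((size_t)12)	/* one on-disk ACL entry */
#define ACL_SIZE(cnt)	(ACL_HDR_SIZE + ACL_ENTRY_SIZE * (cnt))

static bool acl_buffer_valid(const unsigned char *buf, size_t len,
			     uint32_t max_entries)
{
	uint32_t count;

	if (len < ACL_HDR_SIZE)		/* too short to even hold acl_cnt */
		return false;

	/* acl_cnt is stored big-endian on disk */
	count = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
		((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];

	/* the count must be in range and must match the length exactly */
	return count <= max_entries && ACL_SIZE(count) == len;
}

int main(void)
{
	/* 4-byte header plus two 12-byte entries = 28 bytes, acl_cnt = 2 */
	unsigned char buf[28] = { 0, 0, 0, 2 };

	printf("%d\n", acl_buffer_valid(buf, sizeof(buf), 25));	/* 1 */
	printf("%d\n", acl_buffer_valid(buf, sizeof(buf) - 1, 25));	/* 0 */
	return 0;
}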
fs/xfs/xfs_acl.h +3 −0

@@ -36,4 +36,7 @@ static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
 # define posix_acl_access_exists(inode)		0
 # define posix_acl_default_exists(inode)	0
 #endif /* CONFIG_XFS_POSIX_ACL */
+
+extern void xfs_forget_acl(struct inode *inode, const char *name, int xflags);
+
 #endif	/* __XFS_ACL_H__ */
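This hunk only adds the prototype; the body of xfs_forget_acl() is not part of the diff. Assuming the helper exists to drop the VFS-cached ACL when the SGI_ACL_FILE or SGI_ACL_DEFAULT attributes are written through the raw attr interfaces (the ATTR_ROOT namespace), a plausible shape for it is sketched below; this is an illustration, not the actual implementation.

/*
 * Sketch only: the real body is not in this diff.  The idea is that a
 * direct write to the ACL xattrs bypasses the POSIX ACL code paths, so
 * any ACL the VFS has cached for the inode must be invalidated.
 */
void
xfs_forget_acl(
	struct inode	*inode,
	const char	*name,
	int		xflags)
{
	/* only the ATTR_ROOT namespace carries the on-disk ACL attributes */
	if (!(xflags & ATTR_ROOT))
		return;

#ifdef CONFIG_XFS_POSIX_ACL
	if (!strcmp(name, SGI_ACL_FILE))
		forget_cached_acl(inode, ACL_TYPE_ACCESS);
	else if (!strcmp(name, SGI_ACL_DEFAULT))
		forget_cached_acl(inode, ACL_TYPE_DEFAULT);
#endif
}

Callers in the attr set and remove paths would invoke this after a successful update, so that the next xfs_get_acl() rereads the attribute instead of serving a stale cached posix_acl.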
fs/xfs/xfs_file.c +16 −5

@@ -242,19 +242,30 @@ xfs_file_fsync(
 	}
 
 	/*
-	 * All metadata updates are logged, which means that we just have
-	 * to flush the log up to the latest LSN that touched the inode.
+	 * All metadata updates are logged, which means that we just have to
+	 * flush the log up to the latest LSN that touched the inode. If we have
+	 * concurrent fsync/fdatasync() calls, we need them to all block on the
+	 * log force before we clear the ili_fsync_fields field. This ensures
+	 * that we don't get a racing sync operation that does not wait for the
+	 * metadata to hit the journal before returning. If we race with
+	 * clearing the ili_fsync_fields, then all that will happen is the log
+	 * force will do nothing as the lsn will already be on disk. We can't
+	 * race with setting ili_fsync_fields because that is done under
+	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
+	 * until after the ili_fsync_fields is cleared.
 	 */
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 	if (xfs_ipincount(ip)) {
 		if (!datasync ||
-		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
+		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
 			lsn = ip->i_itemp->ili_last_lsn;
 	}
-	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
-	if (lsn)
+	if (lsn) {
 		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
+		ip->i_itemp->ili_fsync_fields = 0;
+	}
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 	/*
 	 * If we only have a single device, and the log force about was
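The practical effect of testing ili_fsync_fields instead of ili_fields is that fdatasync() stops forcing the log for timestamp-only updates: ili_fields stays set until the inode is flushed, while ili_fsync_fields is cleared as soon as a sync has pushed the inode's changes to the journal (and, per the hunk above, only before the shared ILOCK is dropped). The standalone snippet below illustrates just the mask test; the flag values are invented for the example and only the bit logic mirrors the hunk.

/*
 * Illustration of the datasync decision above.  The flag values are
 * invented for the example; only the mask arithmetic matters.
 */
#include <stdbool.h>
#include <stdio.h>

#define ILOG_TIMESTAMP	0x1	/* only the inode timestamps were updated */
#define ILOG_CORE	0x2	/* core inode metadata was logged */

static bool datasync_needs_log_force(unsigned int fsync_fields)
{
	/* fdatasync may ignore changes that only touched the timestamps */
	return (fsync_fields & ~ILOG_TIMESTAMP) != 0;
}

int main(void)
{
	/* timestamp-only update: no log force needed for fdatasync */
	printf("%d\n", datasync_needs_log_force(ILOG_TIMESTAMP));

	/* the same update plus logged core metadata: the force is required */
	printf("%d\n", datasync_needs_log_force(ILOG_TIMESTAMP | ILOG_CORE));
	return 0;
}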
fs/xfs/xfs_inode.c +2 −0

@@ -2365,6 +2365,7 @@ xfs_ifree_cluster(
 
 			iip->ili_last_fields = iip->ili_fields;
 			iip->ili_fields = 0;
+			iip->ili_fsync_fields = 0;
 			iip->ili_logged = 1;
 			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
 						&iip->ili_item.li_lsn);

@@ -3560,6 +3561,7 @@ xfs_iflush_int(
 	 */
 	iip->ili_last_fields = iip->ili_fields;
 	iip->ili_fields = 0;
+	iip->ili_fsync_fields = 0;
 	iip->ili_logged = 1;
 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
 				&iip->ili_item.li_lsn);
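Both hunks apply the same rule: wherever inode writeback transfers ili_fields into ili_last_fields and zeroes it, the new ili_fsync_fields mask is zeroed as well, since a flushed (or freed) inode has nothing left that a later fsync would need to force. The toy model below sketches that lifecycle; the setter side is not part of this diff, so the assumption that the fsync mask accumulates alongside ili_fields when the inode is logged is the example's, not the patch's.

/*
 * Toy model of the ili_fields / ili_fsync_fields lifecycle implied by the
 * hunks above.  The field names mirror the kernel ones, but the functions
 * are illustrative; in particular, the setter below is an assumption.
 */
#include <stdio.h>

struct toy_inode_log_item {
	unsigned int	fields;		/* dirty since the last flush */
	unsigned int	last_fields;	/* being written back right now */
	unsigned int	fsync_fields;	/* dirty since the last fsync or flush */
};

/* transaction commit (assumed): both masks accumulate the logged flags */
static void toy_log_inode(struct toy_inode_log_item *iip, unsigned int flags)
{
	iip->fields |= flags;
	iip->fsync_fields |= flags;
}

/* inode writeback, mirroring the xfs_iflush_int / xfs_ifree_cluster hunks */
static void toy_flush_inode(struct toy_inode_log_item *iip)
{
	iip->last_fields = iip->fields;
	iip->fields = 0;
	iip->fsync_fields = 0;	/* nothing left for a later fsync to force */
}

int main(void)
{
	struct toy_inode_log_item iip = { 0, 0, 0 };

	toy_log_inode(&iip, 0x1);
	toy_flush_inode(&iip);
	printf("fields=%#x fsync_fields=%#x last_fields=%#x\n",
	       iip.fields, iip.fsync_fields, iip.last_fields);
	return 0;
}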