fs/xfs/xfs_aops.c (+16 −7)

@@ -172,6 +172,12 @@ xfs_setfilesize_ioend(
 	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
 	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
 
+	/* we abort the update if there was an IO error */
+	if (ioend->io_error) {
+		xfs_trans_cancel(tp);
+		return ioend->io_error;
+	}
+
 	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 }
 
@@ -212,14 +218,17 @@ xfs_end_io(
 		ioend->io_error = -EIO;
 		goto done;
 	}
-	if (ioend->io_error)
-		goto done;
 
 	/*
 	 * For unwritten extents we need to issue transactions to convert a
 	 * range to normal written extens after the data I/O has finished.
+	 * Detecting and handling completion IO errors is done individually
+	 * for each case as different cleanup operations need to be performed
+	 * on error.
 	 */
 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
+		if (ioend->io_error)
+			goto done;
 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
 						  ioend->io_size);
 	} else if (ioend->io_append_trans) {
@@ -1399,12 +1408,12 @@ __xfs_get_blocks(
 		     imap.br_startblock == DELAYSTARTBLOCK))) {
 		if (direct || xfs_get_extsz_hint(ip)) {
 			/*
-			 * Drop the ilock in preparation for starting the block
-			 * allocation transaction.  It will be retaken
-			 * exclusively inside xfs_iomap_write_direct for the
-			 * actual allocation.
+			 * xfs_iomap_write_direct() expects the shared lock. It
+			 * is unlocked on return.
			 */
-			xfs_iunlock(ip, lockmode);
+			if (lockmode == XFS_ILOCK_EXCL)
+				xfs_ilock_demote(ip, lockmode);
+
 			error = xfs_iomap_write_direct(ip, offset, size,
 						       &imap, nimaps);
 			if (error)
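The two completion hunks above move IO-error detection out of the common xfs_end_io() path and into each completion case, so a failed ioend cancels its pending size-update transaction rather than committing a bogus on-disk size. Below is a minimal user-space sketch of that pattern, not the kernel code: the struct ioend fields mirror the kernel names, but trans_cancel(), setfilesize() and on_disk_size are hypothetical stand-ins.

#include <stdio.h>

struct ioend {
	int	io_error;	/* 0 or negative errno, as in the kernel */
	long	io_offset;
	long	io_size;
};

static long on_disk_size;	/* hypothetical stand-in for di_size */

static void trans_cancel(void)	/* stand-in for xfs_trans_cancel() */
{
	puts("size-update transaction cancelled");
}

static int setfilesize(long offset, long size)	/* stand-in for xfs_setfilesize() */
{
	if (offset + size > on_disk_size)
		on_disk_size = offset + size;
	printf("on-disk size now %ld\n", on_disk_size);
	return 0;
}

/* mirrors the new xfs_setfilesize_ioend() flow */
static int setfilesize_ioend(struct ioend *ioend)
{
	/* abort the update if there was an IO error */
	if (ioend->io_error) {
		trans_cancel();
		return ioend->io_error;
	}
	return setfilesize(ioend->io_offset, ioend->io_size);
}

int main(void)
{
	struct ioend ok   = { 0, 0, 4096 };
	struct ioend fail = { -5 /* EIO */, 4096, 4096 };

	setfilesize_ioend(&ok);		/* size advances to 4096 */
	setfilesize_ioend(&fail);	/* cancelled; size stays 4096 */
	return 0;
}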
fs/xfs/xfs_file.c (+11 −6)

@@ -482,6 +482,8 @@ xfs_zero_eof(
 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 	ASSERT(offset > isize);
 
+	trace_xfs_zero_eof(ip, isize, offset - isize);
+
 	/*
 	 * First handle zeroing the block on which isize resides.
 	 *
@@ -574,6 +576,7 @@ xfs_file_aio_write_checks(
 	struct xfs_inode	*ip = XFS_I(inode);
 	ssize_t			error = 0;
 	size_t			count = iov_iter_count(from);
+	bool			drained_dio = false;
 
 restart:
 	error = generic_write_checks(iocb, from);
@@ -611,12 +614,13 @@ xfs_file_aio_write_checks(
 		bool	zero = false;
 
 		spin_unlock(&ip->i_flags_lock);
-		if (*iolock == XFS_IOLOCK_SHARED) {
-			xfs_rw_iunlock(ip, *iolock);
-			*iolock = XFS_IOLOCK_EXCL;
-			xfs_rw_ilock(ip, *iolock);
-			iov_iter_reexpand(from, count);
-
+		if (!drained_dio) {
+			if (*iolock == XFS_IOLOCK_SHARED) {
+				xfs_rw_iunlock(ip, *iolock);
+				*iolock = XFS_IOLOCK_EXCL;
+				xfs_rw_ilock(ip, *iolock);
+				iov_iter_reexpand(from, count);
+			}
 			/*
 			 * We now have an IO submission barrier in place, but
 			 * AIO can do EOF updates during IO completion and hence
@@ -626,6 +630,7 @@ xfs_file_aio_write_checks(
 			 * no-op.
 			 */
 			inode_dio_wait(inode);
+			drained_dio = true;
 			goto restart;
 		}
 		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
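The new drained_dio flag bounds the restart loop in xfs_file_aio_write_checks(): the first trip through the beyond-EOF branch upgrades the iolock and drains in-flight direct IO exactly once, and any later trip falls through to the zeroing call even though the write still extends EOF, so concurrent AIO completions can no longer keep the checks cycling the lock. A rough user-space sketch of just that control flow, with every name below invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static void upgrade_iolock(void)	/* stand-in for the shared->excl switch */
{
	puts("iolock upgraded to exclusive (IO submission barrier)");
}

static int write_checks(void)
{
	bool	drained_dio = false;
	int	pass = 0;

restart:
	pass++;
	/* assume every pass finds the write starting beyond the in-core EOF */
	if (!drained_dio) {
		upgrade_iolock();
		puts("inode_dio_wait()");	/* EOF may move while we wait */
		drained_dio = true;
		goto restart;			/* recheck against the new EOF */
	}
	/* second pass: dio is drained, EOF is stable, zero up to the write */
	printf("zeroing EOF range on pass %d\n", pass);
	return 0;
}

int main(void)
{
	return write_checks();
}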
fs/xfs/xfs_iomap.c (+26 −7)

@@ -131,20 +131,29 @@ xfs_iomap_write_direct(
 	uint		qblocks, resblks, resrtextents;
 	int		committed;
 	int		error;
-
-	error = xfs_qm_dqattach(ip, 0);
-	if (error)
-		return error;
+	int		lockmode;
 
 	rt = XFS_IS_REALTIME_INODE(ip);
 	extsz = xfs_get_extsz_hint(ip);
+	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */
+
+	ASSERT(xfs_isilocked(ip, lockmode));
 
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
 	if ((offset + count) > XFS_ISIZE(ip)) {
+		/*
+		 * Assert that the in-core extent list is present since this can
+		 * call xfs_iread_extents() and we only have the ilock shared.
+		 * This should be safe because the lock was held around a bmapi
+		 * call in the caller and we only need it to access the in-core
+		 * list.
+		 */
+		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
+								XFS_IFEXTENTS);
 		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
 		if (error)
-			return error;
+			goto out_unlock;
 	} else {
 		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
 			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
@@ -173,6 +182,15 @@ xfs_iomap_write_direct(
 		quota_flag = XFS_QMOPT_RES_REGBLKS;
 	}
 
+	/*
+	 * Drop the shared lock acquired by the caller, attach the dquot if
+	 * necessary and move on to transaction setup.
+	 */
+	xfs_iunlock(ip, lockmode);
+	error = xfs_qm_dqattach(ip, 0);
+	if (error)
+		return error;
+
 	/*
 	 * Allocate and setup the transaction
 	 */
@@ -187,7 +205,8 @@ xfs_iomap_write_direct(
 		return error;
 	}
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	lockmode = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lockmode);
 
 	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
 	if (error)
@@ -229,7 +248,7 @@ xfs_iomap_write_direct(
 		error = xfs_alert_fsblock_zero(ip, imap);
 
 out_unlock:
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	xfs_iunlock(ip, lockmode);
 	return error;
 
 out_bmap_cancel:
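The net effect in xfs_iomap_write_direct() is a lock handoff: the function is now entered with the ilock held shared by the caller, takes ownership of it, drops it across the blocking dqattach and transaction reservation, retakes it exclusively for the allocation, and releases whatever mode lockmode records on every exit path. A compressed sketch of that convention using a POSIX rwlock in place of the ilock; names are illustrative, and unlike the ilock a pthread rwlock needs no mode argument to unlock (nothing here depends on demotion):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ilock = PTHREAD_RWLOCK_INITIALIZER;

/* entered with the read (shared) lock held; always unlocked on return */
static int write_direct(void)
{
	/* shared phase: only the in-core extent list is consulted */
	puts("aligning last_fsb under the shared lock");

	/* drop the caller's lock before anything that can block */
	pthread_rwlock_unlock(&ilock);
	puts("dqattach + transaction reservation, unlocked");

	/* exclusive phase: the actual allocation */
	pthread_rwlock_wrlock(&ilock);
	puts("bmapi allocation under the exclusive lock");

	/* out_unlock: released no matter which phase ran last */
	pthread_rwlock_unlock(&ilock);
	return 0;
}

int main(void)
{
	pthread_rwlock_rdlock(&ilock);	/* caller takes the shared lock ... */
	return write_direct();		/* ... and hands ownership to the callee */
}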
fs/xfs/xfs_pnfs.c (+5 −0)

@@ -181,6 +181,11 @@ xfs_fs_map_blocks(
 	ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
 
 	if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
+		/*
+		 * xfs_iomap_write_direct() expects to take ownership of
+		 * the shared ilock.
+		 */
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
 		error = xfs_iomap_write_direct(ip, offset, length,
 					       &imap, nimaps);
 		if (error)
fs/xfs/xfs_trace.h (+1 −0)

@@ -1312,6 +1312,7 @@ DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
 DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
+DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
 
 DECLARE_EVENT_CLASS(xfs_itrunc_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),