Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2291dab2 authored by Dave Chinner's avatar Dave Chinner Committed by Dave Chinner
Browse files

xfs: Always flush caches when integrity is required



There is no reason anymore for not issuing device integrity
operations when the filesystem requires ordering or data integrity
guarantees. We should always issue cache flushes and FUA writes
where necessary and let the underlying storage optimise them as
necessary for correct integrity operation.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 2e1d2337
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -1711,7 +1711,6 @@ xfs_free_buftarg(
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
	xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
+12 −17
Original line number Diff line number Diff line
@@ -149,19 +149,16 @@ xfs_file_fsync(

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
	/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache the device used for file data
		 * first.  This is to ensure newly written file data make
		 * it to disk before logging the new inode size in case of
		 * an extending write.
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have to
@@ -196,10 +193,8 @@ xfs_file_fsync(
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
+16 −23
Original line number Diff line number Diff line
@@ -1862,26 +1862,21 @@ xlog_sync(

	bp->b_io_length = BTOBB(count);
	bp->b_fspriv = iclog;
	bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
	bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);

	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
		bp->b_flags |= XBF_FUA;
	bp->b_flags &= ~XBF_FLUSH;
	bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);

	/*
		 * Flush the data device before flushing the log to make
		 * sure all meta data written back from the AIL actually made
		 * it to disk before stamping the new log tail LSN into the
		 * log buffer.  For an external log we need to issue the
		 * flush explicitly, and unfortunately synchronously here;
		 * for an internal log we can simply use the block layer
		 * state machine for preflushes.
	 * Flush the data device before flushing the log to make sure all meta
	 * data written back from the AIL actually made it to disk before
	 * stamping the new log tail LSN into the log buffer.  For an external
	 * log we need to issue the flush explicitly, and unfortunately
	 * synchronously here; for an internal log we can simply use the block
	 * layer state machine for preflushes.
	 */
	if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
		xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
	else
		bp->b_flags |= XBF_FLUSH;
	}

	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -1906,10 +1901,8 @@ xlog_sync(
		xfs_buf_associate_memory(bp,
				(char *)&iclog->ic_header + count, split);
		bp->b_fspriv = iclog;
		bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
		bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
			bp->b_flags |= XBF_FUA;
		bp->b_flags &= ~XBF_FLUSH;
		bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);

		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);