
Commit 01a155e6 authored by Dave Chinner, committed by Dave Chinner

xfs: DAX does not use IO completion callbacks



For DAX, we are now doing block zeroing during allocation. This
means we no longer need a special DAX fault IO completion callback
to do unwritten extent conversion. Because mmap never extends the
file size (it SEGVs the process), we don't need a callback to update
the file size, either. Hence we can remove the completion callbacks
from the __dax_fault and __dax_mkwrite calls.
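For reference, the DAX entry points named here took a completion callback at the
time. The declarations below are a sketch of the v4.3-era API from
include/linux/dax.h, reconstructed from memory rather than quoted from this
tree, so treat the exact signatures as approximate; the dax_iodone_t parameter
is the hook this patch now passes as NULL:

	/* Sketch of the v4.3-era DAX fault API; verify against the tree.
	 * __dax_mkwrite may be a thin wrapper/macro over __dax_fault. */
	typedef void (*dax_iodone_t)(struct buffer_head *bh_map, int uptodate);

	int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten);
	int __dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
			  get_block_t get_block, dax_iodone_t complete_unwritten);

Note that xfs_end_io_dax_write()'s (struct buffer_head *, int) shape matches
dax_iodone_t, which is why it can simply be replaced by a NULL argument once
block zeroing happens at allocation time.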

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 1ca19157
fs/xfs/xfs_aops.c +0 −39
@@ -1657,45 +1657,6 @@ xfs_end_io_direct_write(
 	__xfs_end_io_direct_write(inode, ioend, offset, size);
 }
 
-/*
- * For DAX we need a mapping buffer callback for unwritten extent conversion
- * when page faults allocate blocks and then zero them. Note that in this
- * case the mapping indicated by the ioend may extend beyond EOF. We most
- * definitely do not want to extend EOF here, so we trim back the ioend size to
- * EOF.
- */
-#ifdef CONFIG_FS_DAX
-void
-xfs_end_io_dax_write(
-	struct buffer_head	*bh,
-	int			uptodate)
-{
-	struct xfs_ioend	*ioend = bh->b_private;
-	struct inode		*inode = ioend->io_inode;
-	ssize_t			size = ioend->io_size;
-
-	ASSERT(IS_DAX(ioend->io_inode));
-
-	/* if there was an error zeroing, then don't convert it */
-	if (!uptodate)
-		ioend->io_error = -EIO;
-
-	/*
-	 * Trim update to EOF, so we don't extend EOF during unwritten extent
-	 * conversion of partial EOF blocks.
-	 */
-	spin_lock(&XFS_I(inode)->i_flags_lock);
-	if (ioend->io_offset + size > i_size_read(inode))
-		size = i_size_read(inode) - ioend->io_offset;
-	spin_unlock(&XFS_I(inode)->i_flags_lock);
-
-	__xfs_end_io_direct_write(inode, ioend, ioend->io_offset, size);
-
-}
-#else
-void xfs_end_io_dax_write(struct buffer_head *bh, int uptodate) { }
-#endif
-
 static inline ssize_t
 xfs_vm_do_dio(
 	struct inode		*inode,
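The EOF trim in the function removed above is the subtle part: a fault-time
zeroing ioend can map a block that straddles EOF, and handing the untrimmed
range to __xfs_end_io_direct_write() could push the recorded file size past the
real EOF. A minimal standalone restatement of that clamp, with trim_to_eof as a
hypothetical name used only for illustration (the real code did this under
i_flags_lock, as shown above):

	#include <sys/types.h>

	typedef long long loff_t;	/* stand-in for the kernel's loff_t */

	/* trim_to_eof() is a hypothetical helper restating the clamp the
	 * removed xfs_end_io_dax_write() applied before unwritten extent
	 * conversion: never let the completed range reach past i_size. */
	static ssize_t trim_to_eof(loff_t offset, ssize_t size, loff_t isize)
	{
		if (offset + size > isize)
			size = isize - offset;
		return size;
	}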
fs/xfs/xfs_aops.h +0 −1
@@ -60,7 +60,6 @@ int xfs_get_blocks_direct(struct inode *inode, sector_t offset,
			      struct buffer_head *map_bh, int create);
int	xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
			         struct buffer_head *map_bh, int create);
void	xfs_end_io_dax_write(struct buffer_head *bh, int uptodate);

extern void xfs_count_page_state(struct page *, int *, int *);

fs/xfs/xfs_file.c +2 −3
@@ -1503,8 +1503,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault,
-				    xfs_end_io_dax_write);
+		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
 	} else {
 		ret = __block_page_mkwrite(vma, vmf, xfs_get_blocks);
 		ret = block_page_mkwrite_return(ret);
@@ -1566,7 +1565,7 @@ xfs_filemap_pmd_fault(
 	file_update_time(vma->vm_file);
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
-				    xfs_end_io_dax_write);
+			      NULL);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	sb_end_pagefault(inode->i_sb);
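The fault-handler changes above rest on the commit message's point that mmap
cannot grow a file: touching a page of a shared file mapping that lies wholly
beyond EOF kills the process (SIGBUS on Linux, loosely the "SEGVs" of the
commit message), so the DAX fault path never needs a size-updating completion.
A userspace sketch of that behaviour, assuming a POSIX system and a writable
current directory:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);
		if (fd < 0 || ftruncate(fd, 100) < 0) {	/* EOF at 100 bytes */
			perror("setup");
			return 1;
		}

		/* Map two pages even though the file covers part of one. */
		char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		p[0] = 'x';	/* inside EOF: the fault allocates/zeroes a block */
		p[4096] = 'x';	/* wholly past EOF: SIGBUS, file stays 100 bytes */
		return 0;
	}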