Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 541d7d3c authored by Lachlan McIlroy's avatar Lachlan McIlroy Committed by Lachlan McIlroy
Browse files

[XFS] kill unnecessary ioops indirection



Currently there is an indirection called ioops in the XFS data I/O path.
Various functions are called via function pointers, but there is no
coherence in what this is for, and of course for XFS itself it's entirely
unused. This patch removes it instead and significantly reduces source and
binary size of XFS while making maintenance easier.

SGI-PV: 970841
SGI-Modid: xfs-linux-melb:xfs-kern:29737a

Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 21a62542
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -317,7 +317,7 @@ xfs_map_blocks(
	xfs_inode_t		*ip = XFS_I(inode);
	xfs_inode_t		*ip = XFS_I(inode);
	int			error, nmaps = 1;
	int			error, nmaps = 1;


	error = xfs_bmap(ip, offset, count,
	error = xfs_iomap(ip, offset, count,
				flags, mapp, &nmaps);
				flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		xfs_iflags_set(ip, XFS_IMODIFIED);
		xfs_iflags_set(ip, XFS_IMODIFIED);
@@ -1336,7 +1336,7 @@ __xfs_get_blocks(
	offset = (xfs_off_t)iblock << inode->i_blkbits;
	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;
	size = bh_result->b_size;
	error = xfs_bmap(XFS_I(inode), offset, size,
	error = xfs_iomap(XFS_I(inode), offset, size,
			     create ? flags : BMAPI_READ, &iomap, &niomap);
			     create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
	if (error)
		return -error;
		return -error;
+17 −39
Original line number Original line Diff line number Diff line
@@ -131,7 +131,7 @@ xfs_inval_cached_trace(
 */
 */
STATIC int
STATIC int
xfs_iozero(
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
	size_t			count)	/* size of data to zero		*/
{
{
@@ -139,7 +139,7 @@ xfs_iozero(
	struct address_space	*mapping;
	struct address_space	*mapping;
	int			status;
	int			status;


	mapping = ip->i_mapping;
	mapping = ip->i_vnode->i_mapping;
	do {
	do {
		unsigned offset, bytes;
		unsigned offset, bytes;
		void *fsdata;
		void *fsdata;
@@ -389,20 +389,19 @@ xfs_splice_write(
 */
 */
STATIC int				/* error (positive) */
STATIC int				/* error (positive) */
xfs_zero_last_block(
xfs_zero_last_block(
	struct inode	*ip,
	xfs_inode_t	*ip,
	xfs_iocore_t	*io,
	xfs_fsize_t	offset,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
	xfs_fsize_t	isize)
{
{
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		nimaps;
	int		zero_offset;
	int		zero_offset;
	int		zero_len;
	int		zero_len;
	int		error = 0;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	xfs_bmbt_irec_t	imap;


	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);


	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
	if (zero_offset == 0) {
@@ -415,7 +414,7 @@ xfs_zero_last_block(


	last_fsb = XFS_B_TO_FSBT(mp, isize);
	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
			  &nimaps, NULL, NULL);
	if (error) {
	if (error) {
		return error;
		return error;
@@ -433,14 +432,14 @@ xfs_zero_last_block(
	 * out sync.  We need to drop the ilock while we do this so we
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 * don't deadlock when the buffer cache calls back to us.
	 */
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
	xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);


	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
	if (isize + zero_len > offset)
		zero_len = offset - isize;
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);
	error = xfs_iozero(ip, isize, zero_len);


	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	ASSERT(error >= 0);
	return error;
	return error;
}
}
@@ -458,12 +457,11 @@ xfs_zero_last_block(


int					/* error (positive) */
int					/* error (positive) */
xfs_zero_eof(
xfs_zero_eof(
	bhv_vnode_t	*vp,
	xfs_inode_t	*ip,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
	xfs_fsize_t	isize)		/* current inode size */
{
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	zero_count_fsb;
@@ -483,7 +481,7 @@ xfs_zero_eof(
	 * First handle zeroing the block on which isize resides.
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 * We only zero a part of that block so it is handled specially.
	 */
	 */
	error = xfs_zero_last_block(ip, io, offset, isize);
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -514,7 +512,7 @@ xfs_zero_eof(
	while (start_zero_fsb <= end_zero_fsb) {
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
@@ -542,7 +540,7 @@ xfs_zero_eof(
		 * Drop the inode lock while we're doing the I/O.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 * We'll still have the iolock to protect us.
		 */
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
		xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);


		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
@@ -558,14 +556,13 @@ xfs_zero_eof(
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));


		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
		xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}
	}


	return 0;
	return 0;


out_lock:
out_lock:

	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	ASSERT(error >= 0);
	return error;
	return error;
}
}
@@ -706,7 +703,7 @@ xfs_write(
	 */
	 */


	if (pos > xip->i_size) {
	if (pos > xip->i_size) {
		error = xfs_zero_eof(vp, io, pos, xip->i_size);
		error = xfs_zero_eof(xip, pos, xip->i_size);
		if (error) {
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
			goto out_unlock_internal;
@@ -751,7 +748,7 @@ xfs_write(


		if (need_i_mutex) {
		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);
			mutex_unlock(&inode->i_mutex);


			iolock = XFS_IOLOCK_SHARED;
			iolock = XFS_IOLOCK_SHARED;
@@ -894,25 +891,6 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
	}
	}
}
}



int
xfs_bmap(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
/*
 * Wrapper around bdstrat so that we can stop data
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * from going to disk in case we are shutting down the filesystem.
+1 −2
Original line number Original line Diff line number Diff line
@@ -73,7 +73,6 @@ extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);


extern int xfs_zero_eof(struct inode *, struct xfs_iocore *, xfs_off_t,
extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
				xfs_fsize_t);


#endif	/* __XFS_LRW_H__ */
#endif	/* __XFS_LRW_H__ */
+1 −1
Original line number Original line Diff line number Diff line
@@ -111,7 +111,7 @@ xfs_swapext(
		goto error0;
		goto error0;
	}
	}


	error = XFS_SWAP_EXTENTS(mp, &ip->i_iocore, &tip->i_iocore, sxp);
	error = xfs_swap_extents(ip, tip, sxp);


 error0:
 error0:
	if (fp != NULL)
	if (fp != NULL)
+2 −6
Original line number Original line Diff line number Diff line
@@ -1711,7 +1711,7 @@ xfs_itruncate_finish(
		 * runs.
		 * runs.
		 */
		 */
		XFS_BMAP_INIT(&free_list, &first_block);
		XFS_BMAP_INIT(&free_list, &first_block);
		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
		error = xfs_bunmapi(ntp, ip,
				    first_unmap_block, unmap_len,
				    first_unmap_block, unmap_len,
				    XFS_BMAPI_AFLAG(fork) |
				    XFS_BMAPI_AFLAG(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				      (sync ? 0 : XFS_BMAPI_ASYNC),
@@ -1844,8 +1844,6 @@ xfs_igrow_start(
	xfs_fsize_t	new_size,
	xfs_fsize_t	new_size,
	cred_t		*credp)
	cred_t		*credp)
{
{
	int		error;

	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
	ASSERT(new_size > ip->i_size);
	ASSERT(new_size > ip->i_size);
@@ -1855,9 +1853,7 @@ xfs_igrow_start(
	 * xfs_write_file() beyond the end of the file
	 * xfs_write_file() beyond the end of the file
	 * and any blocks between the old and new file sizes.
	 * and any blocks between the old and new file sizes.
	 */
	 */
	error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
	return xfs_zero_eof(ip, new_size, ip->i_size);
			     ip->i_size);
	return error;
}
}


/*
/*
Loading