
Commit 6321e3ed authored by Christoph Hellwig, committed by Felix Blyakher

xfs: fix getbmap vs mmap deadlock



xfs_getbmap (or rather the formatters called by it) copies out the getbmap
structures while still holding the ilock, which can deadlock against mmap.  This
was reported via bugzilla a while ago (#717) and has recently also
shown up via lockdep.
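
To picture the shape of the problem, here is a toy userspace program (not XFS code; the names ilock and fault_in_destination are invented for illustration): a non-recursive lock is held while running a step that, like a page fault on an mmap of the same file, needs that same lock again. An error-checking mutex reports EDEADLK instead of hanging, so the program terminates and prints the failure.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t ilock;	/* stands in for the inode lock */

/* Stand-in for the page-fault path, which also wants the "ilock". */
static int fault_in_destination(void)
{
	return pthread_mutex_lock(&ilock);
}

int main(void)
{
	pthread_mutexattr_t attr;
	int err;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&ilock, &attr);

	pthread_mutex_lock(&ilock);	/* like taking the lock in xfs_getbmap */
	err = fault_in_destination();	/* like a copy-out faulting on the same lock */
	printf("second lock attempt: %s\n", err ? strerror(err) : "ok");

	pthread_mutex_unlock(&ilock);
	pthread_mutex_destroy(&ilock);
	pthread_mutexattr_destroy(&attr);
	return 0;
}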

So allocate a temporary buffer, format the kernel getbmap structures into it,
and copy them out to userspace only after the locks have been dropped.
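
A minimal userspace sketch of that pattern (fill_records and emit_record are hypothetical stand-ins, not XFS functions): gather the records into a private heap buffer while the lock is held, drop the lock, and only then hand each record to a step that may block or fault, as a copy to userspace can when the destination is an mmap of the same file.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct record { long offset; long length; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for the per-extent work done under the inode lock. */
static int fill_records(struct record *out, int max)
{
	int n;

	for (n = 0; n < max; n++) {
		out[n].offset = n * 8;
		out[n].length = 8;
	}
	return n;
}

/* Hypothetical stand-in for the getbmap formatter / copy-out step. */
static int emit_record(const struct record *r)
{
	return printf("%ld +%ld\n", r->offset, r->length) < 0 ? -1 : 0;
}

int main(void)
{
	int max = 16, n, i, error = 0;
	struct record *out = calloc(max, sizeof(*out));

	if (!out)
		return 1;

	pthread_mutex_lock(&lock);	/* analogue of taking the ilock */
	n = fill_records(out, max);	/* no user copies while locked */
	pthread_mutex_unlock(&lock);	/* analogue of dropping the ilock */

	/* The copy-out happens only after the lock is gone. */
	for (i = 0; i < n && !error; i++)
		error = emit_record(&out[i]);

	free(out);
	return error ? 1 : 0;
}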

A slight downside of this is that the number of extents we can copy out is
now bounded by the maximum allocation size, but I see no real way
around that.
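
Because the allocation is sized from the user-supplied count, the patch also bounds that count before multiplying. A small illustrative helper in the same spirit (bmap_rec and alloc_records are invented names; the kernel check uses ULONG_MAX and returns ENOMEM):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative record type standing in for struct getbmapx. */
struct bmap_rec { int64_t bmv_offset; int64_t bmv_length; };

/*
 * Refuse a count whose byte size would overflow before allocating,
 * mirroring the bmv_count guard added by the patch.
 */
static struct bmap_rec *alloc_records(size_t count)
{
	if (count == 0 || count > SIZE_MAX / sizeof(struct bmap_rec))
		return NULL;	/* caller treats this like ENOMEM */
	return calloc(count, sizeof(struct bmap_rec));
}

int main(void)
{
	struct bmap_rec *ok = alloc_records(1024);
	struct bmap_rec *bad = alloc_records(SIZE_MAX);

	printf("1024 records: %s, SIZE_MAX records: %s\n",
	       ok ? "allocated" : "rejected", bad ? "allocated" : "rejected");
	free(ok);
	free(bad);
	return 0;
}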

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
Reviewed-by: Felix Blyakher <felixb@sgi.com>
Signed-off-by: Felix Blyakher <felixb@sgi.com>
parent 4be4a00f
+35 −17
@@ -5890,12 +5890,13 @@ xfs_getbmap(
 	int			nexleft;	/* # of user extents left */
 	int			subnex;		/* # of bmapi's can do */
 	int			nmap;		/* number of map entries */
-	struct getbmapx		out;		/* output structure */
+	struct getbmapx		*out;		/* output structure */
 	int			whichfork;	/* data or attr fork */
 	int			prealloced;	/* this is a file with
 						 * preallocated data space */
 	int			iflags;		/* interface flags */
 	int			bmapi_flags;	/* flags for xfs_bmapi */
+	int			cur_ext = 0;
 
 	mp = ip->i_mount;
 	iflags = bmv->bmv_iflags;
@@ -5971,6 +5972,13 @@ xfs_getbmap(
 		return XFS_ERROR(EINVAL);
 	bmvend = bmv->bmv_offset + bmv->bmv_length;
 
+	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
+		return XFS_ERROR(ENOMEM);
+	out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
+	if (!out)
+		return XFS_ERROR(ENOMEM);
+
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
 		if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
@@ -6025,39 +6033,39 @@ xfs_getbmap(
 		ASSERT(nmap <= subnex);
 
 		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
-			int full = 0;	/* user array is full */
-
-			out.bmv_oflags = 0;
+			out[cur_ext].bmv_oflags = 0;
 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
-				out.bmv_oflags |= BMV_OF_PREALLOC;
+				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
-				out.bmv_oflags |= BMV_OF_DELALLOC;
-			out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
-			out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
-			out.bmv_unused1 = out.bmv_unused2 = 0;
+				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
+			out[cur_ext].bmv_offset =
+				XFS_FSB_TO_BB(mp, map[i].br_startoff);
+			out[cur_ext].bmv_length =
+				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+			out[cur_ext].bmv_unused1 = 0;
+			out[cur_ext].bmv_unused2 = 0;
 			ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
 			      (map[i].br_startblock != DELAYSTARTBLOCK));
                         if (map[i].br_startblock == HOLESTARTBLOCK &&
 			    whichfork == XFS_ATTR_FORK) {
 				/* came to the end of attribute fork */
-				out.bmv_oflags |= BMV_OF_LAST;
+				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
 				goto out_free_map;
 			}
 
-			if (!xfs_getbmapx_fix_eof_hole(ip, &out, prealloced,
-					bmvend, map[i].br_startblock))
+			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
+					prealloced, bmvend,
+					map[i].br_startblock))
 				goto out_free_map;
 
-			/* format results & advance arg */
-			error = formatter(&arg, &out, &full);
-			if (error || full)
-				goto out_free_map;
 			nexleft--;
 			bmv->bmv_offset =
-				out.bmv_offset + out.bmv_length;
+				out[cur_ext].bmv_offset +
+				out[cur_ext].bmv_length;
 			bmv->bmv_length =
 				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
 			bmv->bmv_entries++;
+			cur_ext++;
 		}
 	} while (nmap && nexleft && bmv->bmv_length);
 
@@ -6067,6 +6075,16 @@ xfs_getbmap(
 	xfs_iunlock_map_shared(ip, lock);
  out_unlock_iolock:
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
+	for (i = 0; i < cur_ext; i++) {
+		int full = 0;	/* user array is full */
+
+		/* format results & advance arg */
+		error = formatter(&arg, &out[i], &full);
+		if (error || full)
+			break;
+	}
+
 	return error;
 }