Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9db61d6f authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'xfs-4.11-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
 "Here are some bug fixes for -rc2 to clean up the copy on write
  handling and to remove a cause of hangs.

   - Fix various iomap bugs

   - Fix overly aggressive CoW preallocation garbage collection

   - Fixes to CoW endio error handling

   - Fix some incorrect geometry calculations

   - Remove a potential system hang in bulkstat

   - Try to allocate blocks more aggressively to reduce ENOSPC errors"

* tag 'xfs-4.11-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: try any AG when allocating the first btree block when reflinking
  xfs: use iomap new flag for newly allocated delalloc blocks
  xfs: remove kmem_zalloc_greedy
  xfs: Use xfs_icluster_size_fsb() to calculate inode alignment mask
  xfs: fix and streamline error handling in xfs_end_io
  xfs: only reclaim unwritten COW extents periodically
  iomap: invalidate page caches should be after iomap_dio_complete() in direct write
parents 794fe789 2fcc319d
Loading
Loading
Loading
Loading
+10 −7
Original line number Diff line number Diff line
@@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;
@@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
	}

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			goto out_free_dio;

		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}
@@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
@@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		int err = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	return iomap_dio_complete(dio);
	return ret;

out_free_dio:
	kfree(dio);
+0 −18
Original line number Diff line number Diff line
@@ -25,24 +25,6 @@
#include "kmem.h"
#include "xfs_message.h"

/*
 * Greedy allocation.  May fail and may return vmalloced memory.
 *
 * Try to allocate @maxsize bytes of zeroed memory, halving the request
 * on each failure until it reaches @minsize.  On success, *size is set
 * to the size actually obtained.  If even the @minsize allocation
 * fails, return NULL rather than retrying forever -- the previous
 * unbounded retry loop could hang the system under sustained memory
 * pressure (observed via bulkstat).
 */
void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	void		*ptr;
	size_t		kmsize = maxsize;

	while (!(ptr = vzalloc(kmsize))) {
		/* Already at the floor and still failing: honor the
		 * "may fail" contract instead of looping forever. */
		if (kmsize == minsize)
			return NULL;
		if ((kmsize >>= 1) <= minsize)
			kmsize = minsize;
	}
	/* Loop only exits with a valid pointer; no NULL check needed. */
	*size = kmsize;
	return ptr;
}

void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
+0 −2
Original line number Diff line number Diff line
@@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr)
}


extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
+21 −13
Original line number Diff line number Diff line
@@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree(
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
try_another_ag:
		args.type = XFS_ALLOCTYPE_START_BNO;
try_another_ag:
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -790,13 +790,17 @@ xfs_bmap_extents_to_btree(
	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		dfops->dop_low = true;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		goto try_another_ag;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return -ENOSPC;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
@@ -4150,6 +4154,19 @@ xfs_bmapi_read(
	return 0;
}

/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
@@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc(
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;
	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
	 * might have merged it into one of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
@@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc(
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	ASSERT(got->br_startoff <= aoff);
	ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
	ASSERT(isnullstartblock(got->br_startblock));
	ASSERT(got->br_state == XFS_EXT_NORM);
	return 0;

out_unreserve_blocks:
+3 −3
Original line number Diff line number Diff line
@@ -447,8 +447,8 @@ xfs_bmbt_alloc_block(

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);
try_another_ag:
		args.type = XFS_ALLOCTYPE_START_BNO;
try_another_ag:
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
@@ -488,8 +488,8 @@ xfs_bmbt_alloc_block(
	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		cur->bc_private.b.dfops->dop_low = true;
		args.fsbno = cur->bc_private.b.firstblock;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		goto try_another_ag;
	}

@@ -506,7 +506,7 @@ xfs_bmbt_alloc_block(
			goto error0;
		cur->bc_private.b.dfops->dop_low = true;
	}
	if (args.fsbno == NULLFSBLOCK) {
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
Loading