Commit aa182e64 authored by Dave Chinner, committed by Dave Chinner

Revert "xfs: block allocation work needs to be kswapd aware"



This reverts commit 1f6d6482.

This commit resulted in performance regressions in low-memory
situations where kswapd was doing writeback of delayed allocation
blocks. It resulted in significant parallelism of the kswapd work,
and with the special kswapd flags set it meant that hundreds of
active allocations could dip into kswapd-specific memory reserves
and avoid being throttled. This caused a large amount of performance
variation, as well as random OOM-killer invocations that didn't
previously exist.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent a497c3ba
+3 −13
@@ -258,23 +258,14 @@ xfs_bmapi_allocate_worker(
 	struct xfs_bmalloca	*args = container_of(work,
 						struct xfs_bmalloca, work);
 	unsigned long		pflags;
-	unsigned long		new_pflags = PF_FSTRANS;
 
-	/*
-	 * we are in a transaction context here, but may also be doing work
-	 * in kswapd context, and hence we may need to inherit that state
-	 * temporarily to ensure that we don't block waiting for memory reclaim
-	 * in any way.
-	 */
-	if (args->kswapd)
-		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
-
-	current_set_flags_nested(&pflags, new_pflags);
+	/* we are in a transaction context here */
+	current_set_flags_nested(&pflags, PF_FSTRANS);
 
 	args->result = __xfs_bmapi_allocate(args);
 	complete(args->done);
 
-	current_restore_flags_nested(&pflags, new_pflags);
+	current_restore_flags_nested(&pflags, PF_FSTRANS);
 }
 
 /*
@@ -293,7 +284,6 @@ xfs_bmapi_allocate(
 
 
 	args->done = &done;
-	args->kswapd = current_is_kswapd();
 	INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
 	queue_work(xfs_alloc_wq, &args->work);
 	wait_for_completion(&done);
+6 −7
@@ -50,13 +50,12 @@ struct xfs_bmalloca {
 	xfs_extlen_t		total;	/* total blocks needed for xaction */
 	xfs_extlen_t		minlen;	/* minimum allocation size (blocks) */
 	xfs_extlen_t		minleft; /* amount must be left after alloc */
-	bool			eof;	/* set if allocating past last extent */
-	bool			wasdel;	/* replacing a delayed allocation */
-	bool			userdata;/* set if is user data */
-	bool			aeof;	/* allocated space at eof */
-	bool			conv;	/* overwriting unwritten extents */
-	bool			stack_switch;
-	bool			kswapd;	/* allocation in kswapd context */
+	char			eof;	/* set if allocating past last extent */
+	char			wasdel;	/* replacing a delayed allocation */
+	char			userdata;/* set if is user data */
+	char			aeof;	/* allocated space at eof */
+	char			conv;	/* overwriting unwritten extents */
+	char			stack_switch;
 	int			flags;
 	struct completion	*done;
 	struct work_struct	work;
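
For reference, here is a sketch of how xfs_bmapi_allocate_worker() reads once the revert is applied. It is reconstructed from the hunks above rather than copied from the tree, and the function signature is assumed from context:

static void
xfs_bmapi_allocate_worker(
	struct work_struct	*work)
{
	struct xfs_bmalloca	*args = container_of(work,
						struct xfs_bmalloca, work);
	unsigned long		pflags;

	/* back to plain transaction context: only PF_FSTRANS is nested in */
	current_set_flags_nested(&pflags, PF_FSTRANS);

	args->result = __xfs_bmapi_allocate(args);
	complete(args->done);

	current_restore_flags_nested(&pflags, PF_FSTRANS);
}

With PF_MEMALLOC, PF_SWAPWRITE and PF_KSWAPD no longer inherited, the allocation worker is throttled by memory reclaim like any other allocation, which is the behaviour the revert restores.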