
Commit 585e6d88 authored by David Chinner, committed by Tim Shimmin

[XFS] Fix a synchronous buftarg flush deadlock when freezing.



At the last stage of a freeze, we flush the buftarg synchronously over and
over again until it succeeds twice without skipping any buffers.

The delwri list flush skips pinned buffers, but tries to flush all others.
It removes the buffers from the delwri list, then tries to lock them one
at a time as it traverses the list to issue the I/O. It holds them locked
until all of the I/O has been issued, and only unlocks them after we have
waited for that I/O to complete.

The problem is that during a freeze the filesystem may still be doing work
in the background - such as flushing delalloc data buffers - and hence
other threads can be trying to lock buffers that were on the delwri list
at the same time. As a result we can get ABBA deadlocks between threads
doing allocation and the buftarg flush (freeze) thread.
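
Purely as an illustration of that ABBA pattern - a minimal userspace
analogue using POSIX mutexes in place of buffer locks, with hypothetical
names; none of this is XFS code - one thread takes the locks in A-then-B
order (like the flush thread holding buffers while it walks the list),
while the other takes them B-then-A (like an allocating thread), so the
program normally hangs:

/* Illustrative ABBA deadlock analogue (not kernel code): two threads
 * acquire the same pair of mutexes in opposite order and block forever. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* "buffer A" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* "buffer B" */

static void *flush_thread(void *arg)
{
	pthread_mutex_lock(&lock_a);	/* holds A while "issuing I/O" */
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&lock_b);	/* blocks: B is held by the other thread */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *alloc_thread(void *arg)
{
	pthread_mutex_lock(&lock_b);	/* holds B for its own work */
	sleep(1);
	pthread_mutex_lock(&lock_a);	/* blocks: A is held by the flush thread */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, flush_thread, NULL);
	pthread_create(&t2, NULL, alloc_thread, NULL);
	pthread_join(t1, NULL);		/* never returns: the threads deadlock */
	pthread_join(t2, NULL);
	printf("done\n");		/* not reached */
	return 0;
}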

Fix it by skipping locked (and pinned) buffers as we traverse the delwri
buffer list.
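
For comparison, here is a minimal userspace sketch of the trylock-and-skip
approach the patch adopts - again an analogue with hypothetical names, not
the kernel implementation; in the real code the trylock is
xfs_buf_cond_lock() and the skip count is what xfs_buf_delwri_split()
returns:

/* Illustrative trylock-and-skip traversal (not kernel code): entries that
 * are already locked by someone else are skipped instead of waited on, so
 * the walker can never take part in an ABBA deadlock. */
#include <pthread.h>
#include <stdio.h>

#define NENTRIES 4

struct entry {
	pthread_mutex_t	lock;
	int		flushed;
};

/* Returns the number of entries skipped because they were busy. */
static int flush_list(struct entry *list, int n)
{
	int i, skipped = 0;

	for (i = 0; i < n; i++) {
		if (pthread_mutex_trylock(&list[i].lock) != 0) {
			skipped++;		/* busy: leave it for a later pass */
			continue;
		}
		list[i].flushed = 1;		/* "issue the I/O" */
		pthread_mutex_unlock(&list[i].lock);
	}
	return skipped;
}

int main(void)
{
	struct entry list[NENTRIES];
	int i;

	for (i = 0; i < NENTRIES; i++) {
		pthread_mutex_init(&list[i].lock, NULL);
		list[i].flushed = 0;
	}
	pthread_mutex_lock(&list[2].lock);	/* pretend another thread holds entry 2 */

	printf("skipped %d entries\n", flush_list(list, NENTRIES));
	pthread_mutex_unlock(&list[2].lock);
	printf("skipped %d entries\n", flush_list(list, NENTRIES));	/* later pass picks it up */
	return 0;
}

Repeating the pass until nothing is skipped mirrors the freeze-time loop
described above, where the buftarg is flushed over and over until it
completes without skipping any buffers.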

SGI-PV: 957195
SGI-Modid: xfs-linux-melb:xfs-kern:27535a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent dac61f52
@@ -1679,44 +1679,31 @@ xfsbufd_wakeup(
	return 0;
}

+/*
+ * Move as many buffers as specified to the supplied list
+ * indicating if we skipped any buffers to prevent deadlocks.
+ */
STATIC int
-xfsbufd(
-	void			*data)
+xfs_buf_delwri_split(
+	xfs_buftarg_t	*target,
+	struct list_head *list,
+	unsigned long	age,
+	int		flags)
{
-	struct list_head	tmp;
-	unsigned long		age;
-	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
-	int			count;
+	int		skipped = 0;

-	current->flags |= PF_MEMALLOC;
-
-	INIT_LIST_HEAD(&tmp);
-	do {
-		if (unlikely(freezing(current))) {
-			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
-		} else {
-			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-		}
-
-		schedule_timeout_interruptible(
-			xfs_buf_timer_centisecs * msecs_to_jiffies(10));
-
-		count = 0;
-		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
+	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
-				if (!test_bit(XBT_FORCE_FLUSH,
-						&target->bt_flags) &&
-				    time_before(jiffies,
-						bp->b_queuetime + age)) {
+			if (!(flags & XBT_FORCE_FLUSH) &&
+			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}
@@ -1724,18 +1711,51 @@ xfsbufd(
			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
-				list_move_tail(&bp->b_list, &tmp);
-				count++;
-			}
+			list_move_tail(&bp->b_list, list);
+		} else
+			skipped++;
	}
	spin_unlock(dwlk);

+	return skipped;
+
+}
+
+STATIC int
+xfsbufd(
+	void		*data)
+{
+	struct list_head tmp;
+	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
+	int		count;
+	xfs_buf_t	*bp;
+
+	current->flags |= PF_MEMALLOC;
+
+	do {
+		if (unlikely(freezing(current))) {
+			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
+			refrigerator();
+		} else {
+			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
+		}
+
+		schedule_timeout_interruptible(
+			xfs_buf_timer_centisecs * msecs_to_jiffies(10));
+
+		xfs_buf_delwri_split(target, &tmp,
+				xfs_buf_age_centisecs * msecs_to_jiffies(10),
+				test_bit(XBT_FORCE_FLUSH, &target->bt_flags)
+						? XBT_FORCE_FLUSH : 0);
+
+		count = 0;
		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
+			count++;
		}

		if (as_list_len > 0)
@@ -1762,34 +1782,17 @@ xfs_flush_buftarg(
	struct list_head tmp;
	xfs_buf_t	*bp, *n;
	int		pincount = 0;
-	struct list_head	*dwq = &target->bt_delwrite_queue;
-	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

-	INIT_LIST_HEAD(&tmp);
-	spin_lock(dwlk);
-	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		ASSERT(bp->b_target == target);
-		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
-		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
-		if (xfs_buf_ispin(bp)) {
-			pincount++;
-			continue;
-		}
-
-		list_move_tail(&bp->b_list, &tmp);
-	}
-	spin_unlock(dwlk);
+	pincount = xfs_buf_delwri_split(target, &tmp, 0, XBT_FORCE_FLUSH);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
-		xfs_buf_lock(bp);
-		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
-		bp->b_flags |= XBF_WRITE;
+		ASSERT(target == bp->b_target);
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else