Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eb2a021c authored by Francois Romieu, committed by Jeff Garzik
Browse files

r8169: RTNL and flush_scheduled_work deadlock



flush_scheduled_work() in net_device->close has a slight tendency
to deadlock with tasks on the workqueue that hold RTNL.

rtl8169_close/down simply need the recovery tasks to not meddle
with the hardware while the device is going down.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 1ca94929
Loading
Loading
Loading
Loading
+18 −7
Original line number Diff line number Diff line
@@ -1733,6 +1733,8 @@ rtl8169_remove_one(struct pci_dev *pdev)
	assert(dev != NULL);
	assert(tp != NULL);

	flush_scheduled_work();

	unregister_netdev(dev);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
@@ -2161,10 +2163,13 @@ static void rtl8169_reinit_task(struct work_struct *work)
	struct net_device *dev = tp->dev;
	int ret;

	if (netif_running(dev)) {
	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);
	rtl8169_close(dev);
	}

	ret = rtl8169_open(dev);
	if (unlikely(ret < 0)) {
@@ -2179,6 +2184,9 @@ static void rtl8169_reinit_task(struct work_struct *work)
		}
		rtl8169_schedule_work(dev, rtl8169_reinit_task);
	}

out_unlock:
	rtnl_unlock();
}

static void rtl8169_reset_task(struct work_struct *work)
@@ -2187,8 +2195,10 @@ static void rtl8169_reset_task(struct work_struct *work)
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;

	rtnl_lock();

	if (!netif_running(dev))
		return;
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);

@@ -2210,6 +2220,9 @@ static void rtl8169_reset_task(struct work_struct *work)
		}
		rtl8169_schedule_work(dev, rtl8169_reset_task);
	}

out_unlock:
	rtnl_unlock();
}

static void rtl8169_tx_timeout(struct net_device *dev)
@@ -2722,8 +2735,6 @@ static void rtl8169_down(struct net_device *dev)

	netif_stop_queue(dev);

	flush_scheduled_work();

core_down:
	spin_lock_irq(&tp->lock);