Commit 7fbb9d84 authored by David Vrabel, committed by David S. Miller

xen-netback: release pending index before pushing Tx responses



If the pending indexes are released /after/ pushing the Tx response
then a stale pending index may be used if a new Tx request is
immediately pushed by the frontend.  This may cause various WARNINGs
or BUGs if the stale pending index is actually still in use.

Fix this by releasing the pending index before pushing the Tx
response.
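As an illustration of the ordering, here is a minimal userspace sketch (not the
driver code; struct model_queue, idx_release_old() and idx_release_new() are
simplified stand-ins invented for this example) contrasting the old and the
fixed release order:

/* Minimal userspace model of the completion path.  The types and helpers
 * below are illustrative only; they mirror the idea, not the driver. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING 256

struct model_queue {
	uint16_t pending_ring[MAX_PENDING];	/* free pending indexes (backend private) */
	unsigned int pending_prod;		/* producer for pending_ring */
	_Atomic unsigned int rsp_prod;		/* response producer seen by the frontend */
};

/* Old order: the response push makes the ring slot reusable while the
 * pending index has not yet been returned to pending_ring.  A Tx request
 * pushed immediately by the frontend can then be paired with a stale
 * pending index that is still in use. */
static void idx_release_old(struct model_queue *q, uint16_t pending_idx)
{
	atomic_fetch_add_explicit(&q->rsp_prod, 1, memory_order_release);
	/* <-- race window: frontend may push a new Tx request here */
	q->pending_ring[q->pending_prod++ % MAX_PENDING] = pending_idx;
}

/* Fixed order: the pending index is back on pending_ring before the
 * response push, so any request the frontend pushes in reaction to the
 * response finds the index already released. */
static void idx_release_new(struct model_queue *q, uint16_t pending_idx)
{
	q->pending_ring[q->pending_prod++ % MAX_PENDING] = pending_idx;
	atomic_fetch_add_explicit(&q->rsp_prod, 1, memory_order_release);
}

int main(void)
{
	struct model_queue q = { .pending_prod = 0, .rsp_prod = 0 };

	idx_release_old(&q, 7);	/* ordering with the race window */
	idx_release_new(&q, 7);	/* ordering after the fix */
	printf("rsp_prod=%u pending_prod=%u\n",
	       atomic_load(&q.rsp_prod), q.pending_prod);
	return 0;
}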

The full barrier for the pending ring update is not required since
the Tx response push already has a suitable write barrier.
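For context, the push already provides the needed ordering:
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() (include/xen/interface/io/ring.h)
issues a write barrier before publishing the new rsp_prod, so a write to
pending_ring made before the push cannot be reordered after it.  An abridged
sketch of the macro (the exact barrier primitives and bookkeeping differ
across versions; see ring.h for the real definition):

/* Abridged sketch, not the verbatim macro. */
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
	RING_IDX __old = (_r)->sring->rsp_prod;				\
	RING_IDX __new = (_r)->rsp_prod_pvt;				\
	wmb();	/* responses (and, here, the pending_ring update) are	\
		 * visible before the frontend sees the new producer */	\
	(_r)->sring->rsp_prod = __new;					\
	mb();								\
	(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <	\
		     (RING_IDX)(__new - __old));			\
} while (0)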

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 41a50d62
drivers/net/xen-netback/netback.c  +21 −8
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 	unsigned long flags;
 
 	do {
+		int notify;
+
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
+		if (notify)
+			notify_remote_via_irq(queue->tx_irq);
+
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
+	int notify;
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
+
 	spin_lock_irqsave(&queue->response_lock, flags);
+
 	make_tx_response(queue, &pending_tx_info->req, status);
-	index = pending_index(queue->pending_prod);
+
+	/* Release the pending index before pushing the Tx response so
+	 * it's available before a new Tx request is pushed by the
+	 * frontend.
+	 */
+	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
-	/* TX shouldn't use the index before we give it back here */
-	mb();
-	queue->pending_prod++;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
 	spin_unlock_irqrestore(&queue->response_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
 	struct xen_netif_tx_response *resp;
-	int notify;
 
 	resp = RING_GET_RESPONSE(&queue->tx, i);
 	resp->id     = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,