
Commit 4404c2b8 authored by Jan Beulich, committed by Greg Kroah-Hartman

xen-netback: don't produce zero-size SKB frags



commit c7ec4f2d684e17d69bbdd7c4324db0ef5daac26a upstream.

While frontends may submit zero-size requests (wasting a precious slot),
core networking code as of at least 3ece7826 ("sock: skb_copy_ubufs
support for compound pages") can't deal with SKBs when they have all
zero-size fragments. Respond to empty requests right when populating
fragments; all further processing is fragment based and hence won't
encounter these empty requests anymore.
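
The shape of the fix, outside any kernel context: requests are still consumed one per slot, but only non-empty ones become fragments, while empty ones are acknowledged on the spot. Below is a minimal stand-alone C sketch of that pattern; the types tx_request/fragment and the helpers respond_okay()/fill_frags() are simplified stand-ins, where the real code uses make_tx_response()/push_tx_responses() under queue->response_lock:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the ring request and fragment types. */
struct tx_request { size_t size; };
struct fragment  { size_t size; };

/* Hypothetical helper: acknowledge a request without creating a fragment. */
static void respond_okay(const struct tx_request *req)
{
	printf("acknowledged empty request %p\n", (const void *)req);
}

/* Populate frags[] from reqs[], skipping empty requests up front so that
 * later, purely fragment-based processing never sees a zero-size frag. */
static size_t fill_frags(const struct tx_request *reqs, size_t n_reqs,
			 struct fragment *frags)
{
	size_t nr_frags = 0;

	for (size_t i = 0; i < n_reqs; i++) {
		if (reqs[i].size == 0) {
			respond_okay(&reqs[i]);	/* consume the slot ... */
			continue;		/* ... but make no frag */
		}
		frags[nr_frags++].size = reqs[i].size;
	}
	return nr_frags;
}

int main(void)
{
	struct tx_request reqs[] = { { 100 }, { 0 }, { 42 } };
	struct fragment frags[3];
	size_t n = fill_frags(reqs, 3, frags);

	printf("%zu non-empty fragments\n", n);	/* prints: 2 */
	return 0;
}

Everything downstream of fill_frags() can then assume every fragment has a non-zero size, which is exactly the invariant the core networking code relies on.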

In a way this should have been that way from the beginning: When no data
is to be transferred for a particular request, there's not even a point
in validating the respective grant ref. That's no different from e.g.
passing NULL into memcpy() when at the same time the size is 0.
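
The memcpy() comparison is precise: the C standard requires the pointer arguments to be valid even when the size is zero, so a zero-size copy through a NULL pointer is formally undefined behaviour. A defensive caller guards the call, just as the patch now skips grant-ref validation for empty requests; a tiny illustration, with copy_bytes() as a hypothetical wrapper:

#include <string.h>

/* Hypothetical wrapper: skip the call entirely for empty copies, since
 * memcpy() requires valid pointers even when n == 0 (C11 7.1.4). */
static void copy_bytes(void *dst, const void *src, size_t n)
{
	if (n == 0)
		return;		/* a NULL dst/src would be UB below */
	memcpy(dst, src, n);
}

int main(void)
{
	copy_bytes(0, 0, 0);	/* fine: guarded, memcpy never runs */
	return 0;
}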

This is XSA-448 / CVE-2023-46838.

Cc: stable@vger.kernel.org
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ee4e9c5f
drivers/net/xen-netback/netback.c: +38 −6

@@ -456,12 +456,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	}
 
 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
-	     shinfo->nr_frags++, gop++, nr_slots--) {
+	     nr_slots--) {
+		if (unlikely(!txp->size)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&queue->response_lock, flags);
+			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			spin_unlock_irqrestore(&queue->response_lock, flags);
+			++txp;
+			continue;
+		}
+
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
 				        txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		++shinfo->nr_frags;
+		++gop;
 
 		if (txp == first)
 			txp = txfrags;
@@ -474,20 +487,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-		     shinfo->nr_frags++, txp++, gop++) {
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+			if (unlikely(!txp->size)) {
+				unsigned long flags;
+
+				spin_lock_irqsave(&queue->response_lock, flags);
+				make_tx_response(queue, txp, 0,
+						 XEN_NETIF_RSP_OKAY);
+				push_tx_responses(queue);
+				spin_unlock_irqrestore(&queue->response_lock,
+						       flags);
+				continue;
+			}
+
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 						gop);
 			frag_set_pending_idx(&frags[shinfo->nr_frags],
 					     pending_idx);
+			++shinfo->nr_frags;
+			++gop;
 		}
 
-		skb_shinfo(skb)->frag_list = nskb;
-	} else if (nskb) {
+		if (shinfo->nr_frags) {
+			skb_shinfo(skb)->frag_list = nskb;
+			nskb = NULL;
+		}
+	}
+
+	if (nskb) {
 		/* A frag_list skb was allocated but it is no longer needed
-		 * because enough slots were converted to copy ops above.
+		 * because enough slots were converted to copy ops above or some
+		 * were empty.
 		 */
 		kfree_skb(nskb);
 	}
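
One C detail is worth spelling out, since it explains why the patch moves shinfo->nr_frags++ and gop++ out of the for headers and into the loop bodies: continue jumps to the for statement's increment expression, so increments left in the header would still run for the skipped empty requests, and a zero-size frag would be produced anyway. Only the request pointer must still advance on the skipped path, which is why ++txp stays in the header in the second hunk and is done explicitly in the branch in the first. A small stand-alone demonstration of the pitfall:

#include <stdio.h>

int main(void)
{
	int sizes[] = { 100, 0, 42 };
	int frags_buggy = 0, frags_fixed = 0;

	/* Buggy shape: the increment lives in the for header, so
	 * 'continue' still executes it and counts the empty slot. */
	for (int i = 0; i < 3; i++, frags_buggy++) {
		if (sizes[i] == 0)
			continue;	/* frags_buggy++ runs anyway */
	}

	/* Fixed shape (as in the patch): increment in the body,
	 * after the early 'continue', so empty slots are skipped. */
	for (int i = 0; i < 3; i++) {
		if (sizes[i] == 0)
			continue;
		frags_fixed++;
	}

	printf("buggy: %d, fixed: %d\n", frags_buggy, frags_fixed);
	/* prints: buggy: 3, fixed: 2 */
	return 0;
}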