Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 26509bb5 authored by Juergen Gross, committed by Greg Kroah-Hartman
Browse files

xen/netfront: don't read data from request on the ring page



commit 162081ec33c2686afa29d91bf8d302824aa846c7 upstream.

In order to avoid a malicious backend being able to influence the local
processing of a request build the request locally first and then copy
it to the ring page. Any reading from the request influencing the
processing in the frontend needs to be done on the local instance.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e7d1024f
Loading
Loading
Loading
Loading
+37 −43
Original line number Diff line number Diff line
@@ -425,7 +425,8 @@ struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx; /* Last request */
	struct xen_netif_tx_request *tx;      /* Last request on ring page */
	struct xen_netif_tx_request tx_local; /* Last request local copy*/
	unsigned int size;
};

@@ -453,30 +454,27 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	tx->id = id;
	tx->gref = ref;
	tx->offset = offset;
	tx->size = len;
	tx->flags = 0;
	info->tx_local.id = id;
	info->tx_local.gref = ref;
	info->tx_local.offset = offset;
	info->tx_local.size = len;
	info->tx_local.flags = 0;

	*tx = info->tx_local;

	info->tx = tx;
	info->size += tx->size;
	info->size += info->tx_local.size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct netfront_queue *queue, struct sk_buff *skb,
	struct page *page, unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.page = page,
		.size = 0,
	};
	struct xennet_gnttab_make_txreq *info,
	unsigned int offset, unsigned int len)
{
	info->size = 0;

	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

	return info.tx;
	return info->tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -489,35 +487,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
	xennet_tx_setup_grant(gfn, offset, len, data);
}

static struct xen_netif_tx_request *xennet_make_txreqs(
	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
	struct sk_buff *skb, struct page *page,
static void xennet_make_txreqs(
	struct xennet_gnttab_make_txreq *info,
	struct page *page,
	unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.tx = tx,
	};

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info.page = page;
		info.size = 0;
		info->page = page;
		info->size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      &info);
					      info);

		page++;
		offset = 0;
		len -= info.size;
		len -= info->size;
	}

	return info.tx;
}

/*
@@ -571,7 +561,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *tx, *first_tx;
	struct xen_netif_tx_request *first_tx;
	unsigned int i;
	int notify;
	int slots;
@@ -580,6 +570,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	struct xennet_gnttab_make_txreq info = { };
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;
@@ -637,21 +628,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
	}

	/* First request for the linear area. */
	first_tx = tx = xennet_make_first_txreq(queue, skb,
						page, offset, len);
	offset += tx->size;
	info.queue = queue;
	info.skb = skb;
	info.page = page;
	first_tx = xennet_make_first_txreq(&info, offset, len);
	offset += info.tx_local.size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= tx->size;
	len -= info.tx_local.size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
		first_tx->flags |= XEN_NETTXF_csum_blank |
				   XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;
		first_tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
@@ -660,7 +654,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		tx->flags |= XEN_NETTXF_extra_info;
		first_tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -674,13 +668,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
	}

	/* Requests for the rest of the linear area. */
	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
	xennet_make_txreqs(&info, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx = xennet_make_txreqs(queue, tx, skb,
					skb_frag_page(frag), frag->page_offset,
		xennet_make_txreqs(&info, skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag));
	}