
Commit 62bad319 authored by Zoltan Kiss, committed by David S. Miller

xen-netback: Remove old TX grant copy definitions and fix indentations



These became obsolete with grant mapping. I intentionally left the indentation
this way in the previous patches to keep them easier to review; this patch fixes it up.

NOTE: if bisect brought you here, you should apply the series up to and including
"xen-netback: Timeout packets in RX path", otherwise Windows guests won't work
properly and malicious guests can block other guests by not releasing their sent
packets.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f53c3fe8
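
For background: the definitions removed below belong to the copy-based TX path, where each frontend request was turned into one or two GNTTABOP_copy operations targeting backend-owned pages (hence the 2*MAX_PENDING_REQS sizing of tx_copy_ops), while the grant-mapping path fills a single gnttab_map_grant_ref per pending slot. What follows is a minimal sketch of the two setups using the public grant-table structures and the gnttab_set_map_op() helper; the fill_copy_op()/fill_map_op() wrappers and their parameters are illustrative and are not functions from the driver.

#include <xen/grant_table.h>            /* gnttab_set_map_op() */
#include <xen/interface/grant_table.h>  /* struct gnttab_copy, GNTCOPY_*, GNTMAP_* */

/* Obsolete copy-based setup (sketch): pull the frontend's granted data into
 * a page the backend already owns.  A single tx request could need two of
 * these ops, which is why tx_copy_ops was sized 2*MAX_PENDING_REQS.
 */
static void fill_copy_op(struct gnttab_copy *copy, grant_ref_t gref,
			 domid_t otherend, uint16_t offset, uint16_t len,
			 unsigned long dest_gmfn)
{
	copy->source.u.ref = gref;		/* guest grant is the source */
	copy->source.domid = otherend;
	copy->source.offset = offset;
	copy->dest.u.gmfn = dest_gmfn;		/* backend-owned destination page */
	copy->dest.domid = DOMID_SELF;
	copy->dest.offset = 0;
	copy->len = len;
	copy->flags = GNTCOPY_source_gref;
}

/* Grant-mapping setup (sketch): map the frontend's granted page read-only
 * into the backend instead of copying it; exactly one op per pending slot,
 * matching the MAX_PENDING_REQS sizing of the remaining tx_map_ops array.
 */
static void fill_map_op(struct gnttab_map_grant_ref *map, unsigned long kaddr,
			grant_ref_t gref, domid_t otherend)
{
	gnttab_set_map_op(map, kaddr, GNTMAP_host_map | GNTMAP_readonly,
			  gref, otherend);
}
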
+1 −35
@@ -48,37 +48,8 @@
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
 
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
 struct pending_tx_info {
-	struct xen_netif_tx_request req; /* coalesced tx request */
-	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-				  * if it is head of one or more tx
-				  * reqs
-				  */
+	struct xen_netif_tx_request req; /* tx request */
 	/* Callback data for released SKBs. The callback is always
 	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
 	 * also an index in pending_tx_info array. It is initialized in
@@ -148,11 +119,6 @@ struct xenvif {
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-	/* Coalescing tx requests before copying makes number of grant
-	 * copy ops greater or equal to number of slots required. In
-	 * worst case a tx request consumes 2 gnttab_copy.
-	 */
-	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
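
The comment and head field removed above implemented the slot-coalescing bookkeeping of the old copy path: within a run of pending slots belonging to one coalesced request, only the first slot's head carries a real index into pending_tx_info[]/mmap_pages[], and the remaining slots are marked INVALID_PENDING_RING_IDX. Since grant mapping ties each tx request to exactly one slot, the scheme goes away. Below is a small self-contained toy model of that bookkeeping (plain userspace C, not driver code); the head[] array reproduces the ...|0 I I I|5 I|9 I I I|... example from the removed comment.

#include <stdio.h>

#define INVALID_PENDING_RING_IDX (~0U)

/* Toy model: a non-INVALID value starts a coalesced request and gives its
 * index into pending_tx_info[]; INVALID marks a continuation slot.
 */
static const unsigned int head[] = {
	0, ~0U, ~0U, ~0U,	/* request 0 spans four slots */
	5, ~0U,			/* request 5 spans two slots  */
	9, ~0U, ~0U, ~0U,	/* request 9 spans four slots */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(head) / sizeof(head[0]); i++) {
		if (head[i] != INVALID_PENDING_RING_IDX)
			printf("slot %u: head, request at pending_tx_info[%u]\n",
			       i, head[i]);
		else
			printf("slot %u: continuation of previous request\n", i);
	}
	return 0;
}
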
+14 −58
@@ -62,16 +62,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
-	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 			       u8 status);
 
@@ -790,19 +780,6 @@ static int xenvif_count_requests(struct xenvif *vif,
 	return slots;
 }
 
-static struct page *xenvif_alloc_page(struct xenvif *vif,
-				      u16 pending_idx)
-{
-	struct page *page;
-
-	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-	if (!page)
-		return NULL;
-	vif->mmap_pages[pending_idx] = page;
-
-	return page;
-}
-
 
 struct xenvif_tx_cb {
 	u16 pending_idx;
@@ -832,13 +809,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	u16 head_idx = 0;
-	int slot, start;
-	struct page *page;
-	pending_ring_idx_t index, start_idx = 0;
-	uint16_t dst_offset;
+	int start;
+	pending_ring_idx_t index;
 	unsigned int nr_slots;
-	struct pending_tx_info *first = NULL;
 
 	/* At this point shinfo->nr_frags is in fact the number of
 	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
@@ -859,18 +832,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
 	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
 
 	return gop;
-err:
-	/* Unwind, freeing all pages and sending error responses. */
-	while (shinfo->nr_frags-- > start) {
-		xenvif_idx_release(vif,
-				frag_get_pending_idx(&frags[shinfo->nr_frags]),
-				XEN_NETIF_RSP_ERROR);
-	}
-	/* The head too, if necessary. */
-	if (start)
-		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
-
-	return NULL;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif *vif,
@@ -910,7 +871,6 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
 	struct pending_tx_info *tx_info;
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
-	u16 peek; /* peek into next tx request */
 
 	/* Check status of header. */
 	err = gop->status;
@@ -924,11 +884,9 @@
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t head;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 		tx_info = &vif->pending_tx_info[pending_idx];
-		head = tx_info->head;
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = (++gop)->status;
@@ -1136,7 +1094,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 	       (skb_queue_len(&vif->tx_queue) < budget)) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
-		struct page *page;
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 		u16 pending_idx;
 		RING_IDX idx;
@@ -1507,7 +1464,6 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
-	u16 peek; /* peek into next tx request */
 	unsigned long flags;
 
 	pending_tx_info = &vif->pending_tx_info[pending_idx];