
Commit eb1723a2 authored by David Vrabel, committed by David S. Miller

xen-netback: refactor guest rx



Refactor the to-guest (rx) path to:

1. Push responses for completed skbs earlier, reducing latency.

2. Reduce the per-queue memory overhead by greatly reducing the
   maximum number of grant copy ops in each hypercall (from 4352 to
   64).  Each struct xenvif_queue is now only 44 kB instead of 220 kB.
   (A simplified sketch of this batching pattern follows the list.)

3. Make the code more maintainable.
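
For illustration only, a minimal user-space sketch of the bounded
batch-and-flush pattern that point 2 describes. The names mirror the new
xenvif_copy_state and COPY_BATCH_SIZE in the diff below, but the
"hypercall" is a printf stand-in and struct copy_op is a made-up
placeholder for struct gnttab_copy; this is not the driver code itself.

/* Sketch (assumes XEN_PAGE_SIZE == 4096 and a 256-entry rx ring). */
#include <stdio.h>

#define COPY_BATCH_SIZE   64                    /* new per-batch bound */
#define MAX_XEN_SKB_FRAGS (65536 / 4096 + 1)    /* = 17 */
#define RX_RING_SIZE      256
/* Old worst case staged per queue: 17 * 256 = 4352 copy ops. */

struct copy_op { unsigned int src, dst, len; }; /* stand-in for gnttab_copy */

struct copy_state {
	struct copy_op op[COPY_BATCH_SIZE];
	unsigned int num;
};

/* Stand-in for issuing one grant-copy hypercall for the whole batch. */
static void flush_batch(struct copy_state *s)
{
	if (s->num)
		printf("hypercall: %u copy ops\n", s->num);
	s->num = 0;
}

/* Queue one op, flushing first if the fixed-size batch is already full. */
static void add_op(struct copy_state *s, struct copy_op op)
{
	if (s->num == COPY_BATCH_SIZE)
		flush_batch(s);
	s->op[s->num++] = op;
}

int main(void)
{
	struct copy_state s = { .num = 0 };
	unsigned int i;

	/* The old worst case (4352 ops) is now issued as 4352 / 64 = 68
	 * small batches instead of being staged in one huge per-queue array.
	 */
	for (i = 0; i < MAX_XEN_SKB_FRAGS * RX_RING_SIZE; i++)
		add_op(&s, (struct copy_op){ i, i, 4096 });
	flush_batch(&s); /* complete the final (possibly partial) batch */
	return 0;
}

The same bound is what shrinks struct xenvif_queue: a 64-entry op array
plus 64 ring indices replaces the 4352-entry grant_copy_op array and the
per-slot meta array removed from common.h below.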

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fedbc8c1
drivers/net/xen-netback/common.h: +10 −13
@@ -91,13 +91,6 @@ struct xenvif_rx_meta {
  */
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
-/* It's possible for an skb to have a maximal number of frags
- * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
- * ring slot.
- */
-#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
-
 #define NETBACK_INVALID_HANDLE -1
 
 /* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
@@ -133,6 +126,14 @@ struct xenvif_stats {
 	unsigned long tx_frag_overflow;
 };
 
+#define COPY_BATCH_SIZE 64
+
+struct xenvif_copy_state {
+	struct gnttab_copy op[COPY_BATCH_SIZE];
+	RING_IDX idx[COPY_BATCH_SIZE];
+	unsigned int num;
+};
+
 struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int id; /* Queue ID, 0-based */
 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
@@ -189,12 +190,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned long last_rx_time;
 	bool stalled;
 
-	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
-
-	/* We create one meta structure per ring request we consume, so
-	 * the maximum number is the same as the ring size.
-	 */
-	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+	struct xenvif_copy_state rx_copy;
 
 	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
 	unsigned long   credit_bytes;
@@ -358,6 +354,7 @@ int xenvif_dealloc_kthread(void *data);
 
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
+void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
drivers/net/xen-netback/rx.c: +205 −371
@@ -26,7 +26,6 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  */
-
 #include "common.h"
 
 #include <linux/kthread.h>
@@ -137,464 +136,299 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
	}
}

struct netrx_pending_operations {
	unsigned int copy_prod, copy_cons;
	unsigned int meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(
	struct xenvif_queue *queue,
	struct netrx_pending_operations *npo)
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request req;
	unsigned int i;

	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req.id;
	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

	npo->copy_off = 0;
	npo->copy_gref = req.gref;
		op = &queue->rx_copy.op[i];

	return meta;
}
		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

struct gop_frag_copy {
	struct xenvif_queue *queue;
	struct netrx_pending_operations *npo;
	struct xenvif_rx_meta *meta;
	int head;
	int gso_type;
	int protocol;
	int hash_present;
			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	struct page *page;
};
	queue->rx_copy.num = 0;
}

static void xenvif_setup_copy_gop(unsigned long gfn,
				  unsigned int offset,
				  unsigned int *len,
				  struct gop_frag_copy *info)
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *copy_gop;
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;
	/* Convenient aliases */
	struct xenvif_queue *queue = info->queue;
	struct netrx_pending_operations *npo = info->npo;
	struct page *page = info->page;

	WARN_ON(npo->copy_off > MAX_BUFFER_OFFSET);
	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	if (npo->copy_off == MAX_BUFFER_OFFSET)
		info->meta = get_next_rx_buffer(queue, npo);
	op = &queue->rx_copy.op[queue->rx_copy.num];

	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
		*len = MAX_BUFFER_OFFSET - npo->copy_off;
	page = virt_to_page(data);

	copy_gop = npo->copy + npo->copy_prod++;
	copy_gop->flags = GNTCOPY_dest_gref;
	copy_gop->len = *len;
	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		copy_gop->source.domid = foreign->domid;
		copy_gop->source.u.ref = foreign->gref;
		copy_gop->flags |= GNTCOPY_source_gref;
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = gfn;
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid  = DOMID_SELF;
	}
	copy_gop->source.offset = offset;

	copy_gop->dest.domid = queue->vif->domid;
	copy_gop->dest.offset = npo->copy_off;
	copy_gop->dest.u.ref = npo->copy_gref;

	npo->copy_off += *len;
	info->meta->size += *len;

	if (!info->head)
		return;

	/* Leave a gap for the GSO descriptor. */
	if ((1 << info->gso_type) & queue->vif->gso_mask)
		queue->rx.req_cons++;

	/* Leave a gap for the hash extra segment. */
	if (info->hash_present)
		queue->rx.req_cons++;
	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref    = req->gref;
	op->dest.domid    = queue->vif->domid;
	op->dest.offset   = offset;
	op->len           = len;

	info->head = 0; /* There must be something in this buffer now */
	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static void xenvif_gop_frag_copy_grant(unsigned long gfn,
				       unsigned int offset,
				       unsigned int len,
				       void *data)
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	unsigned int bytes;

	while (len) {
		bytes = len;
		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
		offset += bytes;
		len -= bytes;
	}
}

/* Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue,
				 struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gop_frag_copy info = {
		.queue = queue,
		.npo = npo,
		.head = *head,
		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
		/* xenvif_set_skb_hash() will have either set a s/w
		 * hash or cleared the hash depending on
		 * whether the the frontend wants a hash for this skb.
		 */
		.hash_present = skb->sw_hash,
	};
	unsigned long bytes;

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

	/* Data must not cross a page boundary. */
	WARN_ON(size + offset > (PAGE_SIZE << compound_order(page)));

	info.meta = npo->meta + npo->meta_prod - 1;
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	int frag; /* frag == -1 => skb->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	while (size > 0) {
		WARN_ON(offset >= PAGE_SIZE);
	skb = xenvif_rx_dequeue(queue);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;
	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

		info.page = page;
		gnttab_foreach_grant_in_range(page, offset, bytes,
					      xenvif_gop_frag_copy_grant,
					      &info);
		size -= bytes;
		offset = 0;
	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

		/* Next page */
		if (size) {
			WARN_ON(!PageCompound(page));
			page++;
		}
	}
	pkt->skb = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	*head = info.head;
}
	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

/* Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}
		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
	meta = npo->meta + npo->meta_prod++;
		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
		pkt->extra_count++;
	}

	meta->size = 0;
	meta->id = req.id;
	npo->copy_off = 0;
	npo->copy_gref = req.gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;
	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;
		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}
		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}
		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

	return npo->meta_prod - old_meta_prod;
}
		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

/* This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy     *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;
		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		pkt->extra_count++;
	}
}

	return status;
}

static struct xen_netif_rx_response *make_rx_response(
	struct xenvif_queue *queue, u16 id, s8 st, u16 offset, u16 size,
	u16 flags)
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;
	/* Complete any outstanding copy ops for this skb. */
	xenvif_rx_copy_flush(queue);

	queue->rx.rsp_prod_pvt = ++i;
	/* Push responses and notify. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	return resp;
	dev_kfree_skb(pkt->skb);
}

static void xenvif_add_frag_responses(struct xenvif_queue *queue,
				      int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;
	struct sk_buff *skb = pkt->skb;
	void *frag_data;
	size_t frag_len, chunk_len;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;

		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;
	if (pkt->frag == -1) {
		frag_data = skb->data;
		frag_len = skb_headlen(skb);
	} else {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[pkt->frag];

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

static void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = queue->grant_copy_op,
		.meta  = queue->meta,
	};

	skb_queue_head_init(&rxq);
	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	while (xenvif_rx_ring_slots_available(queue) &&
	       (skb = xenvif_rx_dequeue(queue)) != NULL) {
		queue->last_rx_time = jiffies;
	chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min(chunk_len,
			XEN_PAGE_SIZE -	xen_offset_in_page(frag_data));

		XENVIF_RX_CB(skb)->meta_slots_used =
			xenvif_gop_skb(skb, &npo, queue);
	pkt->frag_offset += chunk_len;

		__skb_queue_tail(&rxq, skb);
	/* Advance to next frag? */
	if (frag_len == chunk_len) {
		pkt->frag++;
		pkt->frag_offset = 0;
	}

	WARN_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
	*data = frag_data;
	*len = chunk_len;
}

	if (!npo.copy_prod)
		goto done;
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = 0;
	unsigned int flags;

	WARN_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
	do {
		size_t len;
		void *data;

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		struct xen_netif_extra_info *extra = NULL;
		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;
		offset += len;
		pkt->remaining_len -= len;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);
	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);
		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			extra = (struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);
	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

			resp->flags |= XEN_NETRXF_extra_info;
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

			extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			extra->u.gso.pad = 0;
			extra->u.gso.features = 0;
	pkt->extra_count--;

			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
			extra->flags = 0;
		}
	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

		if (skb->sw_hash) {
			/* Since the skb got here via xenvif_select_queue()
			 * we know that the hash has been re-calculated
			 * according to a configuration set by the frontend
			 * and therefore we know that it is legitimate to
			 * pass it to the frontend.
			 */
			if (resp->flags & XEN_NETRXF_extra_info)
			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
			else
				resp->flags |= XEN_NETRXF_extra_info;

			extra = (struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			extra->u.hash.algorithm =
				XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

			if (skb->l4_hash)
				extra->u.hash.type =
					skb->protocol == htons(ETH_P_IP) ?
					_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
					_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
			else
				extra->u.hash.type =
					skb->protocol == htons(ETH_P_IP) ?
					_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
					_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

			*(uint32_t *)extra->u.hash.value =
				skb_get_hash_raw(skb);
void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

			extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
			extra->flags = 0;
		}
	xenvif_rx_next_skb(queue, &pkt);

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);
	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		need_to_notify |= !!ret;
		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}
		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
	xenvif_rx_complete(queue, &pkt);
}

static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)