Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7a6e288d authored by Daniel Borkmann, committed by David S. Miller
Browse files

pktgen: ipv6: numa: consolidate skb allocation to pktgen_alloc_skb



We currently allow for numa-node aware skb allocation only within the
fill_packet_ipv4() path, but not in fill_packet_ipv6(). Consolidate that
code to a common allocation helper to enable numa-node aware skb
allocation for ipv6, and use it in both paths. This also makes both
functions a bit more readable.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent da5bab07
Loading
Loading
Loading
Loading
+27 −25
Original line number Diff line number Diff line
@@ -2627,6 +2627,29 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
	pgh->tv_usec = htonl(timestamp.tv_usec);
}

/*
 * Allocate an skb for a pktgen packet, honouring the device's NUMA
 * placement preference when the F_NODE flag is set.
 *
 * @dev:      output device the skb will be sent on
 * @pkt_dev:  pktgen device config (packet size, overhead, flags, node)
 * @extralen: extra bytes the caller needs beyond the configured size
 *
 * Returns the allocated skb with skb->dev set, or NULL on failure.
 */
static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
					struct pktgen_dev *pkt_dev,
					unsigned int extralen)
{
	unsigned int len = pkt_dev->cur_pkt_size + 64 + extralen +
			   pkt_dev->pkt_overhead;
	struct sk_buff *skb;
	int node;

	/* Default path: let the netdev helper pick the allocation node. */
	if (!(pkt_dev->flags & F_NODE))
		return __netdev_alloc_skb(dev, len, GFP_NOWAIT);

	/* NUMA-aware path: pin to the configured node, or the local one. */
	node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();

	skb = __alloc_skb(NET_SKB_PAD + len, GFP_NOWAIT, 0, node);
	if (skb) {
		/* Mirror what __netdev_alloc_skb() does for us. */
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}

	return skb;
}

static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
					struct pktgen_dev *pkt_dev)
{
@@ -2657,32 +2680,13 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,

	datalen = (odev->hard_header_len + 16) & ~0xf;

	if (pkt_dev->flags & F_NODE) {
		int node;

		if (pkt_dev->node >= 0)
			node = pkt_dev->node;
		else
			node =  numa_node_id();

		skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
				  + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
		if (likely(skb)) {
			skb_reserve(skb, NET_SKB_PAD);
			skb->dev = odev;
		}
	}
	else
	  skb = __netdev_alloc_skb(odev,
				   pkt_dev->cur_pkt_size + 64
				   + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);

	skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}
	prefetchw(skb->data);

	prefetchw(skb->data);
	skb_reserve(skb, datalen);

	/*  Reserve for ethernet and IP header  */
@@ -2786,15 +2790,13 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
	mod_cur_headers(pkt_dev);
	queue_map = pkt_dev->cur_queue_map;

	skb = __netdev_alloc_skb(odev,
				 pkt_dev->cur_pkt_size + 64
				 + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
	skb = pktgen_alloc_skb(odev, pkt_dev, 16);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}
	prefetchw(skb->data);

	prefetchw(skb->data);
	skb_reserve(skb, 16);

	/*  Reserve for ethernet and IP header  */