
Commit c73d2589 authored by Stephen Hemminger, committed by Jeff Garzik

via-velocity: use netdev_alloc_skb



Use netdev_alloc_skb for rx buffer allocation. This sets skb->dev
and can be overridden for NUMA machines.

Change code to return new buffer rather than call by reference.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
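
As the message above describes, dev_alloc_skb() knows nothing about the network device, so callers had to set skb->dev by hand, while netdev_alloc_skb() takes the device, sets skb->dev itself, and leaves room for device-aware allocation such as NUMA placement. A minimal sketch of that conversion, with hypothetical helper names (not taken from the driver):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Old pattern: dev_alloc_skb() is device-agnostic, so the caller
 * must remember to assign skb->dev itself. */
static struct sk_buff *rx_alloc_old(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (skb)
		skb->dev = dev;
	return skb;
}

/* New pattern: netdev_alloc_skb() records the device and gives the
 * allocator a per-device hook (e.g. to allocate near the device's
 * NUMA node, as the commit message notes). */
static struct sk_buff *rx_alloc_new(struct net_device *dev, unsigned int len)
{
	return netdev_alloc_skb(dev, len);
}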
parent 47f98c7d
+6 −13
@@ -1495,24 +1495,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
  *	enough. This function returns a negative value if the received
  *	packet is too big or if memory is exhausted.
  */
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
-				   struct velocity_info *vptr)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+			    struct velocity_info *vptr)
 {
 	int ret = -1;
-
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = dev_alloc_skb(pkt_size + 2);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
 		if (new_skb) {
-			new_skb->dev = vptr->dev;
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-
-			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
-				skb_reserve(new_skb, 2);
-
-			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
-						  pkt_size);
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1629,7 +1623,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1638,7 +1632,6 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb->dev = vptr->dev;
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
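
The first hunk also drops the VELOCITY_FLAGS_IP_ALIGN test and reserves two bytes unconditionally before copying, so the IP header of the small copied packet lands on a four-byte boundary. A sketch of a copybreak path in the shape the patch leaves behind; the names rx_copybreak_skb and my_rx_copybreak are illustrative and not the driver's:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_rx_copybreak = 256;	/* illustrative threshold */

/* Copy a small received packet into a fresh, right-sized skb so the
 * large rx buffer can be reposted to the hardware. Returns the new
 * skb, or NULL to keep using the original buffer. */
static struct sk_buff *rx_copybreak_skb(struct net_device *dev,
					struct sk_buff *big, int pkt_size)
{
	struct sk_buff *small;

	if (pkt_size >= my_rx_copybreak)
		return NULL;

	small = netdev_alloc_skb(dev, pkt_size + 2);
	if (!small)
		return NULL;

	skb_reserve(small, 2);			/* align the IP header */
	small->ip_summed = big->ip_summed;
	skb_copy_from_linear_data(big, small->data, pkt_size);
	return small;
}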