
Commit e2adbcb4 authored by Santiago Leon, committed by Jeff Garzik

[PATCH] ibmveth fix buffer replenishing

This patch moves the allocation of RX skb buffers out of a workqueue so
that it is done directly at RX processing time.  The change was suggested
by Dave Miller after the driver was found to starve its RX buffers and
deadlock under heavy traffic:

> Allocating RX SKBs via tasklet is, IMHO, the worst way to
> do it.  It is no surprise that there are starvation cases.
>
> If tasklets or work queues get delayed in any way, you lose,
> and it's very easy for a card to catch up with the driver RX'ing
> packets very fast, no matter how aggressive you make the
> replenishing.  By the time you detect that you need to be
> "more aggressive" it is already too late.
> The only pseudo-reliable way is to allocate at RX processing time.
>

Signed-off-by: Santiago Leon <santil@us.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
parent b6d35182
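
The sketch below is a minimal, user-space illustration of the pattern this patch adopts: the poll path consumes frames and then refills the receive ring synchronously before returning, instead of deferring replenishment to an asynchronous task that may run too late under load. It is not driver code; the names rx_ring, rx_refill, and rx_poll are hypothetical simplifications, not ibmveth symbols.

/*
 * Minimal sketch (user space, not kernel code) of synchronous refill in
 * the poll path.  Assumes a fixed-size ring of posted buffers.
 */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 16

struct rx_ring {
	void *buf[RING_SIZE];	/* posted receive buffers */
	int   free_slots;	/* slots with no buffer posted */
};

/* Post fresh buffers for every empty slot; runs in the poll path itself,
 * so replenishment can never lag more than one poll cycle behind. */
static void rx_refill(struct rx_ring *ring)
{
	int i;

	for (i = 0; i < RING_SIZE && ring->free_slots > 0; i++) {
		if (!ring->buf[i]) {
			ring->buf[i] = malloc(2048);	/* stand-in for an skb */
			if (!ring->buf[i])
				break;			/* retry on the next poll */
			ring->free_slots--;
		}
	}
}

/* Consume up to "budget" frames, then refill before returning. */
static int rx_poll(struct rx_ring *ring, int budget)
{
	int done = 0;
	int i;

	for (i = 0; i < RING_SIZE && done < budget; i++) {
		if (ring->buf[i]) {
			free(ring->buf[i]);	/* "deliver" the frame */
			ring->buf[i] = NULL;
			ring->free_slots++;
			done++;
		}
	}

	/* The key change: replenish here, synchronously, rather than
	 * scheduling deferred work. */
	rx_refill(ring);
	return done;
}

int main(void)
{
	struct rx_ring ring = { .free_slots = RING_SIZE };
	int cycle;

	rx_refill(&ring);	/* initial fill, as ibmveth_open now does */
	for (cycle = 0; cycle < 4; cycle++)
		printf("poll cycle %d processed %d frames\n",
		       cycle, rx_poll(&ring, 8));
	return 0;
}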
drivers/net/ibmveth.c (+8 −40)
@@ -96,7 +96,6 @@ static void ibmveth_proc_unregister_driver(void);
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);

 #ifdef CONFIG_PROC_FS
@@ -257,29 +256,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	atomic_add(buffers_added, &(pool->available));
 }

-/* check if replenishing is needed.  */
-static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
-{
-	int i;
-
-	for(i = 0; i < IbmVethNumBufferPools; i++)
-		if(adapter->rx_buff_pool[i].active &&
-		  (atomic_read(&adapter->rx_buff_pool[i].available) <
-		   adapter->rx_buff_pool[i].threshold))
-			return 1;
-	return 0;
-}
-
-/* kick the replenish tasklet if we need replenishing and it isn't already running */
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
-{
-	if(ibmveth_is_replenishing_needed(adapter) &&
-	   (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
-		schedule_work(&adapter->replenish_task);
-	}
-}
-
-/* replenish tasklet routine */
+/* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
 	int i;
@@ -292,10 +269,6 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 						     &adapter->rx_buff_pool[i]);

 	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
-
-	atomic_inc(&adapter->not_replenishing);
-
-	ibmveth_schedule_replenishing(adapter);
 }

 /* empty and free ana buffer pool - also used to do cleanup in error paths */
@@ -563,10 +536,10 @@ static int ibmveth_open(struct net_device *netdev)
 		return rc;
 	}

-	netif_start_queue(netdev);
+	ibmveth_debug_printk("initial replenish cycle\n");
+	ibmveth_replenish_task(adapter);

-	ibmveth_debug_printk("scheduling initial replenish cycle\n");
-	ibmveth_schedule_replenishing(adapter);
+	netif_start_queue(netdev);

 	ibmveth_debug_printk("open complete\n");

@@ -584,9 +557,6 @@ static int ibmveth_close(struct net_device *netdev)

 	free_irq(netdev->irq, netdev);

-	cancel_delayed_work(&adapter->replenish_task);
-	flush_scheduled_work();
-
 	do {
 		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -795,7 +765,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
 		}
 	} while(more_work && (frames_processed < max_frames_to_process));

-	ibmveth_schedule_replenishing(adapter);
+	ibmveth_replenish_task(adapter);

 	if(more_work) {
 		/* more work to do - return that we are not done yet */
@@ -931,8 +901,10 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)

 	}

+	/* kick the interrupt handler so that the new buffer pools get
+	   replenished or deallocated */
+	ibmveth_interrupt(dev->irq, dev, NULL);

-	ibmveth_schedule_replenishing(adapter);
 	dev->mtu = new_mtu;
 	return 0;
 }
@@ -1017,14 +989,10 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_

 	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

-	INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
-
 	adapter->buffer_list_dma = DMA_ERROR_CODE;
 	adapter->filter_list_dma = DMA_ERROR_CODE;
 	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

-	atomic_set(&adapter->not_replenishing, 1);
-
 	ibmveth_debug_printk("registering netdev...\n");

 	rc = register_netdev(netdev);
drivers/net/ibmveth.h (+0 −4)
@@ -118,10 +118,6 @@ struct ibmveth_adapter {
     dma_addr_t filter_list_dma;
     struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
     struct ibmveth_rx_q rx_queue;
-    atomic_t not_replenishing;
-
-    /* helper tasks */
-    struct work_struct replenish_task;

     /* adapter specific stats */
     u64 replenish_task_cycles;