
Commit 962082b6 authored by Malli Chilakala, committed by Jeff Garzik

[PATCH] e100: Performance optimizations to e100 Tx Path



Performance optimizations to the e100 Tx path: raise E100_WAIT_SCB_TIMEOUT from 40 to 20000 iterations, since the controller may take up to ~100ms to accept a command; request a reset (via the existing tx_timeout_task) when queuing a command in e100_exec_cb() fails with -ENOSPC; lower the minimum RFD count from 64 to 16; and stop interrupting on every Tx completion by using the CB interrupt-delay bit (cb_cid, ~256us) on 82558 and later parts, falling back to per-packet cb_i on the 82557 and forcing an immediate interrupt every 16 packets regardless of delay.

Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
parent 6bdacb1a
+12 −4
@@ -777,7 +777,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
 	return 0;
 }
 
-#define E100_WAIT_SCB_TIMEOUT 40
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
 	unsigned long flags;
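e100_exec_cmd() busy-waits for the previous SCB command byte to clear before writing a new command, and the constant above bounds that spin; if each iteration waits on the order of 5us, 20000 iterations covers the ~100ms worst case the new comment warns about. Below is a minimal user-space sketch of that bounded-polling pattern, assuming a fixed 5us poll interval; the names (fake_nic, exec_cmd, scb_command) are illustrative, not the driver's.

/* Bounded-polling sketch: spin until a "command" register reads back
 * zero, giving up after a fixed number of short waits. */
#include <stdio.h>
#include <unistd.h>

#define E100_WAIT_SCB_TIMEOUT 20000	/* 20000 * 5us ~= 100ms worst case */

struct fake_nic {
	volatile unsigned char scb_command;	/* stands in for the SCB command byte */
};

static int exec_cmd(struct fake_nic *nic, unsigned char cmd)
{
	int i;

	/* The previous command is accepted once the register reads 0;
	 * fail if it stays busy past the timeout. */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (!nic->scb_command)
			break;
		usleep(5);	/* kernel code would use udelay() here */
	}
	if (i == E100_WAIT_SCB_TIMEOUT)
		return -1;	/* controller stayed busy too long */

	nic->scb_command = cmd;	/* issue the new command */
	return 0;
}

int main(void)
{
	struct fake_nic nic = { .scb_command = 0 };

	printf("exec_cmd: %d\n", exec_cmd(&nic, 0x10));
	return 0;
}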
@@ -847,6 +847,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 			 * because the controller is too busy, so
 			 * let's just queue the command and try again
 			 * when another command is scheduled. */
+			if(err == -ENOSPC) {
+				//request a reset
+				schedule_work(&nic->tx_timeout_task);
+			}
 			break;
 		} else {
 			nic->cuc_cmd = cuc_resume;
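When queuing the command fails with -ENOSPC (the controller stayed busy past the SCB timeout), a full reset cannot be done from this context, so the patch schedules the existing tx_timeout_task and lets the reset run later in process context. The sketch below shows that defer-to-workqueue pattern using the modern two-argument INIT_WORK() (the 2005-era API took a third data argument); fake_nic and tx_timeout_handler are illustrative names, not the driver's.

/* Deferred-reset sketch: mark the need for a reset in a context that
 * cannot sleep, and let a workqueue handler do the heavy lifting. */
#include <linux/module.h>
#include <linux/workqueue.h>

struct fake_nic {
	struct work_struct tx_timeout_task;
};

static struct fake_nic demo_nic;

static void tx_timeout_handler(struct work_struct *work)
{
	struct fake_nic *nic = container_of(work, struct fake_nic, tx_timeout_task);

	/* Runs later in process context, where a full reset may sleep. */
	pr_info("fake_nic %p: resetting after command queue failure\n", nic);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_nic.tx_timeout_task, tx_timeout_handler);
	/* In the driver this happens when the exec path sees -ENOSPC. */
	schedule_work(&demo_nic.tx_timeout_task);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_nic.tx_timeout_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");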
@@ -891,7 +895,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 
 static void e100_get_defaults(struct nic *nic)
 {
-	struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
 	struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
 
 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
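rfds is the permitted range for the receive-frame-descriptor ring, so lowering .min from 64 to 16 is what newly allows very small receive rings. A sketch of how such a range is typically applied, assuming a simple clamp-to-range helper (clamp_param() here is illustrative, not copied from the driver):

/* Clamp a user-supplied ring size into [min, max], using .count as the
 * default when the user passes 0. */
#include <stdio.h>

struct param_range {
	unsigned int min;
	unsigned int max;
	unsigned int count;	/* default when the user passes 0 */
};

static unsigned int clamp_param(const struct param_range *r, unsigned int val)
{
	if (!val)
		return r->count;
	if (val < r->min)
		return r->min;
	if (val > r->max)
		return r->max;
	return val;
}

int main(void)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };

	/* With the old .min of 64, a request for 16 RFDs was bumped to 64. */
	printf("rfds=16 -> %u\n", clamp_param(&rfds, 16));	/* now honoured: 16 */
	printf("rfds=0  -> %u\n", clamp_param(&rfds, 0));	/* default: 64 */
	return 0;
}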
@@ -906,8 +910,9 @@ static void e100_get_defaults(struct nic *nic)
 	/* Quadwords to DMA into FIFO before starting frame transmit */
 	nic->tx_threshold = 0xE0;
 
-	nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
-		((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+	/* no interrupt for every tx completion, delay = 256us if not 557*/
+	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
 
 	/* Template for a freshly allocated RFD */
 	nic->blank_rfd.command = cpu_to_le16(cb_el);
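The old default requested an immediate interrupt (cb_i) for every transmit CB, additionally setting cb_cid on 82558+ parts; the new default sets only the interrupt-delay bit (cb_cid, ~256us per the comment) on 82558 and later, keeping per-packet cb_i only on the older 82557, which lacks the delay feature. A small sketch of the resulting command word; the bit values and enum members here are placeholders preserving only the ordering, not the driver's actual cb_* encodings:

/* Compose the default Tx command word under the new scheme. */
#include <stdio.h>

#define cb_tx		0x0004u	/* transmit command          (placeholder) */
#define cb_tx_sf	0x0008u	/* flexible transmit mode    (placeholder) */
#define cb_cid		0x1f00u	/* completion-interrupt delay (placeholder) */
#define cb_i		0x2000u	/* interrupt immediately     (placeholder) */

enum mac { mac_82557 = 0, mac_82558_D101_A4 = 4 };

static unsigned int tx_command(enum mac mac)
{
	/* 82558+ gets the delayed interrupt; older parts keep per-packet
	 * interrupts since they have no delay feature to fall back on. */
	return cb_tx | cb_tx_sf | ((mac >= mac_82558_D101_A4) ? cb_cid : cb_i);
}

int main(void)
{
	printf("82557: 0x%04x (cb_i set)\n", tx_command(mac_82557));
	printf("82558: 0x%04x (cb_cid set)\n", tx_command(mac_82558_D101_A4));
	return 0;
}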
@@ -1289,12 +1294,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
 	struct sk_buff *skb)
 {
 	cb->command = nic->tx_command;
+	/* interrupt every 16 packets regardless of delay */
+	if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
 	cb->u.tcb.tcb_byte_count = 0;
 	cb->u.tcb.threshold = nic->tx_threshold;
 	cb->u.tcb.tbd_count = 1;
 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
+	// check for mapping failure?
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
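The test (nic->cbs_avail & ~15) == nic->cbs_avail holds exactly when the low four bits of cbs_avail are zero, i.e. when the count of available CBs is a multiple of 16; since cbs_avail changes by one per prepared frame, roughly one frame in sixteen still sets cb_i and interrupts immediately even with the delay enabled. A quick stand-alone check of the bit test:

/* Demonstrate that "(x & ~15) == x" selects multiples of 16. */
#include <stdio.h>

int main(void)
{
	unsigned int cbs_avail;

	for (cbs_avail = 0; cbs_avail <= 48; cbs_avail++)
		if ((cbs_avail & ~15u) == cbs_avail)
			printf("cbs_avail=%u -> cb_i forced\n", cbs_avail);
	/* Prints 0, 16, 32, 48: the multiples of 16 in range. */
	return 0;
}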