Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bf04415e authored by Mark Einon, committed by Greg Kroah-Hartman
Browse files

staging: et131x: Converting et1310_adapter.h variable names from CamelCase



Tested on an ET-131x device.

Signed-off-by: Mark Einon <mark.einon@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 567486ac
Loading
Loading
Loading
Loading
+39 −38
Original line number Diff line number Diff line
@@ -149,10 +149,10 @@ void et1310_config_mac_regs1(struct et131x_adapter *etdev)
	 * being truncated.  Allow the MAC to pass 4 more than our max packet
	 * size.  This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (RegistryJumboPacket) that do not contain a
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(etdev->RegistryJumboPacket + 4, &macregs->max_fm_len);
	writel(etdev->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
@@ -294,7 +294,7 @@ void et1310_config_rxmac_regs(struct et131x_adapter *etdev)
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (etdev->PacketFilter & ET131X_PACKET_TYPE_DIRECTED) {
	if (etdev->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(etdev);
		pf_ctrl |= 4;	/* Unicast filter */
	} else {
@@ -304,7 +304,7 @@ void et1310_config_rxmac_regs(struct et131x_adapter *etdev)
	}

	/* Let's initialize the Multicast hash */
	if (!(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
	if (!(etdev->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= 2;	/* Multicast filter */
		et1310_setup_device_for_multicast(etdev);
	}
@@ -313,7 +313,7 @@ void et1310_config_rxmac_regs(struct et131x_adapter *etdev)
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
	pf_ctrl |= 8;	/* Fragment filter */

	if (etdev->RegistryJumboPacket > 8192)
	if (etdev->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size).  In order to implement this,
@@ -489,22 +489,22 @@ void et1310_update_macstat_host_counters(struct et131x_adapter *etdev)
	struct macstat_regs __iomem *macstat =
		&etdev->regs->macstat;

	stats->collisions           += readl(&macstat->tx_total_collisions);
	stats->first_collision      += readl(&macstat->tx_single_collisions);
	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
	stats->tx_deferred	       += readl(&macstat->tx_deferred);
	stats->excessive_collisions += readl(&macstat->tx_multiple_collisions);
	stats->late_collisions      += readl(&macstat->tx_late_collisions);
	stats->tx_uflo              += readl(&macstat->tx_undersize_frames);
	stats->max_pkt_error        += readl(&macstat->tx_oversize_frames);

	stats->alignment_err        += readl(&macstat->rx_align_errs);
	stats->crc_err              += readl(&macstat->rx_code_errs);
	stats->norcvbuf             += readl(&macstat->rx_drops);
	stats->rx_ov_flow           += readl(&macstat->rx_oversize_packets);
	stats->code_violations      += readl(&macstat->rx_fcs_errs);
	stats->length_err           += readl(&macstat->rx_frame_len_errs);

	stats->other_errors         += readl(&macstat->rx_fragment_packets);
	stats->tx_excessive_collisions +=
				readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs        += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs          += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped    += readl(&macstat->rx_drops);
	stats->rx_overflows         += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations   += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs       += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs        += readl(&macstat->rx_fragment_packets);
}

/**
@@ -536,33 +536,33 @@ void et1310_handle_macstat_interrupt(struct et131x_adapter *etdev)
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		etdev->stats.code_violations      += COUNTER_WRAP_16_BIT;
		etdev->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		etdev->stats.alignment_err        += COUNTER_WRAP_12_BIT;
		etdev->stats.rx_align_errs	+= COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		etdev->stats.length_err           += COUNTER_WRAP_16_BIT;
		etdev->stats.rx_length_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		etdev->stats.other_errors         += COUNTER_WRAP_16_BIT;
		etdev->stats.rx_other_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		etdev->stats.crc_err              += COUNTER_WRAP_16_BIT;
		etdev->stats.rx_crc_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		etdev->stats.rx_ov_flow           += COUNTER_WRAP_16_BIT;
		etdev->stats.rx_overflows	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		etdev->stats.norcvbuf             += COUNTER_WRAP_16_BIT;
		etdev->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		etdev->stats.max_pkt_error        += COUNTER_WRAP_12_BIT;
		etdev->stats.tx_max_pkt_errs	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		etdev->stats.tx_uflo              += COUNTER_WRAP_12_BIT;
		etdev->stats.tx_underflows	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		etdev->stats.first_collision      += COUNTER_WRAP_12_BIT;
		etdev->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		etdev->stats.tx_deferred	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		etdev->stats.excessive_collisions += COUNTER_WRAP_12_BIT;
		etdev->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		etdev->stats.late_collisions      += COUNTER_WRAP_12_BIT;
		etdev->stats.tx_late_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		etdev->stats.collisions           += COUNTER_WRAP_12_BIT;
		etdev->stats.tx_collisions	+= COUNTER_WRAP_12_BIT;
}

void et1310_setup_device_for_multicast(struct et131x_adapter *etdev)
@@ -581,10 +581,11 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *etdev)
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) {
	if (etdev->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		/* Loop through our multicast array and set up the device */
		for (nIndex = 0; nIndex < etdev->MCAddressCount; nIndex++) {
			result = ether_crc(6, etdev->MCList[nIndex]);
		for (nIndex = 0; nIndex < etdev->multicast_addr_count;
		     nIndex++) {
			result = ether_crc(6, etdev->multicast_list[nIndex]);

			result = (result & 0x3F800000) >> 23;

+18 −18
Original line number Diff line number Diff line
@@ -596,7 +596,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
	}

	/* Determine if we need to go into a force mode and set it */
	if (etdev->AiForceSpeed == 0 && etdev->AiForceDpx == 0) {
	if (etdev->ai_force_speed == 0 && etdev->ai_force_duplex == 0) {
		if (etdev->wanted_flow == FLOW_TXONLY ||
		    etdev->wanted_flow == FLOW_BOTH)
			et1310_phy_access_mii_bit(etdev,
@@ -623,7 +623,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
	et1310_phy_auto_neg(etdev, false);

	/* Set to the correct force mode. */
	if (etdev->AiForceDpx != 1) {
	if (etdev->ai_force_duplex != 1) {
		if (etdev->wanted_flow == FLOW_TXONLY ||
		    etdev->wanted_flow == FLOW_BOTH)
			et1310_phy_access_mii_bit(etdev,
@@ -645,16 +645,16 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
					  4, 11, NULL);
	}
	et1310_phy_power_down(etdev, 1);
	switch (etdev->AiForceSpeed) {
	switch (etdev->ai_force_speed) {
	case 10:
		/* First we need to turn off all other advertisement */
		et1310_phy_advertise_1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
		et1310_phy_advertise_100BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
		if (etdev->AiForceDpx == 1) {
		if (etdev->ai_force_duplex == 1) {
			/* Set our advertise values accordingly */
			et1310_phy_advertise_10BaseT(etdev,
						TRUEPHY_ADV_DUPLEX_HALF);
		} else if (etdev->AiForceDpx == 2) {
		} else if (etdev->ai_force_duplex == 2) {
			/* Set our advertise values accordingly */
			et1310_phy_advertise_10BaseT(etdev,
						TRUEPHY_ADV_DUPLEX_FULL);
@@ -674,13 +674,13 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
		/* first we need to turn off all other advertisement */
		et1310_phy_advertise_1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
		et1310_phy_advertise_10BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
		if (etdev->AiForceDpx == 1) {
		if (etdev->ai_force_duplex == 1) {
			/* Set our advertise values accordingly */
			et1310_phy_advertise_100BaseT(etdev,
						TRUEPHY_ADV_DUPLEX_HALF);
			/* Set speed */
			et1310_phy_speed_select(etdev, TRUEPHY_SPEED_100MBPS);
		} else if (etdev->AiForceDpx == 2) {
		} else if (etdev->ai_force_duplex == 2) {
			/* Set our advertise values accordingly */
			et1310_phy_advertise_100BaseT(etdev,
						TRUEPHY_ADV_DUPLEX_FULL);
@@ -741,11 +741,11 @@ void et131x_mii_check(struct et131x_adapter *etdev,
			/* Update our state variables and indicate the
			 * connected state
			 */
			spin_lock_irqsave(&etdev->Lock, flags);
			spin_lock_irqsave(&etdev->lock, flags);

			etdev->MediaState = NETIF_STATUS_MEDIA_CONNECT;
			etdev->media_state = NETIF_STATUS_MEDIA_CONNECT;

			spin_unlock_irqrestore(&etdev->Lock, flags);
			spin_unlock_irqrestore(&etdev->lock, flags);

			netif_carrier_on(etdev->netdev);
		} else {
@@ -774,11 +774,11 @@ void et131x_mii_check(struct et131x_adapter *etdev,
			 * Timer expires, we can report disconnected (handled
			 * in the LinkDetectionDPC).
			 */
			if ((etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
				spin_lock_irqsave(&etdev->Lock, flags);
				etdev->MediaState =
			if (etdev->media_state == NETIF_STATUS_MEDIA_DISCONNECT) {
				spin_lock_irqsave(&etdev->lock, flags);
				etdev->media_state =
				    NETIF_STATUS_MEDIA_DISCONNECT;
				spin_unlock_irqrestore(&etdev->Lock,
				spin_unlock_irqrestore(&etdev->lock,
						       flags);

				netif_carrier_off(etdev->netdev);
@@ -810,15 +810,15 @@ void et131x_mii_check(struct et131x_adapter *etdev,
			/* Setup the PHY into coma mode until the cable is
			 * plugged back in
			 */
			if (etdev->RegistryPhyComa == 1)
			if (etdev->registry_phy_coma == 1)
				et1310_enable_phy_coma(etdev);
		}
	}

	if ((bmsr_ints & MI_BMSR_AUTO_NEG_COMPLETE) ||
	    (etdev->AiForceDpx == 3 && (bmsr_ints & MI_BMSR_LINK_STATUS))) {
	   (etdev->ai_force_duplex == 3 && (bmsr_ints & MI_BMSR_LINK_STATUS))) {
		if ((bmsr & MI_BMSR_AUTO_NEG_COMPLETE) ||
		    etdev->AiForceDpx == 3) {
		    etdev->ai_force_duplex == 3) {
			et1310_phy_link_status(etdev,
					     &link_status, &autoneg_status,
					     &speed, &duplex, &mdi_mdix,
@@ -849,7 +849,7 @@ void et131x_mii_check(struct et131x_adapter *etdev,
			et1310_config_flow_control(etdev);

			if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS &&
					etdev->RegistryJumboPacket > 2048)
					etdev->registry_jumbo_packet > 2048)
				et1310_phy_and_or_reg(etdev, 0x16, 0xcfff,
								   0x2000);

+4 −4
Original line number Diff line number Diff line
@@ -116,8 +116,8 @@ void et1310_enable_phy_coma(struct et131x_adapter *etdev)
	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */
	etdev->pdown_speed = etdev->AiForceSpeed;
	etdev->pdown_duplex = etdev->AiForceDpx;
	etdev->pdown_speed = etdev->ai_force_speed;
	etdev->pdown_duplex = etdev->ai_force_duplex;

	/* Stop sending packets. */
	spin_lock_irqsave(&etdev->send_hw_lock, flags);
@@ -153,8 +153,8 @@ void et1310_disable_phy_coma(struct et131x_adapter *etdev)
	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	etdev->AiForceSpeed = etdev->pdown_speed;
	etdev->AiForceDpx = etdev->pdown_duplex;
	etdev->ai_force_speed = etdev->pdown_speed;
	etdev->ai_force_duplex = etdev->pdown_duplex;

	/* Re-initialize the send structures */
	et131x_init_send(etdev);
+32 −22
Original line number Diff line number Diff line
@@ -149,14 +149,14 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
	 * the number of entries halves.  FBR0 increases in size, however.
	 */

	if (adapter->RegistryJumboPacket < 2048) {
	if (adapter->registry_jumbo_packet < 2048) {
#ifdef USE_FBR0
		rx_ring->fbr0_buffsize = 256;
		rx_ring->fbr0_num_entries = 512;
#endif
		rx_ring->fbr1_buffsize = 2048;
		rx_ring->fbr1_num_entries = 512;
	} else if (adapter->RegistryJumboPacket < 4096) {
	} else if (adapter->registry_jumbo_packet < 4096) {
#ifdef USE_FBR0
		rx_ring->fbr0_buffsize = 512;
		rx_ring->fbr0_num_entries = 1024;
@@ -755,7 +755,7 @@ static void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
	    (ring_index == 0 && buff_index < rx_local->fbr0_num_entries) ||
#endif
	    (ring_index == 1 && buff_index < rx_local->fbr1_num_entries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);
		spin_lock_irqsave(&etdev->fbr_lock, flags);

		if (ring_index == 1) {
			struct fbr_desc *next =
@@ -793,7 +793,7 @@ static void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
		spin_unlock_irqrestore(&etdev->fbr_lock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			  "%s illegal Buffer Index returned\n", __func__);
@@ -983,18 +983,18 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
	 * also counted here.
	 */
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		etdev->stats.other_errors++;
		etdev->stats.rx_other_errs++;
		len = 0;
	}

	if (len) {
		if (etdev->ReplicaPhyLoopbk == 1) {
		if (etdev->replica_phy_loopbk == 1) {
			buf = rx_local->fbr[ring_index]->virt[buff_index];

			if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) {
				if (memcmp(&buf[42], "Replica packet",
					   ETH_HLEN)) {
					etdev->ReplicaPhyLoopbkPF = 1;
					etdev->replica_phy_loopbk_passfail = 1;
				}
			}
		}
@@ -1009,9 +1009,12 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
			 * filters. Generally filter is 0x2b when in
			 * promiscuous mode.
			 */
			if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
			if ((etdev->packet_filter &
					ET131X_PACKET_TYPE_MULTICAST)
			    && !(etdev->packet_filter &
					ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(etdev->packet_filter &
					ET131X_PACKET_TYPE_ALL_MULTICAST)) {
				buf = rx_local->fbr[ring_index]->
						virt[buff_index];

@@ -1019,13 +1022,20 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
				 * destination address of this packet
				 * matches one in our list.
				 */
				for (i = 0; i < etdev->MCAddressCount; i++) {
					if (buf[0] == etdev->MCList[i][0]
					    && buf[1] == etdev->MCList[i][1]
					    && buf[2] == etdev->MCList[i][2]
					    && buf[3] == etdev->MCList[i][3]
					    && buf[4] == etdev->MCList[i][4]
					    && buf[5] == etdev->MCList[i][5]) {
				for (i = 0; i < etdev->multicast_addr_count;
				     i++) {
					if (buf[0] ==
						etdev->multicast_list[i][0]
					    && buf[1] ==
						etdev->multicast_list[i][1]
					    && buf[2] ==
						etdev->multicast_list[i][2]
					    && buf[3] ==
						etdev->multicast_list[i][3]
					    && buf[4] ==
						etdev->multicast_list[i][4]
					    && buf[5] ==
						etdev->multicast_list[i][5]) {
						break;
					}
				}
@@ -1038,21 +1048,21 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
				 * so we free our RFD when we return
				 * from this function.
				 */
				if (i == etdev->MCAddressCount)
				if (i == etdev->multicast_addr_count)
					len = 0;
			}

			if (len > 0)
				etdev->stats.multircv++;
				etdev->stats.multicast_pkts_rcvd++;
		} else if (word0 & ALCATEL_BROADCAST_PKT)
			etdev->stats.brdcstrcv++;
			etdev->stats.broadcast_pkts_rcvd++;
		else
			/* Not sure what this counter measures in
			 * promiscuous mode. Perhaps we should check
			 * the MAC address to see if it is directed
			 * to us in promiscuous mode.
			 */
			etdev->stats.unircv++;
			etdev->stats.unicast_pkts_rcvd++;
	}

	if (len > 0) {
@@ -1128,7 +1138,7 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!etdev->PacketFilter ||
		if (!etdev->packet_filter ||
		    !netif_carrier_ok(etdev->netdev) ||
		    rfd->len == 0)
			continue;
+22 −22
Original line number Diff line number Diff line
@@ -456,7 +456,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
	} else
		tcb->index = etdev->tx_ring.send_idx - 1;

	spin_lock(&etdev->TCBSendQLock);
	spin_lock(&etdev->tcb_send_qlock);

	if (etdev->tx_ring.send_tail)
		etdev->tx_ring.send_tail->next = tcb;
@@ -469,7 +469,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)

	etdev->tx_ring.used++;

	spin_unlock(&etdev->TCBSendQLock);
	spin_unlock(&etdev->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(etdev->tx_ring.send_idx,
@@ -508,12 +508,12 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *etdev)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
	spin_lock_irqsave(&etdev->tcb_ready_qlock, flags);

	tcb = etdev->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		spin_unlock_irqrestore(&etdev->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

@@ -522,7 +522,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *etdev)
	if (etdev->tx_ring.tcb_qhead == NULL)
		etdev->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	spin_unlock_irqrestore(&etdev->tcb_ready_qlock, flags);

	tcb->skb = skb;

@@ -543,7 +543,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *etdev)
	status = nic_send_packet(etdev, tcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
		spin_lock_irqsave(&etdev->tcb_ready_qlock, flags);

		if (etdev->tx_ring.tcb_qtail)
			etdev->tx_ring.tcb_qtail->next = tcb;
@@ -552,7 +552,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *etdev)
			etdev->tx_ring.tcb_qhead = tcb;

		etdev->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		spin_unlock_irqrestore(&etdev->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(etdev->tx_ring.used > NUM_TCB);
@@ -627,11 +627,11 @@ static inline void free_send_packet(struct et131x_adapter *etdev,
	struct net_device_stats *stats = &etdev->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->stats.brdcstxmt);
		atomic_inc(&etdev->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->stats.multixmt);
		atomic_inc(&etdev->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&etdev->stats.unixmt);
		atomic_inc(&etdev->stats.unicast_pkts_xmtd);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;
@@ -663,7 +663,7 @@ static inline void free_send_packet(struct et131x_adapter *etdev,
	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
	spin_lock_irqsave(&etdev->tcb_ready_qlock, flags);

	etdev->net_stats.tx_packets++;

@@ -675,7 +675,7 @@ static inline void free_send_packet(struct et131x_adapter *etdev,

	etdev->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	spin_unlock_irqrestore(&etdev->tcb_ready_qlock, flags);
	WARN_ON(etdev->tx_ring.used < 0);
}

@@ -692,7 +692,7 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
	spin_lock_irqsave(&etdev->tcb_send_qlock, flags);

	tcb = etdev->tx_ring.send_head;

@@ -706,19 +706,19 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)

		etdev->tx_ring.used--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		spin_unlock_irqrestore(&etdev->tcb_send_qlock, flags);

		freed++;
		free_send_packet(etdev, tcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
		spin_lock_irqsave(&etdev->tcb_send_qlock, flags);

		tcb = etdev->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
	spin_unlock_irqrestore(&etdev->tcb_send_qlock, flags);

	etdev->tx_ring.used = 0;
}
@@ -745,7 +745,7 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
	spin_lock_irqsave(&etdev->tcb_send_qlock, flags);

	tcb = etdev->tx_ring.send_head;

@@ -757,9 +757,9 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		spin_unlock_irqrestore(&etdev->tcb_send_qlock, flags);
		free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
		spin_lock_irqsave(&etdev->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = etdev->tx_ring.send_head;
@@ -772,9 +772,9 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		spin_unlock_irqrestore(&etdev->tcb_send_qlock, flags);
		free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
		spin_lock_irqsave(&etdev->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = etdev->tx_ring.send_head;
@@ -784,6 +784,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
	if (etdev->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
	spin_unlock_irqrestore(&etdev->tcb_send_qlock, flags);
}
Loading