
Commit 13d64285 authored by Lennert Buytenhek

mv643xx_eth: split out tx queue state



Split all TX queue related state into 'struct tx_queue', in
preparation for multiple TX queue support.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
parent 8a578111
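The refactoring pattern behind this patch, in brief: all per-queue TX bookkeeping moves out of the driver-private struct into its own 'struct tx_queue', the queue is embedded in the private struct as txq[1] (txq[N] once multiple TX queues arrive), and queue helpers recover the owning private struct through container_of(). A minimal, self-contained sketch of that idiom follows; the names eth_private and txq_to_priv are illustrative only, not the driver's own:

    #include <stddef.h>
    #include <stdio.h>

    /* container_of in the kernel's style: recover the parent from a member pointer. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tx_queue {
            int tx_ring_size;
            int tx_desc_count;
            int tx_curr_desc;
            int tx_used_desc;
    };

    struct eth_private {
            int port_num;
            struct tx_queue txq[1];   /* becomes txq[N] for multiple TX queues */
    };

    static struct eth_private *txq_to_priv(struct tx_queue *txq)
    {
            return container_of(txq, struct eth_private, txq[0]);
    }

    int main(void)
    {
            struct eth_private priv = { .port_num = 3 };

            /* A queue helper needs only the txq pointer; the port is reachable via the parent. */
            printf("queue belongs to port %d\n", txq_to_priv(priv.txq)->port_num);
            return 0;
    }

Passing only the tx_queue pointer around keeps the per-queue helpers (txq_enable, txq_reclaim, txq_init, ...) unchanged when the driver later grows more than one TX queue.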
+254 −256
@@ -297,38 +297,30 @@ struct rx_queue {
 	struct timer_list rx_oom;
 };

-struct mv643xx_eth_private {
-	struct mv643xx_eth_shared_private *shared;
-	int port_num;			/* User Ethernet port number	*/
-
-	struct mv643xx_eth_shared_private *shared_smi;
-
-	u32 tx_sram_addr;		/* Base address of tx sram area */
-	u32 tx_sram_size;		/* Size of tx sram area		*/
-
-	/* Tx/Rx rings managment indexes fields. For driver use */
-
-	/* Next available and first returning Tx resource */
-	int tx_curr_desc, tx_used_desc;
+struct tx_queue {
+	int tx_ring_size;

-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	u32 tx_clean_threshold;
-#endif
+	int tx_desc_count;
+	int tx_curr_desc;
+	int tx_used_desc;

 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 	struct sk_buff **tx_skb;
+};
+
+struct mv643xx_eth_private {
+	struct mv643xx_eth_shared_private *shared;
+	int port_num;			/* User Ethernet port number	*/
+
+	struct mv643xx_eth_shared_private *shared_smi;

 	struct work_struct tx_timeout_task;

 	struct net_device *dev;
 	struct mib_counters mib_counters;
 	spinlock_t lock;
-	/* Size of Tx Ring per queue */
-	int tx_ring_size;
-	/* Number of tx descriptors in use */
-	int tx_desc_count;

 	u32 rx_int_coal;
 	u32 tx_int_coal;
@@ -342,6 +334,17 @@ struct mv643xx_eth_private {
 	int rx_desc_sram_size;
 	struct napi_struct napi;
 	struct rx_queue rxq[1];
+
+	/*
+	 * TX state.
+	 */
+	int default_tx_ring_size;
+	unsigned long tx_desc_sram_addr;
+	int tx_desc_sram_size;
+	struct tx_queue txq[1];
+#ifdef MV643XX_ETH_TX_FAST_REFILL
+	int tx_clean_threshold;
+#endif
 };


@@ -363,6 +366,11 @@ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
 }

+static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
+{
+	return container_of(txq, struct mv643xx_eth_private, txq[0]);
+}
+
 static void rxq_enable(struct rx_queue *rxq)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
@@ -379,39 +387,33 @@ static void rxq_disable(struct rx_queue *rxq)
 		udelay(10);
 }

-static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
-					unsigned int queues)
+static void txq_enable(struct tx_queue *txq)
 {
-	wrl(mp, TXQ_COMMAND(mp->port_num), queues);
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
 }

-static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
+static void txq_disable(struct tx_queue *txq)
 {
-	unsigned int port_num = mp->port_num;
-	u32 queues;
-
-	/* Stop Tx port activity. Check port Tx activity. */
-	queues = rdl(mp, TXQ_COMMAND(port_num)) & 0xFF;
-	if (queues) {
-		/* Issue stop command for active queues only */
-		wrl(mp, TXQ_COMMAND(port_num), (queues << 8));
-
-		/* Wait for all Tx activity to terminate. */
-		/* Check port cause register that all Tx queues are stopped */
-		while (rdl(mp, TXQ_COMMAND(port_num)) & 0xFF)
-			udelay(10);
-
-		/* Wait for Tx FIFO to empty */
-		while (rdl(mp, PORT_STATUS(port_num)) & TX_FIFO_EMPTY)
-			udelay(10);
-	}
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	u8 mask = 1;

-	return queues;
+	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
+	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
+		udelay(10);
+}
+
+static void __txq_maybe_wake(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+
+	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
+		netif_wake_queue(mp->dev);
 }


 /* rx ***********************************************************************/
-static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
+static void txq_reclaim(struct tx_queue *txq, int force);

 static void rxq_refill(struct rx_queue *rxq)
 {
@@ -571,7 +573,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)

 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		mv643xx_eth_free_completed_tx_descs(mp->dev);
+		txq_reclaim(mp->txq, 0);
 		mp->tx_clean_threshold = 0;
 	}
 #endif
@@ -593,55 +595,59 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 /* tx ***********************************************************************/
 static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 {
-	unsigned int frag;
-	skb_frag_t *fragp;
+	int frag;

 	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
-		fragp = &skb_shinfo(skb)->frags[frag];
-		if (fragp->size <= 8 && fragp->page_offset & 0x7)
+		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+		if (fragp->size <= 8 && fragp->page_offset & 7)
 			return 1;
 	}
+
 	return 0;
 }

-static int alloc_tx_desc_index(struct mv643xx_eth_private *mp)
+static int txq_alloc_desc_index(struct tx_queue *txq)
 {
 	int tx_desc_curr;

-	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
+	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);

-	tx_desc_curr = mp->tx_curr_desc;
-	mp->tx_curr_desc = (tx_desc_curr + 1) % mp->tx_ring_size;
+	tx_desc_curr = txq->tx_curr_desc;
+	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;

-	BUG_ON(mp->tx_curr_desc == mp->tx_used_desc);
+	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);

 	return tx_desc_curr;
 }

-static void tx_fill_frag_descs(struct mv643xx_eth_private *mp,
-				   struct sk_buff *skb)
+static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int frag;
-	int tx_index;
-	struct tx_desc *desc;

-	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
-		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+	for (frag = 0; frag < nr_frags; frag++) {
+		skb_frag_t *this_frag;
+		int tx_index;
+		struct tx_desc *desc;

-		tx_index = alloc_tx_desc_index(mp);
-		desc = &mp->tx_desc_area[tx_index];
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		tx_index = txq_alloc_desc_index(txq);
+		desc = &txq->tx_desc_area[tx_index];

-		desc->cmd_sts = BUFFER_OWNED_BY_DMA;
-		/* Last Frag enables interrupt and frees the skb */
-		if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
-			desc->cmd_sts |= ZERO_PADDING |
-					 TX_LAST_DESC |
-					 TX_ENABLE_INTERRUPT;
-			mp->tx_skb[tx_index] = skb;
-		} else
-			mp->tx_skb[tx_index] = NULL;
+		/*
+		 * The last fragment will generate an interrupt
+		 * which will free the skb on TX completion.
+		 */
+		if (frag == nr_frags - 1) {
+			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
+					ZERO_PADDING | TX_LAST_DESC |
+					TX_ENABLE_INTERRUPT;
+			txq->tx_skb[tx_index] = skb;
+		} else {
+			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
+			txq->tx_skb[tx_index] = NULL;
+		}

-		desc = &mp->tx_desc_area[tx_index];
 		desc->l4i_chk = 0;
 		desc->byte_cnt = this_frag->size;
 		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
@@ -656,29 +662,28 @@ static inline __be16 sum16_as_be(__sum16 sum)
 	return (__force __be16)sum;
 }

-static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,
-					struct sk_buff *skb)
+static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int tx_index;
 	struct tx_desc *desc;
 	u32 cmd_sts;
 	int length;
-	int nr_frags = skb_shinfo(skb)->nr_frags;

 	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

-	tx_index = alloc_tx_desc_index(mp);
-	desc = &mp->tx_desc_area[tx_index];
+	tx_index = txq_alloc_desc_index(txq);
+	desc = &txq->tx_desc_area[tx_index];

 	if (nr_frags) {
-		tx_fill_frag_descs(mp, skb);
+		txq_submit_frag_skb(txq, skb);

 		length = skb_headlen(skb);
-		mp->tx_skb[tx_index] = NULL;
+		txq->tx_skb[tx_index] = NULL;
 	} else {
 		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 		length = skb->len;
-		mp->tx_skb[tx_index] = skb;
+		txq->tx_skb[tx_index] = skb;
 	}

 	desc->byte_cnt = length;
@@ -714,15 +719,16 @@ static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,

 	/* ensure all descriptors are written before poking hardware */
 	wmb();
-	mv643xx_eth_port_enable_tx(mp, 1);
+	txq_enable(txq);

-	mp->tx_desc_count += nr_frags + 1;
+	txq->tx_desc_count += nr_frags + 1;
 }

 static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
+	struct tx_queue *txq;
 	unsigned long flags;

 	BUG_ON(netif_queue_stopped(dev));
@@ -736,19 +742,21 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	spin_lock_irqsave(&mp->lock, flags);

-	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
+	txq = mp->txq;
+
+	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
 		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&mp->lock, flags);
 		return NETDEV_TX_BUSY;
 	}

-	tx_submit_descs_for_skb(mp, skb);
+	txq_submit_skb(txq, skb);
 	stats->tx_bytes += skb->len;
 	stats->tx_packets++;
 	dev->trans_start = jiffies;

-	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
+	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
 		netif_stop_queue(dev);

 	spin_unlock_irqrestore(&mp->lock, flags);
@@ -1348,69 +1356,106 @@ static void rxq_deinit(struct rx_queue *rxq)
 	kfree(rxq->rx_skb);
 }

-static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
+static int txq_init(struct mv643xx_eth_private *mp)
 {
-	int tx_desc_num = mp->tx_ring_size;
-	struct tx_desc *p_tx_desc;
+	struct tx_queue *txq = mp->txq;
+	struct tx_desc *tx_desc;
+	int size;
 	int i;

-	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
-	p_tx_desc = (struct tx_desc *)mp->tx_desc_area;
-	for (i = 0; i < tx_desc_num; i++) {
-		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
-			((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
-	}
+	txq->tx_ring_size = mp->default_tx_ring_size;
+
+	txq->tx_desc_count = 0;
+	txq->tx_curr_desc = 0;
+	txq->tx_used_desc = 0;
+
+	size = txq->tx_ring_size * sizeof(struct tx_desc);
+
+	if (size <= mp->tx_desc_sram_size) {
+		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
+						mp->tx_desc_sram_size);
+		txq->tx_desc_dma = mp->tx_desc_sram_addr;
+	} else {
+		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
+							&txq->tx_desc_dma,
+							GFP_KERNEL);
+	}
+
+	if (txq->tx_desc_area == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate tx ring (%d bytes)\n", size);
+		goto out;
+	}
+	memset(txq->tx_desc_area, 0, size);

-	mp->tx_curr_desc = 0;
-	mp->tx_used_desc = 0;
+	txq->tx_desc_area_size = size;
+	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
+								GFP_KERNEL);
+	if (txq->tx_skb == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate tx skb ring\n");
+		goto out_free;
+	}

-	mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
-}
+	tx_desc = (struct tx_desc *)txq->tx_desc_area;
+	for (i = 0; i < txq->tx_ring_size; i++) {
+		int nexti = (i + 1) % txq->tx_ring_size;
+		tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
+					nexti * sizeof(struct tx_desc);
+	}

-static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
+	return 0;
+
+
+out_free:
+	if (size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(NULL, size,
+				  txq->tx_desc_area,
+				  txq->tx_desc_dma);
+
+out:
+	return -ENOMEM;
+}
+
+static void txq_reclaim(struct tx_queue *txq, int force)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct tx_desc *desc;
-	u32 cmd_sts;
-	struct sk_buff *skb;
-	unsigned long flags;
-	int tx_index;
-	dma_addr_t addr;
-	int count;
-	int released = 0;
-
-	while (mp->tx_desc_count > 0) {
-		spin_lock_irqsave(&mp->lock, flags);
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	unsigned long flags;

-		/* tx_desc_count might have changed before acquiring the lock */
-		if (mp->tx_desc_count <= 0) {
-			spin_unlock_irqrestore(&mp->lock, flags);
-			return released;
-		}
+	spin_lock_irqsave(&mp->lock, flags);
+	while (txq->tx_desc_count > 0) {
+		int tx_index;
+		struct tx_desc *desc;
+		u32 cmd_sts;
+		struct sk_buff *skb;
+		dma_addr_t addr;
+		int count;

-		tx_index = mp->tx_used_desc;
-		desc = &mp->tx_desc_area[tx_index];
+		tx_index = txq->tx_used_desc;
+		desc = &txq->tx_desc_area[tx_index];
 		cmd_sts = desc->cmd_sts;

-		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) {
-			spin_unlock_irqrestore(&mp->lock, flags);
-			return released;
-		}
+		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
+			break;

-		mp->tx_used_desc = (tx_index + 1) % mp->tx_ring_size;
-		mp->tx_desc_count--;
+		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
+		txq->tx_desc_count--;

 		addr = desc->buf_ptr;
 		count = desc->byte_cnt;
-		skb = mp->tx_skb[tx_index];
-		if (skb)
-			mp->tx_skb[tx_index] = NULL;
+		skb = txq->tx_skb[tx_index];
+		txq->tx_skb[tx_index] = NULL;

 		if (cmd_sts & ERROR_SUMMARY) {
-			printk("%s: Error in TX\n", dev->name);
-			dev->stats.tx_errors++;
+			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+			mp->dev->stats.tx_errors++;
 		}

+		/*
+		 * Drop mp->lock while we free the skb.
+		 */
 		spin_unlock_irqrestore(&mp->lock, flags);

 		if (cmd_sts & TX_FIRST_DESC)
@@ -1421,91 +1466,68 @@ static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 		if (skb)
 			dev_kfree_skb_irq(skb);

-		released = 1;
+		spin_lock_irqsave(&mp->lock, flags);
 	}
-
-	return released;
-}
-
-static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
-{
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-
-	if (mv643xx_eth_free_tx_descs(dev, 0) &&
-	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
-		netif_wake_queue(dev);
-}
-
-static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
-{
-	mv643xx_eth_free_tx_descs(dev, 1);
-}
+	spin_unlock_irqrestore(&mp->lock, flags);
+}

-static void mv643xx_eth_free_tx_rings(struct net_device *dev)
+static void txq_deinit(struct tx_queue *txq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);

-	/* Stop Tx Queues */
-	mv643xx_eth_port_disable_tx(mp);
+	txq_disable(txq);
+	txq_reclaim(txq, 1);

-	/* Free outstanding skb's on TX ring */
-	mv643xx_eth_free_all_tx_descs(dev);
-
-	BUG_ON(mp->tx_used_desc != mp->tx_curr_desc);
+	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

-	/* Free TX ring */
-	if (mp->tx_sram_size)
-		iounmap(mp->tx_desc_area);
+	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
 	else
-		dma_free_coherent(NULL, mp->tx_desc_area_size,
-				mp->tx_desc_area, mp->tx_desc_dma);
+		dma_free_coherent(NULL, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+
+	kfree(txq->tx_skb);
 }


 /* netdev ops and related ***************************************************/
 static void port_reset(struct mv643xx_eth_private *mp);

-static void mv643xx_eth_update_pscr(struct net_device *dev,
+static void mv643xx_eth_update_pscr(struct mv643xx_eth_private *mp,
 				    struct ethtool_cmd *ecmd)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	int port_num = mp->port_num;
-	u32 o_pscr, n_pscr;
-	unsigned int queues;
+	u32 pscr_o;
+	u32 pscr_n;

-	o_pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
-	n_pscr = o_pscr;
+	pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

 	/* clear speed, duplex and rx buffer size fields */
-	n_pscr &= ~(SET_MII_SPEED_TO_100  |
+	pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100   |
 			    SET_GMII_SPEED_TO_1000 |
 			    SET_FULL_DUPLEX_MODE   |
 			    MAX_RX_PACKET_MASK);

-	if (ecmd->duplex == DUPLEX_FULL)
-		n_pscr |= SET_FULL_DUPLEX_MODE;
-
-	if (ecmd->speed == SPEED_1000)
-		n_pscr |= SET_GMII_SPEED_TO_1000 |
-			  MAX_RX_PACKET_9700BYTE;
-	else {
+	if (ecmd->speed == SPEED_1000) {
+		pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
+	} else {
 		if (ecmd->speed == SPEED_100)
-			n_pscr |= SET_MII_SPEED_TO_100;
-		n_pscr |= MAX_RX_PACKET_1522BYTE;
+			pscr_n |= SET_MII_SPEED_TO_100;
+		pscr_n |= MAX_RX_PACKET_1522BYTE;
 	}

-	if (n_pscr != o_pscr) {
-		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
-			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
-		else {
-			queues = mv643xx_eth_port_disable_tx(mp);
+	if (ecmd->duplex == DUPLEX_FULL)
+		pscr_n |= SET_FULL_DUPLEX_MODE;

-			o_pscr &= ~SERIAL_PORT_ENABLE;
-			wrl(mp, PORT_SERIAL_CONTROL(port_num), o_pscr);
-			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
-			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
-			if (queues)
-				mv643xx_eth_port_enable_tx(mp, queues);
+	if (pscr_n != pscr_o) {
+		if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
+			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
+		else {
+			txq_disable(mp->txq);
+			pscr_o &= ~SERIAL_PORT_ENABLE;
+			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
+			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
+			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
+			txq_enable(mp->txq);
 		}
 	}
 }
@@ -1515,29 +1537,26 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	u32 int_cause, int_cause_ext = 0;
-	unsigned int port_num = mp->port_num;

 	/* Read interrupt cause registers */
-	int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT);
+	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT);
 	if (int_cause & INT_EXT) {
-		int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
+		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
 				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
-		wrl(mp, INT_CAUSE_EXT(port_num), ~int_cause_ext);
+		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
 	}

 	/* PHY status changed */
 	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
-		struct ethtool_cmd cmd;
+		if (mii_link_ok(&mp->mii)) {
+			struct ethtool_cmd cmd;

-		if (mii_link_ok(&mp->mii)) {
 			mii_ethtool_gset(&mp->mii, &cmd);
-			mv643xx_eth_update_pscr(dev, &cmd);
-			mv643xx_eth_port_enable_tx(mp, 1);
+			mv643xx_eth_update_pscr(mp, &cmd);
+			txq_enable(mp->txq);
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
-				if (mp->tx_ring_size - mp->tx_desc_count >=
-							MAX_DESCS_PER_SKB)
-					netif_wake_queue(dev);
+				__txq_maybe_wake(mp->txq);
 			}
 		} else if (netif_carrier_ok(dev)) {
 			netif_stop_queue(dev);
@@ -1548,10 +1567,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_ETH_NAPI
 	if (int_cause & INT_RX) {
 		/* schedule the NAPI poll routine to maintain port */
-		wrl(mp, INT_MASK(port_num), 0x00000000);
+		wrl(mp, INT_MASK(mp->port_num), 0x00000000);

 		/* wait for previous write to complete */
-		rdl(mp, INT_MASK(port_num));
+		rdl(mp, INT_MASK(mp->port_num));

 		netif_rx_schedule(dev, &mp->napi);
 	}
@@ -1559,8 +1578,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	if (int_cause & INT_RX)
 		rxq_process(mp->rxq, INT_MAX);
 #endif
-	if (int_cause_ext & INT_EXT_TX)
-		mv643xx_eth_free_completed_tx_descs(dev);
+	if (int_cause_ext & INT_EXT_TX) {
+		txq_reclaim(mp->txq, 0);
+		__txq_maybe_wake(mp->txq);
+	}

 	/*
 	 * If no real interrupt occured, exit.
@@ -1616,6 +1637,20 @@ static void port_start(struct net_device *dev)
 	phy_reset(mp);
 	mv643xx_eth_set_settings(dev, &ethtool_cmd);

+	/*
+	 * Configure TX path and queues.
+	 */
+	wrl(mp, TX_BW_MTU(mp->port_num), 0);
+	for (i = 0; i < 1; i++) {
+		struct tx_queue *txq = mp->txq;
+		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
+		u32 addr;
+
+		addr = (u32)txq->tx_desc_dma;
+		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
+		wrl(mp, off, addr);
+	}
+
 	/* Add the assigned Ethernet address to the port's address table */
 	uc_addr_set(mp, dev->dev_addr);

@@ -1644,13 +1679,6 @@ static void port_start(struct net_device *dev)

 		rxq_enable(rxq);
 	}
-
-
-	wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num),
-		(u32)((struct tx_desc *)mp->tx_desc_dma + mp->tx_curr_desc));
-
-	/* Disable port bandwidth limits by clearing MTU register */
-	wrl(mp, TX_BW_MTU(mp->port_num), 0);
 }

 #ifdef MV643XX_ETH_COAL
@@ -1692,7 +1720,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	unsigned int port_num = mp->port_num;
-	unsigned int size;
 	int err;

 	/* Clear any pending ethernet port interrupts */
@@ -1715,38 +1742,9 @@ static int mv643xx_eth_open(struct net_device *dev)
 		goto out_free_irq;
 	rxq_refill(mp->rxq);

-	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
-								GFP_KERNEL);
-	if (!mp->tx_skb) {
-		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
-		err = -ENOMEM;
+	err = txq_init(mp);
+	if (err)
 		goto out_free_rx_skb;
-	}
-
-	/* Allocate TX ring */
-	mp->tx_desc_count = 0;
-	size = mp->tx_ring_size * sizeof(struct tx_desc);
-	mp->tx_desc_area_size = size;
-
-	if (mp->tx_sram_size) {
-		mp->tx_desc_area = ioremap(mp->tx_sram_addr,
-							mp->tx_sram_size);
-		mp->tx_desc_dma = mp->tx_sram_addr;
-	} else
-		mp->tx_desc_area = dma_alloc_coherent(NULL, size,
-							&mp->tx_desc_dma,
-							GFP_KERNEL);
-
-	if (!mp->tx_desc_area) {
-		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
-							dev->name, size);
-		err = -ENOMEM;
-		goto out_free_tx_skb;
-	}
-	BUG_ON((u32) mp->tx_desc_area & 0xf);	/* check 16-byte alignment */
-	memset((void *)mp->tx_desc_area, 0, mp->tx_desc_area_size);
-
-	ether_init_tx_desc_ring(mp);

 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
@@ -1770,8 +1768,7 @@ static int mv643xx_eth_open(struct net_device *dev)

 	return 0;

-out_free_tx_skb:
-	kfree(mp->tx_skb);
+
 out_free_rx_skb:
 	rxq_deinit(mp->rxq);
 out_free_irq:
@@ -1785,8 +1782,10 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	unsigned int port_num = mp->port_num;
 	unsigned int reg_data;

-	mv643xx_eth_port_disable_tx(mp);
+	txq_disable(mp->txq);
 	rxq_disable(mp->rxq);
+	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
+		udelay(10);

 	/* Clear all MIB counters */
 	clear_mib_counters(mp);
@@ -1817,7 +1816,7 @@ static int mv643xx_eth_stop(struct net_device *dev)

 	port_reset(mp);

-	mv643xx_eth_free_tx_rings(dev);
+	txq_deinit(mp->txq);
 	rxq_deinit(mp->rxq);

 	free_irq(dev->irq, dev);
@@ -1870,8 +1869,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
 	port_reset(mp);
 	port_start(dev);

-	if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
-		netif_wake_queue(dev);
+	__txq_maybe_wake(mp->txq);
 }

 static void mv643xx_eth_tx_timeout(struct net_device *dev)
@@ -2171,7 +2169,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)

 	/* set default config values */
 	uc_addr_get(mp, dev->dev_addr);
-	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;

 	if (is_valid_ether_addr(pd->mac_addr))
 		memcpy(dev->dev_addr, pd->mac_addr, 6);
@@ -2183,12 +2180,13 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	if (pd->rx_queue_size)
 		mp->default_rx_ring_size = pd->rx_queue_size;

+	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 	if (pd->tx_queue_size)
-		mp->tx_ring_size = pd->tx_queue_size;
+		mp->default_tx_ring_size = pd->tx_queue_size;

 	if (pd->tx_sram_size) {
-		mp->tx_sram_size = pd->tx_sram_size;
-		mp->tx_sram_addr = pd->tx_sram_addr;
+		mp->tx_desc_sram_size = pd->tx_sram_size;
+		mp->tx_desc_sram_addr = pd->tx_sram_addr;
 	}

 	if (pd->rx_sram_size) {
@@ -2217,7 +2215,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	phy_reset(mp);
 	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
 	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
-	mv643xx_eth_update_pscr(dev, &cmd);
+	mv643xx_eth_update_pscr(mp, &cmd);
 	mv643xx_eth_set_settings(dev, &cmd);

 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -2250,7 +2248,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
 #endif

-	if (mp->tx_sram_size > 0)
+	if (mp->tx_desc_sram_size > 0)
 		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);

 	return 0;