Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4db43e67 authored by Linus Torvalds
Browse files

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
  SUN3/3X Lance trivial fix improved
  mv643xx_eth: Fix use of uninitialized port_num field
  forcedeth: fix tx timeout
  forcedeth: fix nic poll
  qla3xxx: bugfix: Jumbo frame handling.
  qla3xxx: bugfix: Dropping interrupt under heavy network load.
  qla3xxx: bugfix: Multi segment sends were getting whacked.
  qla3xxx: bugfix: Add tx control block memset.
  atl1: remove unnecessary crc inversion
  myri10ge: correctly detect when TSO should be used
  [PATCH] WE-22 : prevent information leak on 64 bit
  [PATCH] wext: Add missing ioctls to 64<->32 conversion
  [PATCH] bcm43xx: Fix machine check on PPC for version 1 PHY
  [PATCH] bcm43xx: fix radio_set_tx_iq
  [PATCH] bcm43xx: Fix code for confusion between PHY revision and PHY version
parents 935c631d a9c87a10
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -334,7 +334,6 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
	int i;

	crc32 = ether_crc_le(6, mc_addr);
	crc32 = ~crc32;
	for (i = 0; i < 32; i++)
		value |= (((crc32 >> i) & 1) << (31 - i));

+6 −2
Original line number Diff line number Diff line
@@ -2050,9 +2050,10 @@ static void nv_tx_timeout(struct net_device *dev)
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
@@ -3536,6 +3537,9 @@ static void nv_do_nic_poll(unsigned long data)
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (np->desc_ver == DESC_VER_3)
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+1 −3
Original line number Diff line number Diff line
@@ -1379,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)

	spin_lock_init(&mp->lock);

	port_num = pd->port_number;
	port_num = mp->port_num = pd->port_number;

	/* set default config values */
	eth_port_uc_addr_get(dev, dev->dev_addr);
@@ -1411,8 +1411,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
	duplex = pd->duplex;
	speed = pd->speed;

	mp->port_num = port_num;

	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
	mp->mii.mdio_read = mv643xx_mdio_read;
+3 −4
Original line number Diff line number Diff line
@@ -71,7 +71,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.3.0-1.226"
#define MYRI10GE_VERSION_STR "1.3.0-1.227"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -2015,9 +2015,8 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
	mss = 0;
	max_segments = MXGEFW_MAX_SEND_DESC;

	if (skb->len > (dev->mtu + ETH_HLEN)) {
	if (skb_is_gso(skb)) {
		mss = skb_shinfo(skb)->gso_size;
		if (mss != 0)
		max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
	}

+48 −62
Original line number Diff line number Diff line
@@ -1688,6 +1688,27 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
	return 0;
}

/*
 * Caller holds hw_lock.
 *
 * Advance the small-buffer receive queue producer index to account for
 * buffers the driver has released back to the hardware, then publish the
 * new index to the chip.  Updates are batched: nothing is written to the
 * device until at least 16 buffers have been released, which amortizes
 * the MMIO cost over many buffer returns.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			/* One producer-index step covers a group of released
			 * buffers; the ring wraps at NUM_SBUFQ_ENTRIES. */
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			/* NOTE(review): index advances by 1 while the release
			 * count drops by 8 against a threshold of 16 —
			 * presumably each queue entry describes 8 buffers;
			 * confirm against the qla3xxx hardware spec. */
			qdev->small_buf_release_cnt -= 8;
		}
		/* Ensure the ring-entry updates are visible to the device
		 * before the producer index is exposed via MMIO. */
		wmb();
		writel(qdev->small_buf_q_producer_index,
			&port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
@@ -1732,13 +1753,10 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}

		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxLargeQProducerIndex,
				    qdev->lrg_buf_q_producer_index);
		writel(qdev->lrg_buf_q_producer_index,
			&port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

@@ -1915,15 +1933,16 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum & 
			(IB_IP_IOCB_RSP_3032_ICE | 
			 IB_IP_IOCB_RSP_3032_CE | 
			 IB_IP_IOCB_RSP_3032_NUC)) {
			 IB_IP_IOCB_RSP_3032_CE)) { 
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum & 
				IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"),checksum);
		} else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
@@ -1944,16 +1963,12 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	unsigned long hw_flags;
	int work_done = 0;

	u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));

	/* While there are entries in the completion queue. */
	while ((rsp_producer_index !=
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
@@ -2009,33 +2024,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
		work_done = *tx_cleaned + *rx_cleaned;
	}

	if(work_done) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);

		ql_update_lrg_bufq_prod_index(qdev);

		if (qdev->small_buf_release_cnt >= 16) {
			while (qdev->small_buf_release_cnt >= 16) {
				qdev->small_buf_q_producer_index++;

				if (qdev->small_buf_q_producer_index ==
				    NUM_SBUFQ_ENTRIES)
					qdev->small_buf_q_producer_index = 0;
				qdev->small_buf_release_cnt -= 8;
			}

			wmb();
			ql_write_common_reg(qdev,
					    &port_regs->CommonRegs.
					    rxSmallQProducerIndex,
					    qdev->small_buf_q_producer_index);

		}

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}

	return *tx_cleaned + *rx_cleaned;
	return work_done;
}

static int ql_poll(struct net_device *ndev, int *budget)
@@ -2059,9 +2048,10 @@ static int ql_poll(struct net_device *ndev, int *budget)
		netif_rx_complete(ndev);

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.rspQConsumerIndex,
				    qdev->rsp_consumer_index);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
			    &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
@@ -2217,12 +2207,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
						      (skb_shinfo(skb)->nr_frags));
	if(seg_cnt == -1) {
		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
		return NETDEV_TX_BUSY;
	}
	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
@@ -2278,7 +2263,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  len);
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
@@ -2380,6 +2365,7 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
	}
	
	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
@@ -3054,15 +3040,6 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
			goto out;
		}

		if (qdev->mac_index)
			ql_write_page0_reg(qdev,
					   &port_regs->mac1MaxFrameLengthReg,
					   qdev->max_frame_size);
		else
			ql_write_page0_reg(qdev,
					   &port_regs->mac0MaxFrameLengthReg,
					   qdev->max_frame_size);

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

@@ -3082,6 +3059,14 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
					   &port_regs->mac0MaxFrameLengthReg,
					   qdev->max_frame_size);

	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
@@ -3152,7 +3137,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value =
		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
Loading