Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5a85d716 authored by David S. Miller's avatar David S. Miller
Browse files
parents b3ce5ae1 1b3d2d77
Loading
Loading
Loading
Loading
+2 −0 — MAINTAINERS (Intel Ethernet drivers entry)
Original line number Diff line number Diff line
@@ -3869,7 +3869,9 @@ M: Greg Rose <gregory.v.rose@intel.com>
M:	Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
M:	Alex Duyck <alexander.h.duyck@intel.com>
M:	John Ronciak <john.ronciak@intel.com>
M:	Tushar Dave <tushar.n.dave@intel.com>
L:	e1000-devel@lists.sourceforge.net
W:	http://www.intel.com/support/feedback.htm
W:	http://e1000.sourceforge.net/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+11 −0 — drivers/net/ethernet/intel/e1000/e1000_main.c
Original line number Diff line number Diff line
@@ -5067,6 +5067,17 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
		return NETDEV_TX_OK;
	}

+	/*
+	 * The minimum packet size with TCTL.PSP set is 17 bytes so
+	 * pad skb in order to meet this minimum size requirement
+	 */
+	if (unlikely(skb->len < 17)) {
+		if (skb_pad(skb, 17 - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
+	}
+
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u8 hdr_len;
+20 −0 — drivers/net/ethernet/intel/igb/e1000_phy.c
Original line number Diff line number Diff line
@@ -1710,6 +1710,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)

	switch (hw->phy.id) {
	case I210_I_PHY_ID:
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			return ret_val;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+		break;
	case I347AT4_E_PHY_ID:
		/* Remember the original page select and set it to 7 */
		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+1 −1 — drivers/net/ethernet/intel/igb/igb_main.c
Original line number Diff line number Diff line
@@ -61,7 +61,7 @@

#define MAJ 4
#define MIN 0
-#define BUILD 1
+#define BUILD 17
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
+30 −29 — drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
Original line number Diff line number Diff line
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -359,6 +359,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, bi->dma)) {
+				dev_kfree_skb(skb);
+				bi->skb = NULL;
+				dev_err(&pdev->dev, "RX DMA map failed\n");
+				break;
+			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

@@ -1132,12 +1138,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
	if (!hw->mac.ops.set_vfta)
		return -EOPNOTSUPP;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
@@ -1157,13 +1163,13 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		err = hw->mac.ops.set_vfta(hw, vid, 0, false);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

@@ -1219,7 +1225,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
@@ -1227,7 +1233,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)

	ixgbevf_write_uc_addr_list(netdev);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1341,7 +1347,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1350,7 +1356,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
		idx++;
	}

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1391,7 +1397,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)

	ixgbevf_configure_msix(adapter);

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
@@ -1400,7 +1406,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);
@@ -1424,12 +1430,12 @@ static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
	unsigned int num_rx_queues = 1;
	int err, i;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;
@@ -1688,14 +1694,14 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1912,17 +1918,12 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
-	int q_idx, num_q_vectors;
-	int napi_vectors;
-
-	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	napi_vectors = adapter->num_rx_queues;
+	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
-		if (q_idx < napi_vectors)
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
@@ -2194,12 +2195,12 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
	if (hw->mac.ops.check_link) {
		s32 need_reset;

-		spin_lock(&adapter->mbx_lock);
+		spin_lock_bh(&adapter->mbx_lock);

		need_reset = hw->mac.ops.check_link(hw, &link_speed,
						    &link_up, false);

-		spin_unlock(&adapter->mbx_lock);
+		spin_unlock_bh(&adapter->mbx_lock);

		if (need_reset) {
			adapter->link_up = link_up;
@@ -2467,12 +2468,12 @@ static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
	unsigned int num_rx_queues = 1;
	int err, i;

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;
@@ -2822,10 +2823,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
-			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
+			tx_buffer_info->mapped_as_page = true;
			tx_buffer_info->next_to_watch = i;

			len -= size;
@@ -3046,12 +3047,12 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}