Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3a2e15df authored by David S. Miller
Browse files


Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to i40e only.

Anjali provides two cleanups to remove unnecessary code and a fix
to resolve debugfs dumping only half the NVM.  Then provides a fix
to ethtool NVM reads where shadow RAM was used instead of actual
NVM reads.

Jesse provides a couple of fixes, one removes custom i40e functions
which duplicate existing kernel functionality.  Second fixes constant
cast issues by replacing __constant_htons with htons.

Mitch provides a couple of fixes for the VF interfaces in i40e.  First
provides a fix to guard against VF message races which can cause a panic.
Second fix reinitializes the buffer size each time we clean the ARQ,
because subsequent messages can be truncated. Lastly adds functionality
to enable/disable ICR 0 dynamically.

Vasu adds a simple guard against multiple includes of the i40e_txrx.h
file.

Shannon provides a couple of fixes, first fix swaps a couple of lines
around in the error handling if the allocation for the VSI array fails.
Second fixes an issue where we try to free the q_vector that has not
been setup which can panic the kernel.

David provides a patch to save off the pointer to memory and the length
of 2 structs used in the admin queue in order to store all info about
allocated kernel memory.

Neerav fixes ring allocation where allocation and clearing of rings
for a VSI should be using the alloc_queue_pairs and not num_queue_pairs.
Then removes the unused define for multi-queue enabled.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1e85c9b6 c3f0c4fe
Loading
Loading
Loading
Loading
+15 −25
Original line number Diff line number Diff line
@@ -65,7 +65,7 @@
#define I40E_MAX_NPAR_QPS     32

#define I40E_MAX_NUM_DESCRIPTORS      4096
#define I40E_MAX_REGISTER     0x0038FFFF
#define I40E_MAX_REGISTER     0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS  512
#define I40E_REQ_DESCRIPTOR_MULTIPLE  32
#define I40E_MIN_NUM_DESCRIPTORS      64
@@ -230,28 +230,24 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
#define I40E_FLAG_MQ_ENABLED                   (u64)(1 << 7)
#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 8)
#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 9)
#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 10)
#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 13)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 14)
#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 15)
#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 16)
#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 18)
#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 19)
#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 20)
#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 21)
#define I40E_FLAG_FDIR_ENABLED                 (u64)(1 << 22)
#define I40E_FLAG_FDIR_ATR_ENABLED             (u64)(1 << 23)
#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
#define I40E_FLAG_FDIR_ENABLED                 (u64)(1 << 21)
#define I40E_FLAG_FDIR_ATR_ENABLED             (u64)(1 << 22)
#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
#ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
#endif

	u16 num_tx_queues;
	u16 num_rx_queues;

	bool stat_offsets_loaded;
	struct i40e_hw_port_stats stats;
	struct i40e_hw_port_stats stats_offsets;
@@ -521,13 +517,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
				    bool printconfig);

/* needed by i40e_main.c */
void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
			  struct i40e_ring *tx_ring);
void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
			    struct i40e_ring *tx_ring);
void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
			     struct i40e_ring *tx_ring);
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
			     struct i40e_pf *pf, bool add);

@@ -565,6 +554,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+44 −59
Original line number Diff line number Diff line
@@ -66,9 +66,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
@@ -76,21 +75,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
	if (ret_code)
		return ret_code;

	hw->aq.asq.desc = hw->aq.asq_mem.va;
	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;

	ret_code = i40e_allocate_virt_mem(hw, &mem,
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
		hw->aq.asq_mem.va = NULL;
		hw->aq.asq_mem.pa = 0;
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	hw->aq.asq.details = mem.va;

	return ret_code;
}

@@ -102,16 +94,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.arq.desc = hw->aq.arq_mem.va;
	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;

	return ret_code;
}
@@ -125,14 +112,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;

	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
	hw->aq.asq_mem.va = NULL;
	hw->aq.asq_mem.pa = 0;
	mem.va = hw->aq.asq.details;
	i40e_free_virt_mem(hw, &mem);
	hw->aq.asq.details = NULL;
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
@@ -144,9 +124,7 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
	hw->aq.arq_mem.va = NULL;
	hw->aq.arq_mem.pa = 0;
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
@@ -157,7 +135,6 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

@@ -166,11 +143,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
					  sizeof(struct i40e_dma_mem)));
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -212,8 +189,7 @@ unwind_alloc_arq_bufs:
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
@@ -225,16 +201,15 @@ unwind_alloc_arq_bufs:
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
					  sizeof(struct i40e_dma_mem)));
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -254,8 +229,7 @@ unwind_alloc_asq_bufs:
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
@@ -266,14 +240,17 @@ unwind_alloc_asq_bufs:
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);
	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
@@ -282,7 +259,6 @@ static void i40e_free_arq_bufs(struct i40e_hw *hw)
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	/* only unmap if the address is non-NULL */
@@ -290,9 +266,14 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* now free the buffer info list */
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);
	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
@@ -305,14 +286,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQBAH1,
		    upper_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQBAL1,
		    lower_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQBAH,
		    upper_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQBAL,
		    lower_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
@@ -328,14 +313,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQBAH1,
		    upper_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQBAL1,
		    lower_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQBAH,
		    upper_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQBAL,
		    lower_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}
@@ -483,8 +472,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)

	/* free ring buffers */
	i40e_free_asq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_asq(hw);

	mutex_unlock(&hw->aq.asq_mutex);

@@ -516,8 +503,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)

	/* free ring buffers */
	i40e_free_arq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_arq(hw);

	mutex_unlock(&hw->aq.arq_mutex);

+5 −8
Original line number Diff line number Diff line
@@ -32,20 +32,20 @@
#include "i40e_adminq_cmd.h"

#define I40E_ADMINQ_DESC(R, i)   \
	(&(((struct i40e_aq_desc *)((R).desc))[i]))
	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))

#define I40E_ADMINQ_DESC_ALIGNMENT 4096

struct i40e_adminq_ring {
	void *desc;		/* Descriptor ring memory */
	void *details;		/* ASQ details */
	struct i40e_virt_mem dma_head;	/* space for dma structures */
	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
	struct i40e_virt_mem cmd_buf;	/* command buffer memory */

	union {
		struct i40e_dma_mem *asq_bi;
		struct i40e_dma_mem *arq_bi;
	} r;

	u64 dma_addr;		/* Physical address of the ring */
	u16 count;		/* Number of descriptors */
	u16 rx_buf_len;		/* Admin Receive Queue buffer length */

@@ -70,7 +70,7 @@ struct i40e_asq_cmd_details {
};

#define I40E_ADMINQ_DETAILS(R, i)   \
	(&(((struct i40e_asq_cmd_details *)((R).details))[i]))
	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))

/* ARQ event information */
struct i40e_arq_event_info {
@@ -95,9 +95,6 @@ struct i40e_adminq_info {
	struct mutex asq_mutex; /* Send queue lock */
	struct mutex arq_mutex; /* Receive queue lock */

	struct i40e_dma_mem asq_mem;    /* send queue dynamic memory */
	struct i40e_dma_mem arq_mem;    /* receive queue dynamic memory */

	/* last status values on send and receive queues */
	enum i40e_admin_queue_err asq_last_status;
	enum i40e_admin_queue_err arq_last_status;
+0 −27
Original line number Diff line number Diff line
@@ -239,33 +239,6 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
	return status;
}

/**
 * i40e_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address
 *
 * Tests a MAC address to ensure it is a valid Individual Address
 **/
i40e_status i40e_validate_mac_addr(u8 *mac_addr)
{
	i40e_status status = 0;

	/* Make sure it is not a multicast address */
	if (I40E_IS_MULTICAST(mac_addr)) {
		hw_dbg(hw, "MAC address is multicast\n");
		status = I40E_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (I40E_IS_BROADCAST(mac_addr)) {
		hw_dbg(hw, "MAC address is broadcast\n");
		status = I40E_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		hw_dbg(hw, "MAC address is all zeros\n");
		status = I40E_ERR_INVALID_MAC_ADDR;
	}
	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
+4 −4
Original line number Diff line number Diff line
@@ -192,12 +192,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,

			len = (sizeof(struct i40e_aq_desc)
					* pf->hw.aq.num_asq_entries);
			memcpy(p, pf->hw.aq.asq.desc, len);
			memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
			p += len;

			len = (sizeof(struct i40e_aq_desc)
					* pf->hw.aq.num_arq_entries);
			memcpy(p, pf->hw.aq.arq.desc, len);
			memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
			p += len;

			i40e_dbg_dump_data_len = buflen;
@@ -1740,10 +1740,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
			dev_info(&pf->pdev->dev,
				 "Read NVM module=0x%x offset=0x%x words=%d\n",
				 module, offset, buffer_len);
			if (buffer_len)
			if (bytes)
				print_hex_dump(KERN_INFO, "NVM Dump: ",
					DUMP_PREFIX_OFFSET, 16, 2,
					buff, buffer_len, true);
					buff, bytes, true);
		}
		kfree(buff);
		buff = NULL;
Loading