Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7cf210dc authored by David S. Miller
Browse files

Merge branch 'qlcnic-fixes'



Manish Chopra says:

====================
qlcnic: bug fixes

This series fixes a data structure corruption bug in
VF's async mailbox command handling and an issue related
to napi poll budget in the driver.

Please consider applying this series to "net"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents cfaa2189 b8b2372d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -37,8 +37,8 @@

#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
#define _QLCNIC_LINUX_SUBVERSION 64
#define QLCNIC_LINUX_VERSIONID  "5.3.64"
#define _QLCNIC_LINUX_SUBVERSION 65
#define QLCNIC_LINUX_VERSIONID  "5.3.65"
#define QLCNIC_DRV_IDC_VER  0x01
#define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+0 −2
Original line number Diff line number Diff line
@@ -102,7 +102,6 @@
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC  	0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+5 −4
Original line number Diff line number Diff line
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
	spinlock_t			vlan_list_lock;	/* Lock for VLAN list */
};

struct qlcnic_async_work_list {
struct qlcnic_async_cmd {
	struct list_head	list;
	struct work_struct	work;
	void			*ptr;
	struct qlcnic_cmd_args	*cmd;
};

@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
	struct workqueue_struct *bc_trans_wq;
	struct workqueue_struct *bc_async_wq;
	struct workqueue_struct *bc_flr_wq;
	struct list_head	async_list;
	struct qlcnic_adapter	*adapter;
	struct list_head	async_cmd_list;
	struct work_struct	vf_async_work;
	spinlock_t		queue_lock; /* async_cmd_list queue lock */
};

struct qlcnic_sriov {
+55 −40
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@
#define QLC_83XX_VF_RESET_FAIL_THRESH	8
#define QLC_BC_CMD_MAX_RETRY_CNT	5

static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
	}

	bc->bc_async_wq =  wq;
	INIT_LIST_HEAD(&bc->async_list);
	INIT_LIST_HEAD(&bc->async_cmd_list);
	INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
	spin_lock_init(&bc->queue_lock);
	bc->adapter = adapter;

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,

void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
	struct list_head *head = &bc->async_list;
	struct qlcnic_async_work_list *entry;
	struct list_head *head = &bc->async_cmd_list;
	struct qlcnic_async_cmd *entry;

	flush_workqueue(bc->bc_async_wq);
	cancel_work_sync(&bc->vf_async_work);

	spin_lock(&bc->queue_lock);
	while (!list_empty(head)) {
		entry = list_entry(head->next, struct qlcnic_async_work_list,
		entry = list_entry(head->next, struct qlcnic_async_cmd,
				   list);
		cancel_work_sync(&entry->work);
		list_del(&entry->list);
		kfree(entry->cmd);
		kfree(entry);
	}
	spin_unlock(&bc->queue_lock);
}

void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)

static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
	struct qlcnic_async_work_list *entry;
	struct qlcnic_adapter *adapter;
	struct qlcnic_async_cmd *entry, *tmp;
	struct qlcnic_back_channel *bc;
	struct qlcnic_cmd_args *cmd;
	struct list_head *head;
	LIST_HEAD(del_list);

	bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
	head = &bc->async_cmd_list;

	entry = container_of(work, struct qlcnic_async_work_list, work);
	adapter = entry->ptr;
	spin_lock(&bc->queue_lock);
	list_splice_init(head, &del_list);
	spin_unlock(&bc->queue_lock);

	list_for_each_entry_safe(entry, tmp, &del_list, list) {
		list_del(&entry->list);
		cmd = entry->cmd;
	__qlcnic_sriov_issue_cmd(adapter, cmd);
	return;
		__qlcnic_sriov_issue_cmd(bc->adapter, cmd);
		kfree(entry);
	}

static struct qlcnic_async_work_list *
qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
{
	struct list_head *node;
	struct qlcnic_async_work_list *entry = NULL;
	u8 empty = 0;
	if (!list_empty(head))
		queue_work(bc->bc_async_wq, &bc->vf_async_work);

	list_for_each(node, &bc->async_list) {
		entry = list_entry(node, struct qlcnic_async_work_list, list);
		if (!work_pending(&entry->work)) {
			empty = 1;
			break;
		}
	return;
}

	if (!empty) {
		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
				GFP_ATOMIC);
		if (entry == NULL)
static struct qlcnic_async_cmd *
qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
			     struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_async_cmd *entry = NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;
		list_add_tail(&entry->list, &bc->async_list);
	}

	entry->cmd = cmd;

	spin_lock(&bc->queue_lock);
	list_add_tail(&entry->list, &bc->async_cmd_list);
	spin_unlock(&bc->queue_lock);

	return entry;
}

static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
					    work_func_t func, void *data,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_async_work_list *entry = NULL;
	struct qlcnic_async_cmd *entry = NULL;

	entry = qlcnic_sriov_get_free_node_async_work(bc);
	if (!entry)
	entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
	if (!entry) {
		qlcnic_free_mbx_args(cmd);
		kfree(cmd);
		return;
	}

	entry->ptr = data;
	entry->cmd = cmd;
	INIT_WORK(&entry->work, func);
	queue_work(bc->bc_async_wq, &entry->work);
	queue_work(bc->bc_async_wq, &bc->vf_async_work);
}

static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
	if (adapter->need_fw_reset)
		return -EIO;

	qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
					adapter, cmd);
	qlcnic_sriov_schedule_async_cmd(bc, cmd);

	return 0;
}