
Commit e74dbb73 authored by David S. Miller

Merge branch 'bnx2x-next'



Yuval Mintz says:

====================
bnx2x: SR-IOV patch series

With the exception of the first patch, this series contains IOV-related
patches. The main changes concern the driver's IOV-support backbone: a new
workqueue is added for IOV-related tasks, and the vfop mechanism is removed
from the driver.

Please consider applying this series to `net-next'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 869f2739 16bd41dd
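The series' central change, per the cover letter above, is moving SR-IOV handling off the generic slow-path task and onto a dedicated workqueue driven by a bitmask of pending task types (bnx2x_iov_wq, iov_task_state and enum bnx2x_iov_flag in the hunks below). The following is a minimal sketch of that pattern, assuming a kernel-module context; it is not code from the patches, and all demo_* names are invented for illustration.

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Pending-work flags, mirroring the idea of enum bnx2x_iov_flag. */
enum demo_iov_flag {
	DEMO_IOV_HANDLE_VF_MSG,
	DEMO_IOV_HANDLE_FLR,
};

static struct workqueue_struct *demo_iov_wq;	/* dedicated, single-threaded */
static struct delayed_work demo_iov_task;
static unsigned long demo_iov_task_state;	/* bitmask of demo_iov_flag */

static void demo_iov_task_fn(struct work_struct *work)
{
	/* Consume each requested task type exactly once. */
	if (test_and_clear_bit(DEMO_IOV_HANDLE_FLR, &demo_iov_task_state))
		pr_info("demo: handle VF FLR\n");
	if (test_and_clear_bit(DEMO_IOV_HANDLE_VF_MSG, &demo_iov_task_state))
		pr_info("demo: handle VF->PF mailbox message\n");
}

/* Callable from attention/event context: mark the task and kick the queue. */
static void demo_schedule_iov_task(enum demo_iov_flag flag)
{
	set_bit(flag, &demo_iov_task_state);
	queue_delayed_work(demo_iov_wq, &demo_iov_task, 0);
}

static int __init demo_init(void)
{
	demo_iov_wq = create_singlethread_workqueue("demo_iov");
	if (!demo_iov_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_iov_task, demo_iov_task_fn);
	demo_schedule_iov_task(DEMO_IOV_HANDLE_VF_MSG);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_iov_task);
	destroy_workqueue(demo_iov_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the patches themselves the scheduling side is bnx2x_schedule_iov_task() (called from the attention handler) and bnx2x_vf_mbx_schedule() (called from the event queue), and the bp->iov_task work item is initialized in bnx2x_init_bp(); the sketch only shows the schedule/consume shape.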
+17 −4
@@ -1155,10 +1155,6 @@ struct bnx2x_port {
 			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
 /* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
 #define BNX2X_MAX_NUM_OF_VFS	64
 #define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. */
 #define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)
@@ -1413,6 +1409,12 @@ enum sp_rtnl_flag {
 	BNX2X_SP_RTNL_RX_MODE,
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 	BNX2X_SP_RTNL_TX_STOP,
+	BNX2X_SP_RTNL_GET_DRV_VERSION,
+};
+
+enum bnx2x_iov_flag {
+	BNX2X_IOV_HANDLE_VF_MSG,
+	BNX2X_IOV_HANDLE_FLR,
 };
 
 struct bnx2x_prev_path_list {
@@ -1613,6 +1615,8 @@ struct bnx2x {
 	int			mrrs;
 
 	struct delayed_work	sp_task;
+	struct delayed_work	iov_task;
+
 	atomic_t		interrupt_occurred;
 	struct delayed_work	sp_rtnl_task;
 
@@ -1703,6 +1707,10 @@ struct bnx2x {
 	struct bnx2x_slowpath	*slowpath;
 	dma_addr_t		slowpath_mapping;
 
+	/* Mechanism protecting the drv_info_to_mcp */
+	struct mutex		drv_info_mutex;
+	bool			drv_info_mng_owner;
+
 	/* Total number of FW statistics requests */
 	u8			fw_stats_num;
 
@@ -1892,6 +1900,9 @@ struct bnx2x {
 	/* operation indication for the sp_rtnl task */
 	unsigned long				sp_rtnl_state;
 
+	/* Indication of the IOV tasks */
+	unsigned long				iov_task_state;
+
 	/* DCBX Negotiation results */
 	struct dcbx_features			dcbx_local_feat;
 	u32					dcbx_error;
@@ -2535,6 +2546,8 @@ enum {
 
 void bnx2x_set_local_cmng(struct bnx2x *bp);
 
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
+7 −0
@@ -2804,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (CNIC_ENABLED(bp))
 		bnx2x_load_cnic(bp);
 
+	if (IS_PF(bp))
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 		/* mark driver is loaded in shmem2 */
 		u32 val;
@@ -3030,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;
 
+	/* Clear driver version indication in shmem */
+	if (IS_PF(bp))
+		bnx2x_update_mng_version(bp);
+
 	/* Check if there are pending parity attentions. If there are - set
 	 * RECOVERY_IN_PROGRESS.
 	 */
+6 −5
@@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
 #define IS_PORT_STAT(i) \
 	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
 #define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
-			(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+		((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+		 IS_VF(bp))
 
 /* ethtool statistics are displayed for all regular ethernet queues and the
  * fcoe L2 queue if not disabled
@@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 				      BNX2X_NUM_Q_STATS;
 		} else
 			num_strings = 0;
-		if (IS_MF_MODE_STAT(bp)) {
+		if (HIDE_PORT_STAT(bp)) {
 			for (i = 0; i < BNX2X_NUM_STATS; i++)
 				if (IS_FUNC_STAT(i))
 					num_strings++;
@@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 		}
 
 		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+			if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
 				continue;
 			strcpy(buf + (k + j)*ETH_GSTRING_LEN,
 			       bnx2x_stats_arr[i].string);
@@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
 	hw_stats = (u32 *)&bp->eth_stats;
 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
 			continue;
 		if (bnx2x_stats_arr[i].size == 0) {
 			/* skip this counter */
+29 −0
@@ -2003,6 +2003,23 @@ struct shmem_lfa {
 	#define SHMEM_LFA_DONT_CLEAR_STAT		(1<<24)
 };
 
+/* Used to support NSCI get OS driver version
+ * on driver load the version value will be set
+ * on driver unload driver value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED			0
+
+	/* personalties order is important */
+#define DRV_PERS_ETHERNET			0
+#define DRV_PERS_ISCSI				1
+#define DRV_PERS_FCOE				2
+
+	/* shmem2 struct is constant can't add more personalties here */
+#define MAX_DRV_PERS				3
+	u32 versions[MAX_DRV_PERS];
+};
+
 struct ncsi_oem_fcoe_features {
 	u32 fcoe_features1;
 	#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
@@ -2217,6 +2234,18 @@ struct shmem2_region {
 	u32 reserved4;				/* Offset 0x150 */
 	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
 	#define LINK_ATTR_SYNC_KR2_ENABLE	(1<<0)
+
+	u32 reserved5[2];
+	u32 reserved6[PORT_MAX];
+
+	/* driver version for each personality */
+	struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+	/* Flag to the driver that PF's drv_info_host_addr buffer was read  */
+	u32 mfw_drv_indication;
+
+	/* We use indication for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
 };
 
 
+136 −14
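The struct os_drv_ver words added above hold one driver version per personality, packed one byte per component. Below is a small, stand-alone sketch of that encoding; it mirrors what bnx2x_update_mng_version_utility() in the hunks further down does for the non-bnx2x ("storage") format, and the example version string is made up.

#include <stdint.h>
#include <stdio.h>

/* Pack a dotted "a.b.c.d" version into the u32 layout used by versions[]. */
static uint32_t pack_drv_ver(const char *version)
{
	unsigned int v[4] = { 0, 0, 0, 0 };

	/* Components that fail to parse stay 0, as in the driver helper. */
	sscanf(version, "%u.%u.%u.%u", &v[0], &v[1], &v[2], &v[3]);

	return ((v[0] & 0xff) << 24) | ((v[1] & 0xff) << 16) |
	       ((v[2] & 0xff) << 8) | (v[3] & 0xff);
}

int main(void)
{
	/* e.g. a storage (iSCSI/FCoE) version string reported via CNIC */
	printf("0x%08x\n", pack_drv_ver("2.10.55.3"));	/* prints 0x020a3703 */
	return 0;
}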
@@ -120,7 +120,8 @@ static int debug;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, " Default debug msglevel");
 
-struct workqueue_struct *bnx2x_wq;
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
 
 struct bnx2x_mac_vals {
 	u32 xmac_addr;
@@ -1856,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 #else
 		return;
 #endif
-	/* SRIOV: reschedule any 'in_progress' operations */
-	bnx2x_iov_sp_event(bp, cid, true);
 
 	smp_mb__before_atomic_inc();
 	atomic_inc(&bp->cq_spq_left);
@@ -3482,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
 	bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
 }
 
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH	(20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT		(25)
+
 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
 {
 	enum drv_info_opcode op_code;
 	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+	bool release = false;
+	int wait;
 
 	/* if drv_info version supported by MFW doesn't match - send NACK */
 	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3496,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
 	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
 		  DRV_INFO_CONTROL_OP_CODE_SHIFT;
 
+	/* Must prevent other flows from accessing drv_info_to_mcp */
+	mutex_lock(&bp->drv_info_mutex);
+
 	memset(&bp->slowpath->drv_info_to_mcp, 0,
 	       sizeof(union drv_info_to_mcp));
 
@@ -3512,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
 	default:
 		/* if op code isn't supported - send NACK */
 		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
-		return;
+		goto out;
 	}
 
 	/* if we got drv_info attn from MFW then these fields are defined in
@@ -3524,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
 		U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
 
 	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+	/* Since possible management wants both this and get_driver_version
+	 * need to wait until management notifies us it finished utilizing
+	 * the buffer.
+	 */
+	if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+		DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+	} else if (!bp->drv_info_mng_owner) {
+		u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+		for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+			u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+			/* Management is done; need to clear indication */
+			if (indication & bit) {
+				SHMEM2_WR(bp, mfw_drv_indication,
+					  indication & ~bit);
+				release = true;
+				break;
+			}
+
+			msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+		}
+	}
+	if (!release) {
+		DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+		bp->drv_info_mng_owner = true;
+	}
+
+out:
+	mutex_unlock(&bp->drv_info_mutex);
+}
+
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+	u8 vals[4];
+	int i = 0;
+
+	if (bnx2x_format) {
+		i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+			   &vals[0], &vals[1], &vals[2], &vals[3]);
+		if (i > 0)
+			vals[0] -= '0';
+	} else {
+		i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+			   &vals[0], &vals[1], &vals[2], &vals[3]);
+	}
+
+	while (i < 4)
+		vals[i++] = 0;
+
+	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+	u32 iscsiver = DRV_VER_NOT_LOADED;
+	u32 fcoever = DRV_VER_NOT_LOADED;
+	u32 ethver = DRV_VER_NOT_LOADED;
+	int idx = BP_FW_MB_IDX(bp);
+	u8 *version;
+
+	if (!SHMEM2_HAS(bp, func_os_drv_ver))
+		return;
+
+	mutex_lock(&bp->drv_info_mutex);
+	/* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
+	if (bp->drv_info_mng_owner)
+		goto out;
+
+	if (bp->state != BNX2X_STATE_OPEN)
+		goto out;
+
+	/* Parse ethernet driver version */
+	ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+	if (!CNIC_LOADED(bp))
+		goto out;
+
+	/* Try getting storage driver version via cnic */
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+	bnx2x_drv_info_iscsi_stat(bp);
+	version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+	iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+	bnx2x_drv_info_fcoe_stat(bp);
+	version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+	fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+	mutex_unlock(&bp->drv_info_mutex);
+
+	DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+	   ethver, iscsiver, fcoever);
 }
 
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -4052,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 				bnx2x_handle_drv_info_req(bp);
 
 			if (val & DRV_STATUS_VF_DISABLED)
-				bnx2x_vf_handle_flr_event(bp);
+				bnx2x_schedule_iov_task(bp,
+							BNX2X_IOV_HANDLE_FLR);
 
 			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 				bnx2x_pmf_update(bp);
@@ -5243,8 +5351,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 		/* handle eq element */
 		switch (opcode) {
 		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
-			DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
-			bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+			bnx2x_vf_mbx_schedule(bp,
+					      &elem->message.data.vf_pf_event);
 			continue;
 
 		case EVENT_RING_OPCODE_STAT_QUERY:
@@ -5459,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
 			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
 	}
 
-	/* must be called after the EQ processing (since eq leads to sriov
-	 * ramrod completion flows).
-	 * This flow may have been scheduled by the arrival of a ramrod
-	 * completion, or by the sriov code rescheduling itself.
-	 */
-	bnx2x_iov_sp_task(bp);
-
 	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
 	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
 			       &bp->sp_state)) {
@@ -8882,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
 		synchronize_irq(bp->pdev->irq);
 
 	flush_workqueue(bnx2x_wq);
+	flush_workqueue(bnx2x_iov_wq);
 
 	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
 				BNX2X_F_STATE_STARTED && tout--)
@@ -9807,6 +9909,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
 		bnx2x_dcbx_resume_hw_tx(bp);
 	}
 
+	if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+			       &bp->sp_rtnl_state))
+		bnx2x_update_mng_version(bp);
+
 	/* work which needs rtnl lock not-taken (as it takes the lock itself and
 	 * can be called from other contexts as well)
 	 */
@@ -11757,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
+	mutex_init(&bp->drv_info_mutex);
+	bp->drv_info_mng_owner = false;
 	spin_lock_init(&bp->stats_lock);
 	sema_init(&bp->stats_sema, 1);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
 	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+	INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
 	if (IS_PF(bp)) {
 		rc = bnx2x_get_hwinfo(bp);
 		if (rc)
@@ -13385,11 +13494,18 @@ static int __init bnx2x_init(void)
 		pr_err("Cannot create workqueue\n");
 		return -ENOMEM;
 	}
+	bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+	if (!bnx2x_iov_wq) {
+		pr_err("Cannot create iov workqueue\n");
+		destroy_workqueue(bnx2x_wq);
+		return -ENOMEM;
+	}
 
 	ret = pci_register_driver(&bnx2x_pci_driver);
 	if (ret) {
 		pr_err("Cannot register driver\n");
 		destroy_workqueue(bnx2x_wq);
+		destroy_workqueue(bnx2x_iov_wq);
 	}
 	return ret;
 }
@@ -13401,6 +13517,7 @@ static void __exit bnx2x_cleanup(void)
 	pci_unregister_driver(&bnx2x_pci_driver);
 
 	destroy_workqueue(bnx2x_wq);
+	destroy_workqueue(bnx2x_iov_wq);
 
 	/* Free globally allocated resources */
 	list_for_each_safe(pos, q, &bnx2x_prev_list) {
@@ -13794,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 				REG_WR(bp, scratch_offset + i,
 				       *(host_addr + i/4));
 		}
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
 		break;
 	}
 
@@ -13811,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
 			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
 		}
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
 		break;
 	}
 
@@ -13916,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 
 	rcu_assign_pointer(bp->cnic_ops, ops);
 
+	/* Schedule driver to read CNIC driver versions */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
 	return 0;
 }
 
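One detail worth calling out from the drv_info changes above: after ACKing a DRV_INFO request, the PF polls the shared mfw_drv_indication word for its per-PF "read done" bit, at most BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) iterations with BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) ms sleeps, roughly 500 ms in total; if management never sets the bit, the driver keeps drv_info_mng_owner and bnx2x_update_mng_version() bails out early. A stand-alone sketch of that bounded handshake follows; read_indication()/write_indication()/sleep_ms() are hypothetical stand-ins for the SHMEM2 accessors and msleep().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IND_READ_DONE(pf)	(1u << (pf))	/* cf. MFW_DRV_IND_READ_DONE_OFFSET */
#define POLL_COUNT		25
#define POLL_MSEC		20

static uint32_t shmem_indication;		/* stand-in for the shmem2 word */

static uint32_t read_indication(void)  { return shmem_indication; }
static void write_indication(uint32_t v) { shmem_indication = v; }
static void sleep_ms(int ms) { (void)ms; }	/* msleep() in the driver */

/* Returns true if management released the buffer within the poll budget. */
static bool wait_for_mgmt_release(int pf)
{
	for (int i = 0; i < POLL_COUNT; i++) {
		uint32_t ind = read_indication();

		if (ind & IND_READ_DONE(pf)) {
			/* Management is done; clear the indication bit. */
			write_indication(ind & ~IND_READ_DONE(pf));
			return true;
		}
		sleep_ms(POLL_MSEC);
	}
	return false;	/* caller would set drv_info_mng_owner = true */
}

int main(void)
{
	shmem_indication = IND_READ_DONE(1);	/* pretend MFW finished for PF 1 */
	printf("released=%d within a %d ms budget\n",
	       wait_for_mgmt_release(1), POLL_COUNT * POLL_MSEC);
	return 0;
}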