Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6383c0b3 authored by Ariel Elior's avatar Ariel Elior Committed by David S. Miller
Browse files

bnx2x: Multiple concurrent l2 traffic classes



Overview:
 Support mapping of priorities to traffic classes and
 traffic classes to transmission queues ranges in the net device.
 The queue ranges are (count, offset) pairs relating to the txq
 array.
 This can be done via DCBX negotiation or by kernel.
 As a result Enhanced Transmission Selection (ETS) and Priority Flow
 Control (PFC) are supported between L2 network traffic classes.

 Mapping:
 This patch uses the netdev_set_num_tc, netdev_set_prio_tc_map and
 netdev_set_tc_queue functions to map priorities to traffic classes
 and traffic classes to transmission queue ranges.
 This mapping is performed by bnx2x_setup_tc function which is
 connected to the ndo_setup_tc.
 This function is always called at nic load where by default it
 maps all priorities to tc 0, and it may also be called by the
 kernel or by the bnx2x upon DCBX negotiation to modify the mapping.

 rtnl lock:
 When the ndo_setup_tc is called at nic load or by kernel the rtnl
 lock is already taken. However, when DCBX negotiation takes place
 the lock is not taken. The work is therefore scheduled to be
 handled by the sp_rtnl task.

 Fastpath:
 The fastpath structure of the bnx2x which was previously used
 to hold the information of one tx queue and one rx queue was
 redesigned to represent multiple tx queues, one for each traffic
 class.
 The transmission queue supplied in the skb by the kernel can no
 longer be interpreted as a straightforward index into the fastpath
 structure array, but it must rather be decoded to the appropriate
 fastpath index and the tc within that fastpath.

 Slowpath:
 The bnx2x's queue object was redesigned to accommodate multiple
 transmission queues. The queue object's state machine was enhanced
 to allow opening multiple transmission-only connections on top of
 the regular tx-rx connection.

 Firmware:
 This feature relies on the tx-only queue feature introduced in the
 bnx2x 7.0.23 firmware and the FW likewise must have the bnx2x multi
 cos support.

 Signed-off-by: default avatarAriel Elior <ariele@broadcom.com>
 Signed-off-by: default avatarEilon Greenstein <eilong@broadcom.com>

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 7be08a72
Loading
Loading
Loading
Loading
+120 −61
Original line number Original line Diff line number Diff line
@@ -120,6 +120,7 @@ do { \




#ifdef BNX2X_STOP_ON_ERROR
#ifdef BNX2X_STOP_ON_ERROR
void bnx2x_int_disable(struct bnx2x *bp);
#define bnx2x_panic() do { \
#define bnx2x_panic() do { \
		bp->panic = 1; \
		bp->panic = 1; \
		BNX2X_ERR("driver assert\n"); \
		BNX2X_ERR("driver assert\n"); \
@@ -240,21 +241,21 @@ do { \
 */
 */
/* iSCSI L2 */
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CL_ID_IDX	1
#define BNX2X_ISCSI_ETH_CL_ID_IDX	1
#define BNX2X_ISCSI_ETH_CID		17
#define BNX2X_ISCSI_ETH_CID		49


/* FCoE L2 */
/* FCoE L2 */
#define BNX2X_FCOE_ETH_CL_ID_IDX	2
#define BNX2X_FCOE_ETH_CL_ID_IDX	2
#define BNX2X_FCOE_ETH_CID		18
#define BNX2X_FCOE_ETH_CID		50


/** Additional rings budgeting */
/** Additional rings budgeting */
#ifdef BCM_CNIC
#ifdef BCM_CNIC
#define CNIC_CONTEXT_USE		1
#define CNIC_PRESENT			1
#define FCOE_CONTEXT_USE		1
#define FCOE_PRESENT			1
#else
#else
#define CNIC_CONTEXT_USE		0
#define CNIC_PRESENT			0
#define FCOE_CONTEXT_USE		0
#define FCOE_PRESENT			0
#endif /* BCM_CNIC */
#endif /* BCM_CNIC */
#define NONE_ETH_CONTEXT_USE	(FCOE_CONTEXT_USE)
#define NON_ETH_CONTEXT_USE	(FCOE_PRESENT)


#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -262,8 +263,35 @@ do { \
#define SM_RX_ID			0
#define SM_RX_ID			0
#define SM_TX_ID			1
#define SM_TX_ID			1


/* fast path */
/* defines for multiple tx priority indices */
#define FIRST_TX_ONLY_COS_INDEX		1
#define FIRST_TX_COS_INDEX		0

/* defines for decoding the fastpath index and the cos index out of the
 * transmission queue index
 */
#define MAX_TXQS_PER_COS	FP_SB_MAX_E1x

#define TXQ_TO_FP(txq_index)	((txq_index) % MAX_TXQS_PER_COS)
#define TXQ_TO_COS(txq_index)	((txq_index) / MAX_TXQS_PER_COS)

/* rules for calculating the cids of tx-only connections */
#define CID_TO_FP(cid)		((cid) % MAX_TXQS_PER_COS)
#define CID_COS_TO_TX_ONLY_CID(cid, cos)	(cid + cos * MAX_TXQS_PER_COS)

/* fp index inside class of service range */
#define FP_COS_TO_TXQ(fp, cos)    ((fp)->index + cos * MAX_TXQS_PER_COS)

/*
 * 0..15 eth cos0
 * 16..31 eth cos1 if applicable
 * 32..47 eth cos2 if applicable
 * fcoe queue follows eth queues (16, 32, 48 depending on cos)
 */
#define MAX_ETH_TXQ_IDX(bp)	(MAX_TXQS_PER_COS * (bp)->max_cos)
#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp))


/* fast path */
struct sw_rx_bd {
struct sw_rx_bd {
	struct sk_buff	*skb;
	struct sk_buff	*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_ADDR(mapping);
@@ -388,6 +416,29 @@ struct bnx2x_agg_info {
#define Q_STATS_OFFSET32(stat_name) \
#define Q_STATS_OFFSET32(stat_name) \
			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)


struct bnx2x_fp_txdata {

	struct sw_tx_bd		*tx_buf_ring;

	union eth_tx_bd_types	*tx_desc_ring;
	dma_addr_t		tx_desc_mapping;

	u32			cid;

	union db_prod		tx_db;

	u16			tx_pkt_prod;
	u16			tx_pkt_cons;
	u16			tx_bd_prod;
	u16			tx_bd_cons;

	unsigned long		tx_pkt;

	__le16			*tx_cons_sb;

	int			txq_index;
};

struct bnx2x_fastpath {
struct bnx2x_fastpath {
	struct bnx2x		*bp; /* parent */
	struct bnx2x		*bp; /* parent */


@@ -404,10 +455,8 @@ struct bnx2x_fastpath {


	dma_addr_t		status_blk_mapping;
	dma_addr_t		status_blk_mapping;


	struct sw_tx_bd		*tx_buf_ring;
	u8			max_cos; /* actual number of active tx coses */

	struct bnx2x_fp_txdata	txdata[BNX2X_MULTI_TX_COS];
	union eth_tx_bd_types	*tx_desc_ring;
	dma_addr_t		tx_desc_mapping;


	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
@@ -426,20 +475,13 @@ struct bnx2x_fastpath {


	u32			cid;
	u32			cid;


	__le16			fp_hc_idx;

	u8			index;		/* number in fp array */
	u8			index;		/* number in fp array */
	u8			cl_id;		/* eth client id */
	u8			cl_id;		/* eth client id */
	u8			cl_qzone_id;
	u8			cl_qzone_id;
	u8			fw_sb_id;	/* status block number in FW */
	u8			fw_sb_id;	/* status block number in FW */
	u8			igu_sb_id;	/* status block number in HW */
	u8			igu_sb_id;	/* status block number in HW */
	union db_prod		tx_db;

	u16			tx_pkt_prod;
	u16			tx_pkt_cons;
	u16			tx_bd_prod;
	u16			tx_bd_cons;
	__le16			*tx_cons_sb;

	__le16			fp_hc_idx;


	u16			rx_bd_prod;
	u16			rx_bd_prod;
	u16			rx_bd_cons;
	u16			rx_bd_cons;
@@ -449,8 +491,7 @@ struct bnx2x_fastpath {
	/* The last maximal completed SGE */
	/* The last maximal completed SGE */
	u16			last_max_sge;
	u16			last_max_sge;
	__le16			*rx_cons_sb;
	__le16			*rx_cons_sb;
	unsigned long		tx_pkt,
	unsigned long		rx_pkt,
				rx_pkt,
				rx_calls;
				rx_calls;


	/* TPA related */
	/* TPA related */
@@ -489,8 +530,12 @@ struct bnx2x_fastpath {
#define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
#define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
#define bnx2x_fcoe_fp(bp)		(&bp->fp[FCOE_IDX])
#define bnx2x_fcoe_fp(bp)		(&bp->fp[FCOE_IDX])
#define bnx2x_fcoe(bp, var)		(bnx2x_fcoe_fp(bp)->var)
#define bnx2x_fcoe(bp, var)		(bnx2x_fcoe_fp(bp)->var)
#define bnx2x_fcoe_tx(bp, var)		(bnx2x_fcoe_fp(bp)-> \
						txdata[FIRST_TX_COS_INDEX].var)




#define IS_ETH_FP(fp)			(fp->index < \
					 BNX2X_NUM_ETH_QUEUES(fp->bp))
#ifdef BCM_CNIC
#ifdef BCM_CNIC
#define IS_FCOE_FP(fp)			(fp->index == FCOE_IDX)
#define IS_FCOE_FP(fp)			(fp->index == FCOE_IDX)
#define IS_FCOE_IDX(idx)		((idx) == FCOE_IDX)
#define IS_FCOE_IDX(idx)		((idx) == FCOE_IDX)
@@ -649,18 +694,23 @@ struct bnx2x_fastpath {


#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index   */
#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index   */
					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
#define HC_INDEX_ETH_TX_CQ_CONS		5 /* Formerly Cstorm ETH CQ index   */
#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index   */
					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index   */
					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index   */
					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */


#define U_SB_ETH_RX_CQ_INDEX		HC_INDEX_ETH_RX_CQ_CONS
#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
#define U_SB_ETH_RX_BD_INDEX		HC_INDEX_ETH_RX_BD_CONS

#define C_SB_ETH_TX_CQ_INDEX		HC_INDEX_ETH_TX_CQ_CONS


#define BNX2X_RX_SB_INDEX \
#define BNX2X_RX_SB_INDEX \
	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])


#define BNX2X_TX_SB_INDEX \
#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
	(&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])

#define BNX2X_TX_SB_INDEX_COS0 \
	(&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])


/* end of fast path */
/* end of fast path */


@@ -845,25 +895,6 @@ extern struct workqueue_struct *bnx2x_wq;
/* fast-path interrupt contexts E2 */
/* fast-path interrupt contexts E2 */
#define FP_SB_MAX_E2		HC_SB_MAX_SB_E2
#define FP_SB_MAX_E2		HC_SB_MAX_SB_E2


/*
 * cid_cnt parameter below refers to the value returned by
 * 'bnx2x_get_l2_cid_count()' routine
 */

/*
 * The number of FP context allocated by the driver == max number of regular
 * L2 queues + 1 for the FCoE L2 queue
 */
#define L2_FP_COUNT(cid_cnt)	((cid_cnt) - FCOE_CONTEXT_USE)

/*
 * The number of FP-SB allocated by the driver == max number of regular L2
 * queues + 1 for the CNIC which also consumes an FP-SB
 */
#define FP_SB_COUNT(cid_cnt)	((cid_cnt) - CNIC_CONTEXT_USE)
#define NUM_IGU_SB_REQUIRED(cid_cnt) \
				(FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)

union cdu_context {
union cdu_context {
	struct eth_context eth;
	struct eth_context eth;
	char pad[1024];
	char pad[1024];
@@ -871,7 +902,7 @@ union cdu_context {


/* CDU host DB constants */
/* CDU host DB constants */
#define CDU_ILT_PAGE_SZ_HW	3
#define CDU_ILT_PAGE_SZ_HW	3
#define CDU_ILT_PAGE_SZ		(4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
#define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))


#ifdef BCM_CNIC
#ifdef BCM_CNIC
@@ -1048,6 +1079,7 @@ struct bnx2x_fw_stats_data {


/* Public slow path states */
/* Public slow path states */
enum {
enum {
	BNX2X_SP_RTNL_SETUP_TC,
	BNX2X_SP_RTNL_TX_TIMEOUT,
	BNX2X_SP_RTNL_TX_TIMEOUT,
};
};


@@ -1226,6 +1258,10 @@ struct bnx2x {
#define BNX2X_STATE_ERROR		0xf000
#define BNX2X_STATE_ERROR		0xf000


	int			multi_mode;
	int			multi_mode;
#define BNX2X_MAX_PRIORITY		8
#define BNX2X_MAX_ENTRIES_PER_PRI	16
#define BNX2X_MAX_COS			3
#define BNX2X_MAX_TX_COS		2
	int			num_queues;
	int			num_queues;
	int			disable_tpa;
	int			disable_tpa;


@@ -1275,11 +1311,21 @@ struct bnx2x {
	struct bnx2x_ilt	*ilt;
	struct bnx2x_ilt	*ilt;
#define BP_ILT(bp)		((bp)->ilt)
#define BP_ILT(bp)		((bp)->ilt)
#define ILT_MAX_LINES		256
#define ILT_MAX_LINES		256
/*
 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
 * to CNIC.
 */
#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_PRESENT)


	int			l2_cid_count;
/*
#define L2_ILT_LINES(bp)	(DIV_ROUND_UP((bp)->l2_cid_count, \
 * Maximum CID count that might be required by the bnx2x:
 * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
 */
#define BNX2X_L2_CID_COUNT(bp)	(MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
					NON_ETH_CONTEXT_USE + CNIC_PRESENT)
#define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
					ILT_PAGE_CIDS))
					ILT_PAGE_CIDS))
#define BNX2X_DB_SIZE(bp)	((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
#define BNX2X_DB_SIZE(bp)	(BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))


	int			qm_cid_count;
	int			qm_cid_count;


@@ -1421,16 +1467,24 @@ struct bnx2x {
	u32					dcbx_remote_flags;
	u32					dcbx_remote_flags;
#endif
#endif
	u32					pending_max;
	u32					pending_max;

	/* multiple tx classes of service */
	u8					max_cos;

	/* priority to cos mapping */
	u8					prio_to_cos[8];
};
};


/* Tx queues may be less or equal to Rx queues */
/* Tx queues may be less or equal to Rx queues */
extern int num_queues;
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
#define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)


#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)


#define BNX2X_MAX_QUEUES(bp)	(bp->igu_sb_cnt - CNIC_CONTEXT_USE)
#define BNX2X_MAX_QUEUES(bp)	BNX2X_MAX_RSS_COUNT(bp)
/* #define is_eth_multi(bp)	(BNX2X_NUM_ETH_QUEUES(bp) > 1) */


#define RSS_IPV4_CAP_MASK						\
#define RSS_IPV4_CAP_MASK						\
	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1465,35 +1519,40 @@ struct bnx2x_func_init_params {
};
};


#define for_each_eth_queue(bp, var) \
#define for_each_eth_queue(bp, var) \
	for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
	for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)


#define for_each_nondefault_eth_queue(bp, var) \
#define for_each_nondefault_eth_queue(bp, var) \
	for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
	for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)


#define for_each_queue(bp, var) \
#define for_each_queue(bp, var) \
	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
		if (skip_queue(bp, var))	\
		if (skip_queue(bp, var))	\
			continue;		\
			continue;		\
		else
		else


/* Skip forwarding FP */
#define for_each_rx_queue(bp, var) \
#define for_each_rx_queue(bp, var) \
	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
		if (skip_rx_queue(bp, var))	\
		if (skip_rx_queue(bp, var))	\
			continue;		\
			continue;		\
		else
		else


/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
#define for_each_tx_queue(bp, var) \
	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
		if (skip_tx_queue(bp, var))	\
		if (skip_tx_queue(bp, var))	\
			continue;		\
			continue;		\
		else
		else


#define for_each_nondefault_queue(bp, var) \
#define for_each_nondefault_queue(bp, var) \
	for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
	for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
		if (skip_queue(bp, var))	\
		if (skip_queue(bp, var))	\
			continue;		\
			continue;		\
		else
		else


#define for_each_cos_in_tx_queue(fp, var) \
	for ((var) = 0; (var) < (fp)->max_cos; (var)++)

/* skip rx queue
/* skip rx queue
 * if FCOE l2 support is disabled and this is the fcoe L2 queue
 * if FCOE l2 support is disabled and this is the fcoe L2 queue
 */
 */
+271 −99

File changed.

Preview size limit exceeded, changes collapsed.

+73 −36
Original line number Original line Diff line number Diff line
@@ -439,6 +439,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
/* hard_xmit callback */
/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);


/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);


@@ -454,7 +457,7 @@ void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);


/* NAPI poll Tx part */
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);


/* suspend/resume callbacks */
/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -715,21 +718,22 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
		return bnx2x_igu_ack_int(bp);
		return bnx2x_igu_ack_int(bp);
}
}


static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
{
	/* Tell compiler that consumer and producer can change */
	/* Tell compiler that consumer and producer can change */
	barrier();
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}
}


static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
{
	s16 used;
	s16 used;
	u16 prod;
	u16 prod;
	u16 cons;
	u16 cons;


	prod = fp->tx_bd_prod;
	prod = txdata->tx_bd_prod;
	cons = fp->tx_bd_cons;
	cons = txdata->tx_bd_cons;


	/* NUM_TX_RINGS = number of "next-page" entries
	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	   It will be used as a threshold */
@@ -737,21 +741,30 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)


#ifdef BNX2X_STOP_ON_ERROR
#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif
#endif


	return (s16)(fp->bp->tx_ring_size) - used;
	return (s16)(bp->tx_ring_size) - used;
}
}


static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
{
	u16 hw_cons;
	u16 hw_cons;


	/* Tell compiler that status block fields can change */
	/* Tell compiler that status block fields can change */
	barrier();
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}
}


static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
@@ -963,7 +976,10 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
	/* Function parameters */
	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;
	start_params->sd_vlan_tag = bp->mf_ov;
	if (CHIP_IS_E1x(bp))
		start_params->network_cos_mode = OVERRIDE_COS;
		start_params->network_cos_mode = OVERRIDE_COS;
	else
		start_params->network_cos_mode = STATIC_COS;


	return bnx2x_func_state_change(bp, &func_params);
	return bnx2x_func_state_change(bp, &func_params);
}
}
@@ -1023,39 +1039,41 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
	}
	}
}
}


static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
{
	int i;
	int i;


	for (i = 1; i <= NUM_TX_RINGS; i++) {
	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
		struct eth_tx_next_bd *tx_next_bd =
			&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;


		tx_next_bd->addr_hi =
		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->tx_desc_mapping +
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->tx_desc_mapping +
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}
	}


	SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	fp->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.zero_fill1 = 0;
	fp->tx_db.data.prod = 0;
	txdata->tx_db.data.prod = 0;


	fp->tx_pkt_prod = 0;
	txdata->tx_pkt_prod = 0;
	fp->tx_pkt_cons = 0;
	txdata->tx_pkt_cons = 0;
	fp->tx_bd_prod = 0;
	txdata->tx_bd_prod = 0;
	fp->tx_bd_cons = 0;
	txdata->tx_bd_cons = 0;
	fp->tx_pkt = 0;
	txdata->tx_pkt = 0;
}
}


static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
{
	int i;
	int i;
	u8 cos;


	for_each_tx_queue(bp, i)
	for_each_tx_queue(bp, i)
		bnx2x_init_tx_ring_one(&bp->fp[i]);
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}
}


static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
@@ -1257,12 +1275,23 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}
}


static inline void bnx2x_init_txdata(struct bnx2x *bp,
	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
	__le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d",
	   txdata->cid, txdata->txq_index);
}


#ifdef BCM_CNIC
#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
{
	return bp->cnic_base_cl_id + cl_idx +
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE;
		(bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
}
}


static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
@@ -1293,10 +1322,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, bp) = bp;
	bnx2x_fcoe(bp, index) = FCOE_IDX;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;

	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

	DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index);

	/* qZone id equals to FW (per path) client id */
	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	/* init shortcut */
@@ -1306,9 +1338,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
	/* Configure Queue State object */
	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp),

		bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata),
	/* No multi-CoS for FCoE L2 client */
			      q_type);
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);


	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
			   "igu_sb %d\n",
			   "igu_sb %d\n",
@@ -1318,15 +1354,16 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
#endif
#endif


static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp)
				       struct bnx2x_fp_txdata *txdata)
{
{
	int cnt = 1000;
	int cnt = 1000;


	while (bnx2x_has_tx_work_unload(fp)) {
	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: "
			BNX2X_ERR("timeout waiting for queue[%d]: "
				 "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n",
				 "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons);
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			bnx2x_panic();
			return -EBUSY;
			return -EBUSY;
+25 −0
Original line number Original line Diff line number Diff line
@@ -653,6 +653,26 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
	}
	}
}
}


static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
{
	u8 prio, cos;
	for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
		for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
			if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
			    & (1 << prio)) {
				bp->prio_to_cos[prio] = cos;
			}
		}
	}

	/* setup tc must be called under rtnl lock, but we can't take it here
	 * as we are handling an attention on a work queue which must be
	 * flushed at some rtnl-locked contexts (e.g. if down)
	 */
	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
{
{
	switch (state) {
	switch (state) {
@@ -690,6 +710,11 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
#endif
#endif
			bnx2x_dcbx_stop_hw_tx(bp);
			bnx2x_dcbx_stop_hw_tx(bp);


			/* reconfigure the netdevice with the results of the new
			 * dcbx negotiation.
			 */
			bnx2x_dcbx_update_tc_mapping(bp);

			return;
			return;
		}
		}
	case BNX2X_DCBX_STATE_TX_PAUSED:
	case BNX2X_DCBX_STATE_TX_PAUSED:
+14 −13
Original line number Original line Diff line number Diff line
@@ -1616,6 +1616,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
	unsigned char *packet;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
	u16 tx_start_idx, tx_idx;
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod, rx_comp_cons;
	u16 pkt_prod, bd_prod, rx_comp_cons;
@@ -1670,17 +1671,17 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)


	/* send the loopback packet */
	/* send the loopback packet */
	num_pkts = 0;
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);


	pkt_prod = fp_tx->tx_pkt_prod++;
	pkt_prod = txdata->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->skb = skb;
	tx_buf->flags = 0;
	tx_buf->flags = 0;


	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	bd_prod = TX_BD(txdata->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -1697,27 +1698,27 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
	/* turn on parsing and get a BD */
	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));


	pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
	pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
	pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
	pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;


	memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
	memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
	memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
	memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));


	wmb();
	wmb();


	fp_tx->tx_db.data.prod += 2;
	txdata->tx_db.data.prod += 2;
	barrier();
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);


	mmiowb();
	mmiowb();
	barrier();
	barrier();


	num_pkts++;
	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	txdata->tx_bd_prod += 2; /* start + pbd */


	udelay(100);
	udelay(100);


	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;
		goto test_loopback_exit;


@@ -1731,7 +1732,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
		 * bnx2x_tx_int()), as both are taking netif_tx_lock().
		 * bnx2x_tx_int()), as both are taking netif_tx_lock().
		 */
		 */
		local_bh_disable();
		local_bh_disable();
		bnx2x_tx_int(fp_tx);
		bnx2x_tx_int(bp, txdata);
		local_bh_enable();
		local_bh_enable();
	}
	}


Loading