
Commit a31eb63a authored by David S. Miller

Merge branch 'thunderx-next'



Sunil Goutham says:

====================
net: thunderx: Support for newer chips and miscellaneous patches

This patch series adds support for VNIC on 81xx and 83xx SOCs.
81xx/83xx differ from 88xx in capabilities and in the new types of
interfaces supported (e.g. QSGMII, RGMII), and they have DLMs instead of
QLMs, which allows a single BGX to carry interfaces of different LMAC types.

Also included are some patches common to all 88xx/81xx/83xx SOCs, such as
using the netdev's name while registering irqs, resetting receive queue
stats, and changes to use standard APIs for split-buffer Rx packets,
generating the RSS key, etc.

PS: Most of these patches were submitted earlier as part of a different
series but for some reason were not picked up by patchwork. Since new
patches have been added in the meantime, all are resubmitted as a new
patchset.

Changes from v1:
- Incorporated Yuval Mintz's suggestion to use a generic API to set the
  minimum queue count, i.e. netif_get_num_default_rss_queues().
- Resolved a compilation issue reported by the kernel test robot in the
  patch 'Add support for 16 LMACs of 83xx'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2ce66f9c 93db2cf8
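Note: netif_get_num_default_rss_queues() is the generic helper adopted in v2
of this series; in this kernel it returns min(DEFAULT_MAX_NUM_RSS_QUEUES (8),
num_online_cpus()). A minimal sketch of clamping a queue count with it; the
function name below is illustrative, not code from this series:

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Sketch: pick a default RSS queue count bounded by what the hardware
 * supports, using the generic helper instead of a driver-local constant.
 */
static int example_pick_rss_queue_count(int hw_max_queues)
{
	int qcount = netif_get_num_default_rss_queues();

	return min(qcount, hw_max_queues);
}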
drivers/net/ethernet/cavium/Kconfig +10 −0
@@ -36,10 +36,20 @@ config THUNDER_NIC_BGX
 	depends on 64BIT
 	select PHYLIB
 	select MDIO_THUNDER
+	select THUNDER_NIC_RGX
 	---help---
 	  This driver supports programming and controlling of MAC
 	  interface from NIC physical function driver.
 
+config	THUNDER_NIC_RGX
+	tristate "Thunder MAC interface driver (RGX)"
+	depends on 64BIT
+	select PHYLIB
+	select MDIO_THUNDER
+	---help---
+	  This driver supports configuring XCV block of RGX interface
+	  present on CN81XX chip.
+
 config LIQUIDIO
 	tristate "Cavium LiquidIO support"
 	depends on 64BIT
drivers/net/ethernet/cavium/thunder/Makefile +1 −0
@@ -2,6 +2,7 @@
 # Makefile for Cavium's Thunder ethernet device
 #
 
+obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
 obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
 obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
 obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
drivers/net/ethernet/cavium/thunder/nic.h +49 −36
@@ -20,6 +20,17 @@
 #define	PCI_DEVICE_ID_THUNDER_NIC_VF		0xA034
 #define	PCI_DEVICE_ID_THUNDER_BGX		0xA026
 
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF		0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF		0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF		0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF	0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF		0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF		0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF		0xA334
+
 /* PCI BAR nos */
 #define	PCI_CFG_REG_BAR_NUM		0
 #define	PCI_MSIX_REG_BAR_NUM		4
@@ -41,40 +52,8 @@
 /* Max pkinds */
 #define	NIC_MAX_PKIND			16
 
-/* Rx Channels */
-/* Receive channel configuration in TNS bypass mode
- * Below is configuration in TNS bypass mode
- * BGX0-LMAC0-CHAN0 - VNIC CHAN0
- * BGX0-LMAC1-CHAN0 - VNIC CHAN16
- * ...
- * BGX1-LMAC0-CHAN0 - VNIC CHAN128
- * ...
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174
- */
-#define	NIC_INTF_COUNT			2  /* Interfaces btw VNIC and TNS/BGX */
-#define	NIC_CHANS_PER_INF		128
-#define	NIC_MAX_CHANS			(NIC_INTF_COUNT * NIC_CHANS_PER_INF)
-#define	NIC_CPI_COUNT			2048 /* No of channel parse indices */
-
-/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
-#define NIC_MAX_BGX			MAX_BGX_PER_CN88XX
-#define	NIC_CPI_PER_BGX			(NIC_CPI_COUNT / NIC_MAX_BGX)
-#define	NIC_MAX_CPI_PER_LMAC		64 /* Max when CPI_ALG is IP diffserv */
-#define	NIC_RSSI_PER_BGX		(NIC_RSSI_COUNT / NIC_MAX_BGX)
-
-/* Tx scheduling */
-#define	NIC_MAX_TL4			1024
-#define	NIC_MAX_TL4_SHAPERS		256 /* 1 shaper for 4 TL4s */
-#define	NIC_MAX_TL3			256
-#define	NIC_MAX_TL3_SHAPERS		64  /* 1 shaper for 4 TL3s */
-#define	NIC_MAX_TL2			64
-#define	NIC_MAX_TL2_SHAPERS		2  /* 1 shaper for 32 TL2s */
-#define	NIC_MAX_TL1			2
-
-/* TNS bypass mode */
-#define	NIC_TL2_PER_BGX			32
-#define	NIC_TL4_PER_BGX			(NIC_MAX_TL4 / NIC_MAX_BGX)
-#define	NIC_TL4_PER_LMAC		(NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+/* Max when CPI_ALG is IP diffserv */
+#define	NIC_MAX_CPI_PER_LMAC		64
 
 /* NIC VF Interrupts */
 #define	NICVF_INTR_CQ			0
@@ -148,7 +127,6 @@ struct nicvf_cq_poll {
 	struct	napi_struct napi;
 };
 
-#define	NIC_RSSI_COUNT			4096 /* Total no of RSS indices */
 #define NIC_MAX_RSS_HASH_BITS		8
 #define NIC_MAX_RSS_IDR_TBL_SIZE	(1 << NIC_MAX_RSS_HASH_BITS)
 #define RSS_HASH_KEY_SIZE		5 /* 320 bit key */
@@ -273,6 +251,7 @@ struct nicvf {
 	struct net_device	*netdev;
 	struct pci_dev		*pdev;
 	void __iomem		*reg_base;
+#define	MAX_QUEUES_PER_QSET			8
 	struct queue_set	*qs;
 	struct nicvf_cq_poll	*napi[8];
 	u8			vf_id;
@@ -368,6 +347,7 @@ struct nicvf {
 #define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
 #define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqet nicvf ptr to PVF */
 #define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+#define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
 #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
 #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
 
@@ -484,6 +464,31 @@ struct set_loopback {
 	bool  enable;
 };
 
+/* Reset statistics counters */
+struct reset_stat_cfg {
+	u8    msg;
+	/* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+	u16   rx_stat_mask;
+	/* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+	u8    tx_stat_mask;
+	/* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+	 * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+	 * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+	 * ..
+	 * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+	 * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+	 */
+	u16   rq_stat_mask;
+	/* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+	 * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+	 * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+	 * ..
+	 * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+	 * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+	 */
+	u16   sq_stat_mask;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -501,6 +506,7 @@ union nic_mbx {
 	struct sqs_alloc        sqs_alloc;
 	struct nicvf_ptr	nicvf;
 	struct set_loopback	lbk;
+	struct reset_stat_cfg	reset_stat;
 };
 
 #define NIC_NODE_ID_MASK	0x03
@@ -514,7 +520,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
 
 static inline bool pass1_silicon(struct pci_dev *pdev)
 {
-	return pdev->revision < 8;
+	return (pdev->revision < 8) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+static inline bool pass2_silicon(struct pci_dev *pdev)
+{
+	return (pdev->revision >= 8) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
 }
 
 int nicvf_set_real_num_queues(struct net_device *netdev,
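Note on the new mailbox message: the rq_stat_mask/sq_stat_mask layout packs
two STAT registers per queue, two bits per queue, as the field comments above
spell out. A sketch of how a VF could fill reset_stat_cfg to clear every
counter (mask values follow from those comments; nicvf_send_msg_to_pf() is
the existing VF-side mailbox helper, and this is not code from the series):

	union nic_mbx mbx = {};

	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF; /* RX_STAT(0..13) -> bits 0-13 */
	mbx.reset_stat.tx_stat_mask = 0x1F;   /* TX_STAT(0..4)  -> bits 0-4  */
	mbx.reset_stat.rq_stat_mask = 0xFFFF; /* RQ0..7, STAT(0..1) each */
	mbx.reset_stat.sq_stat_mask = 0xFFFF; /* SQ0..7, STAT(0..1) each */
	nicvf_send_msg_to_pf(nic, &mbx);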
drivers/net/ethernet/cavium/thunder/nic_main.c +347 −86
@@ -20,8 +20,25 @@
 #define DRV_NAME	"thunder-nic"
 #define DRV_VERSION	"1.0"
 
+struct hw_info {
+	u8		bgx_cnt;
+	u8		chans_per_lmac;
+	u8		chans_per_bgx; /* Rx/Tx chans */
+	u8		chans_per_rgx;
+	u8		chans_per_lbk;
+	u16		cpi_cnt;
+	u16		rssi_cnt;
+	u16		rss_ind_tbl_size;
+	u16		tl4_cnt;
+	u16		tl3_cnt;
+	u8		tl2_cnt;
+	u8		tl1_cnt;
+	bool		tl1_per_bgx; /* TL1 per BGX or per LMAC */
+};
+
 struct nicpf {
 	struct pci_dev		*pdev;
+	struct hw_info          *hw;
 	u8			node;
 	unsigned int		flags;
 	u8			num_vf_en;      /* No of VF enabled */
@@ -36,22 +53,22 @@ struct nicpf {
 #define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
 #define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
 #define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
-	u8			vf_lmac_map[MAX_LMAC];
+	u8			*vf_lmac_map;
 	struct delayed_work     dwork;
 	struct workqueue_struct *check_link;
-	u8			link[MAX_LMAC];
-	u8			duplex[MAX_LMAC];
-	u32			speed[MAX_LMAC];
+	u8			*link;
+	u8			*duplex;
+	u32			*speed;
 	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
 	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
-	u16			rss_ind_tbl_size;
 	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];
 
 	/* MSI-X */
 	bool			msix_enabled;
 	u8			num_vec;
-	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
+	struct msix_entry	*msix_entries;
 	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
+	char			irq_name[NIC_PF_MSIX_VECTORS][20];
 };
 
 /* Supported devices */
@@ -89,9 +106,22 @@ static u64 nic_reg_read(struct nicpf *nic, u64 offset)
 /* PF -> VF mailbox communication APIs */
 static void nic_enable_mbx_intr(struct nicpf *nic)
 {
-	/* Enable mailbox interrupt for all 128 VFs */
-	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
-	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+	int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);
+
+#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
+
+	/* Clear it, to avoid spurious interrupts (if any) */
+	nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));
+
+	/* Enable mailbox interrupt for all VFs */
+	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
+	/* One mailbox intr enable reg per 64 VFs */
+	if (vf_cnt > 64) {
+		nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
+			      INTR_MASK(vf_cnt - 64));
+		nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
+			      INTR_MASK(vf_cnt - 64));
+	}
 }
 
 static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
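Note: INTR_MASK(vfs) builds a mask of the low 'vfs' bits, saturating to
all-ones at 64, since one 64-bit MAILBOX_INT/ENA_W1S register pair covers 64
VFs. A self-contained sketch of the arithmetic (plain C; in the kernel,
BIT_ULL(n) is 1ULL << n):

#include <stdio.h>

#define INTR_MASK(vfs) (((vfs) < 64) ? ((1ULL << (vfs)) - 1) : (~0ULL))

int main(void)
{
	/* 81xx-like: 8 VFs -> bits 0-7 set */
	printf("%016llx\n", INTR_MASK(8));        /* 00000000000000ff */
	/* 88xx-like: 128 VFs -> both registers fully enabled, matching
	 * the old unconditional ~0ull writes.
	 */
	printf("%016llx\n", INTR_MASK(128));      /* ffffffffffffffff */
	printf("%016llx\n", INTR_MASK(128 - 64)); /* ffffffffffffffff */
	return 0;
}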
@@ -144,7 +174,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
 
 	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
 
-	if (vf < MAX_LMAC) {
+	if (vf < nic->num_vf_en) {
 		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 
@@ -155,7 +185,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
 	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
 	mbx.nic_cfg.node_id = nic->node;
 
-	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
 
 	nic_send_msg_to_vf(nic, vf, &mbx);
 }
@@ -248,14 +278,22 @@ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
 /* Set minimum transmit packet size */
 static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
 {
-	int lmac;
+	int lmac, max_lmac;
+	u16 sdevid;
 	u64 lmac_cfg;
 
 	/* Max value that can be set is 60 */
 	if (size > 60)
 		size = 60;
 
-	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+	/* 81xx's RGX has only one LMAC */
+	if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
+		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+	else
+		max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+	for (lmac = 0; lmac < max_lmac; lmac++) {
 		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
 		lmac_cfg &= ~(0xF << 2);
 		lmac_cfg |= ((size / 4) << 2);
@@ -275,7 +313,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
 
 	nic->num_vf_en = 0;
 
-	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
 		if (!(bgx_map & (1 << bgx)))
 			continue;
 		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
@@ -295,28 +333,125 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
 			nic_reg_write(nic,
 				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
 				      lmac_credit);
+
+		/* On CN81XX there are only 8 VFs but max possible no of
+		 * interfaces are 9.
+		 */
+		if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
+			nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
+			break;
+		}
 	}
 }
 
+static void nic_free_lmacmem(struct nicpf *nic)
+{
+	kfree(nic->vf_lmac_map);
+	kfree(nic->link);
+	kfree(nic->duplex);
+	kfree(nic->speed);
+}
+
+static int nic_get_hw_info(struct nicpf *nic)
+{
+	u8 max_lmac;
+	u16 sdevid;
+	struct hw_info *hw = nic->hw;
+
+	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+	switch (sdevid) {
+	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+		hw->bgx_cnt = MAX_BGX_PER_CN88XX;
+		hw->chans_per_lmac = 16;
+		hw->chans_per_bgx = 128;
+		hw->cpi_cnt = 2048;
+		hw->rssi_cnt = 4096;
+		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+		hw->tl3_cnt = 256;
+		hw->tl2_cnt = 64;
+		hw->tl1_cnt = 2;
+		hw->tl1_per_bgx = true;
+		break;
+	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+		hw->bgx_cnt = MAX_BGX_PER_CN81XX;
+		hw->chans_per_lmac = 8;
+		hw->chans_per_bgx = 32;
+		hw->chans_per_rgx = 8;
+		hw->chans_per_lbk = 24;
+		hw->cpi_cnt = 512;
+		hw->rssi_cnt = 256;
+		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+		hw->tl3_cnt = 64;
+		hw->tl2_cnt = 16;
+		hw->tl1_cnt = 10;
+		hw->tl1_per_bgx = false;
+		break;
+	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+		hw->bgx_cnt = MAX_BGX_PER_CN83XX;
+		hw->chans_per_lmac = 8;
+		hw->chans_per_bgx = 32;
+		hw->chans_per_lbk = 64;
+		hw->cpi_cnt = 2048;
+		hw->rssi_cnt = 1024;
+		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+		hw->tl3_cnt = 256;
+		hw->tl2_cnt = 64;
+		hw->tl1_cnt = 18;
+		hw->tl1_per_bgx = false;
+		break;
+	}
+	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
+
+	/* Allocate memory for LMAC tracking elements */
+	max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
+	nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+	if (!nic->vf_lmac_map)
+		goto error;
+	nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+	if (!nic->link)
+		goto error;
+	nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+	if (!nic->duplex)
+		goto error;
+	nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
+	if (!nic->speed)
+		goto error;
+	return 0;
+
+error:
+	nic_free_lmacmem(nic);
+	return -ENOMEM;
+}
+
 #define BGX0_BLOCK 8
 #define BGX1_BLOCK 9
 
-static void nic_init_hw(struct nicpf *nic)
+static int nic_init_hw(struct nicpf *nic)
 {
-	int i;
+	int i, err;
 	u64 cqm_cfg;
 
+	/* Get HW capability info */
+	err = nic_get_hw_info(nic);
+	if (err)
+		return err;
+
 	/* Enable NIC HW block */
 	nic_reg_write(nic, NIC_PF_CFG, 0x3);
 
 	/* Enable backpressure */
 	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
 
-	/* Disable TNS mode on both interfaces */
-	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
-		      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
-	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
-		      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+	/* TNS and TNS bypass modes are present only on 88xx */
+	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
+		/* Disable TNS mode on both interfaces */
+		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+			      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+			      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+	}
+
 	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
 		      (1ULL << 63) | BGX0_BLOCK);
 	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
@@ -346,11 +481,14 @@ static void nic_init_hw(struct nicpf *nic)
 	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
 	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
 		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
+
+	return 0;
 }
 
 /* Channel parse index configuration */
 static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
 {
+	struct hw_info *hw = nic->hw;
 	u32 vnic, bgx, lmac, chan;
 	u32 padd, cpi_count = 0;
 	u64 cpi_base, cpi, rssi_base, rssi;
@@ -360,9 +498,9 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
 	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
 	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
 
-	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
-	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
-	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+	rssi_base = vnic * hw->rss_ind_tbl_size;
 
 	/* Rx channel configuration */
 	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
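Note: with hw_info populated by nic_get_hw_info(), the Rx channel and the
CPI/RSSI bases are derived per SoC, and the bases now come straight from the
VNIC number. A worked example with 88xx values (chans_per_lmac = 16,
chans_per_bgx = 128, rss_ind_tbl_size = 256; the vnic/bgx/lmac numbers are
made up for illustration):

#include <stdio.h>

int main(void)
{
	int bgx = 1, lmac = 1, vnic = 5;
	int chan = (lmac * 16) + (bgx * 128); /* = 144 */
	int cpi_base = vnic * 64;             /* NIC_MAX_CPI_PER_LMAC = 64 */
	int rssi_base = vnic * 256;           /* 88xx rss_ind_tbl_size */

	/* prints: chan=144 cpi_base=320 rssi_base=1280 */
	printf("chan=%d cpi_base=%d rssi_base=%d\n", chan, cpi_base, rssi_base);
	return 0;
}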
@@ -434,7 +572,7 @@ static void nic_send_rss_size(struct nicpf *nic, int vf)
 	msg = (u64 *)&mbx;
 
 	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
-	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+	mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
 	nic_send_msg_to_vf(nic, vf, &mbx);
 }
 
@@ -481,7 +619,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
 /* 4 level transmit side scheduler configutation
  * for TNS bypass mode
  *
- * Sample configuration for SQ0
+ * Sample configuration for SQ0 on 88xx
  * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
@@ -494,6 +632,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
 static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
 			       struct sq_cfg_msg *sq)
 {
+	struct hw_info *hw = nic->hw;
 	u32 bgx, lmac, chan;
 	u32 tl2, tl3, tl4;
 	u32 rr_quantum;
@@ -512,21 +651,28 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
 	/* 24 bytes for FCS, IPG and preamble */
 	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
 
-	if (!sq->sqs_mode) {
-		tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
-	} else {
-		for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
-			if (nic->vf_sqs[pqs_vnic][svf] == vnic)
-				break;
-		}
-		tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
-		tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
-		tl4 += (svf * NIC_TL4_PER_LMAC);
-		tl4 += (bgx * NIC_TL4_PER_BGX);
+	/* For 88xx 0-511 TL4 transmits via BGX0 and
+	 * 512-1023 TL4s transmit via BGX1.
+	 */
+	if (hw->tl1_per_bgx) {
+		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+		if (!sq->sqs_mode) {
+			tl4 += (lmac * MAX_QUEUES_PER_QSET);
+		} else {
+			for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+					break;
+			}
+			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+			tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
+			tl4 += (svf * MAX_QUEUES_PER_QSET);
+		}
+	} else {
+		tl4 = (vnic * MAX_QUEUES_PER_QSET);
 	}
 	tl4 += sq_idx;
 
-	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
 	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
 		      ((u64)vnic << NIC_QS_ID_SHIFT) |
 		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
@@ -534,8 +680,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
 		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
 
 	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
-	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
-	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+
+	/* On 88xx 0-127 channels are for BGX0 and
+	 * 127-255 channels for BGX1.
+	 *
+	 * On 81xx/83xx TL3_CHAN reg should be configured with channel
+	 * within LMAC i.e 0-7 and not the actual channel number like on 88xx
+	 */
+	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+	if (hw->tl1_per_bgx)
+		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+	else
+		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
 
 	/* Enable backpressure on the channel */
 	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
 
@@ -544,6 +701,16 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
 	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
 	/* No priorities as of now */
 	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+	/* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
+	 * on 81xx/83xx TL2 needs to be configured to transmit to one of the
+	 * possible LMACs.
+	 *
+	 * This register doesn't exist on 88xx.
+	 */
+	if (!hw->tl1_per_bgx)
+		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+			      lmac + (bgx * MAX_LMAC_PER_BGX));
 }
 
 /* Send primary nicvf pointer to secondary QS's VF */
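Note: on 88xx (tl1_per_bgx) each BGX owns half of the TL4 space; on
81xx/83xx the TL4 block is simply indexed by VNIC. A worked example with
assumed 88xx numbers (tl4_cnt = MAX_QUEUES_PER_QSET(8) * 128 total VFs =
1024, bgx_cnt = 2; the bgx/lmac/sq_idx values are made up):

#include <stdio.h>

int main(void)
{
	int bgx = 1, lmac = 2, sq_idx = 3;
	int tl4 = bgx * (1024 / 2); /* BGX1 owns TL4s 512-1023 */

	tl4 += lmac * 8;            /* primary Qset: 8 TL4s per LMAC */
	tl4 += sq_idx;
	printf("tl4 = %d\n", tl4);  /* 531 */

	/* 81xx/83xx path would simply be: tl4 = vnic * 8 + sq_idx */
	return 0;
}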
@@ -615,7 +782,7 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
 {
 	int bgx_idx, lmac_idx;
 
-	if (lbk->vf_id > MAX_LMAC)
+	if (lbk->vf_id >= nic->num_vf_en)
 		return -1;
 
 	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
@@ -626,6 +793,67 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
 	return 0;
 }
 
+/* Reset statistics counters */
+static int nic_reset_stat_counters(struct nicpf *nic,
+				   int vf, struct reset_stat_cfg *cfg)
+{
+	int i, stat, qnum;
+	u64 reg_addr;
+
+	for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
+		if (cfg->rx_stat_mask & BIT(i)) {
+			reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
+				   (vf << NIC_QS_ID_SHIFT) |
+				   (i << 3);
+			nic_reg_write(nic, reg_addr, 0);
+		}
+	}
+
+	for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
+		if (cfg->tx_stat_mask & BIT(i)) {
+			reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
+				   (vf << NIC_QS_ID_SHIFT) |
+				   (i << 3);
+			nic_reg_write(nic, reg_addr, 0);
+		}
+	}
+
+	for (i = 0; i <= 15; i++) {
+		qnum = i >> 1;
+		stat = i & 1 ? 1 : 0;
+		reg_addr = (vf << NIC_QS_ID_SHIFT) |
+			   (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
+		if (cfg->rq_stat_mask & BIT(i)) {
+			reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
+			nic_reg_write(nic, reg_addr, 0);
+		}
+		if (cfg->sq_stat_mask & BIT(i)) {
+			reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
+			nic_reg_write(nic, reg_addr, 0);
+		}
+	}
+	return 0;
+}
+
+static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
+{
+	u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
+	u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
+			      (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;
+
+	/* Configure tunnel parsing parameters */
+	nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
+		      (1ULL << 63 | UDP_GENEVE_PORT_NUM));
+	nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
+		      ((7ULL << 61) | prot_def));
+	nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
+		      ((7ULL << 61) | prot_def));
+	nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
+		      ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
+	nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
+		      ((0xfULL << 60) | vxlan_prot_def));
+}
+
 static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
 {
 	int bgx, lmac;
@@ -664,18 +892,17 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		mbx_addr += sizeof(u64);
 	}
 
-	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
 		__func__, mbx.msg.msg, vf);
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
 		nic_mbx_send_ready(nic, vf);
-		if (vf < MAX_LMAC) {
+		if (vf < nic->num_vf_en) {
 			nic->link[vf] = 0;
 			nic->duplex[vf] = 0;
 			nic->speed[vf] = 0;
 		}
-		break;
+		ret = 1;
+		goto unlock;
 	case NIC_MBOX_MSG_QS_CFG:
 		reg_addr = NIC_PF_QSET_0_127_CFG |
 			   (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -693,6 +920,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
 			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
 		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
+		 * This gets appended by default on 81xx/83xx chips,
+		 * for consistency enabling the same on 88xx pass2
+		 * where this is introduced.
+		 */
+		if (pass2_silicon(nic->pdev))
+			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+		if (!pass1_silicon(nic->pdev))
+			nic_enable_tunnel_parsing(nic, vf);
 		break;
 	case NIC_MBOX_MSG_RQ_BP_CFG:
 		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
@@ -717,8 +953,10 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
 		break;
 	case NIC_MBOX_MSG_SET_MAC:
-		if (vf >= nic->num_vf_en)
+		if (vf >= nic->num_vf_en) {
+			ret = -1; /* NACK */
 			break;
+		}
 		lmac = mbx.mac.vf_id;
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -767,25 +1005,38 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	case NIC_MBOX_MSG_LOOPBACK:
 		ret = nic_config_loopback(nic, &mbx.lbk);
 		break;
+	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
+		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
+		break;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
 		break;
 	}
 
-	if (!ret)
+	if (!ret) {
 		nic_mbx_send_ack(nic, vf);
-	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+	} else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
+		dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
+			mbx.msg.msg, vf);
 		nic_mbx_send_nack(nic, vf);
+	}
 unlock:
 	nic->mbx_lock[vf] = false;
 }
 
-static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
+static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
 {
+	struct nicpf *nic = (struct nicpf *)nic_irq;
+	int mbx;
 	u64 intr;
 	u8  vf, vf_per_mbx_reg = 64;
 
+	if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+		mbx = 0;
+	else
+		mbx = 1;
+
 	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
 	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
 	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
@@ -797,23 +1048,6 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
 			nic_clear_mbx_intr(nic, vf, mbx);
 		}
 	}
-}
-
-static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq)
-{
-	struct nicpf *nic = (struct nicpf *)nic_irq;
-
-	nic_mbx_intr_handler(nic, 0);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq)
-{
-	struct nicpf *nic = (struct nicpf *)nic_irq;
-
-	nic_mbx_intr_handler(nic, 1);
-
 	return IRQ_HANDLED;
 }
 
@@ -821,7 +1055,13 @@ static int nic_enable_msix(struct nicpf *nic)
 {
 	int i, ret;
 
-	nic->num_vec = NIC_PF_MSIX_VECTORS;
+	nic->num_vec = pci_msix_vec_count(nic->pdev);
+
+	nic->msix_entries = kmalloc_array(nic->num_vec,
+					  sizeof(struct msix_entry),
+					  GFP_KERNEL);
+	if (!nic->msix_entries)
+		return -ENOMEM;
 
 	for (i = 0; i < nic->num_vec; i++)
 		nic->msix_entries[i].entry = i;
@@ -829,8 +1069,9 @@ static int nic_enable_msix(struct nicpf *nic)
 	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
 	if (ret) {
 		dev_err(&nic->pdev->dev,
-			"Request for #%d msix vectors failed\n",
-			   nic->num_vec);
+			"Request for #%d msix vectors failed, returned %d\n",
+			   nic->num_vec, ret);
+		kfree(nic->msix_entries);
 		return ret;
 	}
 
@@ -842,6 +1083,7 @@ static void nic_disable_msix(struct nicpf *nic)
 {
 	if (nic->msix_enabled) {
 		pci_disable_msix(nic->pdev);
+		kfree(nic->msix_entries);
 		nic->msix_enabled = 0;
 		nic->num_vec = 0;
 	}
@@ -860,27 +1102,26 @@ static void nic_free_all_interrupts(struct nicpf *nic)
 
 static int nic_register_interrupts(struct nicpf *nic)
 {
-	int ret;
+	int i, ret;
 
 	/* Enable MSI-X */
 	ret = nic_enable_msix(nic);
 	if (ret)
 		return ret;
 
-	/* Register mailbox interrupt handlers */
-	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
-			  nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
-	if (ret)
-		goto fail;
-
-	nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
-
-	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
-			  nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
-	if (ret)
-		goto fail;
+	/* Register mailbox interrupt handler */
+	for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
+		sprintf(nic->irq_name[i],
+			"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
 
-	nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+		ret = request_irq(nic->msix_entries[i].vector,
+				  nic_mbx_intr_handler, 0,
+				  nic->irq_name[i], nic);
+		if (ret)
+			goto fail;
+
+		nic->irq_allocated[i] = true;
+	}
 
 	/* Enable mailbox interrupt */
 	nic_enable_mbx_intr(nic);
@@ -889,6 +1130,7 @@ static int nic_register_interrupts(struct nicpf *nic)
 fail:
 	dev_err(&nic->pdev->dev, "Request irq failed\n");
 	nic_free_all_interrupts(nic);
+	nic_disable_msix(nic);
 	return ret;
 }
 
@@ -903,6 +1145,12 @@ static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
 	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
 	u16 total_vf;
 
+	/* Secondary Qsets are needed only if CPU count is
+	 * morethan MAX_QUEUES_PER_QSET.
+	 */
+	if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
+		return 0;
+
 	/* Check if its a multi-node environment */
 	if (nr_node_ids > 1)
 		sqs_per_vf = MAX_SQS_PER_VF;
@@ -1008,6 +1256,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!nic)
 		return -ENOMEM;
 
+	nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
+	if (!nic->hw) {
+		devm_kfree(dev, nic);
+		return -ENOMEM;
+	}
+
 	pci_set_drvdata(pdev, nic);
 
 	nic->pdev = pdev;
@@ -1047,13 +1301,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	nic->node = nic_get_node_id(pdev);
 
-	nic_set_lmac_vf_mapping(nic);
-
 	/* Initialize hardware */
-	nic_init_hw(nic);
+	err = nic_init_hw(nic);
+	if (err)
+		goto err_release_regions;
 
-	/* Set RSS TBL size for each VF */
-	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+	nic_set_lmac_vf_mapping(nic);
 
 	/* Register interrupts */
 	err = nic_register_interrupts(nic);
@@ -1086,6 +1339,9 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_release_regions:
 	pci_release_regions(pdev);
 err_disable_device:
+	nic_free_lmacmem(nic);
+	devm_kfree(dev, nic->hw);
+	devm_kfree(dev, nic);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
@@ -1106,6 +1362,11 @@ static void nic_remove(struct pci_dev *pdev)
 
 	nic_unregister_interrupts(nic);
 	pci_release_regions(pdev);
+
+	nic_free_lmacmem(nic);
+	devm_kfree(&pdev->dev, nic->hw);
+	devm_kfree(&pdev->dev, nic);
+
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }