
Commit 793768f5 authored by David S. Miller

Merge branch 'thunderx-features-fixes'

Aleksey Makarov says:

====================
net: thunderx: New features and fixes

v2:
  - The unused affinity_mask field of the structure cmp_queue
  has been deleted. (thanks to David Miller)
  - The unneeded initializers have been dropped. (thanks to Alexey Klimov)
  - The commit message "net: thunderx: Rework interrupt handling"
  has been fixed. (thanks to Alexey Klimov)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ef34c0f6 d77a2384
MAINTAINERS  +1 −2
@@ -928,7 +928,7 @@ M: Sunil Goutham <sgoutham@cavium.com>
M:	Robert Richter <rric@kernel.org>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Supported
F:	drivers/net/ethernet/cavium/
F:	drivers/net/ethernet/cavium/thunder/

ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
M:	Alexander Shiyan <shc_work@mail.ru>
@@ -2543,7 +2543,6 @@ M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
L:     netdev@vger.kernel.org
W:     http://www.cavium.com
S:     Supported
F:     drivers/net/ethernet/cavium/
F:     drivers/net/ethernet/cavium/liquidio/

CC2520 IEEE-802.15.4 RADIO DRIVER
drivers/net/ethernet/cavium/thunder/nic.h  +82 −11
@@ -135,6 +135,7 @@
#define	NICVF_TX_TIMEOUT		(50 * HZ)

struct nicvf_cq_poll {
	struct  nicvf *nicvf;
	u8	cq_idx;		/* Completion queue index */
	struct	napi_struct napi;
};
@@ -190,10 +191,10 @@ enum tx_stats_reg_offset {
};

struct nicvf_hw_stats {
	u64 rx_bytes_ok;
	u64 rx_ucast_frames_ok;
	u64 rx_bcast_frames_ok;
	u64 rx_mcast_frames_ok;
	u64 rx_bytes;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_fcs_errors;
	u64 rx_l2_errors;
	u64 rx_drop_red;
@@ -204,6 +205,31 @@ struct nicvf_hw_stats {
	u64 rx_drop_mcast;
	u64 rx_drop_l3_bcast;
	u64 rx_drop_l3_mcast;
	u64 rx_bgx_truncated_pkts;
	u64 rx_jabber_errs;
	u64 rx_fcs_errs;
	u64 rx_bgx_errs;
	u64 rx_prel2_errs;
	u64 rx_l2_hdr_malformed;
	u64 rx_oversize;
	u64 rx_undersize;
	u64 rx_l2_len_mismatch;
	u64 rx_l2_pclp;
	u64 rx_ip_ver_errs;
	u64 rx_ip_csum_errs;
	u64 rx_ip_hdr_malformed;
	u64 rx_ip_payload_malformed;
	u64 rx_ip_ttl_errs;
	u64 rx_l3_pclp;
	u64 rx_l4_malformed;
	u64 rx_l4_csum_errs;
	u64 rx_udp_len_errs;
	u64 rx_l4_port_errs;
	u64 rx_tcp_flag_errs;
	u64 rx_tcp_offset_errs;
	u64 rx_l4_pclp;
	u64 rx_truncated_pkts;

	u64 tx_bytes_ok;
	u64 tx_ucast_frames_ok;
	u64 tx_bcast_frames_ok;
@@ -222,6 +248,7 @@ struct nicvf_drv_stats {
	u64 rx_frames_1518;
	u64 rx_frames_jumbo;
	u64 rx_drops;

	/* Tx */
	u64 tx_frames_ok;
	u64 tx_drops;
@@ -231,13 +258,24 @@ struct nicvf_drv_stats {
};

struct nicvf {
	struct nicvf		*pnicvf;
	struct net_device	*netdev;
	struct pci_dev		*pdev;
	u8			vf_id;
	u8			node;
	u8			tns_mode;
	u8			tns_mode:1;
	u8			sqs_mode:1;
	u8			loopback_supported:1;
	u16			mtu;
	struct queue_set	*qs;
#define	MAX_SQS_PER_VF_SINGLE_NODE		5
#define	MAX_SQS_PER_VF				11
	u8			sqs_id;
	u8			sqs_count; /* Secondary Qset count */
	struct nicvf		*snicvf[MAX_SQS_PER_VF];
	u8			rx_queues;
	u8			tx_queues;
	u8			max_queues;
	void __iomem		*reg_base;
	bool			link_up;
	u8			duplex;
@@ -257,7 +295,7 @@ struct nicvf {
	u32			cq_coalesce_usecs;

	u32			msg_enable;
	struct nicvf_hw_stats   stats;
	struct nicvf_hw_stats   hw_stats;
	struct nicvf_drv_stats  drv_stats;
	struct bgx_stats	bgx_stats;
	struct work_struct	reset_task;
@@ -269,10 +307,9 @@ struct nicvf {
	char			irq_name[NIC_VF_MSIX_VECTORS][20];
	bool			irq_allocated[NIC_VF_MSIX_VECTORS];

	bool			pf_ready_to_rcv_msg;
	/* VF <-> PF mailbox communication */
	bool			pf_acked;
	bool			pf_nacked;
	bool			bgx_stats_acked;
	bool			set_mac_pending;
} ____cacheline_aligned_in_smp;

@@ -304,14 +341,21 @@ struct nicvf {
#define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
#define	NIC_MBOX_MSG_BGX_STATS		0x10	/* Get stats from BGX */
#define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
#define NIC_MBOX_MSG_CFG_DONE		0x12	/* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN		0x13	/* VF is being shutdown */
#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
#define	NIC_MBOX_MSG_NICVF_PTR		0x13	/* Send nicvf ptr to PF */
#define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
#define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqet nicvf ptr to PVF */
#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */

struct nic_cfg_msg {
	u8    msg;
	u8    vf_id;
	u8    tns_mode;
	u8    node_id;
	u8    tns_mode:1;
	u8    sqs_mode:1;
	u8    loopback_supported:1;
	u8    mac_addr[ETH_ALEN];
};

@@ -319,6 +363,7 @@ struct nic_cfg_msg {
struct qs_cfg_msg {
	u8    msg;
	u8    num;
	u8    sqs_count;
	u64   cfg;
};

@@ -335,6 +380,7 @@ struct sq_cfg_msg {
	u8    msg;
	u8    qs_num;
	u8    sq_num;
	bool  sqs_mode;
	u64   cfg;
};

@@ -394,6 +440,28 @@ struct bgx_link_status {
	u32   speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
	u8    msg;
	u8    vf_id;
	u8    qs_count;
};

struct nicvf_ptr {
	u8    msg;
	u8    vf_id;
	bool  sqs_mode;
	u8    sqs_id;
	u64   nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
	u8    msg;
	u8    vf_id;
	bool  enable;
};

/* 128 bit shared memory between PF and each VF */
union nic_mbx {
	struct { u8 msg; }	msg;
@@ -408,6 +476,9 @@ union nic_mbx {
	struct rss_cfg_msg	rss_cfg;
	struct bgx_stats_msg    bgx_stats;
	struct bgx_link_status  link_status;
	struct sqs_alloc        sqs_alloc;
	struct nicvf_ptr	nicvf;
	struct set_loopback	lbk;
};

#define NIC_NODE_ID_MASK	0x03
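Every message above travels through the same 128-bit "union nic_mbx" shared between the PF and each VF. As a minimal sketch (not part of this patch, and assuming the driver's existing VF-side sender nicvf_send_msg_to_pf() from nicvf_main.c), a VF would request internal loopback roughly like this:

	/* Hedged sketch: build a loopback request in the shared mailbox.
	 * nicvf_send_msg_to_pf() is assumed to copy the 128-bit message
	 * to the PF and wait for pf_acked/pf_nacked.
	 */
	static int nicvf_request_loopback(struct nicvf *nic, bool enable)
	{
		union nic_mbx mbx = {};

		mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;	/* 0x16, added above */
		mbx.lbk.vf_id = nic->vf_id;
		mbx.lbk.enable = enable;

		return nicvf_send_msg_to_pf(nic, &mbx);
	}

On the PF side this message is dispatched to nic_config_loopback() in the nic_main.c diff below.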
drivers/net/ethernet/cavium/thunder/nic_main.c  +179 −19
@@ -28,6 +28,11 @@ struct nicpf {
	u8			num_vf_en;      /* No of VF enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;       /* Register start address */
	u8			num_sqs_en;	/* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
@@ -139,14 +144,19 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);

	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

@@ -329,6 +339,10 @@ static void nic_init_hw(struct nicpf *nic)

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
}

/* Channel parse index configuration */
@@ -429,6 +443,12 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
@@ -452,19 +472,31 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;
	if (sq->sqs_mode)
		tl4 += vnic * 8;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
@@ -485,6 +517,86 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}

/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}

/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}

/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (!nic->sqs_used[sqs])
			nic->sqs_used[sqs] = true;
		else
			continue;
		return sqs + nic->num_vf_en;
	}
	return -1;
}

/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}

static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id > MAX_LMAC)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}

/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -492,6 +604,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;
@@ -512,15 +625,24 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.qs.cfg);
		cfg = mbx.qs.cfg;
		/* Check if its a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
@@ -548,9 +670,11 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en)
			break;
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -577,10 +701,28 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_enabled[vf] = false;
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -606,8 +748,7 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * vf_per_mbx_reg));
			if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
				break;

			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
@@ -713,9 +854,24 @@ static void nic_unregister_interrupts(struct nicpf *nic)
	nic_disable_msix(nic);
}

static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Check if its a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}

static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

@@ -732,16 +888,20 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
	if (!total_vf_cnt)
		return 0;

	err = pci_enable_sriov(pdev, nic->num_vf_en);
	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			nic->num_vf_en);
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 nic->num_vf_en);
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
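On the RSS change in nic_config_rss() above: an indirection-table entry is an 8-bit value whose low 3 bits pick a receive queue inside a Qset, and whose upper bits pick which of the VF's Qsets that queue lives in (0 = primary, N = Nth secondary). A hedged sketch of that encoding, using a hypothetical helper name:

	/* Pack a VF-visible RSS indirection entry; mirrors the
	 * (qset << 3) | (ind_tbl[idx] & 0x7) math in nic_config_rss(). */
	static inline u8 rssi_entry_pack(u8 sqs_idx, u8 rq_idx)
	{
		return (sqs_idx << 3) | (rq_idx & 0x7);
	}

	/* Example: global rx queue 12 (8 queues per Qset) decodes to
	 * secondary Qset 1 (12 >> 3) and queue 4 (12 & 0x7). */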
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  +132 −50
@@ -35,10 +35,10 @@ struct nicvf_stat {
}

static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes_ok),
	NICVF_HW_STAT(rx_ucast_frames_ok),
	NICVF_HW_STAT(rx_bcast_frames_ok),
	NICVF_HW_STAT(rx_mcast_frames_ok),
	NICVF_HW_STAT(rx_bytes),
	NICVF_HW_STAT(rx_ucast_frames),
	NICVF_HW_STAT(rx_bcast_frames),
	NICVF_HW_STAT(rx_mcast_frames),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(rx_drop_red),
@@ -49,6 +49,30 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(rx_bgx_truncated_pkts),
	NICVF_HW_STAT(rx_jabber_errs),
	NICVF_HW_STAT(rx_fcs_errs),
	NICVF_HW_STAT(rx_bgx_errs),
	NICVF_HW_STAT(rx_prel2_errs),
	NICVF_HW_STAT(rx_l2_hdr_malformed),
	NICVF_HW_STAT(rx_oversize),
	NICVF_HW_STAT(rx_undersize),
	NICVF_HW_STAT(rx_l2_len_mismatch),
	NICVF_HW_STAT(rx_l2_pclp),
	NICVF_HW_STAT(rx_ip_ver_errs),
	NICVF_HW_STAT(rx_ip_csum_errs),
	NICVF_HW_STAT(rx_ip_hdr_malformed),
	NICVF_HW_STAT(rx_ip_payload_malformed),
	NICVF_HW_STAT(rx_ip_ttl_errs),
	NICVF_HW_STAT(rx_l3_pclp),
	NICVF_HW_STAT(rx_l4_malformed),
	NICVF_HW_STAT(rx_l4_csum_errs),
	NICVF_HW_STAT(rx_udp_len_errs),
	NICVF_HW_STAT(rx_l4_port_errs),
	NICVF_HW_STAT(rx_tcp_flag_errs),
	NICVF_HW_STAT(rx_tcp_offset_errs),
	NICVF_HW_STAT(rx_l4_pclp),
	NICVF_HW_STAT(rx_truncated_pkts),
	NICVF_HW_STAT(tx_bytes_ok),
	NICVF_HW_STAT(tx_ucast_frames_ok),
	NICVF_HW_STAT(tx_bcast_frames_ok),
@@ -125,10 +149,33 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
	nic->msg_enable = lvl;
}

static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
	int stats, qidx;
	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}

static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stats, qidx;
	int stats;
	int sqs;

	if (sset != ETH_SS_STATS)
		return;
@@ -143,20 +190,12 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
		data += ETH_GSTRING_LEN;
	}

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(data, "rxq%d: %s", qidx,
				nicvf_queue_stats[stats].name);
			data += ETH_GSTRING_LEN;
		}
	}
	nicvf_get_qset_strings(nic, &data, 0);

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(data, "txq%d: %s", qidx,
				nicvf_queue_stats[stats].name);
			data += ETH_GSTRING_LEN;
		}
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
	}

	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
@@ -173,21 +212,58 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qstats_count;
	int sqs;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = nicvf_n_queue_stats *
		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		struct nicvf *snic;

		snic = nic->snicvf[sqs];
		if (!snic)
			continue;
		qstats_count += nicvf_n_queue_stats *
				(snic->qs->rq_cnt + snic->qs->sq_cnt);
	}

	return nicvf_n_hw_stats + nicvf_n_drv_stats +
		(nicvf_n_queue_stats *
		 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
		qstats_count +
		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}

static void nicvf_get_qset_stats(struct nicvf *nic,
				 struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!nic)
		return;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		nicvf_update_rq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		nicvf_update_sq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}
}

static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stat, qidx;
	int stat;
	int sqs;

	nicvf_update_stats(nic);

@@ -195,22 +271,18 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->stats)
		*(data++) = ((u64 *)&nic->hw_stats)
				[nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
		*(data++) = ((u64 *)&nic->drv_stats)
				[nicvf_drv_stats[stat].index];

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}
	nicvf_get_qset_stats(nic, stats, &data);

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
@@ -369,7 +441,7 @@ static int nicvf_get_rxnfc(struct net_device *dev,

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nic->qs->rq_cnt;
		info->data = nic->rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
@@ -501,17 +573,15 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
		rss->enable = false;
		rss->hash_bits = 0;
		return -EIO;
	}

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rss->enable = true;
	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
@@ -534,11 +604,11 @@ static void nicvf_get_channels(struct net_device *dev,

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = MAX_RCV_QUEUES_PER_QS;
	channel->max_tx = MAX_SND_QUEUES_PER_QS;
	channel->max_rx = nic->max_queues;
	channel->max_tx = nic->max_queues;

	channel->rx_count = nic->qs->rq_cnt;
	channel->tx_count = nic->qs->sq_cnt;
	channel->rx_count = nic->rx_queues;
	channel->tx_count = nic->tx_queues;
}

/* Set no of Tx, Rx queues to be used */
@@ -548,22 +618,34 @@ static int nicvf_set_channels(struct net_device *dev,
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);
	int cqcount;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
	if (channel->rx_count > nic->max_queues)
		return -EINVAL;
	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
	if (channel->tx_count > nic->max_queues)
		return -EINVAL;

	if (if_up)
		nicvf_stop(dev);

	nic->qs->rq_cnt = channel->rx_count;
	nic->qs->sq_cnt = channel->tx_count;
	cqcount = max(channel->rx_count, channel->tx_count);

	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
	if (err)
		return err;

@@ -571,7 +653,7 @@ static int nicvf_set_channels(struct net_device *dev,
		nicvf_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->qs->sq_cnt, nic->qs->rq_cnt);
		    nic->tx_queues, nic->rx_queues);

	return err;
}
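The secondary-Qset math in nicvf_set_channels() above is easiest to see with numbers. A self-contained sketch, assuming MAX_CMP_QUEUES_PER_QS is 8 as in this driver:

	#include <stdio.h>

	#define MAX_CMP_QUEUES_PER_QS 8			/* assumed per-Qset limit */
	#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		int rx = 20, tx = 20;	/* e.g. "ethtool -L eth0 rx 20 tx 20" */
		int cqcount = rx > tx ? rx : tx;
		int sqs_count = 0;

		if (cqcount > MAX_CMP_QUEUES_PER_QS)
			sqs_count = ROUNDUP(cqcount, MAX_CMP_QUEUES_PER_QS) /
				    MAX_CMP_QUEUES_PER_QS - 1;

		/* 20 queues -> roundup(20, 8) / 8 - 1 = 2 secondary Qsets;
		 * the primary Qset keeps min(20, 8) = 8 queues. */
		printf("secondary Qsets needed: %d\n", sqs_count);
		return 0;
	}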
+402 −139  (file changed; diff collapsed: preview size limit exceeded)