Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc48b7a6 authored by Yuval Mintz, committed by David S. Miller
Browse files

qed/qede: use 8.7.3.0 FW.



This patch moves the qed* driver into utilizing the 8.7.3.0 FW.
This new FW is required for a lot of new SW features, including:
  - Vlan filtering offload
  - Encapsulation offload support
  - HW ingress aggregations
As well as paving the way for the possibility of adding storage protocols
in the future.

V2:
 - Fix kbuild test robot error/warnings.

Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7530e44c
Loading
Loading
Loading
Loading
+28 −15
Original line number Diff line number Diff line
@@ -70,8 +70,8 @@ struct qed_sb_sp_info;
struct qed_mcp_info;

struct qed_rt_data {
	u32 init_val;
	bool b_valid;
	u32	*init_val;
	bool	*b_valid;
};

/* The PCI personality is not quite synonymous to protocol ID:
@@ -120,6 +120,10 @@ enum QED_PORT_MODE {
	QED_PORT_MODE_DE_1X25G
};

enum qed_dev_cap {
	QED_DEV_CAP_ETH,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality	personality;
@@ -151,6 +155,7 @@ struct qed_hw_info {

	u32				port_mode;
	u32				hw_mode;
	unsigned long		device_capabilities;
};

struct qed_hw_cid_data {
@@ -267,7 +272,7 @@ struct qed_hwfn {
	struct qed_hw_info		hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data		*rt_data;
	struct qed_rt_data		rt_data;

	/* SPQ */
	struct qed_spq			*p_spq;
@@ -350,9 +355,20 @@ struct qed_dev {
	char	name[NAME_SIZE];

	u8	type;
#define QED_DEV_TYPE_BB_A0      (0 << 0)
#define QED_DEV_TYPE_MASK       (0x3)
#define QED_DEV_TYPE_SHIFT      (0)
#define QED_DEV_TYPE_BB (0 << 0)
#define QED_DEV_TYPE_AH BIT(0)
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_B0(dev))

#define QED_GET_TYPE(dev)       (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

	u16	vendor_id;
	u16	device_id;

	u16	chip_num;
#define CHIP_NUM_MASK                   0xffff
@@ -361,6 +377,8 @@ struct qed_dev {
	u16	chip_rev;
#define CHIP_REV_MASK                   0xf
#define CHIP_REV_SHIFT                  12
#define CHIP_REV_IS_A0(_cdev)   (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)

	u16				chip_metal;
#define CHIP_METAL_MASK                 0xff
@@ -375,10 +393,10 @@ struct qed_dev {
	u8				num_funcs_in_port;

	u8				path_id;
	enum mf_mode			mf_mode;
#define IS_MF(_p_hwfn)          (((_p_hwfn)->cdev)->mf_mode != SF)
#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
	enum qed_mf_mode		mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int				pcie_width;
	int				pcie_speed;
@@ -441,11 +459,6 @@ struct qed_dev {
	const struct firmware		*firmware;
};

#define QED_GET_TYPE(dev)       (((dev)->type & QED_DEV_TYPE_MASK) >> \
				 QED_DEV_TYPE_SHIFT)
#define QED_IS_BB_A0(dev)       (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
#define QED_IS_BB(dev)  (QED_IS_BB_A0(dev))

#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB

+2 −1
Original line number Diff line number Diff line
@@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
	params.num_pf_cids = iids.cids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs;
	params.start_vport = qm_info->num_vports;
	params.start_vport = qm_info->start_vport;
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
+42 −46
Original line number Diff line number Diff line
@@ -341,11 +341,6 @@ void qed_resc_setup(struct qed_dev *cdev)
	}
}

#define FINAL_CLEANUP_CMD_OFFSET        (0)
#define FINAL_CLEANUP_CMD (0x1)
#define FINAL_CLEANUP_VALID_OFFSET      (6)
#define FINAL_CLEANUP_VFPF_ID_SHIFT     (7)
#define FINAL_CLEANUP_COMP (0x2)
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
@@ -355,12 +350,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
	command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
	command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
	command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
@@ -415,18 +412,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
	}

	switch (p_hwfn->cdev->mf_mode) {
	case SF:
		hw_mode |= 1 << MODE_SF;
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case MF_OVLAN:
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	case MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
		hw_mode |= 1 << MODE_SF;
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;
@@ -1018,8 +1013,7 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	int num_funcs, i;

	num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
				  : p_hwfn->cdev->num_ports_in_engines;
	num_funcs = MAX_NUM_PFS_BB;

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
@@ -1071,7 +1065,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
@@ -1134,21 +1128,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
		break;
	}

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
	       offsetof(struct nvm_cfg1_func, device_id);
	val = qed_rd(p_hwfn, p_ptt, addr);

	if (IS_MF(p_hwfn)) {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
	} else {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -1220,18 +1199,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = MF_OVLAN;
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = MF_NPAR;
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
		p_hwfn->cdev->mf_mode = SF;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		offsetof(struct nvm_cfg1, glob) +
		offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

@@ -1293,29 +1282,36 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,

static void qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 tmp;

	cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
			     &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
			     &cdev->device_id);
	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	cdev->type = QED_DEV_TYPE_BB;
	/* Learn number of HW-functions */
	tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

+1220 −1470

File changed.

Preview size limit exceeded, changes collapsed.

+7 −15
Original line number Diff line number Diff line
@@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 start_vport,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u8 tc, i, vport_id;
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
		u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
	for (i = 0; i < num_vports; i++) {

		if (!vport_params[i].vport_wfq)
			continue;
@@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = pq_ids[tc];
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
				STORE_RT_REG(p_hwfn, temp + vport_pq_id,
					     QM_WFQ_UPPER_BOUND |
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_INIT_CRD(inc_val) |
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}
@@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
			       p_params->num_vports, vport_params))
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
Loading