Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5a9ee7c authored by Ariel Elior, committed by David S. Miller
Browse files

qed: Revise QM configuration



Refactor and clean up the queue manager initialization logic.
Also, this adds support for RoCE low latency queues, which would
later be used for improving RoCE latency in high throughput scenarios.

Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c8b5d129
Loading
Loading
Loading
Loading
+35 −8
Original line number Diff line number Diff line
@@ -271,9 +271,14 @@ struct qed_hw_info {
				 RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

	u8				num_tc;
	/* Amount of traffic classes HW supports */
	u8 num_hw_tc;

	/* Amount of TCs which should be active according to DCBx or upper
	 * layer driver configuration.
	 */
	u8 num_active_tc;
	u8				offload_tc;
	u8				non_offload_tc;

	u32				concrete_fid;
	u16				opaque_fid;
@@ -336,15 +341,19 @@ struct qed_qm_info {
	struct init_qm_port_params	*qm_port_params;
	u16				start_pq;
	u8				start_vport;
	u8				pure_lb_pq;
	u8				offload_pq;
	u8				pure_ack_pq;
	u8 ooo_pq;
	u8				vf_queues_offset;
	u16				 pure_lb_pq;
	u16				offload_pq;
	u16				low_latency_pq;
	u16				pure_ack_pq;
	u16				ooo_pq;
	u16				first_vf_pq;
	u16				first_mcos_pq;
	u16				first_rl_pq;
	u16				num_pqs;
	u16				num_vf_pqs;
	u8				num_vports;
	u8				max_phys_tcs_per_port;
	u8				ooo_tc;
	bool				pf_rl_en;
	bool				pf_wfq_en;
	bool				vport_rl_en;
@@ -729,9 +738,27 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
int qed_device_num_engines(struct qed_dev *cdev);

#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])

/* Flags for indication of required queues */
#define PQ_FLAGS_RLS    (BIT(0))
#define PQ_FLAGS_MCOS   (BIT(1))
#define PQ_FLAGS_LB     (BIT(2))
#define PQ_FLAGS_OOO    (BIT(3))
#define PQ_FLAGS_ACK    (BIT(4))
#define PQ_FLAGS_OFLD   (BIT(5))
#define PQ_FLAGS_VFS    (BIT(6))
#define PQ_FLAGS_LLT    (BIT(7))

/* physical queue index for cm context intialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);

#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])

/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

+3 −10
Original line number Diff line number Diff line
@@ -1396,18 +1396,11 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
}

/* CM PF */
static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
	union qed_qm_pq_params pq_params;
	u16 pq;

	/* XCM pure-LB queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);

	return 0;
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
		     qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}

/* DQ PF */
+6 −8
Original line number Diff line number Diff line
@@ -183,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
			   "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
			   qed_dcbx_app_update[i].name, p_data->arr[id].update,
			   p_data->arr[id].enable, p_data->arr[id].priority,
			   p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
			   p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
	}
}

@@ -204,13 +204,9 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
	p_data->arr[type].tc = tc;

	/* QM reconf data */
	if (p_info->personality == personality) {
		if (personality == QED_PCI_ETH)
			p_info->non_offload_tc = tc;
		else
	if (p_info->personality == personality)
		p_info->offload_tc = tc;
}
}

/* Update app protocol data and hw_info fields with the TLV info */
static void
@@ -376,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
	if (rc)
		return rc;

	p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
	p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
						  DCBX_ETS_MAX_TCS);
	p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
	data.pf_id = p_hwfn->rel_pf_id;
	data.dcbx_enabled = !!dcbx_version;

+580 −169

File changed.

Preview size limit exceeded, changes collapsed.

+4 −4
Original line number Diff line number Diff line
@@ -241,7 +241,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, tmp;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
@@ -261,9 +261,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
	p_conn->physical_q0 = cpu_to_le16(pq_id);
	p_data->physical_q0 = cpu_to_le16(pq_id);
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
Loading