Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9c7664cb authored by David S. Miller
Browse files

Merge branch 'qed-next'



Manish Chopra says:

====================
qed*: driver updates

There are several new additions in this series;
Most are connected to either Tx offloading or Rx classifications
[either fastpath changes or supporting configuration].

In addition, there's a single IOV enhancement.

Please consider applying this series to `net-next'.

V2->V3:
Fixes the kbuild warning below:
call to '__compiletime_assert_60' declared with
attribute error: Need native word sized stores/loads for atomicity.

V1->V2:
Added a fix for the race in ramrod handling
pointed out by Eric Dumazet [patch 7].
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents d0b3fbb2 d5df7688
Loading
Loading
Loading
Loading
+10 −2
Original line number Diff line number Diff line
@@ -1652,6 +1652,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			for_each_hwfn(cdev, i)
@@ -1665,11 +1666,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev))
		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
		info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
+2 −2
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ union qed_spq_req_comp {
};

struct qed_spq_comp_done {
	u64	done;
	unsigned int	done;
	u8		fw_return_code;
};

+65 −32
Original line number Diff line number Diff line
@@ -37,7 +37,11 @@
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
@@ -50,60 +54,88 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done			= 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (READ_ONCE(comp_done->done) == 1) {
			/* Read updated FW return value */
			smp_read_barrier_depends();
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}
@@ -729,7 +761,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
+94 −20
Original line number Diff line number Diff line
@@ -109,7 +109,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
}

static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
				  int rel_vf_id, bool b_enabled_only)
				  int rel_vf_id,
				  bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -124,6 +125,10 @@ static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

@@ -138,7 +143,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -542,7 +548,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
			      int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -550,12 +557,17 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
@@ -652,6 +664,9 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	/* It's possible VF was previously considered malicious */
	vf->b_malicious = false;

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;
@@ -2804,6 +2819,13 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			return rc;
		}

		/* Workaround to make VF-PF channel ready, as FW
		 * doesn't do that as a part of FLR.
		 */
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_USDM_RAM +
		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
@@ -2942,7 +2964,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
@@ -2984,6 +3007,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				     mbx->first_tlv.tl.type,
				     sizeof(struct pfvf_def_resp_tlv),
				     PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
@@ -3033,20 +3065,30 @@ static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}

static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
						       u16 abs_vfid)
{
	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
		return NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
			   abs_vfid);

	if (!p_vf)
		return 0;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
@@ -3060,6 +3102,23 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
	return 0;
}

static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
				     struct malicious_vf_eqe_data *p_data)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);

	if (!p_vf)
		return;

	DP_INFO(p_hwfn,
		"VF [%d] - Malicious behavior [%02x]\n",
		p_vf->abs_vf_id, p_data->err_id);

	p_vf->b_malicious = true;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
@@ -3067,6 +3126,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_MALICIOUS_VF:
		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return 0;
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
@@ -3083,7 +3145,7 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
		if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
			return i;

out:
@@ -3130,6 +3192,12 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

@@ -3153,6 +3221,12 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
@@ -3367,7 +3441,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true))
			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
@@ -3425,7 +3499,7 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false))
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			rc = qed_iov_init_hw_for_vf(hwfn,
@@ -3477,7 +3551,7 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
@@ -3509,7 +3583,7 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
@@ -3543,7 +3617,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
@@ -3647,7 +3721,7 @@ static int qed_set_vf_link_state(struct qed_dev *cdev,
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
+1 −0
Original line number Diff line number Diff line
@@ -132,6 +132,7 @@ struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
Loading