Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 899dc833 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'qed-ptp-enhancements'



Sudarsana Reddy Kalluru says:

====================
qed*: PTP enhancements.

The patch series contains set of enhancements for qed/qede ptp
implementation.
Patches (1)-(3) add a resource locking implementation to allow
PTP functionality only on the first detected ethernet PF of the port.
The change is required as the adapter currently supports only one
instance of the PTP client on a given port.
Patch (4) removes the unneeded header file.
Patch (5) moves the ptt-lock get/release logic to the ptp specific
code.

Please consider applying this series to "net-next" branch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 26d31ac1 d179bd16
Loading
Loading
Loading
Loading
+6 −2
Original line number Diff line number Diff line
@@ -474,6 +474,11 @@ struct qed_hwfn {
	struct qed_ptt			*p_main_ptt;
	struct qed_ptt			*p_dpc_ptt;

	/* PTP will be used only by the leading function.
	 * Usage of all PTP-apis should be synchronized as result.
	 */
	struct qed_ptt *p_ptp_ptt;

	struct qed_sb_sp_info		*p_sp_sb;
	struct qed_sb_attn_info		*p_sb_attn;

@@ -532,8 +537,6 @@ struct qed_hwfn {

	struct qed_ptt *p_arfs_ptt;

	/* p_ptp_ptt is valid for leading HWFN only */
	struct qed_ptt *p_ptp_ptt;
	struct qed_simd_fp_handler	simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
@@ -767,6 +770,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
int qed_device_get_port_id(struct qed_dev *cdev);

#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])

+16 −10
Original line number Diff line number Diff line
@@ -2347,9 +2347,6 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
	return 0;
}

#define QED_RESC_ALLOC_LOCK_RETRY_CNT           10
#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US      10000	/* 10 msec */

static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params resc_unlock_params;
@@ -2366,13 +2363,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
	memset(&resc_lock_params, 0, sizeof(resc_lock_params));
	resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
	resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
	resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
	resc_lock_params.sleep_b4_retry = true;
	memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
	resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc && rc != -EINVAL) {
@@ -4072,3 +4064,17 @@ int qed_device_num_engines(struct qed_dev *cdev)
{
	return QED_IS_BB(cdev) ? 2 : 1;
}

/* Number of physical ports exposed by this device.
 * In a CMT configuration (more than one HWFN) there is always a single
 * port; otherwise it is the per-engine port count times the engine count.
 */
static int qed_device_num_ports(struct qed_dev *cdev)
{
	return (cdev->num_hwfns > 1) ?
	       1 : cdev->num_ports_in_engines * qed_device_num_engines(cdev);
}

/* Return the port index of the device's leading HWFN, derived from its
 * absolute PF id modulo the number of ports.
 */
int qed_device_get_port_id(struct qed_dev *cdev)
{
	int num_ports = qed_device_num_ports(cdev);

	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % num_ports;
}
+0 −12
Original line number Diff line number Diff line
@@ -956,13 +956,6 @@ static int qed_slowpath_start(struct qed_dev *cdev,
			}
		}
#endif
		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
			goto err;
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -1076,9 +1069,6 @@ static int qed_slowpath_start(struct qed_dev *cdev,
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);
#endif
	if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);

	qed_iov_wq_stop(cdev, false);

@@ -1098,8 +1088,6 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
#endif
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
+30 −0
Original line number Diff line number Diff line
@@ -2615,3 +2615,33 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,

	return 0;
}

/* Populate lock/unlock parameter structs with sane defaults for the given
 * resource. Either pointer may be NULL, in which case that struct is left
 * untouched.
 */
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));
		p_lock->resource = resource;

		/* Non-permanent resources get the default retry scheme;
		 * permanent ones disable aging and are acquired only once,
		 * since no other entity is expected to release them.
		 */
		if (!b_is_permanent) {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		} else {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		}
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
+21 −1
Original line number Diff line number Diff line
@@ -795,7 +795,12 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,

/* Identifiers of the global MFW resource locks.
 * NOTE(review): this span rendered both the removed pre-patch line and the
 * added post-patch lines of the diff; only the post-patch enum is valid C.
 * The PTP_PORTx locks are per-port, used to restrict PTP to a single PF
 * on each port (see commit message).
 */
enum qed_resc_lock {
	QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
	QED_RESC_LOCK_PTP_PORT0,
	QED_RESC_LOCK_PTP_PORT1,
	QED_RESC_LOCK_PTP_PORT2,
	QED_RESC_LOCK_PTP_PORT3,
	QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
	QED_RESC_LOCK_RESC_INVALID
};

/**
@@ -818,9 +823,11 @@ struct qed_resc_lock_params {

	/* Number of times to retry locking */
	u8 retry_num;
#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT        10

	/* The interval in usec between retries */
	u16 retry_interval;
#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT        10000

	/* Use sleep or delay between retries */
	bool sleep_b4_retry;
@@ -872,4 +879,17 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params);

/**
 * @brief - default initialization for lock/unlock resource structs
 *
 * @param p_lock - lock params struct to be initialized; Can be NULL
 * @param p_unlock - unlock params struct to be initialized; Can be NULL
 * @param resource - the requested resource
 * @param b_is_permanent - disable retries & aging when set
 */
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent);

#endif
Loading