
Commit 464f6645 authored by Manish Chopra, committed by David S. Miller

qed: Add infrastructure support for tunneling



This patch adds the structures and APIs needed to configure and enable the
different tunnel (VXLAN/GRE/GENEVE) parameters on the adapter.

Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ee1c2797
+46 −0
@@ -74,6 +74,51 @@ struct qed_rt_data {
	bool	*b_valid;
};

enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may support also the RoCE protocol
@@ -430,6 +475,7 @@ struct qed_dev {
	u8				num_hwfns;
	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];

	unsigned long			tunn_mode;
	u32				drv_type;

	struct qed_eth_stats		*reset_stats;
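
The two parameter structures above are the driver-facing interface: a caller fills them in and the lower layers translate them into firmware configuration. As a rough sketch (illustration only, not code from this patch), preparing start-time VXLAN parameters could look like this, with tunn_mode driven by the standard bitmap helpers:

	struct qed_tunn_start_params tunn_params;

	/* Sketch: enable VXLAN with outer MAC/VLAN classification on the
	 * IANA-assigned UDP port. All names come from the structures above.
	 */
	memset(&tunn_params, 0, sizeof(tunn_params));
	set_bit(QED_MODE_VXLAN_TUNN, &tunn_params.tunn_mode);
	tunn_params.update_vxlan_udp_port = 1;
	tunn_params.vxlan_udp_port = 4789;
	tunn_params.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;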
+4 −2
@@ -558,6 +558,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
@@ -625,7 +626,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
-		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
@@ -672,6 +673,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}

int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
@@ -724,7 +726,7 @@ int qed_hw_init(struct qed_dev *cdev,
		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-					    p_hwfn->hw_info.hw_mode,
+					    p_tunn, p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
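
With the plumbing above, the tunnel parameters flow from qed_hw_init() through qed_hw_init_pf() into the function-start ramrod via qed_sp_pf_start(). A hedged sketch of a call site, reusing the tunn_params sketch above (the actual caller, presumably qed_slowpath_start() in qed_main.c, is wired up separately; bin_fw_data stands in for the remaining firmware-data argument):

	/* Sketch: bring up the HW with VXLAN enabled from the start. */
	rc = qed_hw_init(cdev, &tunn_params, true, QED_INT_MODE_MSIX,
			 true, bin_fw_data);
	if (rc)
		goto err;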
+2 −0
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
 * @brief qed_hw_init -
 *
 * @param cdev
 * @param p_tunn
 * @param b_hw_start
 * @param int_mode - interrupt mode [msix, inta, etc.] to use.
 * @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
 * @return int
 */
int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
+50 −1
@@ -46,7 +46,7 @@ enum common_ramrod_cmd_id {
	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
	COMMON_RAMROD_RESERVED,
	COMMON_RAMROD_RESERVED2,
-	COMMON_RAMROD_RESERVED3,
+	COMMON_RAMROD_PF_UPDATE,
	COMMON_RAMROD_EMPTY,
	MAX_COMMON_RAMROD_CMD_ID
};
@@ -626,6 +626,42 @@ struct pf_start_ramrod_data {
	u8				reserved0[4];
};

/* tunnel configuration */
struct pf_update_tunnel_config {
	u8	update_rx_pf_clss;
	u8	update_tx_pf_clss;
	u8	set_vxlan_udp_port_flg;
	u8	set_geneve_udp_port_flg;
	u8	tx_enable_vxlan;
	u8	tx_enable_l2geneve;
	u8	tx_enable_ipgeneve;
	u8	tx_enable_l2gre;
	u8	tx_enable_ipgre;
	u8	tunnel_clss_vxlan;
	u8	tunnel_clss_l2geneve;
	u8	tunnel_clss_ipgeneve;
	u8	tunnel_clss_l2gre;
	u8	tunnel_clss_ipgre;
	__le16	vxlan_udp_port;
	__le16	geneve_udp_port;
	__le16	reserved[3];
};

struct pf_update_ramrod_data {
	u32				reserved[2];
	u32				reserved_1[6];
	struct pf_update_tunnel_config	tunnel_config;
};

/* Tunnel classification scheme */
enum tunnel_clss {
	TUNNEL_CLSS_MAC_VLAN = 0,
	TUNNEL_CLSS_MAC_VNI,
	TUNNEL_CLSS_INNER_MAC_VLAN,
	TUNNEL_CLSS_INNER_MAC_VNI,
	MAX_TUNNEL_CLSS
};
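
(These four values mirror the driver-side enum qed_tunn_clss above one-to-one, presumably so the driver's classification values can be passed through to the firmware unchanged.)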

enum ports_mode {
	ENGX2_PORTX1 /* 2 engines x 1 port */,
	ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  u16			start_pq,
			  u16			num_pqs);

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt  *p_ptt, u16 dest_port);
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable);
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt  *p_ptt, bool eth_gre_enable,
			bool ip_gre_enable);
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port);
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, bool eth_geneve_enable,
			   bool ip_geneve_enable);

/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET  (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE    (IRO[0].size)
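
Note that pf_update_tunnel_config is firmware ABI, so its 16-bit port fields are little-endian (__le16), while the driver-side qed_tunn_* structures hold CPU-order u16. Whatever code marshals one into the other has to convert, roughly like this (a sketch, assuming p_tunn_cfg and p_src were set up by the caller):

	/* Sketch: copy driver-order params into the FW-order ramrod data. */
	p_tunn_cfg->set_vxlan_udp_port_flg = p_src->update_vxlan_udp_port;
	p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
	p_tunn_cfg->set_geneve_udp_port_flg = p_src->update_geneve_udp_port;
	p_tunn_cfg->geneve_udp_port = cpu_to_le16(p_src->geneve_udp_port);
	p_tunn_cfg->tunnel_clss_vxlan = p_src->tunn_clss_vxlan;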
+127 −0
@@ -788,3 +788,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,

	return true;
}

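/* Small helper used by the setters below: set or clear one tunnel-type
 * enable bit in a register shadow value before it is written back.
 */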
static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

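/* Parser output-format value for Ethernet tunnels; the magic number
 * presumably comes straight from the firmware interface definition.
 */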
#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     u16 dest_port)
{
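	/* Program the UDP destination port used to recognize VXLAN in the
	 * parser (PRS), NIG and PBF HW blocks.
	 */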
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u16 dest_port)
{
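	/* GENEVE traffic is handled by the NGE registers in this HW;
	 * program its UDP destination port in the PRS, NIG and PBF blocks.
	 */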
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable,
			   bool ip_geneve_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* comp ver */
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);

	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
	       ip_geneve_enable ? 1 : 0);
}
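
Taken together, these setters give later patches everything needed to react when the stack adds or removes a tunnel UDP port. A final hedged sketch (the function name and surrounding context are assumptions, not part of this patch):

	/* Sketch: enable VXLAN on one HW function with a new UDP port,
	 * using the existing PTT acquire/release helpers.
	 */
	static int example_enable_vxlan(struct qed_hwfn *p_hwfn, u16 port)
	{
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt)
			return -EAGAIN;

		qed_set_vxlan_dest_port(p_hwfn, p_ptt, port);
		qed_set_vxlan_enable(p_hwfn, p_ptt, true);
		qed_ptt_release(p_hwfn, p_ptt);
		return 0;
	}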