
Commit 33377152 authored by David S. Miller

Merge branch 'qed-Enhancements'



Manish Chopra says:

====================
qed*: Enhancements

This patch series adds the following support to the drivers:

1. Egress mqprio offload.
2. Add destination IP based flow profile.
3. Ingress flower offload (for drop action).

Please consider applying this series to "net-next".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2eee32a7 2ce9c93e
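For context on item (1): egress mqprio offload is normally wired through a netdev's .ndo_setup_tc hook. The sketch below shows the usual shape of such a handler on a kernel of this vintage. It is illustrative only; example_setup_tc and the one-queue-per-TC layout are assumptions, and the series' actual handler is not among the hunks shown here.

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>

/* Hedged sketch of an egress mqprio offload entry point; assumed names,
 * not this series' code.
 */
static int example_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			    void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	if (!mqprio->num_tc) {
		/* mqprio removed: collapse back to a single TC */
		netdev_reset_tc(ndev);
		return 0;
	}

	netdev_set_num_tc(ndev, mqprio->num_tc);
	for (tc = 0; tc < mqprio->num_tc; tc++)
		/* one Tx queue per TC here; a real driver maps a queue group per TC */
		netdev_set_tc_queue(ndev, tc, 1, tc);

	return 0;
}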
+6 −3
@@ -2188,16 +2188,17 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
+	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int i;

	memset(info, 0, sizeof(*info));

-	info->num_tc = 1;
-
	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

+		info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

@@ -2248,6 +2249,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
	} else {
		u16 total_cids = 0;

+		info->num_tc = 1;
+
		/* Determine queues &  XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -2554,7 +2557,7 @@ static int qed_start_txq(struct qed_dev *cdev,

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
-				    p_params, 0,
+				    p_params, p_params->tc,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
+3 −2
@@ -948,13 +948,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
-	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
-		*num_cons = min_t(u16, *num_cons, 192);
+		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
+19 −1
@@ -52,6 +52,9 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
#define QEDE_MAJOR_VERSION		8
#define QEDE_MINOR_VERSION		33
#define QEDE_REVISION_VERSION		0
@@ -386,6 +389,15 @@ struct qede_tx_queue {
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)	((edev)->fp_num_rx + \
+						 ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)	((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq)	((QEDE_TSS_COUNT(edev) * \
+						 (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)	\
+	(&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+	[QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp)	(&((fp)->txq[0]))

	/* Regular Tx requires skb + metadata for release purpose,
	 * while XDP requires the pages and the mapped address.
@@ -399,6 +411,8 @@ struct qede_tx_queue {

	/* Slowpath; Should be kept in end [unless missing padding] */
	void *handle;
+	u16 cos;
+	u16 ndev_txq_id;
};

#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -458,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs);
@@ -524,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+			    struct tc_cls_flower_offload *f);

#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
@@ -541,5 +557,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
#define	for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+	for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)

#endif /* _QEDE_H_ */
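A quick worked example of the new CoS indexing macros above, with made-up values (4 Tx queues per TC, fp_num_rx of 0): QEDE_TXQ_TO_NDEV_TXQ_ID for a txq with cos 1 and index 3 yields 4 * 1 + 3 = 7, and the reverse macros recover fp id 0 + 7 % 4 = 3 and cos 7 / 4 = 1. The standalone sketch below mirrors that arithmetic outside the driver:

#include <stdio.h>

/* Plain-integer mirror of QEDE_TXQ_TO_NDEV_TXQ_ID and its inverses;
 * tss (Tx queues per TC) and fp_num_rx are illustrative values, not
 * driver state.
 */
int main(void)
{
	unsigned int tss = 4, fp_num_rx = 0;
	unsigned int cos = 1, index = 3;
	unsigned int id = tss * cos + index;	/* QEDE_TXQ_TO_NDEV_TXQ_ID -> 7 */

	printf("ndev txq id %u -> fp %u, cos %u\n",
	       id, fp_num_rx + id % tss, id / tss);	/* fp 3, cos 1 */
	return 0;
}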
+39 −11
@@ -222,7 +222,7 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
				QEDE_TXQ_XDP_TO_IDX(edev, txq),
				qede_tqstats_arr[i].string);
		else
-			sprintf(*buf, "%d: %s", txq->index,
+			sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
				qede_tqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
@@ -262,8 +262,13 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);

-		if (fp->type & QEDE_FASTPATH_TX)
-			qede_get_strings_stats_txq(edev, fp->txq, &buf);
+		if (fp->type & QEDE_FASTPATH_TX) {
+			int cos;
+
+			for_each_cos_in_txq(edev, cos)
+				qede_get_strings_stats_txq(edev,
+							   &fp->txq[cos], &buf);
+		}
	}

	/* Account for non-queue statistics */
@@ -338,8 +343,12 @@ static void qede_get_ethtool_stats(struct net_device *dev,
		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);

-		if (fp->type & QEDE_FASTPATH_TX)
-			qede_get_ethtool_stats_txq(fp->txq, &buf);
+		if (fp->type & QEDE_FASTPATH_TX) {
+			int cos;
+
+			for_each_cos_in_txq(edev, cos)
+				qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+		}
	}

	for (i = 0; i < QEDE_NUM_STATS; i++) {
@@ -366,7 +375,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
				num_stats--;

		/* Account for the Regular Tx statistics */
-		num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+		num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+				edev->dev_info.num_tc;

		/* Account for the Regular Rx statistics */
		num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
@@ -741,9 +751,17 @@ static int qede_get_coalesce(struct net_device *dev,
		}

		for_each_queue(i) {
+			struct qede_tx_queue *txq;
+
			fp = &edev->fp_array[i];
+
+			/* All TX queues of given fastpath uses same
+			 * coalescing value, so no need to iterate over
+			 * all TCs, TC0 txq should suffice.
+			 */
			if (fp->type & QEDE_FASTPATH_TX) {
-				tx_handle = fp->txq->handle;
+				txq = QEDE_FP_TC0_TXQ(fp);
+				tx_handle = txq->handle;
				break;
			}
		}
@@ -801,9 +819,17 @@ static int qede_set_coalesce(struct net_device *dev,
		}

		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+			struct qede_tx_queue *txq;
+
+			/* All TX queues of given fastpath uses same
+			 * coalescing value, so no need to iterate over
+			 * all TCs, TC0 txq should suffice.
+			 */
+			txq = QEDE_FP_TC0_TXQ(fp);
+
			rc = edev->ops->common->set_coalesce(edev->cdev,
							     0, txc,
-							     fp->txq->handle);
+							     txq->handle);
			if (rc) {
				DP_INFO(edev,
					"Set TX coalesce error, rc = %d\n", rc);
@@ -1259,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
		rc = qede_add_cls_rule(edev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
-		rc = qede_del_cls_rule(edev, info);
+		rc = qede_delete_flow_filter(edev, info->fs.location);
		break;
	default:
		DP_INFO(edev, "Command parameters not supported\n");
@@ -1385,8 +1411,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
	u16 val;

	for_each_queue(i) {
-		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
-			txq = edev->fp_array[i].txq;
+		struct qede_fastpath *fp = &edev->fp_array[i];
+
+		if (fp->type & QEDE_FASTPATH_TX) {
+			txq = QEDE_FP_TC0_TXQ(fp);
			break;
		}
	}
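The "%d_%d: %s" change in qede_get_strings_stats_txq() above extends per-queue Tx stat names from "<index>: <stat>" to "<index>_<cos>: <stat>", so the same queue index can appear once per traffic class. A minimal illustration (the stat name is made up):

#include <stdio.h>

int main(void)
{
	char buf[32];

	/* txq->index 1, txq->cos 0 under the new naming */
	snprintf(buf, sizeof(buf), "%d_%d: %s", 1, 0, "xmit_pkts");
	printf("%s\n", buf);	/* prints "1_0: xmit_pkts" */
	return 0;
}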
+366 −56
@@ -83,7 +83,7 @@ struct qede_arfs_fltr_node {
	struct qede_arfs_tuple tuple;

	u32 flow_id;
-	u16 sw_id;
+	u64 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	u8 vfid;
@@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,

		n->tuple.stringify(&n->tuple, tuple_buffer);
		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
-			   "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+			   "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
			   add_fltr ? "Adding" : "Deleting",
			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
	}
@@ -152,7 +152,10 @@ static void
qede_free_arfs_filter(struct qede_dev *edev,  struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);
-	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+	if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
	kfree(fltr);
}

@@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)

	if (fw_rc) {
		DP_NOTICE(edev,
-			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);
@@ -1348,7 +1351,7 @@ void qede_config_rx_mode(struct net_device *ndev)
}

static struct qede_arfs_fltr_node *
-qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
	struct qede_arfs_fltr_node *fltr;

@@ -1599,6 +1602,69 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
	return 0;
}

+static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
+					struct qede_arfs_tuple *t)
+{
+	/* We must have Only 4-tuples/l4 port/src ip/dst ip
+	 * as an input.
+	 */
+	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+	} else if (!t->src_port && t->dst_port &&
+		   !t->src_ipv4 && !t->dst_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+	} else if (!t->src_port && !t->dst_port &&
+		   !t->dst_ipv4 && t->src_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+	} else if (!t->src_port && !t->dst_port &&
+		   t->dst_ipv4 && !t->src_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+	} else {
+		DP_INFO(edev, "Invalid N-tuple\n");
+		return -EOPNOTSUPP;
+	}
+
+	t->ip_comp = qede_flow_spec_ipv4_cmp;
+	t->build_hdr = qede_flow_build_ipv4_hdr;
+	t->stringify = qede_flow_stringify_ipv4_hdr;
+
+	return 0;
+}
+
+static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
+					struct qede_arfs_tuple *t,
+					struct in6_addr *zaddr)
+{
+	/* We must have Only 4-tuples/l4 port/src ip/dst ip
+	 * as an input.
+	 */
+	if (t->src_port && t->dst_port &&
+	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+	} else if (!t->src_port && t->dst_port &&
+		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+	} else if (!t->src_port && !t->dst_port &&
+		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+	} else if (!t->src_port && !t->dst_port &&
+		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+	} else {
+		DP_INFO(edev, "Invalid N-tuple\n");
+		return -EOPNOTSUPP;
+	}
+
+	t->ip_comp = qede_flow_spec_ipv6_cmp;
+	t->build_hdr = qede_flow_build_ipv6_hdr;
+
+	return 0;
+}
+
static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
					       struct qede_arfs_tuple *t,
					       struct ethtool_rx_flow_spec *fs)
@@ -1638,27 +1704,7 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
	t->src_port = fs->h_u.tcp_ip4_spec.psrc;
	t->dst_port = fs->h_u.tcp_ip4_spec.pdst;

-	/* We must either have a valid 4-tuple or only dst port
-	 * or only src ip as an input
-	 */
-	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
-		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
-	} else if (!t->src_port && t->dst_port &&
-		   !t->src_ipv4 && !t->dst_ipv4) {
-		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
-	}  else if (!t->src_port && !t->dst_port &&
-		    !t->dst_ipv4 && t->src_ipv4) {
-		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
-	} else {
-		DP_INFO(edev, "Invalid N-tuple\n");
-		return -EOPNOTSUPP;
-	}
-
-	t->ip_comp = qede_flow_spec_ipv4_cmp;
-	t->build_hdr = qede_flow_build_ipv4_hdr;
-	t->stringify = qede_flow_stringify_ipv4_hdr;
-
-	return 0;
+	return qede_set_v4_tuple_to_profile(edev, t);
}

static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
@@ -1690,10 +1736,8 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
					       struct ethtool_rx_flow_spec *fs)
{
	struct in6_addr zero_addr;
-	void *p;

-	p = &zero_addr;
-	memset(p, 0, sizeof(zero_addr));
+	memset(&zero_addr, 0, sizeof(zero_addr));

	if ((fs->h_u.tcp_ip6_spec.psrc &
	     fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
@@ -1720,30 +1764,7 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
	t->src_port = fs->h_u.tcp_ip6_spec.psrc;
	t->dst_port = fs->h_u.tcp_ip6_spec.pdst;

-	/* We must make sure we have a valid 4-tuple or only dest port
-	 * or only src ip as an input
-	 */
-	if (t->src_port && t->dst_port &&
-	    memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
-	    memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
-		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
-	} else if (!t->src_port && t->dst_port &&
-		   !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
-		   !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
-		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
-	} else if (!t->src_port && !t->dst_port &&
-		   !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
-		   memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
-		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
-	} else {
-		DP_INFO(edev, "Invalid N-tuple\n");
-		return -EOPNOTSUPP;
-	}
-
-	t->ip_comp = qede_flow_spec_ipv6_cmp;
-	t->build_hdr = qede_flow_build_ipv6_hdr;
-
-	return 0;
+	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}

static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
@@ -1941,9 +1962,8 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
	return rc;
}

-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
-	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

@@ -1952,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
-					 fsp->location);
+					 cookie);
	if (!fltr)
		goto unlock;

@@ -1982,3 +2002,293 @@ int qede_get_arfs_filter_count(struct qede_dev *edev)
	__qede_unlock(edev);
	return count;
}
+
+static int qede_parse_actions(struct qede_dev *edev,
+			      struct tcf_exts *exts)
+{
+	int rc = -EINVAL, num_act = 0;
+	const struct tc_action *a;
+	bool is_drop = false;
+	LIST_HEAD(actions);
+
+	if (!tcf_exts_has_actions(exts)) {
+		DP_NOTICE(edev, "No tc actions received\n");
+		return rc;
+	}
+
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
+		num_act++;
+
+		if (is_tcf_gact_shot(a))
+			is_drop = true;
+	}
+
+	if (num_act == 1 && is_drop)
+		return 0;
+
+	return rc;
+}
+
+static int
+qede_tc_parse_ports(struct qede_dev *edev,
+		    struct tc_cls_flower_offload *f,
+		    struct qede_arfs_tuple *t)
+{
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_dissector_key_ports *key, *mask;
+
+		key = skb_flow_dissector_target(f->dissector,
+						FLOW_DISSECTOR_KEY_PORTS,
+						f->key);
+		mask = skb_flow_dissector_target(f->dissector,
+						 FLOW_DISSECTOR_KEY_PORTS,
+						 f->mask);
+
+		if ((key->src && mask->src != U16_MAX) ||
+		    (key->dst && mask->dst != U16_MAX)) {
+			DP_NOTICE(edev, "Do not support ports masks\n");
+			return -EINVAL;
+		}
+
+		t->src_port = key->src;
+		t->dst_port = key->dst;
+	}
+
+	return 0;
+}
+
+static int
+qede_tc_parse_v6_common(struct qede_dev *edev,
+			struct tc_cls_flower_offload *f,
+			struct qede_arfs_tuple *t)
+{
+	struct in6_addr zero_addr, addr;
+
+	memset(&zero_addr, 0, sizeof(addr));
+	memset(&addr, 0xff, sizeof(addr));
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		struct flow_dissector_key_ipv6_addrs *key, *mask;
+
+		key = skb_flow_dissector_target(f->dissector,
+						FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						f->key);
+		mask = skb_flow_dissector_target(f->dissector,
+						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						 f->mask);
+
+		if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
+		     memcmp(&mask->src, &addr, sizeof(addr))) ||
+		    (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
+		     memcmp(&mask->dst, &addr, sizeof(addr)))) {
+			DP_NOTICE(edev,
+				  "Do not support IPv6 address prefix/mask\n");
+			return -EINVAL;
+		}
+
+		memcpy(&t->src_ipv6, &key->src, sizeof(addr));
+		memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+	}
+
+	if (qede_tc_parse_ports(edev, f, t))
+		return -EINVAL;
+
+	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_tc_parse_v4_common(struct qede_dev *edev,
+			struct tc_cls_flower_offload *f,
+			struct qede_arfs_tuple *t)
+{
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		struct flow_dissector_key_ipv4_addrs *key, *mask;
+
+		key = skb_flow_dissector_target(f->dissector,
+						FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						f->key);
+		mask = skb_flow_dissector_target(f->dissector,
+						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						 f->mask);
+
+		if ((key->src && mask->src != U32_MAX) ||
+		    (key->dst && mask->dst != U32_MAX)) {
+			DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+			return -EINVAL;
+		}
+
+		t->src_ipv4 = key->src;
+		t->dst_ipv4 = key->dst;
+	}
+
+	if (qede_tc_parse_ports(edev, f, t))
+		return -EINVAL;
+
+	return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_tc_parse_tcp_v6(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_TCP;
+	tuple->eth_proto = htons(ETH_P_IPV6);
+
+	return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_tcp_v4(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_TCP;
+	tuple->eth_proto = htons(ETH_P_IP);
+
+	return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v6(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_UDP;
+	tuple->eth_proto = htons(ETH_P_IPV6);
+
+	return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v4(struct qede_dev *edev,
+		     struct tc_cls_flower_offload *f,
+		     struct qede_arfs_tuple *tuple)
+{
+	tuple->ip_proto = IPPROTO_UDP;
+	tuple->eth_proto = htons(ETH_P_IP);
+
+	return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
+		       struct tc_cls_flower_offload *f,
+		       struct qede_arfs_tuple *tuple)
+{
+	int rc = -EINVAL;
+	u8 ip_proto = 0;
+
+	memset(tuple, 0, sizeof(*tuple));
+
+	if (f->dissector->used_keys &
+	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+		DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+			  f->dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
+
+	if (proto != htons(ETH_P_IP) &&
+	    proto != htons(ETH_P_IPV6)) {
+		DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+		return -EPROTONOSUPPORT;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key;
+
+		key = skb_flow_dissector_target(f->dissector,
+						FLOW_DISSECTOR_KEY_BASIC,
+						f->key);
+		ip_proto = key->ip_proto;
+	}
+
+	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+		rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+		rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+		rc = qede_tc_parse_udp_v4(edev, f, tuple);
+	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+		rc = qede_tc_parse_udp_v6(edev, f, tuple);
+	else
+		DP_NOTICE(edev, "Invalid tc protocol request\n");
+
+	return rc;
+}
+
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+			    struct tc_cls_flower_offload *f)
+{
+	struct qede_arfs_fltr_node *n;
+	int min_hlen, rc = -EINVAL;
+	struct qede_arfs_tuple t;
+
+	__qede_lock(edev);
+
+	if (!edev->arfs) {
+		rc = -EPERM;
+		goto unlock;
+	}
+
+	/* parse flower attribute and prepare filter */
+	if (qede_parse_flower_attr(edev, proto, f, &t))
+		goto unlock;
+
+	/* Validate profile mode and number of filters */
+	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+		DP_NOTICE(edev,
+			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
+		goto unlock;
+	}
+
+	/* parse tc actions and get the vf_id */
+	if (qede_parse_actions(edev, f->exts))
+		goto unlock;
+
+	if (qede_flow_find_fltr(edev, &t)) {
+		rc = -EEXIST;
+		goto unlock;
+	}
+
+	n = kzalloc(sizeof(*n), GFP_KERNEL);
+	if (!n) {
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	min_hlen = qede_flow_get_min_header_size(&t);
+
+	n->data = kzalloc(min_hlen, GFP_KERNEL);
+	if (!n->data) {
+		kfree(n);
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+	n->buf_len = min_hlen;
+	n->b_is_drop = true;
+	n->sw_id = f->cookie;
+
+	n->tuple.build_hdr(&n->tuple, n->data);
+
+	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+	if (rc)
+		goto unlock;
+
+	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+	rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+	__qede_unlock(edev);
+	return rc;
+}
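To restate the profile selection that qede_set_v4_tuple_to_profile() implements (the IPv6 variant mirrors it with memcmp against a zeroed address): exactly one of four field combinations is accepted, and anything else is rejected. Below is a standalone sketch of the same decision table; the enum is an illustrative stand-in for the QED_FILTER_CONFIG_MODE_* values, not driver code.

/* Illustrative mirror of the n-tuple classification above; names assumed. */
enum flow_mode {
	MODE_5_TUPLE,	/* src port + dst port + src ip + dst ip */
	MODE_L4_PORT,	/* destination L4 port only */
	MODE_IP_SRC,	/* source IP only */
	MODE_IP_DEST,	/* destination IP only (the new profile in this series) */
	MODE_INVALID,
};

static enum flow_mode classify(unsigned short sport, unsigned short dport,
			       unsigned int sip, unsigned int dip)
{
	if (sport && dport && sip && dip)
		return MODE_5_TUPLE;
	if (!sport && dport && !sip && !dip)
		return MODE_L4_PORT;
	if (!sport && !dport && sip && !dip)
		return MODE_IP_SRC;
	if (!sport && !dport && !sip && dip)
		return MODE_IP_DEST;
	return MODE_INVALID;	/* e.g. a src port without the other fields */
}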