
Commit c1957126 authored by David S. Miller

Merge branch 'hns3-next'



Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patch-set includes code optimizations and bugfixes for the HNS3
ethernet controller driver.

[patch 1/12 - 4/12] optimizes the VLAN feature, adds support for
port-based VLAN, and fixes some bugs in the current implementation.

[patch 5/12 - 12/12] includes some other code optimizations for the HNS3
ethernet controller driver.

Change log:
V1->V2: modifies some patches' commit logs and code.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 81f2eeb3 6814b590
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h  +3 −0
@@ -43,6 +43,7 @@ enum HCLGE_MBX_OPCODE {
	HCLGE_MBX_GET_QID_IN_PF,	/* (VF -> PF) get queue id in pf */
	HCLGE_MBX_LINK_STAT_MODE,	/* (PF -> VF) link mode has changed */
	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
	HLCGE_MBX_PUSH_VLAN_INFO,	/* (PF -> VF) push port base vlan */
	HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */

	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
@@ -63,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode {
	HCLGE_MBX_VLAN_FILTER = 0,	/* set vlan filter */
	HCLGE_MBX_VLAN_TX_OFF_CFG,	/* set tx side vlan offload */
	HCLGE_MBX_VLAN_RX_OFF_CFG,	/* set rx side vlan offload */
	HCLGE_MBX_PORT_BASE_VLAN_CFG,	/* set port based vlan configuration */
	HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,	/* get port based vlan state */
};

#define HCLGE_MBX_MAX_MSG_SIZE	16
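
The new HLCGE_MBX_PUSH_VLAN_INFO opcode lets the PF push the port VLAN tag to a VF without the VF asking for it. A minimal standalone sketch of such a dispatch follows; the opcode value, message layout, and handler names are invented for illustration, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define MBX_PUSH_VLAN_INFO 102	/* illustrative opcode value */

struct mbx_msg {
	uint8_t  opcode;
	uint16_t port_vlan_tag;	/* assumed payload: pushed port tag */
};

struct vf_state {
	int      port_vlan_enabled;
	uint16_t port_vlan_tag;
};

static void handle_mbx(struct vf_state *vf, const struct mbx_msg *msg)
{
	switch (msg->opcode) {
	case MBX_PUSH_VLAN_INFO:
		/* PF -> VF: port-based VLAN changed under the VF's feet */
		vf->port_vlan_enabled = msg->port_vlan_tag != 0;
		vf->port_vlan_tag = msg->port_vlan_tag;
		break;
	default:
		printf("unhandled opcode %u\n", (unsigned)msg->opcode);
		break;
	}
}

int main(void)
{
	struct vf_state vf = {0};
	struct mbx_msg msg = { .opcode = MBX_PUSH_VLAN_INFO,
			       .port_vlan_tag = 100 };

	handle_mbx(&vf, &msg);
	printf("enabled=%d tag=%u\n", vf.port_vlan_enabled,
	       (unsigned)vf.port_vlan_tag);
	return 0;
}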
drivers/net/ethernet/hisilicon/hns3/hnae3.h  +9 −0
@@ -147,6 +147,13 @@ enum hnae3_flr_state {
	HNAE3_FLR_DONE,
};

enum hnae3_port_base_vlan_state {
	HNAE3_PORT_BASE_VLAN_DISABLE,
	HNAE3_PORT_BASE_VLAN_ENABLE,
	HNAE3_PORT_BASE_VLAN_MODIFY,
	HNAE3_PORT_BASE_VLAN_NOCHANGE,
};

struct hnae3_vector_info {
	u8 __iomem *io_addr;
	int vector;
@@ -578,6 +585,8 @@ struct hnae3_handle {

	u32 numa_node_mask;	/* for multi-chip support */

	enum hnae3_port_base_vlan_state port_base_vlan_state;

	u8 netdev_flags;
	struct dentry *hnae3_dbgfs;
};
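
The four states cover every transition the PF must handle when a VF's port VLAN is changed from userspace. A standalone sketch of one plausible way to derive the next state from the old and new tag; the rule and helper name here are assumptions for illustration, not the driver's actual logic.

#include <stdio.h>

enum port_base_vlan_state {
	PORT_BASE_VLAN_DISABLE,
	PORT_BASE_VLAN_ENABLE,
	PORT_BASE_VLAN_MODIFY,
	PORT_BASE_VLAN_NOCHANGE,
};

static enum port_base_vlan_state
next_state(enum port_base_vlan_state cur, unsigned short old_vlan,
	   unsigned short new_vlan)
{
	if (cur == PORT_BASE_VLAN_DISABLE)
		/* enabling only makes sense for a non-zero tag */
		return new_vlan ? PORT_BASE_VLAN_ENABLE
				: PORT_BASE_VLAN_NOCHANGE;
	if (!new_vlan)
		return PORT_BASE_VLAN_DISABLE;	/* tag cleared */
	return old_vlan == new_vlan ? PORT_BASE_VLAN_NOCHANGE
				    : PORT_BASE_VLAN_MODIFY;
}

int main(void)
{
	printf("%d\n", next_state(PORT_BASE_VLAN_DISABLE, 0, 100));  /* 1 */
	printf("%d\n", next_state(PORT_BASE_VLAN_ENABLE, 100, 0));   /* 0 */
	printf("%d\n", next_state(PORT_BASE_VLAN_ENABLE, 100, 200)); /* 2 */
	return 0;
}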
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  +173 −83
@@ -963,6 +963,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	struct hnae3_handle *handle = tx_ring->tqp->handle;

	/* Since HW limitation, if port based insert VLAN enabled, only one VLAN
	 * header is allowed in skb, otherwise it will cause RAS error.
	 */
	if (unlikely(skb_vlan_tagged_multi(skb) &&
		     handle->port_base_vlan_state ==
		     HNAE3_PORT_BASE_VLAN_ENABLE))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
@@ -984,8 +994,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			if (handle->port_base_vlan_state ==
			    HNAE3_PORT_BASE_VLAN_DISABLE){
				hns3_set_field(*out_vlan_flag,
					       HNS3_TXD_OVLAN_B, 1);
				*out_vtag = vlan_tag;
			} else {
				hns3_set_field(*inner_vlan_flag,
					       HNS3_TXD_VLAN_B, 1);
				*inner_vtag = vlan_tag;
			}
		} else {
			hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
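
The reworked branch reflects the hardware constraint noted above: when port-based VLAN insertion is enabled, the outer VLAN slot of the TX descriptor carries the port tag, so a tag coming from the stack must be placed in the inner slot. A minimal standalone sketch of that placement decision; the field and function names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_desc_vtags {
	uint16_t outer_vtag;
	uint16_t inner_vtag;
};

static void place_tx_vlan(struct tx_desc_vtags *d, uint16_t stack_tag,
			  bool port_base_vlan_enabled)
{
	if (port_base_vlan_enabled)
		d->inner_vtag = stack_tag; /* outer slot belongs to the port tag */
	else
		d->outer_vtag = stack_tag; /* legacy behaviour: tag goes outer */
}

int main(void)
{
	struct tx_desc_vtags d = {0};

	place_tx_vlan(&d, 100, true);
	printf("outer=%u inner=%u\n", (unsigned)d.outer_vtag,
	       (unsigned)d.inner_vtag); /* outer=0 inner=100 */
	return 0;
}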
@@ -2313,17 +2331,50 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
	}
}

static int hns3_gro_complete(struct sk_buff *skb)
{
	__be16 type = skb->protocol;
	struct tcphdr *th;
	int depth = 0;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if ((depth + VLAN_HLEN) > skb_headlen(skb))
			return -EFAULT;

		vh = (struct vlan_hdr *)(skb->data + depth);
		type = vh->h_vlan_encapsulated_proto;
		depth += VLAN_HLEN;
	}

	if (type == htons(ETH_P_IP)) {
		depth += sizeof(struct iphdr);
	} else if (type == htons(ETH_P_IPV6)) {
		depth += sizeof(struct ipv6hdr);
	} else {
		netdev_err(skb->dev,
			   "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
			   be16_to_cpu(type), depth);
		return -EFAULT;
	}

	th = (struct tcphdr *)(skb->data + depth);
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
			     u32 l234info, u32 bd_base_info)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

@@ -2332,12 +2383,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* We MUST enable hardware checksum before enabling hardware GRO */
	if (skb_shinfo(skb)->gso_size) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	/* check if hardware has done checksum */
	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
		return;
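
hns3_gro_complete above recovers the TCP header offset by skipping any stacked 802.1Q headers and then adding a fixed IPv4 or IPv6 header length. A self-contained sketch of the same walk over a plain byte buffer; the constants mirror the kernel's values, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100
#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define VLAN_HLEN   4
#define IPV4_HLEN   20	/* sizeof(struct iphdr), no options */
#define IPV6_HLEN   40	/* sizeof(struct ipv6hdr) */

/* returns the TCP header offset past the Ethernet header, or -1 */
static int l4_offset(const uint8_t *data, size_t len, uint16_t proto)
{
	int depth = 0;

	while (proto == ETH_P_8021Q) {
		if ((size_t)(depth + VLAN_HLEN) > len)
			return -1;
		/* encapsulated protocol sits in bytes 2-3 of the VLAN header */
		proto = (uint16_t)(data[depth + 2] << 8 | data[depth + 3]);
		depth += VLAN_HLEN;
	}

	if (proto == ETH_P_IP)
		return depth + IPV4_HLEN;
	if (proto == ETH_P_IPV6)
		return depth + IPV6_HLEN;
	return -1;	/* firmware GRO only aggregates IPv4/IPv6 TCP */
}

int main(void)
{
	/* one 802.1Q tag (VID 100) wrapping IPv4 */
	uint8_t pkt[64] = { 0x00, 0x64, 0x08, 0x00 };

	printf("%d\n", l4_offset(pkt, sizeof(pkt), ETH_P_8021Q)); /* 24 */
	return 0;
}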
@@ -2390,6 +2435,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
				struct hns3_desc *desc, u32 l234info,
				u16 *vlan_tag)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	struct pci_dev *pdev = ring->tqp->handle->pdev;

	if (pdev->revision == 0x20) {
@@ -2402,14 +2448,35 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2
#define HNS3_STRP_BOTH		0x3

	/* Hardware always insert VLAN tag into RX descriptor when
	 * remove the tag from packet, driver needs to determine
	 * reporting which tag to stack.
	 */
	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		if (handle->port_base_vlan_state !=
				HNAE3_PORT_BASE_VLAN_DISABLE)
			return false;

		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		return true;
	case HNS3_STRP_INNER_VLAN:
		if (handle->port_base_vlan_state !=
				HNAE3_PORT_BASE_VLAN_DISABLE)
			return false;

		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		return true;
	case HNS3_STRP_BOTH:
		if (handle->port_base_vlan_state ==
				HNAE3_PORT_BASE_VLAN_DISABLE)
			*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		else
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return true;
	default:
		return false;
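
The switch above decides which stripped tag, if any, is reported to the stack: with port-based VLAN active, the port tag must stay invisible to the VF. A standalone sketch of the same decision table, with invented names; the rationale in the comments is inferred from the driver change, not quoted from it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STRP_OUTER_VLAN 0x1
#define STRP_INNER_VLAN 0x2
#define STRP_BOTH       0x3

static bool pick_rx_tag(unsigned int strip_status, bool port_vlan_enabled,
			uint16_t outer_tag, uint16_t inner_tag,
			uint16_t *tag_for_stack)
{
	switch (strip_status) {
	case STRP_OUTER_VLAN:
	case STRP_INNER_VLAN:
		/* a single stripped tag is the port tag when port-based
		 * VLAN is on, so report nothing to the stack
		 */
		if (port_vlan_enabled)
			return false;
		*tag_for_stack = (strip_status == STRP_OUTER_VLAN) ?
				 outer_tag : inner_tag;
		return true;
	case STRP_BOTH:
		/* with two tags stripped, skip the outer one (the port
		 * tag) when port-based VLAN is on
		 */
		*tag_for_stack = port_vlan_enabled ? inner_tag : outer_tag;
		return true;
	default:
		return false;
	}
}

int main(void)
{
	uint16_t tag;

	if (pick_rx_tag(STRP_BOTH, true, 10, 20, &tag))
		printf("report tag %u\n", (unsigned)tag); /* 20: inner tag */
	return 0;
}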
@@ -2532,7 +2599,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
	return 0;
}

static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 l234info,
				     u32 bd_base_info)
{
	u16 gro_count;
@@ -2541,12 +2609,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
				    HNS3_RXD_GRO_COUNT_S);
	/* if there is no HW GRO, do not set gro params */
	if (!gro_count)
		return;
	if (!gro_count) {
		hns3_rx_checksum(ring, skb, l234info, bd_base_info);
		return 0;
	}

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = gro_count;

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2556,13 +2623,13 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return;
		return -EFAULT;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	if (skb_shinfo(skb)->gso_size)
		tcp_gro_complete(skb);

	return  hns3_gro_complete(skb);
}

static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2587,16 +2654,85 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
	skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}

static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb)
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
			      struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	u32 l234info = le32_to_cpu(desc->rx.l234_info);
	enum hns3_pkt_l2t_type l2_frame_type;
	unsigned int len;
	int ret;

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		return -EINVAL;
	}

	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		return -EFAULT;
	}

	len = skb->len;

	/* Do update ip stack process */
	skb->protocol = eth_type_trans(skb, netdev);

	/* This is needed in order to enable forwarding support */
	ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
	if (unlikely(ret)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.rx_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return ret;
	}

	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += len;

	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += len;
	return 0;
}

static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb)
{
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;
	u32 l234info;
	int length;
	int ret;

@@ -2656,62 +2792,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
		       ALIGN(ring->pull_len, sizeof(long)));
	}

	l234info = le32_to_cpu(desc->rx.l234_info);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

	ret = hns3_handle_bdinfo(ring, skb, desc);
	if (unlikely(ret)) {
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
		return ret;
	}


	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);
	u64_stats_update_begin(&ring->syncp);
	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	/* This is needed in order to enable forwarding support */
	hns3_set_gro_param(skb, l234info, bd_base_info);

	hns3_rx_checksum(ring, skb, desc);
	*out_skb = skb;
	hns3_set_rx_skb_rss_type(ring, skb);

@@ -2723,7 +2809,6 @@ int hns3_clean_rx_ring(
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
	struct sk_buff *skb = ring->skb;
@@ -2760,8 +2845,6 @@ int hns3_clean_rx_ring(
			continue;
		}

		/* Do update ip stack process */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);
		recv_bds += ring->pending_buf;
		clean_count += ring->pending_buf;
@@ -3877,6 +3960,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c  +2 −2
@@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock_bh(&hdev->hw.cmq.crq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
@@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
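
The inner crq lock drops the _bh variant, presumably because the outer spin_lock_bh on the csq lock has already disabled bottom halves for the whole section, making a second disable redundant. A toy userspace model of that nesting; the stubs below only track BH-disable depth and are not the kernel primitives.

#include <assert.h>
#include <stdio.h>

static int bh_disable_depth;	/* models the softirq count */

static void spin_lock_bh(const char *name)
{
	bh_disable_depth++;	/* local_bh_disable() */
	printf("lock   %s (bh depth %d)\n", name, bh_disable_depth);
}

static void spin_unlock_bh(const char *name)
{
	printf("unlock %s (bh depth %d)\n", name, bh_disable_depth);
	bh_disable_depth--;	/* local_bh_enable() */
}

static void spin_lock(const char *name)
{
	printf("lock   %s (bh depth %d)\n", name, bh_disable_depth);
}

static void spin_unlock(const char *name)
{
	printf("unlock %s (bh depth %d)\n", name, bh_disable_depth);
}

int main(void)
{
	spin_lock_bh("csq");
	spin_lock("crq");	/* BHs already off: depth stays 1 */
	assert(bh_disable_depth == 1);
	spin_unlock("crq");
	spin_unlock_bh("csq");
	assert(bh_disable_depth == 0);
	return 0;
}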
+617 −339

Diff collapsed: preview size limit exceeded.