Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f6779e4e authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'hns3-a-few-code-improvements'



Peng Li says:

====================
net: hns3: a few code improvements

This patchset removes some redundant code and fixes a few code
stylistic issues found during a concentrated internal review;
no functional changes are introduced.

---
Change log:
V1 -> V2:
1. Remove a patch according to a review comment from David Miller.
---
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 50d4feb5 e4e87715
Loading
Loading
Loading
Loading
+9 −6
Original line number Diff line number Diff line
@@ -85,10 +85,12 @@ config HNS3
	  drivers (like ODP) to register with HNAE devices and their associated
	  operations.

if HNS3

config HNS3_HCLGE
	tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
	default m
	depends on PCI_MSI
	depends on HNS3
	---help---
	  This selects the HNS3_HCLGE network acceleration engine & its hardware
	  compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,7 +99,7 @@ config HNS3_HCLGE
config HNS3_DCB
	bool "Hisilicon HNS3 Data Center Bridge Support"
	default n
	depends on HNS3 && HNS3_HCLGE && DCB
	depends on HNS3_HCLGE && DCB
	---help---
	  Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.

@@ -106,7 +108,6 @@ config HNS3_DCB
config HNS3_HCLGEVF
	tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
	depends on PCI_MSI
    depends on HNS3
	depends on HNS3_HCLGE
    ---help---
	  This selects the HNS3 VF drivers network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF

config HNS3_ENET
	tristate "Hisilicon HNS3 Ethernet Device Support"
	default m
	depends on 64BIT && PCI
	depends on HNS3
	---help---
	  This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
	  family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
	  devices and their associated operations.

endif #HNS3

endif # NET_VENDOR_HISILICON
+14 −14
Original line number Diff line number Diff line
@@ -40,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
{
	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
		break;
	case HNAE3_CLIENT_UNIC:
		hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
		hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
		break;
	case HNAE3_CLIENT_ROCE:
		hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
		hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
		break;
	default:
		break;
@@ -60,15 +60,15 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		inited = hnae_get_bit(ae_dev->flag,
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_KNIC_CLIENT_INITED_B);
		break;
	case HNAE3_CLIENT_UNIC:
		inited = hnae_get_bit(ae_dev->flag,
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_UNIC_CLIENT_INITED_B);
		break;
	case HNAE3_CLIENT_ROCE:
		inited = hnae_get_bit(ae_dev->flag,
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_ROCE_CLIENT_INITED_B);
		break;
	default:
@@ -85,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,

	/* check if this client matches the type of ae_dev */
	if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
	      hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
		return 0;
	}

@@ -190,7 +190,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
			continue;
		}

		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);

		/* check the client list for the match with this ae_dev type and
		 * initialize the figure out client instance
@@ -220,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
	mutex_lock(&hnae3_common_lock);
	/* Check if there are matched ae_dev */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
		if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
			continue;

		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -234,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
			hnae3_match_n_instantiate(client, ae_dev, false);

		ae_algo->ops->uninit_ae_dev(ae_dev);
		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
	}

	list_del(&ae_algo->node);
@@ -278,7 +278,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
			goto out_err;
		}

		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
		break;
	}

@@ -310,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
	mutex_lock(&hnae3_common_lock);
	/* Check if there are matched ae_algo */
	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
		if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
			continue;

		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -321,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
			hnae3_match_n_instantiate(client, ae_dev, false);

		ae_algo->ops->uninit_ae_dev(ae_dev);
		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
	}

	list_del(&ae_dev->node);
+8 −10
Original line number Diff line number Diff line
@@ -62,10 +62,10 @@
		BIT(HNAE3_DEV_SUPPORT_ROCE_B))

#define hnae3_dev_roce_supported(hdev) \
	hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)

#define hnae3_dev_dcb_supported(hdev) \
	hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)

#define ring_ptr_move_fw(ring, p) \
	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +167,6 @@ struct hnae3_client_ops {
#define HNAE3_CLIENT_NAME_LENGTH 16
struct hnae3_client {
	char name[HNAE3_CLIENT_NAME_LENGTH];
	u16 version;
	unsigned long state;
	enum hnae3_client_type type;
	const struct hnae3_client_ops *ops;
@@ -436,7 +435,6 @@ struct hnae3_dcb_ops {
struct hnae3_ae_algo {
	const struct hnae3_ae_ops *ops;
	struct list_head node;
	char name[HNAE3_CLASS_NAME_SIZE];
	const struct pci_device_id *pdev_id_table;
};

@@ -509,17 +507,17 @@ struct hnae3_handle {
	u32 numa_node_mask;	/* for multi-chip support */
};

#define hnae_set_field(origin, mask, shift, val) \
#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define hnae_set_bit(origin, shift, val) \
	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae_get_bit(origin, shift) \
	hnae_get_field((origin), (0x1 << (shift)), (shift))
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field((origin), (0x1 << (shift)), (shift))

void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+122 −122
Original line number Diff line number Diff line
@@ -493,7 +493,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae_set_bit(*type_cs_vlan_tso,
	hnae3_set_bit(*type_cs_vlan_tso,
		      HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
@@ -586,20 +586,20 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet*/
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae_set_field(*ol_type_vlan_len_msec,
		hnae3_set_field(*ol_type_vlan_len_msec,
				HNS3_TXD_L2LEN_M,
				HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
				HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558)*/
@@ -609,15 +609,16 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,

			/* compute OL4 header size, defined in 4 Bytes. */
			ol4_len = l2_hdr - l4.hdr;
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes. */
			l2_len = l3.hdr - l2_hdr;
			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
@@ -634,22 +635,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
@@ -703,29 +706,31 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
		/* define outer network header type.*/
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4).*/
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae_set_field(*ol_type_vlan_len_msec,
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae_set_field(*ol_type_vlan_len_msec,
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
@@ -749,25 +754,25 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
	}

	if (l3.v4->version == 4) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);

		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso,
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
@@ -776,13 +781,13 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae_set_field(*type_cs_vlan_tso,
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso,
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
@@ -807,11 +812,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
			HNS3_TXD_BDTYPE_S, 0);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +849,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -1135,7 +1140,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

	wmb(); /* Commit all data before submit */

	hnae_queue_xmit(ring->tqp, buf_num);
	hnae3_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

@@ -1703,7 +1708,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	unsigned int order = hnae3_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
@@ -1714,7 +1719,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf  = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->length = hnae3_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
@@ -1780,33 +1785,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	hns3_free_buffers(ring);

	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 DMA_BIDIRECTIONAL);
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	if (ring->desc) {
		dma_free_coherent(ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
					 &ring->desc_dma_addr,
					 GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

@@ -1887,7 +1886,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
	/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
@@ -2016,15 +2015,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	truesize = hnae_buf_size(ring);
	truesize = hnae3_buf_size(ring);

	if (!twobufs)
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);
@@ -2076,13 +2075,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
		return;

	/* check if hardware has done checksum */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
	if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		netdev_err(netdev, "L3/L4 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
@@ -2091,12 +2090,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
		return;
	}

	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
	l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
				  HNS3_RXD_L4ID_S);

	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
				   HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
@@ -2135,7 +2135,7 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2

	switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
@@ -2174,7 +2174,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	/* Check valid BD */
	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
		return -EFAULT;

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2229,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
		while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2257,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
					       vlan_tag);
	}

	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2269,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		     hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		netdev_err(netdev, "truncated pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
@@ -2279,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
		netdev_err(netdev, "L2 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
@@ -2532,9 +2532,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;
@@ -2549,9 +2549,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae_set_field(chain->int_gl_idx,
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);
@@ -2564,9 +2564,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
@@ -2579,9 +2579,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;
@@ -2805,7 +2805,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
+4 −5
Original line number Diff line number Diff line
@@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector {

	u16 num_tqps;	/* total number of tqps in TQP vector */

	cpumask_t affinity_mask;
	char name[HNAE3_INT_NAME_LEN];

	/* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, (reg), (value))

#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)

#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)

#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])

#define hnae_buf_size(_ring) ((_ring)->buf_size)
#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
#define hnae3_buf_size(_ring) ((_ring)->buf_size)
#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
Loading