Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2760f5a3 authored by David S. Miller
Browse files

Merge branch 'aquantia-fixes'



Igor Russkikh says:

====================
aquantia: Atlantic driver bugfixes and improvements

This series contains bugfixes for aQuantia Atlantic driver.

Changes in v2:
Review comments applied:
- min_mtu set removed
- extra mtu range check is removed
- err codes handling improved
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 62b982ee c7545689
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -51,6 +51,10 @@

#define AQ_CFG_SKB_FRAGS_MAX   32U

/* Number of descriptors available in one ring to resume this ring queue
 */
#define AQ_CFG_RESTART_DESC_THRES   (AQ_CFG_SKB_FRAGS_MAX * 2)

#define AQ_CFG_NAPI_WEIGHT     64U

#define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
+68 −77
Original line number Diff line number Diff line
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
	return 0;
}

/* Poll the HW for the current link state and propagate any change to
 * the networking stack (carrier state and TX queues).
 *
 * Returns 0 on success or the negative error code reported by the
 * hw_get_link_status operation.
 */
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);

	if (err)
		return err;

	/* self->aq_hw->aq_link_status is presumably refreshed by the
	 * hw_get_link_status() call above — confirm against the hw ops
	 * implementation. Log only actual speed transitions.
	 */
	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);

	self->link_status = self->aq_hw->aq_link_status;
	/* Link came up: set the STARTED flag and turn the carrier on
	 * before waking the TX queues, so the stack may transmit as soon
	 * as the queues are awake.
	 */
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->header.flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->header.flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	/* Link went down: drop the carrier first, then stop all TX. */
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
	}
	return 0;
}

static void aq_nic_service_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
		goto err_exit;

	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
	if (err < 0)
	err = aq_nic_update_link_status(self);
	if (err)
		goto err_exit;

	self->link_status = self->aq_hw->aq_link_status;

	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
		    self->aq_nic_cfg.is_interrupt_moderation);

	if (self->link_status.mbps) {
		aq_utils_obj_set(&self->header.flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->header.flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
	} else {
		netif_carrier_off(self->ndev);
		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
	}

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	for (i = AQ_DIMOF(self->aq_vec); i--;) {
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
	SET_NETDEV_DEV(ndev, dev);

	ndev->if_port = port;
	ndev->min_mtu = ETH_MIN_MTU;
	self->ndev = ndev;

	self->aq_pci_func = aq_pci_func;
@@ -241,7 +256,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i = 0U;

	if (!self->ndev) {
		err = -EINVAL;
@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)

	netif_carrier_off(self->ndev);

	for (i = AQ_CFG_VECS_MAX; i--;)
		aq_nic_ndev_queue_stop(self, i);
	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err < 0)
@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;

	return 0;
}
@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
		err = -EINVAL;
		goto err_exit;
	}
	if (netif_running(ndev)) {
		unsigned int i;

		for (i = AQ_CFG_VECS_MAX; i--;)
			netif_stop_subqueue(ndev, i);
	}
	if (netif_running(ndev))
		netif_tx_disable(ndev);

	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
		self->aq_vecs++) {
@@ -383,16 +393,6 @@ int aq_nic_init(struct aq_nic_s *self)
	return err;
}

void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}

void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
			goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;
@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);
		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
			++ret;
		}

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
			frag_len -= buff_size;
			buff_offset += buff_size;

		dx_buff->flags = 0U;
		dx_buff->len = frag_len;
		dx_buff->pa = frag_pa;
		dx_buff->is_mapped = 1U;
			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;
	bool is_nic_in_bad_state;

	frags = skb_shinfo(skb)->nr_frags + 1;

@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
		goto err_exit;
	}

	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
						(aq_ring_avail_dx(ring) <
						AQ_CFG_SKB_FRAGS_MAX);
	aq_ring_update_queue_state(ring);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}
@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
						      ring,
						      frags);
		if (err >= 0) {
			if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
				aq_nic_ndev_queue_stop(self, ring->idx);

			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	int err = 0;

	if (new_mtu > self->aq_hw_caps.mtu) {
		err = -EINVAL;
		goto err_exit;
	}
	self->aq_nic_cfg.mtu = new_mtu;

err_exit:
	return err;
	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);
	netif_tx_disable(self->ndev);

	del_timer_sync(&self->service_timer);

+0 −2
Original line number Diff line number Diff line
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
int aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
+45 −8
Original line number Diff line number Diff line
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
	return 0;
}

/* Test whether descriptor index @i lies strictly between head @h and
 * tail @t on a circular ring. When the (h, t) region wraps around the
 * end of the ring (h >= t), membership means being past the head OR
 * before the tail.
 */
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	if (h < t)
		return (h < i) && (i < t);

	return (h < i) || (i < t);
}

/* Stop or wake the TX subqueue backing @ring based on descriptor
 * headroom: stop when a worst-case fragmented skb might no longer fit,
 * wake only once availability climbs past the higher restart threshold
 * (hysteresis avoids stop/wake thrashing).
 */
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	unsigned int avail = aq_ring_avail_dx(ring);

	if (avail <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (avail > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

/* Wake the TX subqueue for @ring if it is currently stopped, counting
 * the restart in the ring's TX statistics. A no-op on a running queue.
 */
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		return;

	netif_wake_subqueue(ndev, ring->idx);
	ring->stats.tx.queue_restarts++;
}

/* Stop the TX subqueue for @ring unless it is already stopped. */
void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx))
		return;

	netif_stop_subqueue(ndev, ring->idx);
}

void aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop))
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
						buff->eop_index,
						self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			else
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);
	}
}

static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
					       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
	}
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+8 −2
Original line number Diff line number Diff line
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
	};
	union {
		struct {
			u32 len:16;
			u16 len;
			u32 is_ip_cso:1;
			u32 is_udp_cso:1;
			u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
			u32 is_cleaned:1;
			u32 is_error:1;
			u32 rsvd3:6;
			u16 eop_index;
			u16 rsvd4;
		};
		u32 flags;
		u64 flags;
	};
};

@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
	u64 errors;
	u64 packets;
	u64 bytes;
	u64 queue_restarts;
};

union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
int aq_ring_init(struct aq_ring_s *self);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
void aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
Loading