
Commit acea73d6 authored by Eric Dumazet, committed by David S. Miller

net/mlx4_en: Enable the compiler to make is_inline() inlined



Reorganize code to call is_inline() once, so the compiler can inline it

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e70602a8
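The change is mechanical, but the idea generalizes: evaluate a small predicate once, keep the answer in a local (here inline_ok, later copied into tx_info->inl), and let every later decision test the cached boolean instead of re-calling the helper. A minimal sketch of that pattern in plain C follows; the names can_inline() and plan_packet() and the sizing numbers are illustrative only and are not part of the driver.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for is_inline(): cheap, and called from exactly one place,
 * so the compiler can trivially inline it at that call site. */
static bool can_inline(size_t len, size_t inline_thold)
{
	return inline_thold != 0 && len <= inline_thold;
}

/* Stand-in for get_real_size(): decides once and reports the decision
 * to the caller through an out parameter, like *inline_ok in the patch. */
static size_t plan_packet(size_t len, size_t inline_thold, bool *inline_ok)
{
	*inline_ok = can_inline(len, inline_thold);
	/* Illustrative sizing only: inline packets need no per-fragment
	 * descriptors, the others get a fixed per-packet overhead. */
	return *inline_ok ? len : len + 64;
}

int main(void)
{
	bool inline_ok;
	size_t real_size = plan_packet(100, 104, &inline_ok);

	/* Later stages only test the cached result, mirroring how
	 * mlx4_en_xmit() now reads tx_info->inl instead of calling
	 * is_inline() again. */
	if (inline_ok)
		printf("inline, %zu bytes\n", real_size);
	else
		printf("DMA-mapped, %zu bytes\n", real_size);
	return 0;
}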
+38 −29
@@ -531,29 +531,32 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 	return ring->buf + index * TXBB_SIZE;
 }
 
+/* Decide if skb can be inlined in tx descriptor to avoid dma mapping
+ *
+ * It seems strange we do not simply use skb_copy_bits().
+ * This would allow to inline all skbs iff skb->len <= inline_thold
+ *
+ * Note that caller already checked skb was not a gso packet
+ */
 static bool is_inline(int inline_thold, const struct sk_buff *skb,
 		      const struct skb_shared_info *shinfo,
 		      void **pfrag)
 {
 	void *ptr;
 
-	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
-		if (shinfo->nr_frags == 1) {
-			ptr = skb_frag_address_safe(&shinfo->frags[0]);
-			if (unlikely(!ptr))
-				return 0;
+	if (skb->len > inline_thold || !inline_thold)
+		return false;
 
-			if (pfrag)
-				*pfrag = ptr;
-
-			return 1;
-		} else if (unlikely(shinfo->nr_frags))
-			return 0;
-		else
-			return 1;
+	if (shinfo->nr_frags == 1) {
+		ptr = skb_frag_address_safe(&shinfo->frags[0]);
+		if (unlikely(!ptr))
+			return false;
+		*pfrag = ptr;
+		return true;
 	}
-
-	return 0;
+	if (shinfo->nr_frags)
+		return false;
+	return true;
 }
 
 static int inline_size(const struct sk_buff *skb)
@@ -570,12 +573,15 @@ static int inline_size(const struct sk_buff *skb)
 static int get_real_size(const struct sk_buff *skb,
 			 const struct skb_shared_info *shinfo,
 			 struct net_device *dev,
-			 int *lso_header_size)
+			 int *lso_header_size,
+			 bool *inline_ok,
+			 void **pfrag)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int real_size;
 
 	if (shinfo->gso_size) {
+		*inline_ok = false;
 		if (skb->encapsulation)
 			*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
 		else
@@ -595,10 +601,14 @@ static int get_real_size(const struct sk_buff *skb,
 		}
 	} else {
 		*lso_header_size = 0;
-		if (!is_inline(priv->prof->inline_thold, skb, shinfo, NULL))
-			real_size = CTRL_SIZE + (shinfo->nr_frags + 1) * DS_SIZE;
-		else
+		*inline_ok = is_inline(priv->prof->inline_thold, skb,
+				       shinfo, pfrag);
+
+		if (*inline_ok)
 			real_size = inline_size(skb);
+		else
+			real_size = CTRL_SIZE +
+				    (shinfo->nr_frags + 1) * DS_SIZE;
 	}
 
 	return real_size;
@@ -694,9 +704,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	u16 vlan_tag = 0;
 	int i_frag;
 	int lso_header_size;
-	void *fragptr;
+	void *fragptr = NULL;
 	bool bounce = false;
 	bool send_doorbell;
+	bool inline_ok;
 	u32 ring_cons;
 
 	if (!priv->port_up)
@@ -708,7 +719,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fetch ring->cons far ahead before needing it to avoid stall */
 	ring_cons = ACCESS_ONCE(ring->cons);
 
-	real_size = get_real_size(skb, shinfo, dev, &lso_header_size);
+	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
+				  &inline_ok, &fragptr);
 	if (unlikely(!real_size))
 		goto tx_drop;
 
@@ -781,15 +793,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* valid only for none inline segments */
 	tx_info->data_offset = (void *)data - (void *)tx_desc;
 
+	tx_info->inl = inline_ok;
+
 	tx_info->linear = (lso_header_size < skb_headlen(skb) &&
-			   !is_inline(ring->inline_thold, skb, shinfo, NULL)) ? 1 : 0;
+			   !inline_ok) ? 1 : 0;
 
 	tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
 	data += tx_info->nr_maps - 1;
 
-	if (is_inline(ring->inline_thold, skb, shinfo, &fragptr)) {
-		tx_info->inl = 1;
-	} else {
+	if (!tx_info->inl) {
 		dma_addr_t dma = 0;
 		u32 byte_count = 0;
 
@@ -827,7 +839,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			wmb();
 			data->byte_count = cpu_to_be32(byte_count);
 		}
-		tx_info->inl = 0;
 		/* tx completion can avoid cache line miss for common cases */
 		tx_info->map0_dma = dma;
 		tx_info->map0_byte_count = byte_count;
@@ -899,11 +910,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
-	if (tx_info->inl) {
+	if (tx_info->inl)
 		build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag,
 				 tx_ind, fragptr);
-		tx_info->inl = 1;
-	}
 
 	if (skb->encapsulation) {
 		struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
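Beyond calling the helper once, two small simplifications in the rewritten is_inline() are easy to miss in the hunks above: the skb_is_gso() test is gone because the only remaining caller, get_real_size(), reaches is_inline() on its non-GSO branch (the new comment records this), and the old "if (pfrag)" guard is gone because that caller now always hands in a valid &fragptr. The flattened early-return shape can be sketched in standalone C; struct fake_pkt and is_inline_like() are hypothetical stand-ins, not driver code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the few skb/shinfo fields the real helper reads. */
struct fake_pkt {
	size_t len;     /* skb->len */
	int nr_frags;   /* shinfo->nr_frags */
	void *frag0;    /* address of the single fragment, or NULL */
};

/* Same control-flow shape as the rewritten is_inline(): early returns instead
 * of the old nested if/else ladder, and *pfrag written unconditionally on the
 * single-fragment path because the caller always supplies a valid pointer. */
static bool is_inline_like(size_t inline_thold, const struct fake_pkt *pkt,
			   void **pfrag)
{
	if (pkt->len > inline_thold || !inline_thold)
		return false;

	if (pkt->nr_frags == 1) {
		if (!pkt->frag0)
			return false;
		*pfrag = pkt->frag0;
		return true;
	}
	if (pkt->nr_frags)
		return false;
	return true;
}

int main(void)
{
	char payload[32];
	struct fake_pkt pkt = { .len = 32, .nr_frags = 1, .frag0 = payload };
	void *frag = NULL;

	printf("inline: %d\n", is_inline_like(104, &pkt, &frag));
	return 0;
}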