Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 10a0176e authored by Yuval Mintz, committed by David S. Miller
Browse files

qede: Update receive statistic once per NAPI



Currently, each time an ingress packet is passed to the networking stack,
the driver increments a per-queue SW statistic.
As we want to have additional fields in the first cache line of the
Rx-queue struct, change the flow so that this statistic is updated once per
NAPI run. We will later push the statistic to a different cache line.

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1ca22126
Loading
Loading
Loading
Loading
+11 −10
Original line number Original line Diff line number Diff line
@@ -624,7 +624,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);


	napi_gro_receive(&fp->napi, skb);
	napi_gro_receive(&fp->napi, skb);
	rxq->rcv_pkts++;
}
}


static void qede_set_gro_params(struct qede_dev *edev,
static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,7 +883,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
		       "Strange - TPA cont with more than a single len_list entry\n");
}
}


static void qede_tpa_end(struct qede_dev *edev,
static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
{
@@ -934,11 +933,12 @@ static void qede_tpa_end(struct qede_dev *edev,


	tpa_info->state = QEDE_AGG_STATE_NONE;
	tpa_info->state = QEDE_AGG_STATE_NONE;


	return;
	return 1;
err:
err:
	tpa_info->state = QEDE_AGG_STATE_NONE;
	tpa_info->state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
	tpa_info->skb = NULL;
	return 0;
}
}


static u8 qede_check_notunn_csum(u16 flag)
static u8 qede_check_notunn_csum(u16 flag)
@@ -1178,8 +1178,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		return 0;
		return 0;
	case ETH_RX_CQE_TYPE_TPA_END:
	case ETH_RX_CQE_TYPE_TPA_END:
		qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
		return 1;
	default:
	default:
		return 0;
		return 0;
	}
	}
@@ -1229,7 +1228,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
	/* Run eBPF program if one is attached */
	/* Run eBPF program if one is attached */
	if (xdp_prog)
	if (xdp_prog)
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
			return 1;
			return 0;


	/* If this is an error packet then drop it */
	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
	flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1289,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	struct qede_dev *edev = fp->edev;
	int work_done = 0, rcv_pkts = 0;
	u16 hw_comp_cons, sw_comp_cons;
	u16 hw_comp_cons, sw_comp_cons;
	int work_done = 0;


	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1304,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)


	/* Loop to complete all indicated BDs */
	/* Loop to complete all indicated BDs */
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		qede_rx_process_cqe(edev, fp, rxq);
		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		work_done++;
		work_done++;
	}
	}


	rxq->rcv_pkts += rcv_pkts;

	/* Allocate replacement buffers */
	/* Allocate replacement buffers */
	while (rxq->num_rx_buffers - rxq->filled_buffers)
	while (rxq->num_rx_buffers - rxq->filled_buffers)
		if (qede_alloc_rx_buffer(rxq, false))
		if (qede_alloc_rx_buffer(rxq, false))