
Commit fb6113e6 authored by Eric Dumazet, committed by David S. Miller

be2net: get rid of custom busy poll code



Compared to the custom busy_poll, the generic NAPI one is better, since
it allows GRO to be used, and it removes a lot of code and extra locked
operations from the fast path.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Sathya Perla <sathya.perla@broadcom.com>
Cc: Ajit Khaparde <ajit.khaparde@broadcom.com>
Cc: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 94b5e0f9
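
For context, the generic infrastructure the commit message refers to needs no driver-side busy-poll entry point at all. Below is a minimal sketch of the pattern, not code from this patch: the my_* names are hypothetical, while the kernel APIs shown are the real ones from this era.

	#include <linux/netdevice.h>
	#include <net/busy_poll.h>

	/* Regular NAPI poll routine; the core busy-poll loop drives this
	 * same function directly, so no separate ndo_busy_poll is needed.
	 */
	static int my_poll(struct napi_struct *napi, int budget);

	static void my_setup(struct net_device *dev, struct napi_struct *napi)
	{
		/* netif_napi_add() also hashes the NAPI instance by id,
		 * which is what makes it reachable from the generic
		 * busy-poll code.
		 */
		netif_napi_add(dev, napi, my_poll, NAPI_POLL_WEIGHT);
	}

	static void my_deliver(struct napi_struct *napi, struct sk_buff *skb)
	{
		skb_mark_napi_id(skb, napi);	/* sockets record this NAPI id */
		napi_gro_receive(napi, skb);	/* GRO now works on every poll */
	}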
drivers/net/ethernet/emulex/benet/be.h  +0 −5
@@ -226,11 +226,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
 	u64 tx_reqs_prev;	/* Used to calculate TX pps */
 };
 
-enum {
-	NAPI_POLLING,
-	BUSY_POLLING
-};
-
 struct be_mcc_obj {
 	struct be_queue_info q;
 	struct be_queue_info cq;
drivers/net/ethernet/emulex/benet/be_main.c  +9 −142
@@ -3063,7 +3063,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
-			 int budget, int polling)
+			 int budget)
 {
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_queue_info *rx_cq = &rxo->cq;
@@ -3095,8 +3095,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
 			goto loop_continue;
 		}
 
-		/* Don't do gro when we're busy_polling */
-		if (do_gro(rxcp) && polling != BUSY_POLLING)
+		if (do_gro(rxcp))
 			be_rx_compl_process_gro(rxo, napi, rxcp);
 		else
 			be_rx_compl_process(rxo, napi, rxcp);
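
The deleted comment above marks the one functional limitation being lifted: the custom scheme had to bypass GRO while busy polling, the generic one does not. As a rough sketch of what the two branches amount to; the real be_rx_compl_process*() helpers also handle VLAN tags, page fragments and statistics, and are not copied here.

	#include <linux/netdevice.h>

	/* Simplified sketch of the two delivery paths chosen above. */
	static void deliver_skb(struct napi_struct *napi, struct sk_buff *skb,
				bool gro)
	{
		if (gro)
			napi_gro_receive(napi, skb);	/* may coalesce TCP segments */
		else
			netif_receive_skb(skb);		/* one skb per completion */
	}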
@@ -3194,106 +3193,6 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
 	}
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-	bool status = true;
-
-	spin_lock(&eqo->lock); /* BH is already disabled */
-	if (eqo->state & BE_EQ_LOCKED) {
-		WARN_ON(eqo->state & BE_EQ_NAPI);
-		eqo->state |= BE_EQ_NAPI_YIELD;
-		status = false;
-	} else {
-		eqo->state = BE_EQ_NAPI;
-	}
-	spin_unlock(&eqo->lock);
-	return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-	spin_lock(&eqo->lock); /* BH is already disabled */
-
-	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
-	eqo->state = BE_EQ_IDLE;
-
-	spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-	bool status = true;
-
-	spin_lock_bh(&eqo->lock);
-	if (eqo->state & BE_EQ_LOCKED) {
-		eqo->state |= BE_EQ_POLL_YIELD;
-		status = false;
-	} else {
-		eqo->state |= BE_EQ_POLL;
-	}
-	spin_unlock_bh(&eqo->lock);
-	return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-	spin_lock_bh(&eqo->lock);
-
-	WARN_ON(eqo->state & (BE_EQ_NAPI));
-	eqo->state = BE_EQ_IDLE;
-
-	spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-	spin_lock_init(&eqo->lock);
-	eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-	local_bh_disable();
-
-	/* It's enough to just acquire napi lock on the eqo to stop
-	 * be_busy_poll() from processing any queueus.
-	 */
-	while (!be_lock_napi(eqo))
-		mdelay(1);
-
-	local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-	return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-	return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 int be_poll(struct napi_struct *napi, int budget)
 {
 	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
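
Everything deleted above existed only to keep be_poll() and be_busy_poll() from running concurrently on the same event queue. The generic infrastructure gets the same exclusion from a single atomic bit on the NAPI instance, roughly as sketched below; this is illustrative only, the real checks live in napi_schedule_prep() and napi_complete_done() in net/core/dev.c.

	#include <linux/netdevice.h>

	/* Sketch: ownership of a NAPI context is claimed with one atomic
	 * test-and-set on NAPI_STATE_SCHED, so no per-EQ spinlock or
	 * yield flags are required.  (my_* name is hypothetical.)
	 */
	static inline bool my_claim_napi(struct napi_struct *napi)
	{
		return !test_and_set_bit(NAPI_STATE_SCHED, &napi->state);
	}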
@@ -3308,19 +3207,14 @@ int be_poll(struct napi_struct *napi, int budget)
 	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
 		be_process_tx(adapter, txo, i);
 
-	if (be_lock_napi(eqo)) {
-		/* This loop will iterate twice for EQ0 in which
-		 * completions of the last RXQ (default one) are also processed
-		 * For other EQs the loop iterates only once
-		 */
-		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
-			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
-			max_work = max(work, max_work);
-		}
-		be_unlock_napi(eqo);
-	} else {
-		max_work = budget;
-	}
+	/* This loop will iterate twice for EQ0 in which
+	 * completions of the last RXQ (default one) are also processed
+	 * For other EQs the loop iterates only once
+	 */
+	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+		work = be_process_rx(rxo, napi, budget);
+		max_work = max(work, max_work);
+	}
 
 	if (is_mcc_eqo(eqo))
 		be_process_mcc(adapter);
@@ -3343,28 +3237,6 @@ int be_poll(struct napi_struct *napi, int budget)
 	return max_work;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int be_busy_poll(struct napi_struct *napi)
-{
-	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
-	struct be_adapter *adapter = eqo->adapter;
-	struct be_rx_obj *rxo;
-	int i, work = 0;
-
-	if (!be_lock_busy_poll(eqo))
-		return LL_FLUSH_BUSY;
-
-	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
-		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
-		if (work)
-			break;
-	}
-
-	be_unlock_busy_poll(eqo);
-	return work;
-}
-#endif
-
 void be_detect_error(struct be_adapter *adapter)
 {
 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
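
With be_busy_poll() gone, a busy-polling socket reaches the driver through the core instead: received skbs carry a NAPI id, the socket records it, and the busy-poll loop invokes that NAPI's ordinary poll callback, here be_poll(). The driver therefore needs only one RX entry point, and the private budget of 4 and the LL_FLUSH_BUSY return code above disappear with the custom handler. A hedged sketch of that single entry point, with hypothetical my_* names standing in for the driver's functions:

	#include <linux/netdevice.h>

	static int my_process_rx(struct napi_struct *napi, int budget);	/* hypothetical helper */

	/* Whether invoked from the softirq or from a busy-polling socket,
	 * the core calls this same routine.
	 */
	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work = my_process_rx(napi, budget);

		if (work < budget)
			napi_complete_done(napi, work);	/* done; re-arm interrupts */
		return work;
	}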
@@ -3669,7 +3541,6 @@ static int be_close(struct net_device *netdev)
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
 		for_all_evt_queues(adapter, eqo, i) {
 			napi_disable(&eqo->napi);
-			be_disable_busy_poll(eqo);
 		}
 		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
 	}
@@ -3839,7 +3710,6 @@ static int be_open(struct net_device *netdev)
 
 	for_all_evt_queues(adapter, eqo, i) {
 		napi_enable(&eqo->napi);
-		be_enable_busy_poll(eqo);
 		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
 	}
 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -5245,9 +5115,6 @@ static const struct net_device_ops be_netdev_ops = {
 #endif
 	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
 	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	.ndo_busy_poll		= be_busy_poll,
-#endif
 	.ndo_udp_tunnel_add	= be_add_vxlan_port,
 	.ndo_udp_tunnel_del	= be_del_vxlan_port,
 	.ndo_features_check	= be_features_check,
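
With ndo_busy_poll dropped from be_netdev_ops, nothing driver-specific remains in the feature: busy polling stays available to applications purely through the core. A minimal user-space usage sketch; the 50 µs value is an arbitrary example, not something prescribed by this patch.

	#include <sys/socket.h>

	/* Enable busy polling on one socket; the kernel will then spin on
	 * the NAPI instance recorded for this flow instead of sleeping.
	 */
	static int enable_busy_poll(int fd)
	{
		int usecs = 50;	/* example value; tune per workload */

		return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
				  &usecs, sizeof(usecs));
	}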