
Commit c0f9c7e4 authored by Bert Kenward, committed by David S. Miller

sfc: replace spinlocks with bit ops for busy poll locking

This patch reduces the overhead of locking for busy poll.
Previously the state was protected by a lock, whereas now
it's manipulated solely with atomic operations.

Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 080a270f
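The change is mechanical at the call sites but structural in the header: the old code guarded a per-channel state word with spin_lock_bh() on every NAPI poll and every busy poll, while the new code packs that state into a single unsigned long and claims it with cmpxchg(), set_bit() and clear_bit_unlock(). As a rough illustration of the try-lock half of that pattern, here is a minimal userspace sketch; it uses GCC's __atomic builtins in place of the kernel's cmpxchg()/WRITE_ONCE(), and every name in it is hypothetical rather than part of the sfc driver:

#include <stdbool.h>
#include <stdio.h>

enum { STATE_IDLE = 0, STATE_POLL = 1 };

static unsigned long poll_state = STATE_IDLE;

/* Rough analogue of efx_channel_try_lock_poll(): one CAS, no spinlock;
 * it succeeds only if the state was IDLE.
 */
static bool try_lock_poll(void)
{
	unsigned long expected = STATE_IDLE;

	return __atomic_compare_exchange_n(&poll_state, &expected, STATE_POLL,
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

/* Rough analogue of efx_channel_unlock_poll(): a release store back to IDLE. */
static void unlock_poll(void)
{
	__atomic_store_n(&poll_state, STATE_IDLE, __ATOMIC_RELEASE);
}

int main(void)
{
	printf("first  try: %d\n", try_lock_poll());	/* 1: lock taken */
	printf("second try: %d\n", try_lock_poll());	/* 0: already held */
	unlock_poll();
	return 0;
}

The win is on the uncontended path: acquiring ownership now costs a single atomic operation instead of a spin_lock_bh()/spin_unlock_bh() pair.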
drivers/net/ethernet/sfc/efx.c (+2 −2)
@@ -2062,7 +2062,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
 	napi_hash_add(&channel->napi_str);
-	efx_channel_init_lock(channel);
+	efx_channel_busy_poll_init(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -2125,7 +2125,7 @@ static int efx_busy_poll(struct napi_struct *napi)
 	if (!netif_running(efx->net_dev))
 		return LL_FLUSH_FAILED;
 
-	if (!efx_channel_lock_poll(channel))
+	if (!efx_channel_try_lock_poll(channel))
 		return LL_FLUSH_BUSY;
 
 	old_rx_packets = channel->rx_queue.rx_packets;
drivers/net/ethernet/sfc/net_driver.h (+56 −73)
@@ -431,21 +431,8 @@ struct efx_channel {
 	struct net_device *napi_dev;
 	struct napi_struct napi_str;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-	spinlock_t state_lock;
-#define EFX_CHANNEL_STATE_IDLE		0
-#define EFX_CHANNEL_STATE_NAPI		(1 << 0)  /* NAPI owns this channel */
-#define EFX_CHANNEL_STATE_POLL		(1 << 1)  /* poll owns this channel */
-#define EFX_CHANNEL_STATE_DISABLED	(1 << 2)  /* channel is disabled */
-#define EFX_CHANNEL_STATE_NAPI_YIELD	(1 << 3)  /* NAPI yielded this channel */
-#define EFX_CHANNEL_STATE_POLL_YIELD	(1 << 4)  /* poll yielded this channel */
-#define EFX_CHANNEL_OWNED \
-	(EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
-#define EFX_CHANNEL_LOCKED \
-	(EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
-#define EFX_CHANNEL_USER_PEND \
-	(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+	unsigned long busy_poll_state;
+#endif
 	struct efx_special_buffer eventq;
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
@@ -480,98 +467,94 @@ struct efx_channel {
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+enum efx_channel_busy_poll_state {
+	EFX_CHANNEL_STATE_IDLE = 0,
+	EFX_CHANNEL_STATE_NAPI = BIT(0),
+	EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+	EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
+	EFX_CHANNEL_STATE_POLL_BIT = 2,
+	EFX_CHANNEL_STATE_POLL = BIT(2),
+	EFX_CHANNEL_STATE_DISABLE_BIT = 3,
+};
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
 {
-	spin_lock_init(&channel->state_lock);
+	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
 }
 
 /* Called from the device poll routine to get ownership of a channel. */
 static inline bool efx_channel_lock_napi(struct efx_channel *channel)
 {
-	bool rc = true;
-
-	spin_lock_bh(&channel->state_lock);
-	if (channel->state & EFX_CHANNEL_LOCKED) {
-		WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-		channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		/* we don't care if someone yielded */
-		channel->state = EFX_CHANNEL_STATE_NAPI;
+	unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
+
+	while (1) {
+		switch (old) {
+		case EFX_CHANNEL_STATE_POLL:
+			/* Ensure efx_channel_try_lock_poll() wont starve us */
+			set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
+				&channel->busy_poll_state);
+			/* fallthrough */
+		case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
+			return false;
+		default:
+			break;
+		}
+		prev = cmpxchg(&channel->busy_poll_state, old,
+			       EFX_CHANNEL_STATE_NAPI);
+		if (unlikely(prev != old)) {
+			/* This is likely to mean we've just entered polling
+			 * state. Go back round to set the REQ bit.
+			 */
+			old = prev;
+			continue;
+		}
+		return true;
 	}
-	spin_unlock_bh(&channel->state_lock);
-	return rc;
 }
 
 static inline void efx_channel_unlock_napi(struct efx_channel *channel)
 {
-	spin_lock_bh(&channel->state_lock);
-	WARN_ON(channel->state &
-		(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
-
-	channel->state &= EFX_CHANNEL_STATE_DISABLED;
-	spin_unlock_bh(&channel->state_lock);
+	/* Make sure write has completed from efx_channel_lock_napi() */
+	smp_wmb();
+	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
 }
 
 /* Called from efx_busy_poll(). */
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
 {
-	bool rc = true;
-
-	spin_lock_bh(&channel->state_lock);
-	if ((channel->state & EFX_CHANNEL_LOCKED)) {
-		channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		/* preserve yield marks */
-		channel->state |= EFX_CHANNEL_STATE_POLL;
-	}
-	spin_unlock_bh(&channel->state_lock);
-	return rc;
+	return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+			EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
 }
 
-/* Returns true if NAPI tried to get the channel while it was locked. */
 static inline void efx_channel_unlock_poll(struct efx_channel *channel)
 {
-	spin_lock_bh(&channel->state_lock);
-	WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-
-	/* will reset state to idle, unless channel is disabled */
-	channel->state &= EFX_CHANNEL_STATE_DISABLED;
-	spin_unlock_bh(&channel->state_lock);
+	clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
 }
 
-/* True if a socket is polling, even if it did not get the lock. */
 static inline bool efx_channel_busy_polling(struct efx_channel *channel)
 {
-	WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
-	return channel->state & EFX_CHANNEL_USER_PEND;
+	return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
 }
 
 static inline void efx_channel_enable(struct efx_channel *channel)
 {
-	spin_lock_bh(&channel->state_lock);
-	channel->state = EFX_CHANNEL_STATE_IDLE;
-	spin_unlock_bh(&channel->state_lock);
+	clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+			 &channel->busy_poll_state);
 }
 
-/* False if the channel is currently owned. */
+/* Stop further polling or napi access.
 * Returns false if the channel is currently busy polling.
 */
 static inline bool efx_channel_disable(struct efx_channel *channel)
 {
-	bool rc = true;
-
-	spin_lock_bh(&channel->state_lock);
-	if (channel->state & EFX_CHANNEL_OWNED)
-		rc = false;
-	channel->state |= EFX_CHANNEL_STATE_DISABLED;
-	spin_unlock_bh(&channel->state_lock);
-
-	return rc;
+	set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+	/* Implicit barrier in efx_channel_busy_polling() */
+	return !efx_channel_busy_polling(channel);
 }
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
 {
 }
 
@@ -584,7 +567,7 @@ static inline void efx_channel_unlock_napi(struct efx_channel *channel)
 {
 }
 
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
 {
 	return false;
 }
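A subtlety in the new efx_channel_lock_napi() above is fairness: a plain try-lock could let a stream of busy-pollers starve NAPI, so when NAPI finds the channel in the POLL state it first sets the NAPI_REQ bit. Because efx_channel_try_lock_poll() only succeeds on a cmpxchg() from exactly EFX_CHANNEL_STATE_IDLE, that pending bit makes the poller's next attempt fail and hands the channel to NAPI. Here is a hedged userspace rendering of the loop, again with illustrative names and GCC __atomic builtins standing in for the kernel primitives:

#include <stdbool.h>
#include <stdio.h>

enum {
	ST_IDLE     = 0,
	ST_NAPI     = 1 << 0,
	ST_NAPI_REQ = 1 << 1,
	ST_POLL     = 1 << 2,
};

static unsigned long chan_state = ST_IDLE;

/* Rough analogue of efx_channel_lock_napi(): loop until we either own
 * the channel or find a busy-poller, in which case we leave the REQ
 * mark so the poller cannot immediately re-take it (a try-lock in this
 * scheme only succeeds from exactly ST_IDLE).
 */
static bool lock_napi(void)
{
	unsigned long prev, old = __atomic_load_n(&chan_state,
						  __ATOMIC_RELAXED);

	for (;;) {
		switch (old) {
		case ST_POLL:
			/* record that NAPI wants the channel, then yield */
			__atomic_fetch_or(&chan_state, ST_NAPI_REQ,
					  __ATOMIC_RELAXED);
			/* fall through */
		case ST_POLL | ST_NAPI_REQ:
			return false;
		default:
			break;
		}
		prev = old;
		/* CAS: only an exact match on the observed value wins */
		if (__atomic_compare_exchange_n(&chan_state, &prev, ST_NAPI,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return true;
		old = prev;	/* raced with a poller; re-examine the state */
	}
}

int main(void)
{
	printf("napi lock on idle channel: %d\n", lock_napi());	/* 1 */
	return 0;
}

The same one-word design is what lets efx_channel_disable() work without a lock at all: set_bit(EFX_CHANNEL_STATE_DISABLE_BIT) moves the word away from IDLE, so efx_channel_try_lock_poll()'s cmpxchg() from IDLE keeps failing until efx_channel_enable() clears the bit again.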