
Commit 3ffeabdd authored by Ben Hutchings, committed by David S. Miller

sfc: Eliminate indirect lookups of queue size constants



Move size and mask definitions into efx.h; calculate page orders in falcon.c.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 12d00cad
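
Note: this change replaces the per-NIC-type fields txd_ring_mask, rxd_ring_mask and evq_size with the compile-time constants EFX_TXQ_SIZE/MASK, EFX_RXQ_SIZE/MASK and EFX_EVQ_SIZE/MASK, so every place that wraps a free-running counter to a ring index masks with an immediate constant instead of loading a field through efx->type. A minimal standalone sketch of the pattern, using hypothetical RING_* names rather than driver code:

/* Sketch, not driver code: with a power-of-two ring size known at
 * compile time, "count mod size" reduces to an AND with an immediate
 * constant, and the power-of-two property can be checked statically. */
#include <assert.h>

#define RING_SIZE 1024			/* hypothetical; must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static_assert((RING_SIZE & RING_MASK) == 0, "size must be a power of two");

static unsigned ring_index(unsigned free_running_count)
{
	return free_running_count & RING_MASK;	/* == count % RING_SIZE */
}

The same power-of-two assumption is what lets the driver keep its counters free-running and mask them only at the point of use, as the hunks below show.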
drivers/net/sfc/efx.c (+3 −11)
@@ -290,7 +290,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	efx_process_channel(channel, efx->type->evq_size);
+	efx_process_channel(channel, EFX_EVQ_SIZE);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
@@ -1981,17 +1981,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->type = type;
 
-	/* Sanity-check NIC type */
-	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
-			    (efx->type->txd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
-			    (efx->type->rxd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->evq_size &
-			    (efx->type->evq_size - 1));
 	/* As close as we can get to guaranteeing that we don't overflow */
-	EFX_BUG_ON_PARANOID(efx->type->evq_size <
-			    (efx->type->txd_ring_mask + 1 +
-			     efx->type->rxd_ring_mask + 1));
+	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
+
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
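
Note: with the sizes available as macros, the overflow sanity check moves from the runtime EFX_BUG_ON_PARANOID to BUILD_BUG_ON, which rejects the condition during compilation and costs nothing at runtime. Roughly how the kernel's BUILD_BUG_ON of this era achieves that (a sketch from memory, not the exact header):

/* If cond is true, the array type has negative size and the build
 * fails; if cond is false, the expression compiles away to nothing. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static void demo(void)
{
	BUILD_BUG_ON(4096 < 1024 + 1024);	/* false: compiles */
	/* BUILD_BUG_ON(1024 < 1024 + 1024); would break the build */
}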
drivers/net/sfc/efx.h (+6 −0)
@@ -25,16 +25,22 @@ extern netdev_tx_t efx_xmit(struct efx_nic *efx,
 				  struct sk_buff *skb);
 extern void efx_stop_queue(struct efx_nic *efx);
 extern void efx_wake_queue(struct efx_nic *efx);
+#define EFX_TXQ_SIZE 1024
+#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
 /* RX */
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+#define EFX_RXQ_SIZE 1024
+#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern void efx_flush_queues(struct efx_nic *efx);
+#define EFX_EVQ_SIZE 4096
+#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
 
 /* Ports */
 extern void efx_stats_disable(struct efx_nic *efx);
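
Note: placing the SIZE/MASK pairs next to the queue declarations makes the intended usage visible in one place: counters such as write_count and added_count run freely and are reduced to a ring index only where a descriptor is actually touched. A short sketch of that idiom, with hypothetical demo_* names that are not part of the header:

#define DEMO_RXQ_SIZE 1024		/* power of two, as in efx.h */
#define DEMO_RXQ_MASK (DEMO_RXQ_SIZE - 1)

struct demo_rx_queue {
	unsigned added_count;		/* free-running, never wrapped */
	unsigned removed_count;
};

/* Unsigned subtraction is well defined modulo 2^32, so the fill level
 * stays correct even after the counters wrap. */
static unsigned demo_fill_level(const struct demo_rx_queue *q)
{
	return q->added_count - q->removed_count;
}

static unsigned demo_index(unsigned count)
{
	return count & DEMO_RXQ_MASK;	/* == count % DEMO_RXQ_SIZE */
}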
drivers/net/sfc/falcon.c (+26 −44)
@@ -108,21 +108,6 @@ static int rx_xon_thresh_bytes = -1;
 module_param(rx_xon_thresh_bytes, int, 0644);
 MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 
-/* TX descriptor ring size - min 512 max 4k */
-#define FALCON_TXD_RING_ORDER FFE_AZ_TX_DESCQ_SIZE_1K
-#define FALCON_TXD_RING_SIZE 1024
-#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
-
-/* RX descriptor ring size - min 512 max 4k */
-#define FALCON_RXD_RING_ORDER FFE_AZ_RX_DESCQ_SIZE_1K
-#define FALCON_RXD_RING_SIZE 1024
-#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
-
-/* Event queue size - max 32k */
-#define FALCON_EVQ_ORDER FFE_AZ_EVQ_SIZE_4K
-#define FALCON_EVQ_SIZE 4096
-#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
-
 /* If FALCON_MAX_INT_ERRORS internal errors occur within
  * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -420,7 +405,7 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
 	unsigned write_ptr;
 	efx_dword_t reg;
 
-	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
 	efx_writed_page(tx_queue->efx, &reg,
 			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
@@ -441,7 +426,7 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
 	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
 
 	do {
-		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[write_ptr];
 		txd = falcon_tx_desc(tx_queue, write_ptr);
 		++tx_queue->write_count;
@@ -462,9 +447,10 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
 int falcon_probe_tx(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
+	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
+		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
 	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
-					   FALCON_TXD_RING_SIZE *
-					   sizeof(efx_qword_t));
+					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_tx(struct efx_tx_queue *tx_queue)
@@ -487,7 +473,8 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 			      tx_queue->channel->channel,
 			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
 			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
-			      FRF_AZ_TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
+			      FRF_AZ_TX_DESCQ_SIZE,
+			      __ffs(tx_queue->txd.entries),
 			      FRF_AZ_TX_DESCQ_TYPE, 0,
 			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 
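Note: the hardware size fields (FRF_AZ_TX_DESCQ_SIZE and friends) take a log2-style encoding rather than a raw entry count, which is what the removed FALCON_*_ORDER constants hard-coded. Per the commit message, that order is now calculated: for a power of two, __ffs() (find first set bit) is exactly log2, so the encoding can be derived from the special buffer's entries field. A standalone illustration of the identity, using __builtin_ctz in place of the kernel's __ffs and assuming, per the commit message, that .entries is a power-of-two count of 4 KB buffer-table pages:

#include <stdio.h>

/* For a power of two n, the index of its lowest (only) set bit equals
 * log2(n); __builtin_ctz plays the role of the kernel's __ffs here. */
static unsigned order_of(unsigned n)
{
	return (unsigned)__builtin_ctz(n);
}

int main(void)
{
	/* A 1024-entry TX ring of 8-byte descriptors occupies two
	 * 4096-byte pages, so its order is 1; a 4096-entry event queue
	 * spans eight pages, order 3. */
	printf("%u %u\n", order_of(1024 * 8 / 4096),	/* prints 1 */
	       order_of(4096 * 8 / 4096));		/* prints 3 */
	return 0;
}
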
@@ -592,12 +579,12 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
 	while (rx_queue->notified_count != rx_queue->added_count) {
 		falcon_build_rx_desc(rx_queue,
 				     rx_queue->notified_count &
-				     FALCON_RXD_RING_MASK);
+				     EFX_RXQ_MASK);
 		++rx_queue->notified_count;
 	}
 
 	wmb();
-	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
+	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
 	efx_writed_page(rx_queue->efx, &reg,
 			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
@@ -606,9 +593,10 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
 int falcon_probe_rx(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
+	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
+		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
 	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
-					   FALCON_RXD_RING_SIZE *
-					   sizeof(efx_qword_t));
+					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_rx(struct efx_rx_queue *rx_queue)
@@ -636,7 +624,8 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 			      rx_queue->channel->channel,
 			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
 			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
-			      FRF_AZ_RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
+			      FRF_AZ_RX_DESCQ_SIZE,
+			      __ffs(rx_queue->rxd.entries),
 			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
 			      /* For >=B0 this is scatter so disable */
 			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
@@ -741,7 +730,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 		channel->irq_mod_score +=
 			(tx_ev_desc_ptr - tx_queue->read_count) &
-			efx->type->txd_ring_mask;
+			EFX_TXQ_MASK;
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
@@ -848,9 +837,8 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
-	expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
-		   FALCON_RXD_RING_MASK);
+	expected = rx_queue->removed_count & EFX_RXQ_MASK;
+	dropped = (index - expected) & EFX_RXQ_MASK;
 	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
 		dropped, index, expected);
 
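Note: the dropped-count computation loses its explicit "+ FALCON_RXD_RING_SIZE" term because it is redundant: unsigned arithmetic is already modulo 2^32, and adding a multiple of the ring size changes nothing under a power-of-two mask, since SIZE & MASK == 0. A small self-contained check of the equivalence, with hypothetical SIZE/MASK values:

#include <assert.h>

#define SIZE 1024u
#define MASK (SIZE - 1u)

int main(void)
{
	unsigned index, expected;

	for (index = 0; index < SIZE; index++)
		for (expected = 0; expected < SIZE; expected++)
			/* The old form added SIZE to keep the difference
			 * "positive"; under mod-2^32 arithmetic and a
			 * power-of-two mask both forms agree. */
			assert(((index + SIZE - expected) & MASK) ==
			       ((index - expected) & MASK));
	return 0;
}
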
@@ -887,7 +875,7 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
 	rx_queue = &efx->rx_queue[channel->channel];
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
+	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
 	if (unlikely(rx_ev_desc_ptr != expected_ptr))
 		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
 
@@ -1075,7 +1063,7 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 		}
 
 		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 
 	} while (rx_packets < rx_quota);
 
@@ -1120,10 +1108,10 @@ void falcon_set_int_moderation(struct efx_channel *channel)
 int falcon_probe_eventq(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int evq_size;
-
-	evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
-	return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
+	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
+		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
+	return falcon_alloc_special_buffer(efx, &channel->eventq,
+					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_eventq(struct efx_channel *channel)
@@ -1144,7 +1132,7 @@ void falcon_init_eventq(struct efx_channel *channel)
 	/* Push event queue to card */
 	EFX_POPULATE_OWORD_3(evq_ptr,
 			     FRF_AZ_EVQ_EN, 1,
-			     FRF_AZ_EVQ_SIZE, FALCON_EVQ_ORDER,
+			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
 			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
 	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
 			 channel->channel);
@@ -1214,7 +1202,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;
+	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
 
 	do {
 		efx_qword_t *event = falcon_event(channel, read_ptr);
@@ -1252,7 +1240,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 			}
 		}
 
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 	} while (read_ptr != end_ptr);
 }
 
@@ -3160,9 +3148,6 @@ struct efx_nic_type falcon_a_nic_type = {
 	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
 	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
 	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
 	.max_dma_mask = FALCON_DMA_MASK,
 	.tx_dma_mask = FALCON_TX_DMA_MASK,
 	.bug5391_mask = 0xf,
@@ -3184,9 +3169,6 @@ struct efx_nic_type falcon_b_nic_type = {
 	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
 	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
 	.max_dma_mask = FALCON_DMA_MASK,
 	.tx_dma_mask = FALCON_TX_DMA_MASK,
 	.bug5391_mask = 0,
drivers/net/sfc/net_driver.h (+0 −6)
@@ -869,9 +869,6 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
  * @buf_tbl_base: Buffer table base address
  * @evq_ptr_tbl_base: Event queue pointer table base address
  * @evq_rptr_tbl_base: Event queue read-pointer table base address
- * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
- * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
- * @evq_size: Event queue size (must be a power of two)
  * @max_dma_mask: Maximum possible DMA mask
  * @tx_dma_mask: TX DMA mask
  * @bug5391_mask: Address mask for bug 5391 workaround
@@ -890,9 +887,6 @@ struct efx_nic_type {
 	unsigned int evq_ptr_tbl_base;
 	unsigned int evq_rptr_tbl_base;
 
-	unsigned int txd_ring_mask;
-	unsigned int rxd_ring_mask;
-	unsigned int evq_size;
 	u64 max_dma_mask;
 	unsigned int tx_dma_mask;
 	unsigned bug5391_mask;
drivers/net/sfc/rx.c (+6 −10)
@@ -293,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 	 * fill anyway.
 	 */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 
 	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  retry:
 	/* Recalculate current fill level now that we have the lock */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
 		goto out_unlock;
@@ -329,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
 	do {
 		for (i = 0; i < EFX_RX_BATCH; ++i) {
-			index = (rx_queue->added_count &
-				 rx_queue->efx->type->rxd_ring_mask);
+			index = rx_queue->added_count & EFX_RXQ_MASK;
 			rx_buf = efx_rx_buffer(rx_queue, index);
 			rc = efx_init_rx_buffer(rx_queue, rx_buf);
 			if (unlikely(rc))
@@ -629,7 +626,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
 
 	/* Allocate RX buffers */
-	rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
 	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
@@ -644,7 +641,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
-	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
 
 	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -657,7 +653,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->min_overfill = -1U;
 
 	/* Initialise limit fields */
-	max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -680,7 +676,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	if (rx_queue->buffer) {
-		for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+		for (i = 0; i <= EFX_RXQ_MASK; i++) {
 			rx_buf = efx_rx_buffer(rx_queue, i);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
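
Note: the rx.c changes are mechanical, but they lean on two invariants worth spelling out: the fill level added_count - removed_count can never exceed EFX_RXQ_SIZE (asserted with EFX_BUG_ON_PARANOID), and the refill limits derive from a max_fill that reserves EFX_RXD_HEAD_ROOM descriptors so the write pointer never catches the read pointer. A sketch of that limit arithmetic, with hypothetical values for the head room and the two module parameters:

#include <stdio.h>

#define EFX_RXQ_SIZE 1024u
#define EFX_RXD_HEAD_ROOM 2u		/* hypothetical head-room value */

int main(void)
{
	unsigned rx_refill_threshold = 90;	/* hypothetical %, module param */
	unsigned rx_refill_limit = 95;		/* hypothetical %, module param */

	unsigned max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
	unsigned trigger = max_fill *
		(rx_refill_threshold > 100 ? 100 : rx_refill_threshold) / 100;
	unsigned limit = max_fill *
		(rx_refill_limit > 100 ? 100 : rx_refill_limit) / 100;

	/* Refilling starts when the fill level drops below `trigger` and
	 * tops up toward `limit`, both strictly below max_fill. */
	printf("max_fill=%u trigger=%u limit=%u\n", max_fill, trigger, limit);
	return 0;
}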