
Commit 0fe5565b authored by Ben Hutchings

sfc: Introduce inline functions to simplify TX insertion

parent 306a2782
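
The helpers added in the diff below package a standard power-of-two ring-buffer idiom: tx_queue->insert_count grows monotonically and is never wrapped, and the physical slot is recovered by masking with tx_queue->ptr_mask (the ring size minus one). The following standalone sketch illustrates that indexing scheme; the RING_SIZE and RING_MASK names and the demo itself are hypothetical, not the driver's code. For a power-of-two size, the mask is equivalent to a modulo:

	#include <assert.h>
	#include <stdio.h>

	#define RING_SIZE 8u                 /* must be a power of two */
	#define RING_MASK (RING_SIZE - 1u)   /* plays the role of ptr_mask */

	int main(void)
	{
		int ring[RING_SIZE];
		unsigned int insert_count = 0;  /* monotonic, like tx_queue->insert_count */

		/* Write 20 entries: the counter exceeds RING_SIZE, but masking
		 * folds it onto a valid slot, overwriting the oldest entries.
		 */
		for (int v = 0; v < 20; v++)
			ring[insert_count++ & RING_MASK] = v;

		/* For powers of two, (x & (size - 1)) == (x % size). */
		assert((19u & RING_MASK) == 19u % RING_SIZE);

		printf("last value landed in slot %u: %d\n",
		       (insert_count - 1) & RING_MASK,
		       ring[(insert_count - 1) & RING_MASK]);
		return 0;
	}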
+33 −20
@@ -33,6 +33,31 @@ unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
 
 #endif /* EFX_USE_PIO */
 
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer =
+		__efx_tx_queue_get_insert_buffer(tx_queue);
+
+	EFX_BUG_ON_PARANOID(buffer->len);
+	EFX_BUG_ON_PARANOID(buffer->flags);
+	EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+	return buffer;
+}
+
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer,
 			       unsigned int *pkts_compl,
@@ -180,7 +205,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
-	unsigned int len, unmap_len = 0, insert_ptr;
+	unsigned int len, unmap_len = 0;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	unsigned short dma_flags;
@@ -221,11 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 		/* Add to TX queue, splitting across DMA boundaries */
 		do {
-			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-			buffer = &tx_queue->buffer[insert_ptr];
-			EFX_BUG_ON_PARANOID(buffer->flags);
-			EFX_BUG_ON_PARANOID(buffer->len);
-			EFX_BUG_ON_PARANOID(buffer->unmap_len);
+			buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 
 			dma_len = efx_max_tx_len(efx, dma_addr);
 			if (likely(dma_len >= len))
@@ -283,8 +304,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
 	}
 
@@ -755,23 +775,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, insert_ptr;
+	unsigned dma_len;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
 	while (1) {
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >=
 				    efx->txq_entries);
 
-		EFX_BUG_ON_PARANOID(buffer->len);
-		EFX_BUG_ON_PARANOID(buffer->unmap_len);
-		EFX_BUG_ON_PARANOID(buffer->flags);
-
 		buffer->dma_addr = dma_addr;
 
 		dma_len = efx_max_tx_len(efx, dma_addr);
@@ -832,8 +847,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->ptr_mask];
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
 	}
 }
@@ -978,7 +992,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 				struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer =
-		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
+		efx_tx_queue_get_insert_buffer(tx_queue);
 	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
 	u8 tcp_flags_clear;
 
@@ -1048,8 +1062,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 		/* We mapped the headers in tso_start().  Unmap them
 		 * when the last segment is completed.
 		 */
-		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->ptr_mask];
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 		buffer->dma_addr = st->header_dma_addr;
 		buffer->len = st->header_len;
 		if (is_last) {
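
A note on the split between the two variants: __efx_tx_queue_get_insert_buffer() returns the slot with no sanity checks and is used on the unwind paths (the error path in efx_enqueue_skb() and efx_enqueue_unwind()), where the buffer being stepped back onto is still in use. efx_tx_queue_get_insert_buffer() adds the EFX_BUG_ON_PARANOID assertions that the slot is free, which is what every forward-insertion site wants. Centralising the checks also removes the inconsistency visible above, where efx_enqueue_skb() and efx_tx_queue_insert() asserted the same three fields in different orders.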