Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2821fc8 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'ena-next'



Sameeh Jubran says:

====================
Extending the ena driver to support new features and enhance performance

This patchset introduces the following:

* add support for changing the inline header size (max_header_size) for applications
  with overlay and nested headers
* enable automatic fallback to polling mode for admin queue when interrupt is not
  available or missed
* add good checksum counter for Rx ethtool statistics
* update ena.txt
* some minor code clean-up
* some performance enhancements with doorbell calculations

Differences from V1:

* net: ena: add handling of llq max tx burst size (1/11):
 * fixed christmas tree issue

* net: ena: ethtool: add extra properties retrieval via get_priv_flags (2/11):
 * replaced snprintf with strlcpy
 * dropped confusing error message
 * added more details to the commit message
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents cfd10888 1e9c3fba
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -73,7 +73,7 @@ operation.
AQ is used for submitting management commands, and the
results/responses are reported asynchronously through ACQ.

ENA introduces a very small set of management commands with room for
ENA introduces a small set of management commands with room for
vendor-specific extensions. Most of the management operations are
framed in a generic Get/Set feature command.

@@ -202,11 +202,14 @@ delay value to each level.
The user can enable/disable adaptive moderation, modify the interrupt
delay table and restore its default values through sysfs.

RX copybreak:
=============
The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
and can be configured by the ETHTOOL_STUNABLE command of the
SIOCETHTOOL ioctl.

SKB:
====
The driver-allocated SKB for frames received from Rx handling using
NAPI context. The allocation method depends on the size of the packet.
If the frame length is larger than rx_copybreak, napi_get_frags()
+21 −0
Original line number Diff line number Diff line
@@ -32,6 +32,8 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_

#define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32
#define ENA_ADMIN_EXTRA_PROPERTIES_COUNT     32

enum ena_admin_aq_opcode {
	ENA_ADMIN_CREATE_SQ                         = 1,
@@ -60,6 +62,8 @@ enum ena_admin_aq_feature_id {
	ENA_ADMIN_MAX_QUEUES_NUM                    = 2,
	ENA_ADMIN_HW_HINTS                          = 3,
	ENA_ADMIN_LLQ                               = 4,
	ENA_ADMIN_EXTRA_PROPERTIES_STRINGS          = 5,
	ENA_ADMIN_EXTRA_PROPERTIES_FLAGS            = 6,
	ENA_ADMIN_RSS_HASH_FUNCTION                 = 10,
	ENA_ADMIN_STATELESS_OFFLOAD_CONFIG          = 11,
	ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG      = 12,
@@ -524,6 +528,11 @@ struct ena_admin_feature_llq_desc {

	/* the stride control the driver selected to use */
	u16 descriptors_stride_ctrl_enabled;

	/* Maximum size in bytes taken by llq entries in a single tx burst.
	 * Set to 0 when there is no such limit.
	 */
	u32 max_tx_burst_size;
};

struct ena_admin_queue_feature_desc {
@@ -555,6 +564,14 @@ struct ena_admin_set_feature_mtu_desc {
	u32 mtu;
};

/* Response payload for the ENA_ADMIN_EXTRA_PROPERTIES_STRINGS get-feature
 * command. The strings themselves are written by the device into a
 * host-supplied DMA buffer; this descriptor only reports how many were
 * returned.
 */
struct ena_admin_get_extra_properties_strings_desc {
	/* number of extra property strings provided by the device */
	u32 count;
};

/* Response payload for the ENA_ADMIN_EXTRA_PROPERTIES_FLAGS get-feature
 * command.
 */
struct ena_admin_get_extra_properties_flags_desc {
	/* bitmask of extra property flags reported by the device */
	u32 flags;
};

struct ena_admin_set_feature_host_attr_desc {
	/* host OS info base address in OS memory. host info is 4KB of
	 * physically contiguous
@@ -859,6 +876,10 @@ struct ena_admin_get_feat_resp {
		struct ena_admin_feature_intr_moder_desc intr_moderation;

		struct ena_admin_ena_hw_hints hw_hints;

		struct ena_admin_get_extra_properties_strings_desc extra_properties_strings;

		struct ena_admin_get_extra_properties_flags_desc extra_properties_flags;
	} u;
};

+100 −23
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
					 GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed");
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

@@ -137,7 +137,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
					 GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed");
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

@@ -160,7 +160,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
					   GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed");
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

@@ -285,7 +285,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed");
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

@@ -356,7 +356,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("memory allocation failed");
			pr_err("memory allocation failed\n");
			return -ENOMEM;
		}
	}
@@ -382,7 +382,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("bounce buffer memory allocation failed");
			pr_err("bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

@@ -396,6 +396,10 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
@@ -436,7 +440,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed");
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

@@ -727,6 +731,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
		       supported_feat, llq_info->descs_num_before_header);
	}

	llq_info->max_entries_in_tx_burst =
		(u16)(llq_features->max_tx_burst_size /	llq_default_cfg->llq_ring_entry_size_value);

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);
@@ -755,17 +762,27 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
			       comp_ctx->cmd_opcode,
			       admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continuing normal execution in such case
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
@@ -822,7 +839,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided");
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
@@ -1643,6 +1660,12 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
	ena_dev->admin_queue.polling = polling;
}

/* Enable or disable automatic fallback to polling mode for the admin
 * queue when a completion arrives without a matching interrupt.
 */
void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;

	admin_queue->auto_polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1870,6 +1893,62 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

/* Allocate a DMA-coherent buffer and fill it with the device's extra
 * properties strings via the ENA_ADMIN_EXTRA_PROPERTIES_STRINGS
 * get-feature command.
 *
 * Returns the number of strings reported by the device, or 0 on any
 * failure (buffer allocation or admin command); on a command failure
 * the buffer is released before returning.
 */
int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp resp;
	struct ena_extra_properties_strings *extra_properties_strings =
			&ena_dev->extra_properties_strings;
	/* error codes from the admin path are negative ints; do not
	 * store them in an unsigned type
	 */
	int rc;

	extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
		ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;

	extra_properties_strings->virt_addr =
		dma_alloc_coherent(ena_dev->dmadev,
				   extra_properties_strings->size,
				   &extra_properties_strings->dma_addr,
				   GFP_KERNEL);
	if (unlikely(!extra_properties_strings->virt_addr)) {
		pr_err("Failed to allocate extra properties strings\n");
		return 0;
	}

	rc = ena_com_get_feature_ex(ena_dev, &resp,
				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
				    extra_properties_strings->dma_addr,
				    extra_properties_strings->size);
	if (rc) {
		pr_debug("Failed to get extra properties strings\n");
		goto err;
	}

	return resp.u.extra_properties_strings.count;
err:
	ena_com_delete_extra_properties_strings(ena_dev);
	return 0;
}

/* Release the extra properties strings buffer, if one was allocated.
 * Safe to call multiple times: the virtual address is cleared after the
 * free so a second call is a no-op.
 */
void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
{
	struct ena_extra_properties_strings *strings =
				&ena_dev->extra_properties_strings;

	if (!strings->virt_addr)
		return;

	dma_free_coherent(ena_dev->dmadev, strings->size,
			  strings->virt_addr, strings->dma_addr);
	strings->virt_addr = NULL;
}

/* Query the device for its extra properties flags
 * (ENA_ADMIN_EXTRA_PROPERTIES_FLAGS get-feature command); the result is
 * placed in @resp. Returns 0 on success, negative value otherwise.
 */
int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
				       struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp,
				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -2913,8 +2992,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;
	int size;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -2925,12 +3004,10 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
	if (rc)
		return rc;

	/* Validate the descriptor is not too big */
	size = ena_dev->tx_max_header_size;
	size += ena_dev->llq_info.descs_num_before_header *
		sizeof(struct ena_eth_io_tx_desc);
	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		pr_err("the size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}
+48 −0
Original line number Diff line number Diff line
@@ -159,6 +159,7 @@ struct ena_com_llq_info {
	u16 desc_list_entry_size;
	u16 descs_num_before_header;
	u16 descs_per_entry;
	u16 max_entries_in_tx_burst;
};

struct ena_com_io_cq {
@@ -238,6 +239,7 @@ struct ena_com_io_sq {
	u8 phase;
	u8 desc_entry_size;
	u8 dma_addr_bits;
	u16 entries_in_tx_burst_left;
} ____cacheline_aligned;

struct ena_com_admin_cq {
@@ -281,6 +283,9 @@ struct ena_com_admin_queue {
	/* Indicate if the admin queue should poll for completion */
	bool polling;

	/* Define if fallback to polling mode should occur */
	bool auto_polling;

	u16 curr_cmd_id;

	/* Indicate that the ena was initialized and can
@@ -345,6 +350,12 @@ struct ena_host_attribute {
	dma_addr_t host_info_dma_addr;
};

/* DMA-coherent buffer holding the extra properties strings fetched from
 * the device (see ena_com_extra_properties_strings_init).
 */
struct ena_extra_properties_strings {
	u8 *virt_addr;		/* CPU virtual address of the buffer */
	dma_addr_t dma_addr;	/* DMA (device-visible) address of the buffer */
	u32 size;		/* buffer size in bytes */
};

/* Each ena_dev is a PCI function. */
struct ena_com_dev {
	struct ena_com_admin_queue admin_queue;
@@ -373,6 +384,7 @@ struct ena_com_dev {
	struct ena_intr_moder_entry *intr_moder_tbl;

	struct ena_com_llq_info llq_info;
	struct ena_extra_properties_strings extra_properties_strings;
};

struct ena_com_dev_get_features_ctx {
@@ -536,6 +548,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
 */
bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);

/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
 * @ena_dev: ENA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Set the autopolling mode.
 * If autopolling is on:
 * If an interrupt is missed while a completion is already available,
 * fall back to polling mode.
 */
void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling);

/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @ena_dev: ENA communication layer struct
 *
@@ -594,6 +617,31 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev);
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp);

/* ena_com_extra_properties_strings_init - Initialize the extra properties strings buffer.
 * @ena_dev: ENA communication layer struct
 *
 * Initialize the extra properties strings buffer.
 */
int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev);

/* ena_com_delete_extra_properties_strings - Free the extra properties strings buffer.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated extra properties strings buffer.
 */
void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev);

/* ena_com_get_extra_properties_flags - Retrieve extra properties flags.
 * @ena_dev: ENA communication layer struct
 * @resp: Extra properties flags.
 *
 * Retrieve the extra properties flags.
 *
 * @return - 0 on success, negative value otherwise.
 */
int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
				       struct ena_admin_get_feat_resp *resp);

/* ena_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @ena_dev: ENA communication layer struct
+11 −17
Original line number Diff line number Diff line
@@ -82,6 +82,17 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			pr_err("Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			 io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
@@ -274,23 +285,6 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
	return count;
}

/* Return true when the TX context carries valid metadata that differs
 * from the metadata currently cached on the submission queue.
 */
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return memcmp(&io_sq->cached_tx_meta,
		      &ena_tx_ctx->ena_meta,
		      sizeof(struct ena_com_tx_meta)) != 0;
}

static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							struct ena_com_tx_ctx *ena_tx_ctx)
{
Loading