Commit 31aa9857 authored by Sameeh Jubran, committed by David S. Miller

net: ena: enable negotiating larger Rx ring size



Use MAX_QUEUES_EXT get feature capability to query the device.

Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba8ef506
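
Editor's note: the heart of the change is a capability negotiation. When the device advertises the MAX_QUEUES_EXT get-feature capability, the driver reads separate per-direction queue limits from the extended descriptor; only on older devices does it fall back to the legacy max_queues fields, which tie Rx and Tx to the same limit. The standalone C sketch below illustrates that pattern; the struct names, the MAX_QUEUES_EXT_BIT position, and the helper calc_max_ring_depths() are simplified hypothetical stand-ins modeled on the diff that follows, not the real ena_com/ena_admin definitions.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)              (1u << (n))
#define MIN(a, b)           ((a) < (b) ? (a) : (b))
#define MAX_QUEUES_EXT_BIT  1   /* hypothetical bit position for this sketch */

/* Simplified stand-ins for the two admin descriptors used in the diff. */
struct queue_ext_fields {           /* MAX_QUEUES_EXT: per-direction limits */
	uint32_t max_rx_sq_depth, max_rx_cq_depth;
	uint32_t max_tx_sq_depth, max_tx_cq_depth;
};

struct queues_legacy {              /* legacy: one limit for both directions */
	uint32_t max_sq_depth, max_cq_depth;
};

/* Pick the max ring depth per direction, preferring the extended feature. */
static void calc_max_ring_depths(uint32_t supported_features,
				 const struct queue_ext_fields *ext,
				 const struct queues_legacy *legacy,
				 uint32_t *max_tx, uint32_t *max_rx)
{
	if (supported_features & BIT(MAX_QUEUES_EXT_BIT)) {
		/* Rx is negotiated on its own, so it may exceed the Tx limit. */
		*max_rx = MIN(ext->max_rx_sq_depth, ext->max_rx_cq_depth);
		*max_tx = MIN(ext->max_tx_sq_depth, ext->max_tx_cq_depth);
	} else {
		/* Legacy devices: shared SQ/CQ limits cap both directions. */
		*max_rx = MIN(legacy->max_sq_depth, legacy->max_cq_depth);
		*max_tx = *max_rx;
	}
}

int main(void)
{
	struct queue_ext_fields ext = { 8192, 8192, 1024, 1024 };
	struct queues_legacy legacy = { 1024, 1024 };
	uint32_t tx, rx;

	calc_max_ring_depths(BIT(MAX_QUEUES_EXT_BIT), &ext, &legacy, &tx, &rx);
	printf("tx=%u rx=%u\n", (unsigned)tx, (unsigned)rx);
	return 0;
}

With the sample numbers above this prints "tx=1024 rx=8192": the Rx ring can grow past the Tx limit, which the legacy shared-limit path could not express.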
drivers/net/ethernet/amazon/ena/ena_netdev.c (+95 −49)
@@ -2465,13 +2465,6 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
 		return -EINVAL;
 	}
 
-	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
-	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
-		netif_err(adapter, drv, netdev,
-			  "Error, device doesn't support enough queues\n");
-		return -EINVAL;
-	}
-
 	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
 		netif_err(adapter, drv, netdev,
 			  "Error, device max mtu is smaller than netdev MTU\n");
@@ -3045,18 +3038,32 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
 				 struct ena_com_dev *ena_dev,
 				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
-	int io_sq_num, io_queue_num;
+	int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
 
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&get_feat_ctx->max_queue_ext.max_queue_ext;
+		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+				  max_queue_ext->max_rx_cq_num);
+
+		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
+		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&get_feat_ctx->max_queues;
+		io_tx_sq_num = max_queues->max_sq_num;
+		io_tx_cq_num = max_queues->max_cq_num;
+		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+	}
+
-	/* In case of LLQ use the llq number in the get feature cmd */
+	/* In case of LLQ use the llq fields for the tx SQ/CQ */
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-		io_sq_num = get_feat_ctx->llq.max_llq_num;
-	else
-		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
 
 	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
-	io_queue_num = min_t(int, io_queue_num, io_sq_num);
-	io_queue_num = min_t(int, io_queue_num,
-			     get_feat_ctx->max_queues.max_cq_num);
+	io_queue_num = min_t(int, io_queue_num, io_rx_num);
+	io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
+	io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
 	/* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */
 	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
 	if (unlikely(!io_queue_num)) {
@@ -3239,36 +3246,73 @@ static inline void set_default_llq_configurations(struct ena_llq_configurations
 	llq_config->llq_ring_entry_size_value = 128;
 }
 
-static int ena_calc_queue_size(struct pci_dev *pdev,
-			       struct ena_com_dev *ena_dev,
-			       u16 *max_tx_sgl_size,
-			       u16 *max_rx_sgl_size,
-			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
-	u32 queue_size = ENA_DEFAULT_RING_SIZE;
+	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
+	struct ena_com_dev *ena_dev = ctx->ena_dev;
+	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+	u32 max_tx_queue_size;
+	u32 max_rx_queue_size;
 
-	queue_size = min_t(u32, queue_size,
-			   get_feat_ctx->max_queues.max_cq_depth);
-	queue_size = min_t(u32, queue_size,
-			   get_feat_ctx->max_queues.max_sq_depth);
-
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-		queue_size = min_t(u32, queue_size,
-				   get_feat_ctx->llq.max_llq_depth);
-
-	queue_size = rounddown_pow_of_two(queue_size);
-
-	if (unlikely(!queue_size)) {
-		dev_err(&pdev->dev, "Invalid queue size\n");
+	if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+					  max_queue_ext->max_rx_sq_depth);
+		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
+
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  llq->max_llq_depth);
+		else
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  max_queue_ext->max_tx_sq_depth);
+
+		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queue_ext->max_per_packet_tx_descs);
+		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queue_ext->max_per_packet_rx_descs);
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&ctx->get_feat_ctx->max_queues;
+		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+					  max_queues->max_sq_depth);
+		max_tx_queue_size = max_queues->max_cq_depth;
+
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  llq->max_llq_depth);
+		else
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  max_queues->max_sq_depth);
+
+		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queues->max_packet_tx_descs);
+		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queues->max_packet_rx_descs);
+	}
+
+	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+	tx_queue_size = min_t(u32, tx_queue_size, max_tx_queue_size);
+	rx_queue_size = min_t(u32, rx_queue_size, max_rx_queue_size);
+
+	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+	rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+	if (unlikely(!rx_queue_size || !tx_queue_size)) {
+		dev_err(&ctx->pdev->dev, "Invalid queue size\n");
 		return -EFAULT;
 	}
 
-	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-				 get_feat_ctx->max_queues.max_packet_tx_descs);
-	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-				 get_feat_ctx->max_queues.max_packet_rx_descs);
+	ctx->max_tx_queue_size = max_tx_queue_size;
+	ctx->max_rx_queue_size = max_rx_queue_size;
+	ctx->tx_queue_size = tx_queue_size;
+	ctx->rx_queue_size = rx_queue_size;
 
-	return queue_size;
+	return 0;
 }
 
 /* ena_probe - Device Initialization Routine
@@ -3284,6 +3328,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
+	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
 	struct ena_llq_configurations llq_config;
 	struct ena_com_dev *ena_dev = NULL;
 	struct ena_adapter *adapter;
@@ -3291,9 +3336,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct net_device *netdev;
 	static int adapters_found;
 	char *queue_type_str;
-	u16 tx_sgl_size = 0;
-	u16 rx_sgl_size = 0;
-	int queue_size;
 	bool wd_state;
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -3350,20 +3392,25 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_device_destroy;
 	}
 
+	calc_queue_ctx.ena_dev = ena_dev;
+	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
+	calc_queue_ctx.pdev = pdev;
+
 	/* initial Tx interrupt delay, Assumes 1 usec granularity.
 	* Updated during device initialization with the real granularity
 	*/
 	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
 	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
-	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
-					 &rx_sgl_size, &get_feat_ctx);
-	if ((queue_size <= 0) || (io_queue_num <= 0)) {
+	rc = ena_calc_queue_size(&calc_queue_ctx);
+	if (rc || io_queue_num <= 0) {
 		rc = -EFAULT;
 		goto err_device_destroy;
 	}
 
-	dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
-		 io_queue_num, queue_size,
+	dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
+		 io_queue_num,
+		 calc_queue_ctx.rx_queue_size,
+		 calc_queue_ctx.tx_queue_size,
 		 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
 		 "ENABLED" : "DISABLED");
 
@@ -3389,11 +3436,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
-	adapter->tx_ring_size = queue_size;
-	adapter->rx_ring_size = queue_size;
-
-	adapter->max_tx_sgl_size = tx_sgl_size;
-	adapter->max_rx_sgl_size = rx_sgl_size;
+	adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
+	adapter->rx_ring_size = calc_queue_ctx.rx_queue_size;
+	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
+	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
 
 	adapter->num_queues = io_queue_num;
 	adapter->last_monitored_tx_qid = 0;
drivers/net/ethernet/amazon/ena/ena_netdev.h (+15 −0)
@@ -154,6 +154,18 @@ struct ena_napi {
 	u32 qid;
 };
 
+struct ena_calc_queue_size_ctx {
+	struct ena_com_dev_get_features_ctx *get_feat_ctx;
+	struct ena_com_dev *ena_dev;
+	struct pci_dev *pdev;
+	u16 tx_queue_size;
+	u16 rx_queue_size;
+	u16 max_tx_queue_size;
+	u16 max_rx_queue_size;
+	u16 max_tx_sgl_size;
+	u16 max_rx_sgl_size;
+};
+
 struct ena_tx_buffer {
 	struct sk_buff *skb;
 	/* num of ena desc for this specific skb
@@ -322,6 +334,9 @@ struct ena_adapter {
 	u32 tx_ring_size;
 	u32 rx_ring_size;
 
+	u32 max_tx_ring_size;
+	u32 max_rx_ring_size;
+
 	u32 msg_enable;
 
 	u16 max_tx_sgl_size;