Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 96ae48b7 authored by Raghu Vatsavayi, committed by David S. Miller
Browse files

liquidio: RX queue alloc changes



This patch allocates the RX queues' memory based on the NUMA node, and also uses
page-based buffers for RX traffic improvements.

Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fcd2b5e3
Loading
Loading
Loading
Loading
+12 −15
Original line number Diff line number Diff line
@@ -783,14 +783,15 @@ int octeon_setup_instr_queues(struct octeon_device *oct)

/*
 * NOTE(review): this span is a web rendering of a unified diff whose +/-
 * markers were stripped, so removed (old) and added (new) lines are
 * interleaved (e.g. the two adjacent num_oqs declarations below, and the
 * embedded "@@" hunk header). It is NOT compilable C as shown; consult the
 * real patch for the authoritative before/after text.
 *
 * Intent of the change (grounded in the added lines visible here): instead
 * of a loop that vmalloc()s each droq, queue 0's control structure is now
 * allocated on the NUMA node of the CPU servicing it via vmalloc_node(),
 * falling back to plain vmalloc() when the node-local allocation fails.
 */
int octeon_setup_output_queues(struct octeon_device *oct)
{
	u32 i, num_oqs = 0;
	u32 num_oqs = 0;
	u32 num_descs = 0;
	u32 desc_size = 0;
	u32 oq_no = 0;
	/* NUMA node of the CPU that queue 0 maps to (oq_no is 0 here). */
	int numa_node = cpu_to_node(oq_no % num_online_cpus());

	num_oqs = 1;
	/* this causes queue 0 to be default queue */
	if (OCTEON_CN6XXX(oct)) {
		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
		num_oqs = 1;
		num_descs =
			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
		desc_size =
/* (diff hunk boundary — lines between the hunks are not shown) */
@@ -798,19 +799,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
	}

	oct->num_oqs = 0;

	for (i = 0; i < num_oqs; i++) {
		oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
		if (!oct->droq[i])
	/* New path: try node-local vmalloc first, then any-node fallback. */
	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
	if (!oct->droq[0])
		oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
	if (!oct->droq[0])
		return 1;

		memset(oct->droq[i], 0, sizeof(struct octeon_droq));

		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
		return 1;

	oct->num_oqs++;
	}

	return 0;
}
+26 −9
Original line number Diff line number Diff line
@@ -242,6 +242,8 @@ int octeon_init_droq(struct octeon_device *oct,
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

@@ -261,13 +263,21 @@ int octeon_init_droq(struct octeon_device *oct,
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	set_dev_node(&oct->pci_dev->dev, numa_node);
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!droq->desc_ring)
		droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

@@ -283,12 +293,11 @@ int octeon_init_droq(struct octeon_device *oct,
		droq->max_count);

	droq->info_list =
		cnnic_alloc_aligned_dma(oct->pci_dev,
					(droq->max_count * OCT_DROQ_INFO_SIZE),
		cnnic_numa_alloc_aligned_dma((droq->max_count *
					      OCT_DROQ_INFO_SIZE),
					     &droq->info_alloc_size,
					     &droq->info_base_addr,
					&droq->info_list_dma);

					     numa_node);
	if (!droq->info_list) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -296,6 +305,11 @@ int octeon_init_droq(struct octeon_device *oct,
		return 1;
	}

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vmalloc_node(droq->max_count *
						OCT_DROQ_RECVBUF_SIZE,
						numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
				      vmalloc(droq->max_count *
						OCT_DROQ_RECVBUF_SIZE);
@@ -949,6 +963,7 @@ int octeon_create_droq(struct octeon_device *oct,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,6 +972,8 @@ int octeon_create_droq(struct octeon_device *oct,
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		goto create_droq_fail;
+14 −9
Original line number Diff line number Diff line
@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
}

static inline void *
cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
			u32 size,
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			size_t *dma_addr __attribute__((unused)))
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
	do {
		ptr =
		    (void *)__get_free_pages(GFP_KERNEL,
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			free_pages((unsigned long)ptr, get_order(size));
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increment the size required if the first
			 * attempt failed.