Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 59793164 authored by Subash Abhinov Kasiviswanathan
Browse files

net: qualcomm: rmnet: Switch uplink aggregation to pages allocations



Allocate the data using pages and associate them with skbs using
head_frag.

CRs-Fixed: 2512312
Change-Id: I5263a932ecd80fcc80e2d2327c190db58a7ade62
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent b8658fe7
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -20,6 +20,11 @@ struct rmnet_endpoint {
	struct hlist_node hlnode;
};

/* Counters for uplink (UL) aggregation page recycling; exported via
 * the port ethtool stats ("UL agg reuse" / "UL agg alloc").
 */
struct rmnet_agg_stats {
	u64 ul_agg_reuse;	/* page clusters reused from the recycle list */
	u64 ul_agg_alloc;	/* page clusters freshly allocated */
};

struct rmnet_port_priv_stats {
	u64 dl_hdr_last_qmap_vers;
	u64 dl_hdr_last_ep_id;
@@ -33,6 +38,7 @@ struct rmnet_port_priv_stats {
	u64 dl_hdr_total_pkts;
	u64 dl_trl_last_seq;
	u64 dl_trl_count;
	struct rmnet_agg_stats agg;
};

struct rmnet_egress_agg_params {
@@ -41,6 +47,11 @@ struct rmnet_egress_agg_params {
	u32 agg_time;
};

/* One recyclable page cluster on the uplink aggregation list
 * (port->agg_list); port->agg_head is the next candidate for reuse.
 */
struct rmnet_agg_page {
	struct list_head list;	/* membership in port->agg_list */
	struct page *page;	/* 2^agg_size_order pages backing skb data */
};

/* One instance of this structure is instantiated for each real_dev associated
 * with rmnet.
 */
@@ -65,6 +76,9 @@ struct rmnet_port {
	struct timespec agg_last;
	struct hrtimer hrtimer;
	struct work_struct agg_wq;
	u8 agg_size_order;
	struct list_head agg_list;
	struct rmnet_agg_page *agg_head;

	void *qmi_info;

+125 −3
Original line number Diff line number Diff line
@@ -1250,9 +1250,107 @@ static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
	}
}

/* Release every recycled aggregation page cluster and its bookkeeping
 * entry. Callers in this file hold port->agg_lock around this.
 *
 * Each entry is unlinked before being freed: without list_del(), the
 * agg_list head would keep pointing at freed entries, and a subsequent
 * rmnet_alloc_agg_pages() refill or list walk would touch dangling
 * memory.
 */
static void rmnet_free_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page, *idx;

	list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
		list_del(&agg_page->list);
		put_page(agg_page->page);
		kfree(agg_page);
	}

	port->agg_head = NULL;
}

static struct page *rmnet_get_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page;
	struct page *page = NULL;
	int i = 0;

	do {
		agg_page = port->agg_head;
		if (unlikely(!agg_page))
			break;

		if (page_ref_count(agg_page->page) == 1) {
			page = agg_page->page;
			page_ref_inc(agg_page->page);

			port->stats.agg.ul_agg_reuse++;
			port->agg_head = list_next_entry(agg_page, list);
			break;
		}

		port->agg_head = list_next_entry(agg_page, list);
		i++;
	} while (i <= 5);

	if (!page) {
		page =  __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
		port->stats.agg.ul_agg_alloc++;
	}

	return page;
}

/* Allocate one recycle-list entry together with its backing page
 * cluster of 2^port->agg_size_order pages. Both allocations use
 * GFP_ATOMIC since this may run in softirq context. Returns NULL on
 * failure with nothing leaked.
 */
static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *entry;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
	if (!entry->page) {
		kfree(entry);
		entry = NULL;
	}

	return entry;
}

/* Target number of page clusters kept on the uplink recycling list. */
#define RMNET_AGG_NUM_PAGES	512

/* Pre-populate port->agg_list with up to RMNET_AGG_NUM_PAGES clusters
 * and point the reuse cursor at the first one. Individual allocation
 * failures are tolerated — the list simply ends up shorter (possibly
 * empty, in which case agg_head is NULL and rmnet_get_agg_pages()
 * falls back to direct allocation).
 */
static void rmnet_alloc_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page;
	int i;

	for (i = 0; i < RMNET_AGG_NUM_PAGES; i++) {
		agg_page = __rmnet_alloc_agg_pages(port);
		if (agg_page)
			list_add_tail(&agg_page->list, &port->agg_list);
	}

	port->agg_head = list_first_entry_or_null(&port->agg_list,
						  struct rmnet_agg_page, list);
}

/* Build the skb used to aggregate uplink packets, backed by a page
 * cluster from rmnet_get_agg_pages() via build_skb() (head_frag).
 *
 * NOTE(review): the scraped diff left the pre-patch line
 * "return alloc_skb(...)" inline without its '-' marker, which made
 * everything below it unreachable; it is removed here so the
 * page-based path (the whole point of this patch) actually runs.
 *
 * Returns NULL if no pages or no skb could be obtained; the page
 * reference is dropped on the skb-failure path so nothing leaks.
 */
static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
{
	struct sk_buff *skb;
	unsigned int size;
	struct page *page;
	void *vaddr;

	page = rmnet_get_agg_pages(port);
	if (!page)
		return NULL;

	vaddr = page_address(page);
	size = PAGE_SIZE << port->agg_size_order;

	skb = build_skb(vaddr, size);
	if (!skb) {
		put_page(page);
		return NULL;
	}

	return skb;
}

void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
@@ -1338,9 +1436,27 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
	unsigned long irq_flags;

	spin_lock_irqsave(&port->agg_lock, irq_flags);
	port->egress_agg_params.agg_size = size;
	port->egress_agg_params.agg_count = count;
	port->egress_agg_params.agg_time = time;
	port->egress_agg_params.agg_size = size;

	rmnet_free_agg_pages(port);

	/* This effectively disables recycling in case the UL aggregation
	 * size is lesser than PAGE_SIZE.
	 */
	if (size < PAGE_SIZE)
		goto done;

	port->agg_size_order = get_order(size);

	size = PAGE_SIZE << port->agg_size_order;
	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	port->egress_agg_params.agg_size = size;

	rmnet_alloc_agg_pages(port);

done:
	spin_unlock_irqrestore(&port->agg_lock, irq_flags);
}

@@ -1349,8 +1465,13 @@ void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
	spin_lock_init(&port->agg_lock);
	INIT_LIST_HEAD(&port->agg_list);

	rmnet_map_update_ul_agg_config(port, 8192, 20, 3000000);
	/* Since PAGE_SIZE - 1 is specified here, no pages are pre-allocated.
	 * This is done to reduce memory usage in cases where
	 * UL aggregation is disabled.
	 */
	rmnet_map_update_ul_agg_config(port, PAGE_SIZE - 1, 20, 3000000);

	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
@@ -1374,6 +1495,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
		port->agg_state = 0;
	}

	rmnet_free_agg_pages(port);
	spin_unlock_irqrestore(&port->agg_lock, flags);
}

+2 −0
Original line number Diff line number Diff line
@@ -226,6 +226,8 @@ static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
	"DL header total pkts received",
	"DL trailer last seen sequence",
	"DL trailer pkts received",
	"UL agg reuse",
	"UL agg alloc",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)