Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c8b61ee6 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "net: qualcomm: rmnet: Allow configuration of page recycling"

parents 31079b8c c70cb7c9
Loading
Loading
Loading
Loading
+5 −6
Original line number Diff line number Diff line
@@ -397,14 +397,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		void *agg_params;
		unsigned long irq_flags;
		struct rmnet_egress_agg_params *agg_params;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		spin_lock_irqsave(&port->agg_lock, irq_flags);
		memcpy(&port->egress_agg_params, agg_params,
		       sizeof(port->egress_agg_params));
		spin_unlock_irqrestore(&port->agg_lock, irq_flags);
		rmnet_map_update_ul_agg_config(port, agg_params->agg_size,
					       agg_params->agg_count,
					       agg_params->agg_features,
					       agg_params->agg_time);
	}

	return 0;
+16 −1
Original line number Diff line number Diff line
@@ -28,6 +28,11 @@ struct rmnet_endpoint {
	struct hlist_node hlnode;
};

/* Counters for the UL aggregation page-recycling pool (see
 * rmnet_get_agg_pages()): how often a pooled page could be reused
 * versus how often a fresh allocation was required.
 */
struct rmnet_agg_stats {
	u64 ul_agg_reuse;	/* pages served from the recycle pool */
	u64 ul_agg_alloc;	/* pages freshly allocated (pool miss/disabled) */
};

struct rmnet_port_priv_stats {
	u64 dl_hdr_last_qmap_vers;
	u64 dl_hdr_last_ep_id;
@@ -41,14 +46,21 @@ struct rmnet_port_priv_stats {
	u64 dl_hdr_total_pkts;
	u64 dl_trl_last_seq;
	u64 dl_trl_count;
	struct rmnet_agg_stats agg;
};

/* Uplink (egress) aggregation tuning, set via IFLA_RMNET_UL_AGG_PARAMS
 * and applied in rmnet_map_update_ul_agg_config().
 *
 * Note: the diff residue carried both the old `u16 agg_count;` and the
 * new `u8 agg_count;` declarations; a struct cannot declare the same
 * member twice, so only the new u8 member is kept.
 */
struct rmnet_egress_agg_params {
	u16 agg_size;		/* max aggregated frame size, in bytes */
	u8 agg_count;		/* max packets per aggregated frame */
	u8 agg_features;	/* feature bits, e.g. RMNET_PAGE_RECYCLE */
	u32 agg_time;		/* aggregation flush timeout */
};

/* One node of the per-port page-recycling pool: links a pre-allocated
 * page (of order port->agg_size_order) into port->agg_list.
 */
struct rmnet_agg_page {
	struct list_head list;	/* linkage into rmnet_port::agg_list */
	struct page *page;	/* backing page handed to build_skb() */
};

/* One instance of this structure is instantiated for each real_dev associated
 * with rmnet.
 */
@@ -73,6 +85,9 @@ struct rmnet_port {
	struct timespec agg_last;
	struct hrtimer hrtimer;
	struct work_struct agg_wq;
	u8 agg_size_order;
	struct list_head agg_list;
	struct rmnet_agg_page *agg_head;

	void *qmi_info;

+2 −0
Original line number Diff line number Diff line
@@ -268,6 +268,8 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
				    u8 count, u8 features, u32 time);
void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
			     struct rmnet_map_dl_ind_hdr *dl_hdr);
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
+163 −5
Original line number Diff line number Diff line
@@ -1273,6 +1273,127 @@ static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
	}
}

/* Tear down the page-recycling pool for @port.
 *
 * Drops the pool's reference on every page and frees the tracking
 * nodes. Each node is unlinked with list_del() before being freed:
 * without it, port->agg_list's head would keep pointing at freed
 * nodes, and a subsequent list_add_tail() in rmnet_alloc_agg_pages()
 * would write through the stale ->prev pointer (use-after-free).
 *
 * Caller must hold port->agg_lock.
 */
static void rmnet_free_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page, *idx;

	list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
		list_del(&agg_page->list);
		put_page(agg_page->page);
		kfree(agg_page);
	}

	port->agg_head = NULL;
}

/* Return a page to back a new aggregate skb.
 *
 * When RMNET_PAGE_RECYCLE is enabled, scan up to 6 pool entries
 * starting at port->agg_head for a page whose refcount has dropped
 * back to 1 (i.e. only the pool holds it — the previous skb that used
 * it has been freed). On a hit, take an extra reference for the new
 * user and advance the scan cursor. On a miss (or if recycling is
 * disabled), fall back to a fresh __dev_alloc_pages() of
 * port->agg_size_order.
 *
 * Returns NULL only if the fallback allocation fails.
 * Caller must hold port->agg_lock (cursor and stats are unprotected
 * otherwise).
 *
 * NOTE(review): list_next_entry() on the last pool entry yields the
 * container of the list head itself (&port->agg_list), not a real
 * rmnet_agg_page — the cursor does not wrap back to the first entry.
 * Confirm whether subsequent dereferences of agg_page->page at the
 * head sentinel are reachable here.
 */
static struct page *rmnet_get_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page;
	struct page *page = NULL;
	int i = 0;

	if (!(port->egress_agg_params.agg_features & RMNET_PAGE_RECYCLE))
		goto alloc;

	do {
		agg_page = port->agg_head;
		if (unlikely(!agg_page))
			break;

		/* refcount 1 == pool is the sole owner; safe to reuse */
		if (page_ref_count(agg_page->page) == 1) {
			page = agg_page->page;
			page_ref_inc(agg_page->page);

			port->stats.agg.ul_agg_reuse++;
			port->agg_head = list_next_entry(agg_page, list);
			break;
		}

		port->agg_head = list_next_entry(agg_page, list);
		i++;
	} while (i <= 5);

alloc:
	if (!page) {
		page =  __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
		port->stats.agg.ul_agg_alloc++;
	}

	return page;
}

/* Allocate one recycling-pool node: the tracking struct plus a backing
 * page of port->agg_size_order. Returns NULL if either allocation
 * fails (nothing is leaked). GFP_ATOMIC: may run under agg_lock.
 */
static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return NULL;

	node->page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
	if (!node->page) {
		kfree(node);
		return NULL;
	}

	return node;
}

/* Pre-populate the page-recycling pool for @port.
 *
 * Best-effort: individual allocation failures are skipped, so the pool
 * may end up smaller than the target (possibly empty — agg_head is
 * then NULL and rmnet_get_agg_pages() falls back to fresh allocation).
 * Caller must hold port->agg_lock.
 */
static void rmnet_alloc_agg_pages(struct rmnet_port *port)
{
	/* Target number of pool entries per port (was a bare 512). */
	enum { RMNET_AGG_POOL_SIZE = 512 };
	struct rmnet_agg_page *agg_page;
	int i;

	for (i = 0; i < RMNET_AGG_POOL_SIZE; i++) {
		agg_page = __rmnet_alloc_agg_pages(port);
		if (agg_page)
			list_add_tail(&agg_page->list, &port->agg_list);
	}

	port->agg_head = list_first_entry_or_null(&port->agg_list,
						  struct rmnet_agg_page, list);
}

/* Compute the allocation order for an aggregate of @size bytes:
 * floor(log2(size / PAGE_SIZE)). Rounds DOWN, so PAGE_SIZE << order
 * never exceeds @size; the caller shrinks agg_size to match.
 * For size < 2 * PAGE_SIZE the order is 0.
 */
static u8 rmnet_get_page_order(u16 size)
{
	u16 pages = size / PAGE_SIZE;
	u8 order;

	for (order = 0; pages >= 2; order++)
		pages >>= 1;

	return order;
}

/* Build an empty aggregate skb backed by a pool (or fresh) page.
 *
 * The skb's buffer is the whole PAGE_SIZE << agg_size_order region;
 * build_skb() reserves its skb_shared_info at the tail, which is why
 * agg_size was trimmed by SKB_DATA_ALIGN(sizeof(skb_shared_info)).
 * Returns NULL if no page or skb could be obtained.
 */
static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
{
	struct page *page;
	struct sk_buff *skb;

	page = rmnet_get_agg_pages(port);
	if (!page)
		return NULL;

	skb = build_skb(page_address(page),
			(unsigned int)(PAGE_SIZE << port->agg_size_order));
	if (!skb) {
		/* Drop the reference taken above; a recycled page simply
		 * returns to the pool.
		 */
		put_page(page);
		return NULL;
	}

	return skb;
}

void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
	struct timespec diff, last;
@@ -1300,8 +1421,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
			return;
		}

		port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
					  GFP_ATOMIC);
		port->agg_skb = rmnet_map_build_skb(port);
		if (!port->agg_skb) {
			port->agg_skb = 0;
			port->agg_count = 0;
@@ -1351,14 +1471,51 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
	spin_unlock_irqrestore(&port->agg_lock, flags);
}

/* Apply new uplink aggregation parameters to @port.
 *
 * @size:     requested max aggregate size in bytes
 * @count:    max packets per aggregate
 * @features: feature bits (RMNET_PAGE_RECYCLE, ...)
 * @time:     aggregation flush timeout
 *
 * Any existing recycling pool is torn down first (its pages were sized
 * for the old order). If @size is at least PAGE_SIZE, agg_size is
 * rounded down to the usable space of a PAGE_SIZE << order buffer
 * (minus build_skb()'s tail skb_shared_info), and the pool is
 * repopulated when recycling is requested.
 */
void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
				    u8 count, u8 features, u32 time)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&port->agg_lock, irq_flags);
	port->egress_agg_params.agg_count = count;
	port->egress_agg_params.agg_time = time;
	port->egress_agg_params.agg_size = size;
	port->egress_agg_params.agg_features = features;

	rmnet_free_agg_pages(port);

	/* This effectively disables recycling in case the UL aggregation
	 * size is less than PAGE_SIZE.
	 */
	if (size < PAGE_SIZE)
		goto done;

	port->agg_size_order = rmnet_get_page_order(size);

	size = PAGE_SIZE << port->agg_size_order;
	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	port->egress_agg_params.agg_size = size;

	/* Test the flag as a bitmask, matching rmnet_get_agg_pages();
	 * '==' would skip pool pre-allocation if any other feature bit
	 * were ever set alongside RMNET_PAGE_RECYCLE.
	 */
	if (port->egress_agg_params.agg_features & RMNET_PAGE_RECYCLE)
		rmnet_alloc_agg_pages(port);

done:
	spin_unlock_irqrestore(&port->agg_lock, irq_flags);
}

/* One-time init of the UL aggregation state for @port.
 *
 * The direct agg_size/agg_count/agg_time assignments the previous
 * version made here were dead stores: rmnet_map_update_ul_agg_config()
 * below rewrites all four parameters under the lock. The lock and the
 * pool list must be initialized before that call, since it takes
 * agg_lock and walks agg_list.
 */
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
	spin_lock_init(&port->agg_lock);
	INIT_LIST_HEAD(&port->agg_list);

	/* Since PAGE_SIZE - 1 is specified here, no pages are pre-allocated.
	 * This is done to reduce memory usage in cases where
	 * UL aggregation is disabled.
	 * Additionally, the features flag is also set to 0.
	 */
	rmnet_map_update_ul_agg_config(port, PAGE_SIZE - 1, 20, 0, 3000000);

	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
@@ -1382,6 +1539,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
		port->agg_state = 0;
	}

	rmnet_free_agg_pages(port);
	spin_unlock_irqrestore(&port->agg_lock, flags);
}

+3 −0
Original line number Diff line number Diff line
@@ -30,6 +30,9 @@ RMNET_INGRESS_FORMAT_DL_MARKER_V2)
#define RMNET_INGRESS_FORMAT_PS                 BIT(27)
#define RMNET_FORMAT_PS_NOTIF                   BIT(26)

/* UL Aggregation parameters */
#define RMNET_PAGE_RECYCLE                      BIT(0)

/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
Loading