Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 15a18cda authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "net: qualcomm: rmnet: Allow configuration of page recycling"

parents 30351587 975914b5
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -394,6 +394,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		rmnet_map_update_ul_agg_config(port, agg_params->agg_size,
					       agg_params->agg_count,
					       agg_params->agg_features,
					       agg_params->agg_time);
	}

+16 −1
Original line number Diff line number Diff line
@@ -20,6 +20,11 @@ struct rmnet_endpoint {
	struct hlist_node hlnode;
};

/* Counters for how UL aggregation buffers were backed: pages reused from
 * the per-port recycle list vs. freshly allocated pages.  Updated in
 * rmnet_get_agg_pages() under the port's agg_lock.
 */
struct rmnet_agg_stats {
	u64 ul_agg_reuse;	/* page taken back from the recycle list */
	u64 ul_agg_alloc;	/* fresh __dev_alloc_pages() allocation */
};

struct rmnet_port_priv_stats {
	u64 dl_hdr_last_qmap_vers;
	u64 dl_hdr_last_ep_id;
@@ -33,14 +38,21 @@ struct rmnet_port_priv_stats {
	u64 dl_hdr_total_pkts;
	u64 dl_trl_last_seq;
	u64 dl_trl_count;
	struct rmnet_agg_stats agg;
};

/* UL (egress) aggregation tuning, set via rmnet_map_update_ul_agg_config().
 *
 * Fix: the scraped diff fused the removed `u16 agg_count;` line with the
 * added `u8 agg_count;` line, leaving a duplicate member that cannot
 * compile.  Only the post-patch u8 member is kept.
 */
struct rmnet_egress_agg_params {
	u16 agg_size;		/* max aggregate size in bytes */
	u8 agg_count;		/* max packets folded into one aggregate */
	u8 agg_features;	/* feature bits, e.g. RMNET_PAGE_RECYCLE */
	u32 agg_time;		/* flush timeout (presumably ns — confirm vs hrtimer user) */
};

/* One node of the per-port TX page recycle pool (port->agg_list);
 * port->agg_head marks where the reuse scan resumes.
 */
struct rmnet_agg_page {
	struct list_head list;
	struct page *page;	/* block of 2^port->agg_size_order pages */
};

/* One instance of this structure is instantiated for each real_dev associated
 * with rmnet.
 */
@@ -65,6 +77,9 @@ struct rmnet_port {
	struct timespec agg_last;
	struct hrtimer hrtimer;
	struct work_struct agg_wq;
	u8 agg_size_order;
	struct list_head agg_list;
	struct rmnet_agg_page *agg_head;

	void *qmi_info;

+1 −1
Original line number Diff line number Diff line
@@ -262,7 +262,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
/* Fix: the diff scrape fused the removed parameter line
 * `u16 count, u32 time);` with the added one, producing a declaration with
 * two parameter-list tails.  Only the post-patch signature is kept.
 */
void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
				    u8 count, u8 features, u32 time);
void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
			     struct rmnet_map_dl_ind_hdr *dl_hdr);
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
+133 −4
Original line number Diff line number Diff line
@@ -1250,9 +1250,111 @@ static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
	}
}

/* Release every page block on the port's recycle list and reset the scan
 * head.  Caller holds port->agg_lock (see rmnet_map_update_ul_agg_config
 * and rmnet_map_tx_aggregate_exit).
 *
 * Fix: unlink each node before freeing it.  Without list_del() the list
 * head keeps pointing at freed nodes, and the subsequent
 * rmnet_alloc_agg_pages() -> list_add_tail() dereferences that freed
 * memory (use-after-free).
 */
static void rmnet_free_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page, *idx;

	list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
		list_del(&agg_page->list);
		put_page(agg_page->page);
		kfree(agg_page);
	}

	port->agg_head = NULL;
}

/* Return a page block (order port->agg_size_order) to back an aggregation
 * skb.  With RMNET_PAGE_RECYCLE set, scan up to six recycle-list nodes for
 * a page whose refcount has dropped back to 1 (sole owner is the pool,
 * i.e. the stack has released it) and hand it out with an extra reference;
 * otherwise fall back to a fresh allocation and count it.
 *
 * NOTE(review): list_next_entry() past the last node yields the bogus
 * entry embedding the list head, not a real rmnet_agg_page; the bounded
 * (<= 6 step) scan appears to rely on passing through that sentinel
 * harmlessly — confirm this is safe for the pool sizes used.
 */
static struct page *rmnet_get_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *agg_page;
	struct page *page = NULL;
	int i = 0;

	if (!(port->egress_agg_params.agg_features & RMNET_PAGE_RECYCLE))
		goto alloc;

	do {
		agg_page = port->agg_head;
		if (unlikely(!agg_page))
			break;

		/* refcount 1 == only the pool holds it: safe to reuse */
		if (page_ref_count(agg_page->page) == 1) {
			page = agg_page->page;
			page_ref_inc(agg_page->page);

			port->stats.agg.ul_agg_reuse++;
			port->agg_head = list_next_entry(agg_page, list);
			break;
		}

		port->agg_head = list_next_entry(agg_page, list);
		i++;
	} while (i <= 5);

alloc:
	if (!page) {
		page =  __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
		port->stats.agg.ul_agg_alloc++;
	}

	return page;
}

/* Allocate a single recycle-list node plus its backing page block.
 * Returns NULL with nothing leaked if either allocation fails.
 */
static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
{
	struct rmnet_agg_page *node = kzalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return NULL;

	node->page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
	if (!node->page) {
		kfree(node);
		return NULL;
	}

	return node;
}

/* Pre-populate the port's recycle pool with up to 512 page blocks and
 * point the reuse scan at the first node.  Individual allocation failures
 * are tolerated — the pool just ends up smaller.
 */
static void rmnet_alloc_agg_pages(struct rmnet_port *port)
{
	int n;

	for (n = 0; n < 512; n++) {
		struct rmnet_agg_page *node = __rmnet_alloc_agg_pages(port);

		if (node)
			list_add_tail(&node->list, &port->agg_list);
	}

	port->agg_head = list_first_entry_or_null(&port->agg_list,
						  struct rmnet_agg_page, list);
}

/* Build an aggregation skb on top of a (possibly recycled) page block.
 * The skb's data area is the whole block; agg_size (adjusted for the
 * skb_shared_info tail in rmnet_map_update_ul_agg_config) bounds the
 * payload.  On build_skb() failure the page reference taken by
 * rmnet_get_agg_pages() is dropped.
 *
 * Fix: the scraped diff left the removed pre-patch body
 * `return alloc_skb(port->egress_agg_params.agg_size, GFP_ATOMIC);` as the
 * first statement, which made the entire page-recycling path below it
 * unreachable dead code.  That residue is deleted.
 */
static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
{
	struct sk_buff *skb;
	unsigned int size;
	struct page *page;
	void *vaddr;

	page = rmnet_get_agg_pages(port);
	if (!page)
		return NULL;

	vaddr = page_address(page);
	size = PAGE_SIZE << port->agg_size_order;

	skb = build_skb(vaddr, size);
	if (!skb) {
		put_page(page);
		return NULL;
	}

	return skb;
}

void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
@@ -1333,14 +1435,34 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
}

void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
				    u16 count, u32 time)
				    u8 count, u8 features, u32 time)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&port->agg_lock, irq_flags);
	port->egress_agg_params.agg_size = size;
	port->egress_agg_params.agg_count = count;
	port->egress_agg_params.agg_time = time;
	port->egress_agg_params.agg_size = size;
	port->egress_agg_params.agg_features = features;

	rmnet_free_agg_pages(port);

	/* This effectively disables recycling in case the UL aggregation
	 * size is lesser than PAGE_SIZE.
	 */
	if (size < PAGE_SIZE)
		goto done;

	port->agg_size_order = get_order(size);

	size = PAGE_SIZE << port->agg_size_order;
	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	port->egress_agg_params.agg_size = size;

	if (port->egress_agg_params.agg_features == RMNET_PAGE_RECYCLE)
		rmnet_alloc_agg_pages(port);

done:
	spin_unlock_irqrestore(&port->agg_lock, irq_flags);
}

@@ -1349,8 +1471,14 @@ void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
	spin_lock_init(&port->agg_lock);
	INIT_LIST_HEAD(&port->agg_list);

	rmnet_map_update_ul_agg_config(port, 8192, 20, 3000000);
	/* Since PAGE_SIZE - 1 is specified here, no pages are pre-allocated.
	 * This is done to reduce memory usage in cases where
	 * UL aggregation is disabled.
	 * Additionally, the features flag is also set to 0.
	 */
	rmnet_map_update_ul_agg_config(port, PAGE_SIZE - 1, 20, 0, 3000000);

	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
@@ -1374,6 +1502,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
		port->agg_state = 0;
	}

	rmnet_free_agg_pages(port);
	spin_unlock_irqrestore(&port->agg_lock, flags);
}

+3 −0
Original line number Diff line number Diff line
@@ -22,6 +22,9 @@ RMNET_INGRESS_FORMAT_DL_MARKER_V2)
#define RMNET_INGRESS_FORMAT_PS                 BIT(27)
#define RMNET_FORMAT_PS_NOTIF                   BIT(26)

/* UL Aggregation parameters */
/* agg_features bit: enable the TX page recycle pool (rmnet_get_agg_pages) */
#define RMNET_PAGE_RECYCLE                      BIT(0)

/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
Loading