
Commit 1fc01703 authored by Sean Tranchetti

net: qualcomm: rmnet: Introduce descriptor framework



Introduces a generic packet descriptor framework to allow RmNet to avoid
allocating intermediate SKBs. Instead, SKBs for the incoming packets will be
allocated only when all possible forms of coalescing have been performed,
to avoid unnecessary allocs and frees.

Change-Id: Ic3e4c9f174f2129faa2169c3b3f449becd2b5bdf
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
parent 11f6911c
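
For orientation, here is a minimal, illustrative sketch (not code from this commit) of how an RX entry point could hand the physical device's aggregate SKB to the new descriptor path, using rmnet_frag_deaggregate() as declared in rmnet_descriptor.h below. The wrapper function name, the loop over page fragments, and the consume_skb() call are assumptions about the caller, not the driver's actual ingress handler.

#include <linux/skbuff.h>
#include "rmnet_descriptor.h"

static void rmnet_rx_descriptor_sketch(struct sk_buff *skb,
				       struct rmnet_port *port)
{
	int i;

	/* Split each page fragment of the aggregate frame into per-packet
	 * frag descriptors instead of allocating per-packet SKBs up front.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port);

	/* The descriptors hold their own page references (rmnet_frag_fill()
	 * below calls get_page()), so the outer SKB can be released here.
	 */
	consume_skb(skb);
}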
Makefile  +1 −0

@@ -9,4 +9,5 @@ rmnet-y += rmnet_vnd.o
 rmnet-y		 += rmnet_handlers.o
 rmnet-y		 += rmnet_map_data.o
 rmnet-y		 += rmnet_map_command.o
+rmnet-y		 += rmnet_descriptor.o
 obj-$(CONFIG_RMNET) += rmnet.o
rmnet_config.c  +9 −0

@@ -14,6 +14,7 @@
 #include "rmnet_vnd.h"
 #include "rmnet_private.h"
 #include "rmnet_map.h"
+#include "rmnet_descriptor.h"
 #include <soc/qcom/rmnet_qmi.h>
 #include <soc/qcom/qmi_rmnet.h>
 
@@ -81,6 +82,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
 	rmnet_map_cmd_exit(port);
 	rmnet_map_tx_aggregate_exit(port);
 
+	rmnet_descriptor_deinit(port);
+
 	kfree(port);
 
 	netdev_rx_handler_unregister(real_dev);
@@ -118,6 +121,12 @@ static int rmnet_register_real_device(struct net_device *real_dev)
 	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
 		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
 
+	rc = rmnet_descriptor_init(port);
+	if (rc) {
+		rmnet_descriptor_deinit(port);
+		return rc;
+	}
+
 	rmnet_map_tx_aggregate_init(port);
 	rmnet_map_cmd_init(port);
 
rmnet_config.h  +3 −0

@@ -69,6 +69,9 @@ struct rmnet_port {
 	struct list_head dl_list;
 	struct rmnet_port_priv_stats stats;
 	int dl_marker_flush;
+
+	struct rmnet_descriptor *rmnet_desc;
+	struct rmnet_frag_descriptor_pool *frag_desc_pool;
 };
 
 extern struct rtnl_link_ops rmnet_link_ops;
rmnet_descriptor.c  +1052 −0

New file added (1,052 lines); preview collapsed due to the size limit.
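
Since rmnet_descriptor.c itself is collapsed above, here is a rough sketch of what the rmnet_descriptor_init() wired into rmnet_config.c plausibly does, based only on struct rmnet_frag_descriptor_pool and struct rmnet_frag_descriptor from the header below: preallocate a per-port free list of descriptors. The _sketch suffix, the pool size of 64, and the GFP_ATOMIC flag are assumptions, not the added file's actual contents.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "rmnet_descriptor.h"

#define RMNET_FRAG_DESC_POOL_SIZE_SKETCH 64	/* assumed pool size */

int rmnet_descriptor_init_sketch(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	int i;

	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&pool->free_list);
	port->frag_desc_pool = pool;

	for (i = 0; i < RMNET_FRAG_DESC_POOL_SIZE_SKETCH; i++) {
		struct rmnet_frag_descriptor *frag_desc;

		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			/* Partial allocations would be freed by
			 * rmnet_descriptor_deinit(), which rmnet_config.c
			 * above calls when init fails.
			 */
			return -ENOMEM;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->sub_frags);
		list_add_tail(&frag_desc->list, &pool->free_list);
		pool->pool_size++;
	}

	return 0;
}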

rmnet_descriptor.h  +143 −0

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_

#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"

struct rmnet_frag_descriptor_pool {
	struct list_head free_list;
	u32 pool_size;
};

struct rmnet_frag_descriptor {
	struct list_head list;
	struct list_head sub_frags;
	skb_frag_t frag;
	u8 *hdr_ptr;
	struct net_device *dev;
	u32 hash;
	__be32 tcp_seq;
	__be16 ip_id;
	u16 data_offset;
	u16 gso_size;
	u16 gso_segs;
	u16 ip_len;
	u16 trans_len;
	u8 ip_proto;
	u8 trans_proto;
	u8 pkt_id;
	u8 csum_valid:1,
	   hdrs_valid:1,
	   ip_id_set:1,
	   tcp_seq_set:1,
	   flush_shs:1,
	   reserved:3;
};

struct rmnet_descriptor {
	struct list_head frags;
	u8 nr_frags;
};

/* Descriptor management */
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port);
void rmnet_descriptor_add_frag(struct rmnet_port *port, struct page *p,
			       u32 page_offset, u32 len);
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp);

/* QMAP command packets */
void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port);
int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
			    struct rmnet_port *port, u16 pkt_len);

/* Ingress data handlers */
void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port);
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port);
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len);
void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port);

int rmnet_descriptor_init(struct rmnet_port *port);
void rmnet_descriptor_deinit(struct rmnet_port *port);

static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
{
	return skb_frag_address(&frag_desc->frag);
}

static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
				    struct rmnet_port *port,
				    unsigned int size)
{
	if (size >= skb_frag_size(&frag_desc->frag)) {
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	frag_desc->frag.page_offset += size;
	skb_frag_size_sub(&frag_desc->frag, size);

	return rmnet_frag_data_ptr(frag_desc);
}

static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
				    struct rmnet_port *port,
				    unsigned int size)
{
	if (!size) {
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	if (size < skb_frag_size(&frag_desc->frag))
		skb_frag_size_set(&frag_desc->frag, size);

	return rmnet_frag_data_ptr(frag_desc);
}

static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len)
{
	get_page(p);
	__skb_frag_set_page(&frag_desc->frag, p);
	skb_frag_size_set(&frag_desc->frag, len);
	frag_desc->frag.page_offset = page_offset;
}

static inline u8
rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
{
	unsigned char *data = rmnet_frag_data_ptr(frag_desc);

	data += sizeof(struct rmnet_map_header);
	return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}

static inline bool
rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
{
	unsigned char *data = rmnet_frag_data_ptr(frag_desc);

	data += sizeof(struct rmnet_map_header);
	return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}

#endif /* _RMNET_DESCRIPTOR_H_ */
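
A short usage sketch of the inline helpers above (not part of the commit): stripping the MAP header with rmnet_frag_pull() and capping the payload with rmnet_frag_trim() before handing the descriptor to rmnet_frag_deliver(). The wrapper function and its pkt_len parameter are hypothetical.

#include "rmnet_descriptor.h"
#include "rmnet_map.h"

static void rmnet_strip_and_deliver_sketch(struct rmnet_frag_descriptor *frag_desc,
					   struct rmnet_port *port, u16 pkt_len)
{
	/* Drop the MAP header from the front of the fragment; the helper
	 * recycles the descriptor and returns NULL if nothing would remain.
	 */
	if (!rmnet_frag_pull(frag_desc, port, sizeof(struct rmnet_map_header)))
		return;

	/* Cap the fragment at the payload length reported by the MAP header
	 * so trailing padding is not handed up the stack.
	 */
	if (!rmnet_frag_trim(frag_desc, port, pkt_len))
		return;

	rmnet_frag_deliver(frag_desc, port);
}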