drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +14 −10

 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * RMNET Data ingress/egress handler
  *
@@ -45,13 +45,15 @@ EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
 static int rmnet_check_skb_can_gro(struct sk_buff *skb)
 {
+        unsigned char *data = rmnet_map_data_ptr(skb);
+
         switch (skb->protocol) {
         case htons(ETH_P_IP):
-                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                if (((struct iphdr *)data)->protocol == IPPROTO_TCP)
                         return 0;
                 break;
         case htons(ETH_P_IPV6):
-                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                if (((struct ipv6hdr *)data)->nexthdr == IPPROTO_TCP)
                         return 0;
                 /* Fall through */
         }
@@ -61,7 +63,7 @@ static int rmnet_check_skb_can_gro(struct sk_buff *skb)
 void rmnet_set_skb_proto(struct sk_buff *skb)
 {
-        switch (skb->data[0] & 0xF0) {
+        switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
         case RMNET_IP_VERSION_4:
                 skb->protocol = htons(ETH_P_IP);
                 break;
@@ -129,11 +131,13 @@ static void
 __rmnet_map_ingress_handler(struct sk_buff *skb,
                             struct rmnet_port *port)
 {
+        struct rmnet_map_header *qmap;
         struct rmnet_endpoint *ep;
         u16 len, pad;
         u8 mux_id;
 
-        if (RMNET_MAP_GET_CD_BIT(skb)) {
+        qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+        if (qmap->cd_bit) {
                 if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
                         if (!rmnet_map_flow_command(skb, port, false))
                                 return;
@@ -145,9 +149,9 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
                 goto free_skb;
         }
 
-        mux_id = RMNET_MAP_GET_MUX_ID(skb);
-        pad = RMNET_MAP_GET_PAD(skb);
-        len = RMNET_MAP_GET_LENGTH(skb) - pad;
+        mux_id = qmap->mux_id;
+        pad = qmap->pad_len;
+        len = ntohs(qmap->pkt_len) - pad;
 
         if (mux_id >= RMNET_MAX_LOGICAL_EP)
                 goto free_skb;
@@ -159,7 +163,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
         skb->dev = ep->egress_dev;
 
         /* Subtract MAP header */
-        skb_pull(skb, sizeof(struct rmnet_map_header));
+        pskb_pull(skb, sizeof(struct rmnet_map_header));
         rmnet_set_skb_proto(skb);
 
         if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
@@ -172,7 +176,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
         qmi_rmnet_work_maybe_restart(port);
 #endif
 
-        skb_trim(skb, len);
+        pskb_trim(skb, len);
         rmnet_deliver_skb(skb, port);
         return;
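Note: every change in this file serves one goal — read the MAP and IP headers through rmnet_map_data_ptr() instead of skb->data or the ip_hdr()/ipv6_hdr() helpers, because those all assume the headers sit in the linear area, which is empty when the lower layer hands rmnet a page-frag-only skb. As a minimal userspace sketch of the header-field extraction the ingress hunk now performs (qmap->mux_id, qmap->pad_len, ntohs(qmap->pkt_len) - pad): the byte layout follows the driver's struct rmnet_map_header (byte 0 = CD bit in the MSB plus 6-bit pad length, byte 1 = mux ID, bytes 2-3 = big-endian packet length including padding); parse_map_header and map_hdr_fields are illustrative names, not driver symbols.

#include <stdint.h>
#include <stdio.h>

struct map_hdr_fields {
        uint8_t cd_bit;
        uint8_t pad_len;
        uint8_t mux_id;
        uint16_t payload_len;        /* pkt_len minus padding */
};

static int parse_map_header(const uint8_t *buf, size_t buflen,
                            struct map_hdr_fields *out)
{
        uint16_t pkt_len;

        if (buflen < 4)
                return -1;

        out->cd_bit = buf[0] >> 7;           /* command/data bit */
        out->pad_len = buf[0] & 0x3f;        /* low 6 bits */
        out->mux_id = buf[1];
        pkt_len = (uint16_t)((buf[2] << 8) | buf[3]);   /* big endian */
        if (pkt_len < out->pad_len)
                return -1;
        out->payload_len = pkt_len - out->pad_len;
        return 0;
}

int main(void)
{
        /* mux 3, 2 pad bytes, pkt_len 0x0052 = 80-byte payload + 2 pad */
        const uint8_t frame[] = { 0x02, 0x03, 0x00, 0x52 };
        struct map_hdr_fields f;

        if (!parse_map_header(frame, sizeof(frame), &f))
                printf("cd=%u mux=%u payload=%u\n",
                       f.cd_bit, f.mux_id, f.payload_len);
        return 0;
}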
drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +19 −1

 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
 
 #ifndef _RMNET_MAP_H_
 #define _RMNET_MAP_H_
@@ -134,6 +134,24 @@ struct rmnet_map_dl_ind {
 #define RMNET_MAP_NO_PAD_BYTES        0
 #define RMNET_MAP_ADD_PAD_BYTES       1
 
+static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
+{
+        /* Nonlinear packets we receive are entirely within frag 0 */
+        if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
+                return skb_frag_address(skb_shinfo(skb)->frags);
+
+        return skb->data;
+}
+
+static inline struct rmnet_map_control_command *
+rmnet_map_get_cmd_start(struct sk_buff *skb)
+{
+        unsigned char *data = rmnet_map_data_ptr(skb);
+
+        data += sizeof(struct rmnet_map_header);
+        return (struct rmnet_map_control_command *)data;
+}
+
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
                                       struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
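Note: rmnet_map_data_ptr() is the core of the patch. The test skb->len == skb->data_len means every data byte is held in page frags (skb_headlen() == 0), so headers must be read from frag 0 rather than the empty linear area. A standalone userspace model of that decision, assuming the frag-0-only invariant stated in the comment above; mini_skb and mini_data_ptr are illustrative stand-ins, not kernel structures.

#include <stdio.h>

struct mini_skb {
        unsigned char *linear;        /* models skb->data */
        unsigned char *frag0;         /* models address of first page frag */
        unsigned int len;             /* total data bytes */
        unsigned int data_len;        /* bytes held in frags */
};

static unsigned char *mini_data_ptr(const struct mini_skb *skb)
{
        /* nonlinear and entirely within frag 0 */
        if (skb->data_len && skb->len == skb->data_len)
                return skb->frag0;
        return skb->linear;
}

int main(void)
{
        unsigned char lin[16] = "linear", frag[16] = "frag0";
        struct mini_skb copied = { lin, frag, 16, 0 };   /* all linear */
        struct mini_skb fragged = { lin, frag, 16, 16 }; /* all in frag 0 */

        printf("%s / %s\n", mini_data_ptr(&copied), mini_data_ptr(&fragged));
        return 0;
}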
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +16 −14

 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. */
 
 #include <linux/netdevice.h>
 #include "rmnet_config.h"
@@ -22,6 +22,7 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
                                     struct rmnet_port *port,
                                     int enable)
 {
+        struct rmnet_map_header *qmap;
         struct rmnet_map_control_command *cmd;
         struct rmnet_endpoint *ep;
         struct net_device *vnd;
@@ -31,8 +32,9 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
         u8 mux_id;
         int r;
 
-        mux_id = RMNET_MAP_GET_MUX_ID(skb);
-        cmd = RMNET_MAP_GET_CMD_START(skb);
+        qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+        mux_id = qmap->mux_id;
+        cmd = rmnet_map_get_cmd_start(skb);
 
         if (mux_id >= RMNET_MAX_LOGICAL_EP) {
                 kfree_skb(skb);
@@ -72,12 +74,12 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
         struct net_device *dev = skb->dev;
 
         if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
-                skb_trim(skb,
-                         skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
+                pskb_trim(skb,
+                          skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
 
         skb->protocol = htons(ETH_P_MAP);
 
-        cmd = RMNET_MAP_GET_CMD_START(skb);
+        cmd = rmnet_map_get_cmd_start(skb);
         cmd->cmd_type = type & 0x03;
 
         netif_tx_lock(dev);
@@ -122,9 +124,9 @@ static void rmnet_map_process_flow_start(struct sk_buff *skb,
         if (skb->len < RMNET_DL_IND_HDR_SIZE)
                 return;
 
-        skb_pull(skb, RMNET_MAP_CMD_SIZE);
+        pskb_pull(skb, RMNET_MAP_CMD_SIZE);
 
-        dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data;
+        dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);
 
         port->stats.dl_hdr_last_seq = dlhdr->le.seq;
         port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
@@ -150,7 +152,7 @@ static void rmnet_map_process_flow_start(struct sk_buff *skb,
                 pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
                 if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
                         pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
-                skb_pull(skb, pull_size);
+                pskb_pull(skb, pull_size);
         }
 }
@@ -163,9 +165,9 @@ static void rmnet_map_process_flow_end(struct sk_buff *skb,
         if (skb->len < RMNET_DL_IND_TRL_SIZE)
                 return;
 
-        skb_pull(skb, RMNET_MAP_CMD_SIZE);
+        pskb_pull(skb, RMNET_MAP_CMD_SIZE);
 
-        dltrl = (struct rmnet_map_dl_ind_trl *)skb->data;
+        dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);
 
         port->stats.dl_trl_last_seq = dltrl->seq_le;
         port->stats.dl_trl_count++;
@@ -177,7 +179,7 @@ static void rmnet_map_process_flow_end(struct sk_buff *skb,
                 pull_size = sizeof(struct rmnet_map_dl_ind_trl);
                 if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
                         pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
-                skb_pull(skb, pull_size);
+                pskb_pull(skb, pull_size);
         }
 }
@@ -190,7 +192,7 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
         unsigned char command_name;
         unsigned char rc = 0;
 
-        cmd = RMNET_MAP_GET_CMD_START(skb);
+        cmd = rmnet_map_get_cmd_start(skb);
         command_name = cmd->command_name;
 
         switch (command_name) {
@@ -217,7 +219,7 @@ int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
         struct rmnet_map_control_command *cmd;
         unsigned char command_name;
 
-        cmd = RMNET_MAP_GET_CMD_START(skb);
+        cmd = rmnet_map_get_cmd_start(skb);
         command_name = cmd->command_name;
 
         switch (command_name) {
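Note: besides switching the macros to the new inline helpers, this file replaces every skb_pull()/skb_trim() with pskb_pull()/pskb_trim(). A plain pull only advances skb->data inside the linear area; on a frag-only skb that trips the skb->len < skb->data_len sanity check, whereas the pskb_* variants first bring paged bytes into the linear area. The toy model below is a schematic userspace analogy of that difference under that assumption — it is not how the kernel implements __pskb_pull_tail, and toy_skb/toy_skb_pull/toy_pskb_pull are invented names.

#include <string.h>
#include <stdio.h>

struct toy_skb {
        unsigned char linear[64];
        unsigned char frag[64];
        unsigned int headlen;        /* valid bytes in linear[] */
        unsigned int fraglen;        /* valid bytes in frag[] */
        unsigned int off;            /* current read offset in linear[] */
};

/* plain pull: only legal while the offset stays inside the linear area */
static unsigned char *toy_skb_pull(struct toy_skb *s, unsigned int n)
{
        if (s->off + n > s->headlen)
                return NULL;        /* would walk off the linear data */
        s->off += n;
        return s->linear + s->off;
}

/* paged pull: tops up the linear area from the frag first */
static unsigned char *toy_pskb_pull(struct toy_skb *s, unsigned int n)
{
        unsigned int need = s->off + n;

        if (need > s->headlen) {
                unsigned int delta = need - s->headlen;

                if (delta > s->fraglen)
                        return NULL;
                memcpy(s->linear + s->headlen, s->frag, delta);
                memmove(s->frag, s->frag + delta, s->fraglen - delta);
                s->headlen += delta;
                s->fraglen -= delta;
        }
        s->off += n;
        return s->linear + s->off;
}

int main(void)
{
        struct toy_skb s = { .headlen = 0, .fraglen = 16 }; /* frag-only */

        memcpy(s.frag, "0123456789abcdef", 16);
        printf("skb_pull:  %s\n", toy_skb_pull(&s, 4) ? "ok" : "fails");
        printf("pskb_pull: %s\n", toy_pskb_pull(&s, 4) ? "ok" : "fails");
        return 0;
}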
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +34 −16

 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * RMNET Data MAP protocol
  *
@@ -51,14 +51,14 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
         void *txporthdr;
         __be16 addend;
 
-        ip4h = (struct iphdr *)(skb->data);
+        ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
         if ((ntohs(ip4h->frag_off) & IP_MF) ||
             ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
                 priv->stats.csum_fragmented_pkt++;
                 return -EOPNOTSUPP;
         }
 
-        txporthdr = skb->data + ip4h->ihl * 4;
+        txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4;
 
         csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
@@ -122,12 +122,12 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
         u16 csum_value, csum_value_final;
         __be16 ip6_hdr_csum, addend;
         struct ipv6hdr *ip6h;
-        void *txporthdr;
+        void *txporthdr, *data = rmnet_map_data_ptr(skb);
         u32 length;
 
-        ip6h = (struct ipv6hdr *)(skb->data);
+        ip6h = data;
 
-        txporthdr = skb->data + sizeof(struct ipv6hdr);
+        txporthdr = data + sizeof(struct ipv6hdr);
         csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
 
         if (!csum_field) {
@@ -138,7 +138,7 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
         csum_value = ~ntohs(csum_trailer->csum_value);
         ip6_hdr_csum = (__force __be16)
                         ~ntohs((__force __be16)ip_compute_csum(ip6h,
-                               (int)(txporthdr - (void *)(skb->data))));
+                               (int)(txporthdr - data)));
         ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
                                       ip6_hdr_csum);
@@ -311,12 +311,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 {
         struct rmnet_map_header *maph;
         struct sk_buff *skbn;
+        unsigned char *data = rmnet_map_data_ptr(skb);
         u32 packet_len;
 
         if (skb->len == 0)
                 return NULL;
 
-        maph = (struct rmnet_map_header *)skb->data;
+        maph = (struct rmnet_map_header *)data;
         packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
 
         if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
@@ -329,14 +330,30 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
         if (ntohs(maph->pkt_len) == 0)
                 return NULL;
 
-        skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
-        if (!skbn)
-                return NULL;
+        if (skb_is_nonlinear(skb)) {
+                skb_frag_t *frag0 = skb_shinfo(skb)->frags;
+                struct page *page = skb_frag_page(frag0);
 
-        skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
-        skb_put(skbn, packet_len);
-        memcpy(skbn->data, skb->data, packet_len);
-        skb_pull(skb, packet_len);
+                skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
+                if (!skbn)
+                        return NULL;
+
+                skb_append_pagefrags(skbn, page, frag0->page_offset,
+                                     packet_len);
+                skbn->data_len += packet_len;
+                skbn->len += packet_len;
+        } else {
+                skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
+                                 GFP_ATOMIC);
+                if (!skbn)
+                        return NULL;
+
+                skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+                skb_put(skbn, packet_len);
+                memcpy(skbn->data, data, packet_len);
+        }
+
+        pskb_pull(skb, packet_len);
 
         return skbn;
 }
@@ -357,7 +374,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
                 return -EOPNOTSUPP;
         }
 
-        csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+        csum_trailer = (struct rmnet_map_dl_csum_trailer *)
+                       (rmnet_map_data_ptr(skb) + len);
 
         if (!csum_trailer->valid) {
                 priv->stats.csum_valid_unset++;
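Note: the deaggregation change is the payoff — for nonlinear skbs each demuxed packet is carved out as a page-frag reference via skb_append_pagefrags() instead of a memcpy, so only linear aggregates still take the copy path. The sketch below walks the same length math the loop relies on: each MAP frame is a 4-byte header plus ntohs(pkt_len) payload bytes, plus a checksum trailer when downlink checksum offload is enabled (assumed 8 bytes here, per the driver's struct rmnet_map_dl_csum_trailer). It is a userspace illustration of the framing only; next_frame_len is an invented name.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define MAP_HDR_LEN     4
#define CSUM_TRL_LEN    8

static size_t next_frame_len(const uint8_t *buf, size_t remaining,
                             int csum_offload)
{
        size_t pkt_len, frame_len;

        if (remaining < MAP_HDR_LEN)
                return 0;
        pkt_len = (size_t)((buf[2] << 8) | buf[3]);     /* big endian */
        if (pkt_len == 0)
                return 0;               /* as the driver bails on pkt_len 0 */
        frame_len = MAP_HDR_LEN + pkt_len +
                    (csum_offload ? CSUM_TRL_LEN : 0);
        return frame_len <= remaining ? frame_len : 0;
}

int main(void)
{
        /* two aggregated frames: 3-byte and 2-byte payloads, no offload */
        const uint8_t agg[] = { 0x00, 0x01, 0x00, 0x03, 'a', 'b', 'c',
                                0x00, 0x02, 0x00, 0x02, 'd', 'e' };
        size_t off = 0, n;

        while ((n = next_frame_len(agg + off, sizeof(agg) - off, 0)) != 0) {
                printf("frame at %zu, %zu bytes (mux %u)\n",
                       off, n, agg[off + 1]);
                off += n;
        }
        return 0;
}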