drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h (+13 −0)

@@ -67,11 +67,24 @@ struct rmnet_pcpu_stats {
 	struct u64_stats_sync syncp;
 };
 
+struct rmnet_priv_stats {
+	u64 csum_ok;
+	u64 csum_valid_unset;
+	u64 csum_validation_failed;
+	u64 csum_err_bad_buffer;
+	u64 csum_err_invalid_ip_version;
+	u64 csum_err_invalid_transport;
+	u64 csum_fragmented_pkt;
+	u64 csum_skipped;
+	u64 csum_sw;
+};
+
 struct rmnet_priv {
 	u8 mux_id;
 	struct net_device *real_dev;
 	struct rmnet_pcpu_stats __percpu *pcpu_stats;
 	struct gro_cells gro_cells;
+	struct rmnet_priv_stats stats;
 };
 
 struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
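The new counters are the backing store for the ethtool statistics registered in rmnet_vnd.c further down: rmnet_get_ethtool_stats() copies the whole struct into the u64 array handed to ethtool, one field per entry in rmnet_gstrings_stats, so the field count and order must stay in lockstep with that string table. A hypothetical compile-time guard (not part of this patch), placed for example at the top of rmnet_get_ethtool_stats(), could catch the two drifting apart:

	/* Hypothetical guard, not in the patch: fail the build if the stats
	 * struct and the ethtool string table fall out of sync.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64) !=
		     sizeof(struct rmnet_priv_stats));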
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c (+14 −11)

@@ -179,7 +179,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 
 	if (skb_headroom(skb) < required_headroom) {
 		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
-			goto fail;
+			return -ENOMEM;
 	}
 
 	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
@@ -187,7 +187,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 
 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
 	if (!map_header)
-		goto fail;
+		return -ENOMEM;
 
 	map_header->mux_id = mux_id;
@@ -212,10 +212,6 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 done:
 	skb->protocol = htons(ETH_P_MAP);
 	return 0;
-
-fail:
-	kfree_skb(skb);
-	return -ENOMEM;
 }
 
 static void
@@ -268,6 +264,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
 	struct rmnet_port *port;
 	struct rmnet_priv *priv;
 	u8 mux_id;
+	int err;
 
 	sk_pacing_shift_update(skb->sk, 8);
@@ -277,15 +274,21 @@ void rmnet_egress_handler(struct sk_buff *skb)
 	mux_id = priv->mux_id;
 
 	port = rmnet_get_port(skb->dev);
-	if (!port) {
-		kfree_skb(skb);
-		return;
-	}
+	if (!port)
+		goto drop;
 
-	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
+	if (err == -ENOMEM)
+		goto drop;
+	else if (err == -EINPROGRESS)
 		return;
 
 	rmnet_vnd_tx_fixup(skb, orig_dev);
 
 	dev_queue_xmit(skb);
+	return;
+
+drop:
+	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+	kfree_skb(skb);
 }
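Taken together these hunks move skb ownership on error out of rmnet_map_egress_handler() and into the caller, which becomes the single place that counts tx_drops. The snippet below is an illustrative restatement of the contract implied by the new return values, not literal driver code; the -EINPROGRESS case is the one where the handler has kept the skb, so the caller must neither free nor transmit it:

	/* Post-patch calling convention for rmnet_map_egress_handler():
	 *   0             skb prepared in place; caller fixes up stats and transmits
	 *   -ENOMEM       skb still owned by the caller; caller bumps tx_drops and frees it
	 *   -EINPROGRESS  skb consumed by the handler; caller just returns
	 */
	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
	if (err == -ENOMEM)
		goto drop;	/* this_cpu_inc(...tx_drops), then kfree_skb() */
	else if (err == -EINPROGRESS)
		return;		/* handler took ownership of the skb */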
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c (+3 −11)

@@ -69,17 +69,9 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
 	struct rmnet_map_control_command *cmd;
 	int xmit_status;
 
-	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
-		if (skb->len < sizeof(struct rmnet_map_header) +
-		    RMNET_MAP_GET_LENGTH(skb) +
-		    sizeof(struct rmnet_map_dl_csum_trailer)) {
-			kfree_skb(skb);
-			return;
-		}
-
-		skb_trim(skb, skb->len -
-			 sizeof(struct rmnet_map_dl_csum_trailer));
-	}
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		skb_trim(skb, skb->len -
+			 sizeof(struct rmnet_map_dl_csum_trailer));
 
 	skb->protocol = htons(ETH_P_MAP);
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c (+48 −16)

@@ -48,7 +48,8 @@ static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
 
 static int
 rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
-			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+			       struct rmnet_map_dl_csum_trailer *csum_trailer,
+			       struct rmnet_priv *priv)
 {
 	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
 	u16 csum_value, csum_value_final;
@@ -58,19 +59,25 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
 	ip4h = (struct iphdr *)(skb->data);
 	if ((ntohs(ip4h->frag_off) & IP_MF) ||
-	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+		priv->stats.csum_fragmented_pkt++;
 		return -EOPNOTSUPP;
+	}
 
 	txporthdr = skb->data + ip4h->ihl * 4;
 
 	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
-	if (!csum_field)
+	if (!csum_field) {
+		priv->stats.csum_err_invalid_transport++;
 		return -EPROTONOSUPPORT;
+	}
 
 	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
-	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+		priv->stats.csum_skipped++;
 		return 0;
+	}
 
 	csum_value = ~ntohs(csum_trailer->csum_value);
 	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
@@ -102,16 +109,20 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
 		}
 	}
 
-	if (csum_value_final == ntohs((__force __be16)*csum_field))
+	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+		priv->stats.csum_ok++;
 		return 0;
-	else
+	} else {
+		priv->stats.csum_validation_failed++;
 		return -EINVAL;
+	}
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static int
 rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
-			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+			       struct rmnet_map_dl_csum_trailer *csum_trailer,
+			       struct rmnet_priv *priv)
 {
 	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
 	u16 csum_value, csum_value_final;
@@ -125,8 +136,10 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
 	txporthdr = skb->data + sizeof(struct ipv6hdr);
 
 	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
-	if (!csum_field)
+	if (!csum_field) {
+		priv->stats.csum_err_invalid_transport++;
 		return -EPROTONOSUPPORT;
+	}
 
 	csum_value = ~ntohs(csum_trailer->csum_value);
 	ip6_hdr_csum = (__force __be16)
@@ -164,11 +177,14 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
 		}
 	}
 
-	if (csum_value_final == ntohs((__force __be16)*csum_field))
+	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+		priv->stats.csum_ok++;
 		return 0;
-	else
+	} else {
+		priv->stats.csum_validation_failed++;
 		return -EINVAL;
+	}
 }
 #endif
 
 static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
@@ -339,24 +355,34 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
  */
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
 {
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
 	struct rmnet_map_dl_csum_trailer *csum_trailer;
 
-	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+		priv->stats.csum_sw++;
 		return -EOPNOTSUPP;
+	}
 
 	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
 
-	if (!csum_trailer->valid)
+	if (!csum_trailer->valid) {
+		priv->stats.csum_valid_unset++;
 		return -EINVAL;
+	}
 
-	if (skb->protocol == htons(ETH_P_IP))
-		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
-	else if (skb->protocol == htons(ETH_P_IPV6))
+	if (skb->protocol == htons(ETH_P_IP)) {
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
 #if IS_ENABLED(CONFIG_IPV6)
-		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
 #else
+		priv->stats.csum_err_invalid_ip_version++;
 		return -EPROTONOSUPPORT;
 #endif
+	} else {
+		priv->stats.csum_err_invalid_ip_version++;
+		return -EPROTONOSUPPORT;
+	}
 
 	return 0;
 }
@@ -367,6 +393,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				      struct net_device *orig_dev)
 {
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
 	struct rmnet_map_ul_csum_header *ul_header;
 	void *iphdr;
@@ -389,8 +416,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
 			return;
 #else
+			priv->stats.csum_err_invalid_ip_version++;
 			goto sw_csum;
 #endif
+		} else {
+			priv->stats.csum_err_invalid_ip_version++;
 		}
 	}
@@ -399,6 +429,8 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 	ul_header->csum_insert_offset = 0;
 	ul_header->csum_enabled = 0;
 	ul_header->udp_ip4_ind = 0;
+
+	priv->stats.csum_sw++;
 }
 
 struct rmnet_agg_work {
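The downlink hunks pair each early exit of the validation path with exactly one counter from the new struct. Restated as data in a hypothetical table (rmnet_dl_csum_exits is an invented name, purely illustrative; csum_err_bad_buffer is the one counter that does not appear in the hunks shown here, and csum_sw is also bumped on the uplink software-checksum fallback):

#include <errno.h>

/* Illustrative only - not part of the patch. */
static const struct {
	const char *condition;
	const char *counter;	/* field in struct rmnet_priv_stats */
	int retval;		/* negative errno or 0 */
} rmnet_dl_csum_exits[] = {
	{ "NETIF_F_RXCSUM disabled on the device",	"csum_sw",			-EOPNOTSUPP },
	{ "MAP trailer valid bit not set",		"csum_valid_unset",		-EINVAL },
	{ "not IPv4/IPv6 (or IPv6 w/o CONFIG_IPV6)",	"csum_err_invalid_ip_version",	-EPROTONOSUPPORT },
	{ "IPv4 fragment",				"csum_fragmented_pkt",		-EOPNOTSUPP },
	{ "unsupported transport protocol",		"csum_err_invalid_transport",	-EPROTONOSUPPORT },
	{ "IPv4 UDP with zero checksum (RFC 768)",	"csum_skipped",			0 },
	{ "trailer checksum matches the packet",	"csum_ok",			0 },
	{ "trailer checksum mismatch",			"csum_validation_failed",	-EINVAL },
};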
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c (+51 −0)

@@ -152,6 +152,56 @@ static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_get_stats64 = rmnet_get_stats64,
 };
 
+static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"Checksum ok",
+	"Checksum valid bit not set",
+	"Checksum validation failed",
+	"Checksum error bad buffer",
+	"Checksum error bad ip version",
+	"Checksum error bad transport",
+	"Checksum skipped on ip fragment",
+	"Checksum skipped",
+	"Checksum computed in software",
+};
+
+static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &rmnet_gstrings_stats,
+		       sizeof(rmnet_gstrings_stats));
+		break;
+	}
+}
+
+static int rmnet_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(rmnet_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void rmnet_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_priv_stats *st = &priv->stats;
+
+	if (!data)
+		return;
+
+	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+}
+
+static const struct ethtool_ops rmnet_ethtool_ops = {
+	.get_ethtool_stats = rmnet_get_ethtool_stats,
+	.get_strings = rmnet_get_strings,
+	.get_sset_count = rmnet_get_sset_count,
+};
+
 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
  * flags, ARP type, needed headroom, etc...
  */
@@ -170,6 +220,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
 	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
 
 	rmnet_dev->needs_free_netdev = true;
+	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
 }
 
 /* Exposed API */
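With rmnet_ethtool_ops wired into rmnet_vnd_setup(), the counters become visible to any ethtool client; `ethtool -S rmnet0` prints them by name. The user-space sketch below does the same thing by hand through the standard ETHTOOL_GSSET_INFO / ETHTOOL_GSTRINGS / ETHTOOL_GSTATS ioctls. It is illustrative only: "rmnet0" is an assumed interface name and error handling is kept minimal.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "rmnet0";	/* assumed interface name */
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Step 1: ask how many ETH_SS_STATS strings the driver exposes. */
	struct ethtool_sset_info *sset = calloc(1, sizeof(*sset) + sizeof(__u32));
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << ETH_SS_STATS;
	ifr.ifr_data = (char *)sset;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSSET_INFO");
		return 1;
	}
	if (!(sset->sset_mask & (1ULL << ETH_SS_STATS))) {
		fprintf(stderr, "%s: no stats strings exposed\n", ifname);
		return 1;
	}
	__u32 n = sset->data[0];

	/* Step 2: fetch the names and the values; both arrays have n entries. */
	struct ethtool_gstrings *strs =
		calloc(1, sizeof(*strs) + (size_t)n * ETH_GSTRING_LEN);
	struct ethtool_stats *stats =
		calloc(1, sizeof(*stats) + (size_t)n * sizeof(__u64));
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = n;
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;

	ifr.ifr_data = (char *)strs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSTRINGS");
		return 1;
	}
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSTATS");
		return 1;
	}

	/* Step 3: print "name value" pairs, e.g. "Checksum ok  1234". */
	for (__u32 i = 0; i < n; i++)
		printf("%-32.32s %llu\n",
		       (char *)strs->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);

	free(sset);
	free(strs);
	free(stats);
	close(fd);
	return 0;
}

Because rmnet_get_ethtool_stats() copies struct rmnet_priv_stats straight into the data array, the values print in the same order as the string table above.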