Loading drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +2 −0 Original line number Diff line number Diff line Loading @@ -70,6 +70,7 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, if (port->nr_rmnet_devs) return -EINVAL; rmnet_map_cmd_exit(port); rmnet_map_tx_aggregate_exit(port); kfree(port); Loading Loading @@ -111,6 +112,7 @@ static int rmnet_register_real_device(struct net_device *real_dev) INIT_HLIST_HEAD(&port->muxed_ep[entry]); rmnet_map_tx_aggregate_init(port); rmnet_map_cmd_init(port); netdev_dbg(real_dev, "registered with rmnet\n"); return 0; Loading drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +19 −0 Original line number Diff line number Diff line Loading @@ -27,6 +27,20 @@ struct rmnet_endpoint { struct hlist_node hlnode; }; struct rmnet_port_priv_stats { u64 dl_hdr_last_seq; u64 dl_hdr_last_bytes; u64 dl_hdr_last_pkts; u64 dl_hdr_last_flows; u64 dl_hdr_count; u64 dl_hdr_total_bytes; u64 dl_hdr_total_pkts; u64 dl_hdr_avg_bytes; u64 dl_hdr_avg_pkts; u64 dl_trl_last_seq; u64 dl_trl_count; }; /* One instance of this structure is instantiated for each real_dev associated * with rmnet. 
*/ Loading @@ -52,6 +66,11 @@ struct rmnet_port { struct hrtimer hrtimer; void *qmi_info; /* dl marker elements */ spinlock_t dl_list_lock; struct list_head dl_list; struct rmnet_port_priv_stats stats; }; extern struct rtnl_link_ops rmnet_link_ops; Loading drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +5 −0 Original line number Diff line number Diff line Loading @@ -92,6 +92,11 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, u8 mux_id; if (RMNET_MAP_GET_CD_BIT(skb)) { if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { if (!rmnet_map_flow_command(skb, port)) return; } if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) return rmnet_map_command(skb, port); Loading drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +63 −1 Original line number Diff line number Diff line Loading @@ -34,6 +34,8 @@ enum rmnet_map_commands { RMNET_MAP_COMMAND_NONE, RMNET_MAP_COMMAND_FLOW_DISABLE, RMNET_MAP_COMMAND_FLOW_ENABLE, RMNET_MAP_COMMAND_FLOW_START = 7, RMNET_MAP_COMMAND_FLOW_END = 8, /* These should always be the last 2 elements */ RMNET_MAP_COMMAND_UNKNOWN, RMNET_MAP_COMMAND_ENUM_LENGTH Loading Loading @@ -63,6 +65,60 @@ struct rmnet_map_ul_csum_header { u16 csum_enabled:1; } __aligned(1); struct rmnet_map_control_command_header { u8 command_name; u8 cmd_type:2; u8 reserved:6; u16 reserved2; u32 transaction_id; } __aligned(1); struct rmnet_map_flow_info_le { __be32 mux_id; __be32 flow_id; __be32 bytes; __be32 pkts; } __aligned(1); struct rmnet_map_flow_info_be { u32 mux_id; u32 flow_id; u32 bytes; u32 pkts; } __aligned(1); struct rmnet_map_dl_ind_hdr { union { struct { u32 seq; u32 bytes; u32 pkts; u32 flows; struct rmnet_map_flow_info_le flow[0]; } le __aligned(1); struct { __be32 seq; __be32 bytes; __be32 pkts; __be32 flows; struct rmnet_map_flow_info_be flow[0]; } be __aligned(1); } __aligned(1); } __aligned(1); struct rmnet_map_dl_ind_trl { union { __be32 seq_be; u32 seq_le; } __aligned(1); } __aligned(1); struct rmnet_map_dl_ind { void 
(*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); struct list_head list; }; #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ (Y)->data)->mux_id) #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ Loading Loading @@ -95,5 +151,11 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset); void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port); void rmnet_map_tx_aggregate_init(struct rmnet_port *port); void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port); void rmnet_map_cmd_init(struct rmnet_port *port); int rmnet_map_dl_ind_register(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind); int rmnet_map_dl_ind_deregister(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind); void rmnet_map_cmd_exit(struct rmnet_port *port); #endif /* _RMNET_MAP_H_ */ drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +167 −0 Original line number Diff line number Diff line Loading @@ -16,6 +16,17 @@ #include "rmnet_private.h" #include "rmnet_vnd.h" #define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ sizeof(struct rmnet_map_header) + \ sizeof(struct rmnet_map_control_command_header)) #define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \ sizeof(struct rmnet_map_control_command_header)) #define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ sizeof(struct rmnet_map_header) + \ sizeof(struct rmnet_map_control_command_header)) static u8 rmnet_map_do_flow_control(struct sk_buff *skb, struct rmnet_port *port, int enable) Loading Loading @@ -83,6 +94,82 @@ static void rmnet_map_send_ack(struct sk_buff *skb, netif_tx_unlock(dev); } static void rmnet_map_dl_hdr_notify(struct rmnet_port *port, struct rmnet_map_dl_ind_hdr *dlhdr) { struct rmnet_map_dl_ind *tmp; spin_lock(&port->dl_list_lock); list_for_each_entry(tmp, &port->dl_list, list) 
tmp->dl_hdr_handler(dlhdr); spin_unlock(&port->dl_list_lock); } static void rmnet_map_dl_trl_notify(struct rmnet_port *port, struct rmnet_map_dl_ind_trl *dltrl) { struct rmnet_map_dl_ind *tmp; spin_lock(&port->dl_list_lock); list_for_each_entry(tmp, &port->dl_list, list) tmp->dl_trl_handler(dltrl); spin_unlock(&port->dl_list_lock); } static void rmnet_map_process_flow_start(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_dl_ind_hdr *dlhdr; if (skb->len < RMNET_DL_IND_HDR_SIZE) return; skb_pull(skb, RMNET_MAP_CMD_SIZE); dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data; port->stats.dl_hdr_last_seq = dlhdr->le.seq; port->stats.dl_hdr_last_bytes = dlhdr->le.bytes; port->stats.dl_hdr_last_pkts = dlhdr->le.pkts; port->stats.dl_hdr_last_flows = dlhdr->le.flows; port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes; port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts; port->stats.dl_hdr_count++; if (unlikely(!(port->stats.dl_hdr_count))) port->stats.dl_hdr_count = 1; port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes / port->stats.dl_hdr_count; port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts / port->stats.dl_hdr_count; rmnet_map_dl_hdr_notify(port, dlhdr); } static void rmnet_map_process_flow_end(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_dl_ind_trl *dltrl; if (skb->len < RMNET_DL_IND_TRL_SIZE) return; skb_pull(skb, RMNET_MAP_CMD_SIZE); dltrl = (struct rmnet_map_dl_ind_trl *)skb->data; port->stats.dl_trl_last_seq = dltrl->seq_le; port->stats.dl_trl_count++; rmnet_map_dl_trl_notify(port, dltrl); } /* Process MAP command frame and send N/ACK message as appropriate. Message cmd * name is decoded here and appropriate handler is called. 
*/ Loading Loading @@ -112,3 +199,83 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) if (rc == RMNET_MAP_COMMAND_ACK) rmnet_map_send_ack(skb, rc, port); } int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; unsigned char command_name; cmd = RMNET_MAP_GET_CMD_START(skb); command_name = cmd->command_name; switch (command_name) { case RMNET_MAP_COMMAND_FLOW_START: rmnet_map_process_flow_start(skb, port); break; case RMNET_MAP_COMMAND_FLOW_END: rmnet_map_process_flow_end(skb, port); break; default: return 1; } consume_skb(skb); return 0; } void rmnet_map_cmd_exit(struct rmnet_port *port) { struct rmnet_map_dl_ind *tmp, *idx; spin_lock(&port->dl_list_lock); list_for_each_entry_safe(tmp, idx, &port->dl_list, list) list_del_rcu(&tmp->list); spin_unlock(&port->dl_list_lock); } void rmnet_map_cmd_init(struct rmnet_port *port) { INIT_LIST_HEAD(&port->dl_list); spin_lock_init(&port->dl_list_lock); } int rmnet_map_dl_ind_register(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind) { if (!port || !dl_ind || !dl_ind->dl_hdr_handler || !dl_ind->dl_trl_handler) return -EINVAL; spin_lock(&port->dl_list_lock); list_add_rcu(&dl_ind->list, &port->dl_list); spin_unlock(&port->dl_list_lock); return 0; } int rmnet_map_dl_ind_deregister(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind) { struct rmnet_map_dl_ind *tmp; if (!port || !dl_ind) return -EINVAL; spin_lock(&port->dl_list_lock); list_for_each_entry(tmp, &port->dl_list, list) { if (tmp == dl_ind) { list_del_rcu(&dl_ind->list); goto done; } } done: spin_unlock(&port->dl_list_lock); return 0; } Loading
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +2 −0 Original line number Diff line number Diff line Loading @@ -70,6 +70,7 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, if (port->nr_rmnet_devs) return -EINVAL; rmnet_map_cmd_exit(port); rmnet_map_tx_aggregate_exit(port); kfree(port); Loading Loading @@ -111,6 +112,7 @@ static int rmnet_register_real_device(struct net_device *real_dev) INIT_HLIST_HEAD(&port->muxed_ep[entry]); rmnet_map_tx_aggregate_init(port); rmnet_map_cmd_init(port); netdev_dbg(real_dev, "registered with rmnet\n"); return 0; Loading
/* Per-real-device private statistics for MAP downlink (DL) marker
 * indications.  The dl_hdr_* fields track DL flow-start indications and
 * the dl_trl_* fields track DL flow-end (trailer) indications; they are
 * updated from the MAP command ingress path in rmnet_map_command.c.
 */
struct rmnet_port_priv_stats {
	u64 dl_hdr_last_seq;	/* sequence number from the most recent DL header */
	u64 dl_hdr_last_bytes;	/* byte count advertised by the most recent DL header */
	u64 dl_hdr_last_pkts;	/* packet count advertised by the most recent DL header */
	u64 dl_hdr_last_flows;	/* flow count advertised by the most recent DL header */
	u64 dl_hdr_count;	/* total DL headers processed (kept non-zero on wrap) */
	u64 dl_hdr_total_bytes;	/* running sum of advertised bytes */
	u64 dl_hdr_total_pkts;	/* running sum of advertised packets */
	u64 dl_hdr_avg_bytes;	/* dl_hdr_total_bytes / dl_hdr_count */
	u64 dl_hdr_avg_pkts;	/* dl_hdr_total_pkts / dl_hdr_count */
	u64 dl_trl_last_seq;	/* sequence number from the most recent DL trailer */
	u64 dl_trl_count;	/* total DL trailers processed */
};
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +5 −0 Original line number Diff line number Diff line Loading @@ -92,6 +92,11 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, u8 mux_id; if (RMNET_MAP_GET_CD_BIT(skb)) { if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { if (!rmnet_map_flow_command(skb, port)) return; } if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) return rmnet_map_command(skb, port); Loading
drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +63 −1 Original line number Diff line number Diff line Loading @@ -34,6 +34,8 @@ enum rmnet_map_commands { RMNET_MAP_COMMAND_NONE, RMNET_MAP_COMMAND_FLOW_DISABLE, RMNET_MAP_COMMAND_FLOW_ENABLE, RMNET_MAP_COMMAND_FLOW_START = 7, RMNET_MAP_COMMAND_FLOW_END = 8, /* These should always be the last 2 elements */ RMNET_MAP_COMMAND_UNKNOWN, RMNET_MAP_COMMAND_ENUM_LENGTH Loading Loading @@ -63,6 +65,60 @@ struct rmnet_map_ul_csum_header { u16 csum_enabled:1; } __aligned(1); struct rmnet_map_control_command_header { u8 command_name; u8 cmd_type:2; u8 reserved:6; u16 reserved2; u32 transaction_id; } __aligned(1); struct rmnet_map_flow_info_le { __be32 mux_id; __be32 flow_id; __be32 bytes; __be32 pkts; } __aligned(1); struct rmnet_map_flow_info_be { u32 mux_id; u32 flow_id; u32 bytes; u32 pkts; } __aligned(1); struct rmnet_map_dl_ind_hdr { union { struct { u32 seq; u32 bytes; u32 pkts; u32 flows; struct rmnet_map_flow_info_le flow[0]; } le __aligned(1); struct { __be32 seq; __be32 bytes; __be32 pkts; __be32 flows; struct rmnet_map_flow_info_be flow[0]; } be __aligned(1); } __aligned(1); } __aligned(1); struct rmnet_map_dl_ind_trl { union { __be32 seq_be; u32 seq_le; } __aligned(1); } __aligned(1); struct rmnet_map_dl_ind { void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); struct list_head list; }; #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ (Y)->data)->mux_id) #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ Loading Loading @@ -95,5 +151,11 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset); void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port); void rmnet_map_tx_aggregate_init(struct rmnet_port *port); void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port); void rmnet_map_cmd_init(struct rmnet_port *port); int 
rmnet_map_dl_ind_register(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind); int rmnet_map_dl_ind_deregister(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind); void rmnet_map_cmd_exit(struct rmnet_port *port); #endif /* _RMNET_MAP_H_ */
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +167 −0 Original line number Diff line number Diff line Loading @@ -16,6 +16,17 @@ #include "rmnet_private.h" #include "rmnet_vnd.h" #define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ sizeof(struct rmnet_map_header) + \ sizeof(struct rmnet_map_control_command_header)) #define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \ sizeof(struct rmnet_map_control_command_header)) #define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ sizeof(struct rmnet_map_header) + \ sizeof(struct rmnet_map_control_command_header)) static u8 rmnet_map_do_flow_control(struct sk_buff *skb, struct rmnet_port *port, int enable) Loading Loading @@ -83,6 +94,82 @@ static void rmnet_map_send_ack(struct sk_buff *skb, netif_tx_unlock(dev); } static void rmnet_map_dl_hdr_notify(struct rmnet_port *port, struct rmnet_map_dl_ind_hdr *dlhdr) { struct rmnet_map_dl_ind *tmp; spin_lock(&port->dl_list_lock); list_for_each_entry(tmp, &port->dl_list, list) tmp->dl_hdr_handler(dlhdr); spin_unlock(&port->dl_list_lock); } static void rmnet_map_dl_trl_notify(struct rmnet_port *port, struct rmnet_map_dl_ind_trl *dltrl) { struct rmnet_map_dl_ind *tmp; spin_lock(&port->dl_list_lock); list_for_each_entry(tmp, &port->dl_list, list) tmp->dl_trl_handler(dltrl); spin_unlock(&port->dl_list_lock); } static void rmnet_map_process_flow_start(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_dl_ind_hdr *dlhdr; if (skb->len < RMNET_DL_IND_HDR_SIZE) return; skb_pull(skb, RMNET_MAP_CMD_SIZE); dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data; port->stats.dl_hdr_last_seq = dlhdr->le.seq; port->stats.dl_hdr_last_bytes = dlhdr->le.bytes; port->stats.dl_hdr_last_pkts = dlhdr->le.pkts; port->stats.dl_hdr_last_flows = dlhdr->le.flows; port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes; port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts; port->stats.dl_hdr_count++; if 
(unlikely(!(port->stats.dl_hdr_count))) port->stats.dl_hdr_count = 1; port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes / port->stats.dl_hdr_count; port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts / port->stats.dl_hdr_count; rmnet_map_dl_hdr_notify(port, dlhdr); } static void rmnet_map_process_flow_end(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_dl_ind_trl *dltrl; if (skb->len < RMNET_DL_IND_TRL_SIZE) return; skb_pull(skb, RMNET_MAP_CMD_SIZE); dltrl = (struct rmnet_map_dl_ind_trl *)skb->data; port->stats.dl_trl_last_seq = dltrl->seq_le; port->stats.dl_trl_count++; rmnet_map_dl_trl_notify(port, dltrl); } /* Process MAP command frame and send N/ACK message as appropriate. Message cmd * name is decoded here and appropriate handler is called. */ Loading Loading @@ -112,3 +199,83 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) if (rc == RMNET_MAP_COMMAND_ACK) rmnet_map_send_ack(skb, rc, port); } int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; unsigned char command_name; cmd = RMNET_MAP_GET_CMD_START(skb); command_name = cmd->command_name; switch (command_name) { case RMNET_MAP_COMMAND_FLOW_START: rmnet_map_process_flow_start(skb, port); break; case RMNET_MAP_COMMAND_FLOW_END: rmnet_map_process_flow_end(skb, port); break; default: return 1; } consume_skb(skb); return 0; } void rmnet_map_cmd_exit(struct rmnet_port *port) { struct rmnet_map_dl_ind *tmp, *idx; spin_lock(&port->dl_list_lock); list_for_each_entry_safe(tmp, idx, &port->dl_list, list) list_del_rcu(&tmp->list); spin_unlock(&port->dl_list_lock); } void rmnet_map_cmd_init(struct rmnet_port *port) { INIT_LIST_HEAD(&port->dl_list); spin_lock_init(&port->dl_list_lock); } int rmnet_map_dl_ind_register(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind) { if (!port || !dl_ind || !dl_ind->dl_hdr_handler || !dl_ind->dl_trl_handler) return -EINVAL; spin_lock(&port->dl_list_lock); 
list_add_rcu(&dl_ind->list, &port->dl_list); spin_unlock(&port->dl_list_lock); return 0; } int rmnet_map_dl_ind_deregister(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind) { struct rmnet_map_dl_ind *tmp; if (!port || !dl_ind) return -EINVAL; spin_lock(&port->dl_list_lock); list_for_each_entry(tmp, &port->dl_list, list) { if (tmp == dl_ind) { list_del_rcu(&dl_ind->list); goto done; } } done: spin_unlock(&port->dl_list_lock); return 0; }