```diff
drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c  +53 −40

@@ -67,7 +67,8 @@
 static char dbg_buff[IPA_GSB_MAX_MSG_LEN];
 
 #define IPA_GSB_SKB_HEADROOM 256
-#define IPA_GSB_AGGR_BYTE_LIMIT 6
+#define IPA_GSB_SKB_DUMMY_HEADER 42
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
 #define IPA_GSB_AGGR_TIME_LIMIT 1
 
 static struct dentry *dent;

@@ -278,22 +279,36 @@ static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info)
 		"%s_ipv4", iface_info->netdev_name);
 	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
 		"%s_ipv6", iface_info->netdev_name);
-	/* partial header: [hdl][QMAP ID][pkt size][ETH header] */
+	/*
+	 * partial header:
+	 * [hdl][QMAP ID][pkt size][Dummy Header][ETH header]
+	 */
 	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
-		hdr->hdr[i].hdr_len = ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr);
+		/*
+		 * Optimization: add dummy header to reserve space
+		 * for rndis header, so we can do the skb_clone
+		 * instead of deep copy.
+		 */
+		hdr->hdr[i].hdr_len = ETH_HLEN +
+			sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
 		hdr->hdr[i].type = IPA_HDR_L2_ETHERNET_II;
 		hdr->hdr[i].is_partial = 1;
 		hdr->hdr[i].is_eth2_ofst_valid = 1;
-		hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr);
+		hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
 		/* populate iface handle */
 		hdr->hdr[i].hdr[0] = iface_info->iface_hdl;
 		/* populate src ETH address */
-		memcpy(&hdr->hdr[i].hdr[10], iface_info->device_ethaddr, 6);
+		memcpy(&hdr->hdr[i].hdr[10 + IPA_GSB_SKB_DUMMY_HEADER],
+			iface_info->device_ethaddr, 6);
 		/* populate Ethertype */
 		if (i == IPA_IP_v4)
-			*(u16 *)(hdr->hdr[i].hdr + 16) = htons(ETH_P_IP);
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IP);
 		else
-			*(u16 *)(hdr->hdr[i].hdr + 16) = htons(ETH_P_IPV6);
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6);
 	}
 
 	if (ipa_add_hdr(hdr)) {

@@ -410,7 +425,7 @@ static void ipa_gsb_pm_cb(void *user_data, enum ipa_pm_cb_event event)
 		return;
 	}
 
-	IPA_GSB_DBG("wake up clients\n");
+	IPA_GSB_DBG_LOW("wake up clients\n");
 	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
 		if (ipa_gsb_ctx->iface[i] != NULL)
 			ipa_gsb_ctx->iface[i]->wakeup_request(

@@ -629,21 +644,6 @@ int ipa_bridge_cleanup(u32 hdl)
 }
 EXPORT_SYMBOL(ipa_bridge_cleanup);
 
-static struct sk_buff *ipa_gsb_skb_copy(struct sk_buff *skb, int len)
-{
-	struct sk_buff *skb2 = NULL;
-
-	skb2 = __dev_alloc_skb(len + IPA_GSB_SKB_HEADROOM, GFP_KERNEL);
-	if (likely(skb2)) {
-		skb_reserve(skb2, IPA_GSB_SKB_HEADROOM);
-		memcpy(skb2->data, skb->data, len);
-		skb2->len = len;
-		skb_set_tail_pointer(skb2, len);
-	}
-
-	return skb2;
-}
-
 static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
 	unsigned long data)
 {

@@ -665,21 +665,33 @@ static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
 		mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
 		pkt_size = mux_hdr->pkt_size;
 		/* 4-byte padding */
-		pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN + 3) & ~3)
-			- (pkt_size + sizeof(*mux_hdr) + ETH_HLEN);
+		pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN +
+			3 + IPA_GSB_SKB_DUMMY_HEADER) & ~3) -
+			(pkt_size + sizeof(*mux_hdr) +
+			ETH_HLEN + IPA_GSB_SKB_DUMMY_HEADER);
 		hdl = mux_hdr->iface_hdl;
-		IPA_GSB_DBG("pkt_size: %d, pad_byte: %d, hdl: %d\n",
+		IPA_GSB_DBG_LOW("pkt_size: %d, pad_byte: %d, hdl: %d\n",
 			pkt_size, pad_byte, hdl);
 
-		/* remove 4 byte mux header */
-		skb_pull(skb, sizeof(*mux_hdr));
-		skb2 = ipa_gsb_skb_copy(skb, pkt_size + ETH_HLEN);
+		/* remove 4 byte mux header AND dummy header */
+		skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER);
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (!skb2) {
+			IPA_GSB_ERR("skb_clone failed\n");
+			WARN_ON(1);
+			break;
+		}
+		skb_trim(skb2, pkt_size + ETH_HLEN);
 		ipa_gsb_ctx->iface[hdl]->send_dl_skb(
 			ipa_gsb_ctx->iface[hdl]->priv, skb2);
 		ipa_gsb_ctx->iface[hdl]->iface_stats.num_dl_packets++;
 		skb_pull(skb, pkt_size + ETH_HLEN + pad_byte);
 	}
+
+	if (skb) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+	}
 }

@@ -702,7 +714,7 @@ static void ipa_gsb_tx_dp_notify(void *priv, enum ipa_dp_evt_type evt,
 	/* change to host order */
 	*(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr);
 	hdl = mux_hdr->iface_hdl;
-	IPA_GSB_DBG("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl);
+	IPA_GSB_DBG_LOW("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl);
 
 	/* remove 4 byte mux header */
 	skb_pull(skb, sizeof(struct ipa_gsb_mux_hdr));

@@ -740,7 +752,8 @@ static int ipa_gsb_connect_sys_pipe(void)
 	/* configure TX EP */
 	cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
 	cons_params.ipa_ep_cfg.hdr.hdr_len =
-		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr);
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr) +
+		IPA_GSB_SKB_DUMMY_HEADER;
 	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
 	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
 	cons_params.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;

@@ -911,7 +924,7 @@ int ipa_bridge_resume(u32 hdl)
 		return -EFAULT;
 	}
 
-	IPA_GSB_DBG("client hdl: %d\n", hdl);
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
 
 	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
 		IPA_GSB_ERR("iface is not connected\n");

@@ -919,7 +932,7 @@ int ipa_bridge_resume(u32 hdl)
 	}
 
 	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
-		IPA_GSB_DBG("iface was already resumed\n");
+		IPA_GSB_DBG_LOW("iface was already resumed\n");
 		return 0;
 	}
 

@@ -946,7 +959,7 @@ int ipa_bridge_resume(u32 hdl)
 	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
 	ipa_gsb_ctx->num_resumed_iface++;
-	IPA_GSB_DBG("num resumed iface: %d\n",
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
 		ipa_gsb_ctx->num_resumed_iface);
 
 	mutex_unlock(&ipa_gsb_ctx->lock);

@@ -963,7 +976,7 @@ int ipa_bridge_suspend(u32 hdl)
 		return -EFAULT;
 	}
 
-	IPA_GSB_DBG("client hdl: %d\n", hdl);
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
 
 	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
 		IPA_GSB_ERR("iface is not connected\n");

@@ -971,7 +984,7 @@ int ipa_bridge_suspend(u32 hdl)
 	}
 
 	if (!ipa_gsb_ctx->iface[hdl]->is_resumed) {
-		IPA_GSB_DBG("iface was already suspended\n");
+		IPA_GSB_DBG_LOW("iface was already suspended\n");
 		return 0;
 	}
 

@@ -999,7 +1012,7 @@ int ipa_bridge_suspend(u32 hdl)
 	ipa_gsb_ctx->iface[hdl]->is_resumed = false;
 	ipa_gsb_ctx->num_resumed_iface--;
-	IPA_GSB_DBG("num resumed iface: %d\n",
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
 		ipa_gsb_ctx->num_resumed_iface);
 
 	mutex_unlock(&ipa_gsb_ctx->lock);

@@ -1032,11 +1045,11 @@ int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
 	struct sk_buff *skb2;
 	int ret;
 
-	IPA_GSB_DBG("client hdl: %d\n", hdl);
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
 
 	/* make sure skb has enough headroom */
 	if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) {
-		IPA_GSB_DBG("skb doesn't have enough headroom\n");
+		IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n");
 		skb2 = skb_copy_expand(skb, sizeof(struct ipa_gsb_mux_hdr),
 			0, GFP_ATOMIC);
 		if (!skb2) {
```

```diff
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c  +9 −3

@@ -35,6 +35,9 @@
 #define IPA_GENERIC_AGGR_TIME_LIMIT 1
 #define IPA_GENERIC_AGGR_PKT_LIMIT 0
 
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_RX_BUFF_BASE_SZ 16384
+
 #define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
 #define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
 		(X) + NET_SKB_PAD) +\

@@ -2946,11 +2949,14 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 			/* recycle skb for GSB use case */
 			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 				sys->free_rx_wrapper =
-					ipa3_recycle_rx_wrapper;
+					ipa3_free_rx_wrapper;
 				sys->repl_hdlr =
-					ipa3_replenish_rx_cache_recycle;
+					ipa3_replenish_rx_cache;
+				/* Overwrite buffer size & aggr limit for GSB */
 				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
-					IPA_GENERIC_RX_BUFF_BASE_SZ);
+					IPA_GSB_RX_BUFF_BASE_SZ);
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+					IPA_GSB_AGGR_BYTE_LIMIT;
 			} else {
 				sys->free_rx_wrapper =
 					ipa3_free_rx_wrapper;
```
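Review note on the committed partial header: with the dummy gap folded in, `hdr_len` grows to 60 bytes and every hard-coded offset shifts by 42. The byte map below is derived from the hunks (the QMAP ID and packet-size placement follow the patch comment and the `hdr_ofst_pkt_size = 2` EP setting; the dst-MAC bytes are simply left for the routing rules to fill):

```c
/*
 * Committed partial header after this patch (60 bytes):
 *
 *   [0]       iface handle   hdr[0] = iface_hdl
 *   [1]       QMAP ID        filled per packet
 *   [2..3]    pkt size       matches hdr_ofst_pkt_size = 2
 *   [4..45]   dummy gap      IPA_GSB_SKB_DUMMY_HEADER (42 bytes),
 *                            reserved so the downstream driver can
 *                            push an RNDIS header without a realloc
 *   [46..51]  dst MAC        eth2_ofst = 4 + 42 = 46
 *   [52..57]  src MAC        memcpy at hdr[10 + 42]
 *   [58..59]  Ethertype      *(u16 *)(hdr + 16 + 42)
 *
 * hdr_len = ETH_HLEN (14) + sizeof(struct ipa_gsb_mux_hdr) (4) + 42 = 60
 */
```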
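The core of the change is that DL demux no longer deep-copies each packet out of the aggregated buffer: because every packet now carries the 42-byte reserved gap in front of its Ethernet header, the bridge can hand out lightweight clones instead. A simplified sketch of the resulting loop shape (not the literal driver code: the while-condition is assumed, stats and WARN_ON are omitted, and the mux-header field widths are abbreviated):

```c
/* Sketch of the new DL demux path, under the assumptions above. */
static void gsb_demux_sketch(struct sk_buff *skb)
{
	struct ipa_gsb_mux_hdr *mux_hdr;
	struct sk_buff *skb2;
	u32 pkt_size, pad_byte;
	u8 hdl;

	while (skb->len) {
		mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
		pkt_size = mux_hdr->pkt_size;
		hdl = mux_hdr->iface_hdl;
		/* 4-byte alignment remainder, exactly as in the hunk */
		pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN + 3 +
			IPA_GSB_SKB_DUMMY_HEADER) & ~3) -
			(pkt_size + sizeof(*mux_hdr) + ETH_HLEN +
			IPA_GSB_SKB_DUMMY_HEADER);

		/* strip the mux header and the 42-byte reserved gap */
		skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER);

		/* O(1) clone: shares the parent's data, copies metadata */
		skb2 = skb_clone(skb, GFP_KERNEL);
		if (!skb2)
			break;
		/* restrict the clone's view to this one packet */
		skb_trim(skb2, pkt_size + ETH_HLEN);
		ipa_gsb_ctx->iface[hdl]->send_dl_skb(
			ipa_gsb_ctx->iface[hdl]->priv, skb2);

		/* advance the parent past this packet and its padding */
		skb_pull(skb, pkt_size + ETH_HLEN + pad_byte);
	}
	/* parent's data stays alive until every clone is also freed */
	dev_kfree_skb_any(skb);
}
```

This is safe because skb_clone() bumps the shared data's refcount, so the final dev_kfree_skb_any() on the parent only releases the buffer once every clone handed to send_dl_skb() has been consumed. That is the trade against the removed ipa_gsb_skb_copy(), which paid a 256-byte-headroom allocation plus a memcpy per demuxed packet.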
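The alignment math is easy to sanity-check outside the kernel. A minimal standalone program (plain C, not kernel code; ETH_HLEN and the mux-header size written out as literals) reproducing the new pad_byte computation from ipa_gsb_cons_cb():

```c
#include <stdio.h>

#define ETH_HLEN                 14  /* standard Ethernet header */
#define MUX_HDR_LEN               4  /* sizeof(struct ipa_gsb_mux_hdr) */
#define IPA_GSB_SKB_DUMMY_HEADER 42  /* reserved RNDIS-header gap */

/* Round the per-packet frame (mux header + dummy gap + Ethernet
 * header + payload) up to a 4-byte boundary and return how many
 * pad bytes that adds, as in ipa_gsb_cons_cb().
 */
static unsigned int pad_byte(unsigned int pkt_size)
{
	unsigned int len = pkt_size + MUX_HDR_LEN + ETH_HLEN +
			   IPA_GSB_SKB_DUMMY_HEADER;

	return ((len + 3) & ~3u) - len;
}

int main(void)
{
	const unsigned int sizes[] = { 60, 61, 62, 63, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("pkt_size %4u -> pad_byte %u\n",
		       sizes[i], pad_byte(sizes[i]));
	return 0;
}
```

On the ipa_dp.c side: assuming aggr_byte_limit is programmed in kilobytes (as the IPA EP aggregation config field is documented), the new limit of 14 corresponds to 14336 bytes, which the enlarged 16384-byte GSB RX buffer holds with roughly 2 KB of slack for the packet that crosses the limit; the old 8192-byte generic buffer could not. The same hunk also drops the skb-recycling wrapper in favor of the plain free/replenish path, consistent with the clone-based DL demux above.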