drivers/platform/msm/gsi/gsi.c  +11 −5

@@ -3433,7 +3433,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
+	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
 		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
 			chan_hdl, num_xfers, xfer);
 		return -GSI_STATUS_INVALID_PARAMS;
@@ -3453,6 +3453,11 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
 	slock = &ctx->ring.slock;
 	spin_lock_irqsave(slock, flags);
 
+	/* allow only ring doorbell */
+	if (!num_xfers)
+		goto ring_doorbell;
+
 	/*
	 * for GCI channels the responsibility is on the caller to make sure
	 * there is enough room in the TRE.
@@ -3488,11 +3493,12 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
 	ctx->stats.queued += num_xfers;
 
+ring_doorbell:
+	if (ring_db) {
 		/* ensure TRE is set before ringing doorbell */
 		wmb();
-	if (ring_db)
 		gsi_ring_chan_doorbell(ctx);
+	}
 
 	spin_unlock_irqrestore(slock, flags);
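The relaxed parameter check together with the new ring_doorbell label means a caller may now pass num_xfers == 0 (even with a NULL xfer array) purely to ring the channel doorbell for TREs it queued earlier with ring_db = false. A minimal caller sketch, assuming chan_hdl, num and xfers were prepared the usual way:

	int ret;

	/* queue a batch of TREs but hold the doorbell back */
	ret = gsi_queue_xfer(chan_hdl, num, xfers, false);
	if (ret != GSI_STATUS_SUCCESS)
		return ret;

	/*
	 * Doorbell-only call: with num_xfers == 0 the function skips TRE
	 * setup, jumps to the ring_doorbell label, issues the wmb() and
	 * rings the channel doorbell once for everything queued above.
	 */
	ret = gsi_queue_xfer(chan_hdl, 0, NULL, true);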
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c  +46 −57

@@ -78,6 +78,7 @@
 #define IPA_DEFAULT_SYS_YELLOW_WM 32
 #define IPA_REPL_XFER_THRESH 20
+#define IPA_REPL_XFER_MAX 36
 
 #define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
@@ -2058,7 +2059,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
 	int ret;
 	int idx = 0;
 	int rx_len_cached = 0;
-	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH];
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
 	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
 
 	rx_len_cached = sys->len;
@@ -2102,15 +2103,13 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
 		idx++;
 		rx_len_cached++;
 		/*
-		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
+		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
 		 * If this size is reached we need to queue the xfers.
 		 */
-		if (idx == IPA_REPL_XFER_THRESH) {
+		if (idx == IPA_REPL_XFER_MAX) {
 			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
-				gsi_xfer_elem_array, true);
-			if (ret == GSI_STATUS_SUCCESS) {
-				sys->len = rx_len_cached;
-			} else {
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
 				/* we don't expect this will happen */
 				IPAERR("failed to provide buffer: %d\n", ret);
 				WARN_ON(1);
@@ -2130,7 +2129,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
 				msecs_to_jiffies(1));
 done:
-	if (idx) {
+	/* only ring doorbell once here */
 	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
 		gsi_xfer_elem_array, true);
 	if (ret == GSI_STATUS_SUCCESS) {
@@ -2141,7 +2140,6 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
 			WARN_ON(1);
 		}
 	}
-}
 
 static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
 {
@@ -2150,7 +2148,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
 	int ret;
 	int idx = 0;
 	int rx_len_cached = 0;
-	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH];
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
 	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
 
 	/* start replenish only when buffers go lower than the threshold */
@@ -2212,15 +2210,13 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
 		idx++;
 		rx_len_cached++;
 		/*
-		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
+		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
 		 * If this size is reached we need to queue the xfers.
 		 */
-		if (idx == IPA_REPL_XFER_THRESH) {
+		if (idx == IPA_REPL_XFER_MAX) {
 			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
-				gsi_xfer_elem_array, true);
-			if (ret == GSI_STATUS_SUCCESS) {
-				sys->len = rx_len_cached;
-			} else {
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
 				/* we don't expect this will happen */
 				IPAERR("failed to provide buffer: %d\n", ret);
 				WARN_ON(1);
@@ -2240,7 +2236,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
 				msecs_to_jiffies(1));
 done:
-	if (idx) {
+	/* only ring doorbell once here */
 	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
 		gsi_xfer_elem_array, true);
 	if (ret == GSI_STATUS_SUCCESS) {
@@ -2251,7 +2247,6 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
 			WARN_ON(1);
 		}
 	}
-}
 
 static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
 {
@@ -2275,7 +2270,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
 	struct ipa3_rx_pkt_wrapper *rx_pkt;
 	int ret;
 	int rx_len_cached = 0;
-	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH];
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
 	u32 curr;
 	int idx = 0;
@@ -2305,15 +2300,10 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
 		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
 		 * If this size is reached we need to queue the xfers.
 		 */
-		if (idx == IPA_REPL_XFER_THRESH) {
+		if (idx == IPA_REPL_XFER_MAX) {
 			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
-				gsi_xfer_elem_array, true);
-			if (ret == GSI_STATUS_SUCCESS) {
-				/* ensure write is done before setting head */
-				mb();
-				atomic_set(&sys->repl->head_idx, curr);
-				sys->len = rx_len_cached;
-			} else {
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
 				/* we don't expect this will happen */
 				IPAERR("failed to provide buffer: %d\n", ret);
 				WARN_ON(1);
@@ -2322,8 +2312,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
 			idx = 0;
 		}
 	}
-	/* There can still be something left which has not been xfer yet */
-	if (idx) {
+	/* only ring doorbell once here */
 	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
 		gsi_xfer_elem_array, true);
 	if (ret == GSI_STATUS_SUCCESS) {
@@ -2336,7 +2325,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
 		IPAERR("failed to provide buffer: %d\n", ret);
 		WARN_ON(1);
 	}
-	}
 
 	spin_unlock_bh(&sys->spinlock);
 	__trigger_repl_work(sys);
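Net effect in the three replenish paths: buffers are batched into gsi_xfer_elem_array up to IPA_REPL_XFER_MAX, every full intermediate batch is queued with ring_db = false, and one trailing gsi_queue_xfer() call with ring_db = true rings the doorbell exactly once per replenish cycle, which works even when the trailing batch is empty thanks to the gsi.c change above. A condensed sketch of the shared pattern (fill_one_xfer() is a hypothetical stand-in for the per-buffer setup the real functions do inline):

static void replenish_pattern_sketch(struct ipa3_sys_context *sys, int nbufs)
{
	struct gsi_xfer_elem xfers[IPA_REPL_XFER_MAX];
	int i, ret, idx = 0;

	for (i = 0; i < nbufs; i++) {
		fill_one_xfer(&xfers[idx], sys);	/* hypothetical helper */
		if (++idx == IPA_REPL_XFER_MAX) {
			/* full batch: queue it without ringing the doorbell */
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				xfers, false);
			WARN_ON(ret != GSI_STATUS_SUCCESS);
			idx = 0;
		}
	}

	/*
	 * Queue the remainder (idx may be 0) and ring the doorbell
	 * exactly once for the whole cycle.
	 */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, xfers, true);
	WARN_ON(ret != GSI_STATUS_SUCCESS);
}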