drivers/platform/msm/ipa/ipa_dp.c  (+24 −18)

@@ -53,8 +53,10 @@ static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
 	int i;
 
 	for (i = 0; i < cnt; i++) {
-		if (unlikely(list_empty(&sys->head_desc_list)))
+		if (unlikely(list_empty(&sys->head_desc_list))) {
 			WARN_ON(1);
+			return;
+		}
 		tx_pkt_expected = list_first_entry(&sys->head_desc_list,
 						   struct ipa_tx_pkt_wrapper,
 						   link);
@@ -87,14 +89,17 @@ static void ipa_wq_write_done_status(int src_pipe)
 		return;
 	}
 
-	spin_lock(&sys->spinlock);
-	if (unlikely(list_empty(&sys->head_desc_list)))
+	spin_lock_bh(&sys->spinlock);
+	if (unlikely(list_empty(&sys->head_desc_list))) {
 		WARN_ON(1);
+		spin_unlock_bh(&sys->spinlock);
+		return;
+	}
 	tx_pkt_expected = list_first_entry(&sys->head_desc_list,
 					   struct ipa_tx_pkt_wrapper,
 					   link);
 	ipa_wq_write_done_common(sys, tx_pkt_expected->cnt);
-	spin_unlock(&sys->spinlock);
+	spin_unlock_bh(&sys->spinlock);
 }
 
 /**
@@ -120,9 +125,9 @@ static void ipa_wq_write_done(struct work_struct *work)
 	cnt = tx_pkt->cnt;
 	sys = tx_pkt->sys;
 
-	spin_lock(&sys->spinlock);
+	spin_lock_bh(&sys->spinlock);
 	ipa_wq_write_done_common(sys, cnt);
-	spin_unlock(&sys->spinlock);
+	spin_unlock_bh(&sys->spinlock);
 }
 
 static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
@@ -145,9 +150,9 @@ static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
 		if (iov.addr == 0)
 			break;
 
-		spin_lock(&sys->spinlock);
+		spin_lock_bh(&sys->spinlock);
 		ipa_wq_write_done_common(sys, 1);
-		spin_unlock(&sys->spinlock);
+		spin_unlock_bh(&sys->spinlock);
 		cnt++;
 	};
 
@@ -314,7 +319,7 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
 
 	INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
 
-	spin_lock(&sys->spinlock);
+	spin_lock_bh(&sys->spinlock);
 	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
 	result = sps_transfer_one(sys->ep->ep_hdl, dma_address,
 				  len, tx_pkt, sps_flags);
@@ -323,13 +328,13 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
 		goto fail_sps_send;
 	}
 
-	spin_unlock(&sys->spinlock);
+	spin_unlock_bh(&sys->spinlock);
 
 	return 0;
 
 fail_sps_send:
 	list_del(&tx_pkt->link);
-	spin_unlock(&sys->spinlock);
+	spin_unlock_bh(&sys->spinlock);
 	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
 		dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce, dma_address);
 
@@ -384,7 +389,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 	transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, &dma_addr);
 	transfer.iovec_phys = dma_addr;
 	transfer.iovec_count = num_desc;
-	spin_lock(&sys->spinlock);
+	spin_lock_bh(&sys->spinlock);
 	if (!transfer.iovec) {
 		IPAERR("fail to alloc DMA mem for sps xfr buff\n");
 		goto failure_coherent;
@@ -491,7 +496,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 		goto failure;
 	}
 
-	spin_unlock(&sys->spinlock);
+	spin_unlock_bh(&sys->spinlock);
 	return 0;
 
 failure:
@@ -518,7 +523,7 @@ failure:
 		dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
 			      transfer.iovec_phys);
 failure_coherent:
-	spin_unlock(&sys->spinlock);
+	spin_unlock_bh(&sys->spinlock);
 	return -EFAULT;
 }
 
@@ -1913,8 +1918,10 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
 	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
 	struct sk_buff *rx_skb;
 
-	if (unlikely(list_empty(&sys->head_desc_list)))
+	if (unlikely(list_empty(&sys->head_desc_list))) {
 		WARN_ON(1);
+		return;
+	}
 	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
 					   struct ipa_rx_pkt_wrapper,
 					   link);
@@ -1932,7 +1939,6 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
 
 	ipa_replenish_rx_cache(sys);
 	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
 }
-
 
 static void ipa_wq_rx_avail(struct work_struct *work)
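
The recurring change in this diff swaps every spin_lock()/spin_unlock() on sys->spinlock for the spin_lock_bh()/spin_unlock_bh() variants. That is the standard fix when a lock is taken both in process context and in bottom-half (softirq/tasklet) context: a plain spin_lock holder in process context can be interrupted on the same CPU by a bottom half that tries to take the same lock, which then spins forever. The _bh variants disable bottom halves locally for the duration of the critical section. Below is a minimal, self-contained sketch of that pattern, not IPA driver code; all demo_* names are invented for illustration, and it uses the classic DECLARE_TASKLET()/unsigned-long handler API that matches this driver's vintage.

/*
 * Hypothetical module-style sketch of the locking discipline this
 * patch adopts: a descriptor list shared between process context
 * (the queueing path) and a tasklet (the completion path).
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/bug.h>

struct demo_pkt {
	struct list_head link;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/*
 * Process context, e.g. a send path like ipa_send_one(): the _bh
 * variant keeps the completion tasklet from running on this CPU
 * while the lock is held, so it cannot deadlock against us.
 */
static void demo_queue(struct demo_pkt *pkt)
{
	spin_lock_bh(&demo_lock);
	list_add_tail(&pkt->link, &demo_list);
	spin_unlock_bh(&demo_lock);
}

/*
 * Tasklet (bottom-half) context: bottom halves are already disabled
 * while this runs, so the plain lock is sufficient here.
 */
static void demo_complete(unsigned long data)
{
	struct demo_pkt *pkt;

	spin_lock(&demo_lock);
	if (unlikely(list_empty(&demo_list))) {
		WARN_ON(1);	/* completion arrived with nothing queued */
		spin_unlock(&demo_lock);
		return;
	}
	pkt = list_first_entry(&demo_list, struct demo_pkt, link);
	list_del(&pkt->link);
	spin_unlock(&demo_lock);
}

static DECLARE_TASKLET(demo_tasklet, demo_complete, 0);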
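A second fix rides along with the locking change. Before this patch, the empty-list checks in ipa_wq_write_done_common() and ipa_wq_rx_common() were braceless, so after WARN_ON(1) fired, execution fell through to list_first_entry(). On an empty list that macro returns the list head itself cast to the wrapper type, a bogus pointer that is then dereferenced. The added braces and early return close that hole, and in ipa_wq_write_done_status() the new spin_unlock_bh() ahead of the return also keeps the error path from exiting with the spinlock still held.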