Loading drivers/platform/msm/ipa/ipa_api.c +92 −0 Original line number Diff line number Diff line Loading @@ -2617,6 +2617,98 @@ static int ipa_ap_resume(struct device *dev) return ret; } int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, struct ipa_usb_teth_params *teth_params, int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *), void *user_data) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_init_teth_prot, teth_prot, teth_params, ipa_usb_notify_cb, user_data); return ret; } EXPORT_SYMBOL(ipa_usb_init_teth_prot); int ipa_usb_request_xdci_channel(struct ipa_usb_xdci_chan_params *params, struct ipa_req_chan_out_params *out_params) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_request_xdci_channel, params, out_params); return ret; } EXPORT_SYMBOL(ipa_usb_request_xdci_channel); int ipa_usb_xdci_connect(struct ipa_usb_xdci_connect_params *params) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_connect, params); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_connect); int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_disconnect, ul_clnt_hdl, dl_clnt_hdl, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_disconnect); int ipa_usb_release_xdci_channel(u32 clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_release_xdci_channel, clnt_hdl, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_release_xdci_channel); int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_deinit_teth_prot, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_deinit_teth_prot); int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_suspend, ul_clnt_hdl, dl_clnt_hdl, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_suspend); int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl) { int ret; 
IPA_API_DISPATCH_RETURN(ipa_usb_xdci_resume, ul_clnt_hdl, dl_clnt_hdl); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_resume); static const struct dev_pm_ops ipa_pm_ops = { .suspend_noirq = ipa_ap_suspend, .resume_noirq = ipa_ap_resume, Loading drivers/platform/msm/ipa/ipa_api.h +25 −0 Original line number Diff line number Diff line Loading @@ -320,6 +320,31 @@ struct ipa_api_controller { struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx); int (*ipa_usb_init_teth_prot)(enum ipa_usb_teth_prot teth_prot, struct ipa_usb_teth_params *teth_params, int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void*), void *user_data); int (*ipa_usb_request_xdci_channel)( struct ipa_usb_xdci_chan_params *params, struct ipa_req_chan_out_params *out_params); int (*ipa_usb_xdci_connect)( struct ipa_usb_xdci_connect_params *params); int (*ipa_usb_xdci_disconnect)(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_release_xdci_channel)(u32 clnt_hdl, enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_deinit_teth_prot)(enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_xdci_suspend)(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_xdci_resume)(u32 ul_clnt_hdl, u32 dl_clnt_hdl); }; #ifdef CONFIG_IPA Loading drivers/platform/msm/ipa/ipa_v3/Makefile +1 −1 Original line number Diff line number Diff line Loading @@ -2,6 +2,6 @@ obj-$(CONFIG_IPA3) += ipat.o ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o odu_bridge.o \ ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_usb.o obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o drivers/platform/msm/ipa/ipa_v3/ipa.c +6 −0 Original line number Diff line number Diff line 
Loading @@ -3529,6 +3529,12 @@ static int ipa3_init(const struct ipa3_plat_drv_res *resource_p, else IPADBG(":wdi init ok\n"); result = ipa3_usb_init(); if (result) IPAERR(":ipa_usb init failed (%d)\n", -result); else IPADBG(":ipa_usb init ok\n"); ipa3_ctx->q6_proxy_clk_vote_valid = true; ipa3_register_panic_hdlr(); Loading drivers/platform/msm/ipa/ipa_v3/ipa_client.c +694 −0 Original line number Diff line number Diff line Loading @@ -27,6 +27,16 @@ #define IPA_PKT_FLUSH_TO_US 100 #define IPA_POLL_FOR_EMPTINESS_NUM 50 #define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20 #define IPA_POLL_FOR_CHANNEL_STOP_NUM 10 /* xfer_rsc_idx should be 7 bits */ #define IPA_XFER_RSC_IDX_MAX 127 static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info); int ipa3_enable_data_path(u32 clnt_hdl) { struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl]; Loading Loading @@ -989,3 +999,687 @@ reset_chan_fail: ipa3_dec_client_disable_clks(); return result; } int ipa3_reset_gsi_event_ring(u32 clnt_hdl) { struct ipa3_ep_context *ep; int result = -EFAULT; enum gsi_status gsi_res; IPADBG("ipa3_reset_gsi_event_ring: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); /* Reset event ring */ gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error resetting event: %d\n", gsi_res); result = -EFAULT; goto reset_evt_fail; } if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_reset_gsi_event_ring: exit\n"); return 0; reset_evt_fail: if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params) { if (params->client >= IPA_CLIENT_MAX) return false; else return true; } int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, struct 
ipa_req_chan_out_params *out_params) { int ipa_ep_idx; int result = -EFAULT; struct ipa3_ep_context *ep; struct ipa3_ep_cfg_status ep_status; unsigned long gsi_dev_hdl; enum gsi_status gsi_res; struct ipa_gsi_ep_config gsi_ep_cfg; struct ipa_gsi_ep_config *gsi_ep_cfg_ptr = &gsi_ep_cfg; IPADBG("ipa3_request_gsi_channel: entry\n"); if (params == NULL || out_params == NULL || !ipa3_is_legal_params(params)) { IPAERR("bad parameters\n"); return -EINVAL; } ipa_ep_idx = ipa3_get_ep_mapping(params->client); if (ipa_ep_idx == -1) { IPAERR("fail to alloc EP.\n"); goto fail; } ep = &ipa3_ctx->ep[ipa_ep_idx]; if (ep->valid) { IPAERR("EP already allocated.\n"); goto fail; } memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); ipa3_inc_client_enable_clks(); ep->skip_ep_cfg = params->skip_ep_cfg; ep->valid = 1; ep->client = params->client; ep->client_notify = params->notify; ep->priv = params->priv; ep->keep_ipa_awake = params->keep_ipa_awake; if (!ep->skip_ep_cfg) { if (ipa3_cfg_ep(ipa_ep_idx, ¶ms->ipa_ep_cfg)) { IPAERR("fail to configure EP.\n"); goto ipa_cfg_ep_fail; } /* Setting EP status 0 */ memset(&ep_status, 0, sizeof(ep_status)); if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) { IPAERR("fail to configure status of EP.\n"); goto ipa_cfg_ep_fail; } IPADBG("ep configuration successful\n"); } else { IPADBG("Skipping endpoint configuration.\n"); } out_params->clnt_hdl = ipa_ep_idx; result = ipa3_enable_data_path(out_params->clnt_hdl); if (result) { IPAERR("enable data path failed res=%d clnt=%d.\n", result, out_params->clnt_hdl); goto ipa_cfg_ep_fail; } gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl; gsi_res = gsi_alloc_evt_ring(¶ms->evt_ring_params, gsi_dev_hdl, &ep->gsi_evt_ring_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error allocating event ring: %d\n", gsi_res); result = -EFAULT; goto ipa_cfg_ep_fail; } gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, params->evt_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing event ring 
scratch: %d\n", gsi_res); result = -EFAULT; goto write_evt_scratch_fail; } memset(gsi_ep_cfg_ptr, 0, sizeof(struct ipa_gsi_ep_config)); gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx); params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl; params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num; gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl, &ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res, params->chan_params.ch_id); result = -EFAULT; goto write_evt_scratch_fail; } memcpy(&ep->chan_scratch, ¶ms->chan_scratch, sizeof(union __packed gsi_channel_scratch)); ep->chan_scratch.xdci.max_outstanding_tre = gsi_ep_cfg_ptr->ipa_if_aos; gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, params->chan_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing channel scratch: %d\n", gsi_res); result = -EFAULT; goto write_chan_scratch_fail; } gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl, &out_params->db_reg_phs_addr_lsb, &out_params->db_reg_phs_addr_msb); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error querying channel DB registers addresses: %d\n", gsi_res); result = -EFAULT; goto write_chan_scratch_fail; } ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len; ep->gsi_mem_info.evt_ring_base_addr = params->evt_ring_params.ring_base_addr; ep->gsi_mem_info.evt_ring_base_vaddr = params->evt_ring_params.ring_base_vaddr; ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len; ep->gsi_mem_info.chan_ring_base_addr = params->chan_params.ring_base_addr; ep->gsi_mem_info.chan_ring_base_vaddr = params->chan_params.ring_base_vaddr; ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client)) ipa3_install_dflt_flt_rules(ipa_ep_idx); ipa3_dec_client_disable_clks(); IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx); IPADBG("ipa3_request_gsi_channel: exit\n"); return 0; 
write_chan_scratch_fail: gsi_dealloc_channel(ep->gsi_chan_hdl); write_evt_scratch_fail: gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); ipa_cfg_ep_fail: memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); ipa3_dec_client_disable_clks(); fail: return result; } int ipa3_set_usb_max_packet_size( enum ipa_usb_max_usb_packet_size usb_max_packet_size) { struct gsi_device_scratch dev_scratch; enum gsi_status gsi_res; IPADBG("ipa3_set_usb_max_packet_size: entry\n"); ipa3_inc_client_enable_clks(); memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch)); dev_scratch.mhi_base_chan_idx_valid = false; dev_scratch.max_usb_pkt_size_valid = true; dev_scratch.max_usb_pkt_size = usb_max_packet_size; gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl, &dev_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing device scratch: %d\n", gsi_res); return -EFAULT; } ipa3_dec_client_disable_clks(); IPADBG("ipa3_set_usb_max_packet_size: exit\n"); return 0; } int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid) { struct ipa3_ep_context *ep; int result = -EFAULT; enum gsi_status gsi_res; IPADBG("ipa3_xdci_connect: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0 || xferrscidx < 0 || xferrscidx > IPA_XFER_RSC_IDX_MAX) { IPAERR("Bad parameters.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; ipa3_inc_client_enable_clks(); if (xferrscidx_valid) { ep->chan_scratch.xdci.xferrscidx = xferrscidx; gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, ep->chan_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing channel scratch: %d\n", gsi_res); goto write_chan_scratch_fail; } } gsi_res = gsi_start_channel(ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error starting channel: %d\n", gsi_res); goto write_chan_scratch_fail; } if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_xdci_connect: exit\n"); return 0; write_chan_scratch_fail: 
ipa3_dec_client_disable_clks(); return result; } static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info) { bool is_empty = false; if (!IPA_CLIENT_IS_CONS(ep->client)) { /* For UL channel: chan.RP == chan.WP */ is_empty = (chan_info->rp == chan_info->wp); } else { /* For DL channel: */ if (chan_info->wp != (ep->gsi_mem_info.chan_ring_len - 1)) { /* if chan.WP != LINK TRB: chan.WP == evt.RP */ is_empty = (chan_info->wp == chan_info->evt_rp); } else { /* * if chan.WP == LINK TRB: chan.base_xfer_ring_addr * == evt.RP */ is_empty = (ep->gsi_mem_info.chan_ring_base_addr == chan_info->evt_rp); } } return is_empty; } static int ipa3_enable_force_clear(u32 request_id, bool throttle_source, u32 source_pipe_bitmask) { struct ipa_enable_force_clear_datapath_req_msg_v01 req; int result; memset(&req, 0, sizeof(req)); req.request_id = request_id; req.source_pipe_bitmask = source_pipe_bitmask; if (throttle_source) { req.throttle_source_valid = 1; req.throttle_source = 1; } result = ipa3_qmi_enable_force_clear_datapath_send(&req); if (result) { IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n", result); return result; } return 0; } static int ipa3_disable_force_clear(u32 request_id) { struct ipa_disable_force_clear_datapath_req_msg_v01 req; int result; memset(&req, 0, sizeof(req)); req.request_id = request_id; result = ipa3_qmi_disable_force_clear_datapath_send(&req); if (result) { IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n", result); return result; } return 0; } static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info, unsigned long chan_hdl) { enum gsi_status gsi_res; memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info)); gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error querying channel info: %d\n", gsi_res); return -EFAULT; } if (!gsi_chan_info->evt_valid) { IPAERR("Event info invalid\n"); return -EFAULT; } return 0; } /* Clocks 
should be voted for before invoking this function */ static int ipa3_drain_ul_chan_data(struct ipa3_ep_context *ep, u32 qmi_req_id, u32 source_pipe_bitmask, bool should_force_clear) { int i; bool is_empty = false; int result; struct gsi_chan_info gsi_chan_info; result = ipa3_get_gsi_chan_info(&gsi_chan_info, ep->gsi_chan_hdl); if (result) return -EFAULT; do { for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { is_empty = ipa3_is_xdci_channel_empty(ep, &gsi_chan_info); if (is_empty) break; udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); } if (is_empty) break; if (should_force_clear) { result = ipa3_enable_force_clear(qmi_req_id, true, source_pipe_bitmask); if (result) return -EFAULT; } for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { is_empty = ipa3_is_xdci_channel_empty(ep, &gsi_chan_info); if (is_empty) break; udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); } if (should_force_clear) { result = ipa3_disable_force_clear(qmi_req_id); if (result) return -EFAULT; } if (is_empty) break; IPAERR("UL channel is not empty after draining it!\n"); BUG(); } while (0); return 0; } int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id) { struct ipa3_ep_context *ep; int result; u32 source_pipe_bitmask = 0; IPADBG("ipa3_xdci_disconnect: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); ipa3_disable_data_path(clnt_hdl); /* Drain UL channel before stopping it */ if (!IPA_CLIENT_IS_CONS(ep->client)) { source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ep->client); result = ipa3_drain_ul_chan_data(ep, qmi_req_id, source_pipe_bitmask, should_force_clear); if (result) IPAERR("Error draining UL channel data: %d\n", result); } result = ipa3_stop_gsi_channel(clnt_hdl); if (result) { IPAERR("Error stopping channel: %d\n", result); goto stop_chan_fail; } ipa3_dec_client_disable_clks(); 
IPADBG("ipa3_xdci_disconnect: exit\n"); return 0; stop_chan_fail: if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } int ipa3_release_gsi_channel(u32 clnt_hdl) { struct ipa3_ep_context *ep; int result = -EFAULT; enum gsi_status gsi_res; IPADBG("ipa3_release_gsi_channel: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error deallocating channel: %d\n", gsi_res); goto dealloc_chan_fail; } gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error deallocating event: %d\n", gsi_res); goto dealloc_chan_fail; } if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) ipa3_delete_dflt_flt_rules(clnt_hdl); if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); IPADBG("ipa3_release_gsi_channel: exit\n"); return 0; dealloc_chan_fail: if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool should_force_clear, u32 qmi_req_id) { struct ipa3_ep_context *ul_ep, *dl_ep; int result = -EFAULT; u32 source_pipe_bitmask = 0; bool dl_data_pending = true; bool ul_data_pending = true; int i; bool is_empty = false; struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info; int aggr_active_bitmap = 0; struct ipa_ep_cfg_ctrl ep_cfg_ctrl; IPADBG("ipa3_xdci_suspend: entry\n"); if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[ul_clnt_hdl].valid == 0 || dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[dl_clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) 
ipa3_inc_client_enable_clks(); result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info, ul_ep->gsi_chan_hdl); if (result) goto query_chan_info_fail; result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info, dl_ep->gsi_chan_hdl); if (result) goto query_chan_info_fail; for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { if (!dl_data_pending && !ul_data_pending) break; is_empty = ipa3_is_xdci_channel_empty(dl_ep, &dl_gsi_chan_info); if (!is_empty) { dl_data_pending = true; break; } dl_data_pending = false; is_empty = ipa3_is_xdci_channel_empty(ul_ep, &ul_gsi_chan_info); ul_data_pending = is_empty ? false : true; udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); } if (!dl_data_pending) { aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio, IPA_STATE_AGGR_ACTIVE_OFST); if (aggr_active_bitmap & (1 << dl_clnt_hdl)) dl_data_pending = true; } if (dl_data_pending) { IPADBG("DL data pending, can't suspend\n"); result = -EFAULT; goto query_chan_info_fail; } /* Drain UL channel before stopping it */ if (ul_data_pending) { source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client); result = ipa3_drain_ul_chan_data(ul_ep, qmi_req_id, source_pipe_bitmask, should_force_clear); if (result) IPAERR("Error draining UL channel data: %d\n", result); } /* Suspend the DL EP */ memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl)); ep_cfg_ctrl.ipa_ep_suspend = true; ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); /* * Check if DL channel is empty again, data could enter the channel * before its IPA EP was suspended */ is_empty = ipa3_is_xdci_channel_empty(dl_ep, &dl_gsi_chan_info); if (!is_empty) { IPADBG("DL data pending, can't suspend\n"); /* Unsuspend the DL EP */ memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl)); ep_cfg_ctrl.ipa_ep_suspend = false; ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); result = -EFAULT; goto query_chan_info_fail; } result = ipa3_stop_gsi_channel(ul_clnt_hdl); if (result) { IPAERR("Error stopping UL channel: %d\n", result); goto query_chan_info_fail; } if 
(!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_xdci_suspend: exit\n"); return 0; query_chan_info_fail: if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } int ipa3_start_gsi_channel(u32 clnt_hdl) { struct ipa3_ep_context *ep; int result = -EFAULT; enum gsi_status gsi_res; IPADBG("ipa3_start_gsi_channel: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0) { IPAERR("Bad parameters.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); gsi_res = gsi_start_channel(ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error starting channel: %d\n", gsi_res); goto start_chan_fail; } if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_start_gsi_channel: exit\n"); return 0; start_chan_fail: if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl) { struct ipa3_ep_context *ul_ep, *dl_ep; enum gsi_status gsi_res; struct ipa_ep_cfg_ctrl ep_cfg_ctrl; IPADBG("ipa3_xdci_resume: entry\n"); if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[ul_clnt_hdl].valid == 0 || dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[dl_clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); /* Unsuspend the DL EP */ memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl)); ep_cfg_ctrl.ipa_ep_suspend = true; ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); /* Start UL channel */ gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) IPAERR("Error starting UL channel: %d\n", gsi_res); if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_xdci_resume: exit\n"); 
return 0; } Loading
drivers/platform/msm/ipa/ipa_api.c +92 −0 Original line number Diff line number Diff line Loading @@ -2617,6 +2617,98 @@ static int ipa_ap_resume(struct device *dev) return ret; } int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, struct ipa_usb_teth_params *teth_params, int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *), void *user_data) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_init_teth_prot, teth_prot, teth_params, ipa_usb_notify_cb, user_data); return ret; } EXPORT_SYMBOL(ipa_usb_init_teth_prot); int ipa_usb_request_xdci_channel(struct ipa_usb_xdci_chan_params *params, struct ipa_req_chan_out_params *out_params) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_request_xdci_channel, params, out_params); return ret; } EXPORT_SYMBOL(ipa_usb_request_xdci_channel); int ipa_usb_xdci_connect(struct ipa_usb_xdci_connect_params *params) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_connect, params); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_connect); int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_disconnect, ul_clnt_hdl, dl_clnt_hdl, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_disconnect); int ipa_usb_release_xdci_channel(u32 clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_release_xdci_channel, clnt_hdl, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_release_xdci_channel); int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_deinit_teth_prot, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_deinit_teth_prot); int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_suspend, ul_clnt_hdl, dl_clnt_hdl, teth_prot); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_suspend); int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl) { int ret; IPA_API_DISPATCH_RETURN(ipa_usb_xdci_resume, 
ul_clnt_hdl, dl_clnt_hdl); return ret; } EXPORT_SYMBOL(ipa_usb_xdci_resume); static const struct dev_pm_ops ipa_pm_ops = { .suspend_noirq = ipa_ap_suspend, .resume_noirq = ipa_ap_resume, Loading
drivers/platform/msm/ipa/ipa_api.h +25 −0 Original line number Diff line number Diff line Loading @@ -320,6 +320,31 @@ struct ipa_api_controller { struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx); int (*ipa_usb_init_teth_prot)(enum ipa_usb_teth_prot teth_prot, struct ipa_usb_teth_params *teth_params, int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void*), void *user_data); int (*ipa_usb_request_xdci_channel)( struct ipa_usb_xdci_chan_params *params, struct ipa_req_chan_out_params *out_params); int (*ipa_usb_xdci_connect)( struct ipa_usb_xdci_connect_params *params); int (*ipa_usb_xdci_disconnect)(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_release_xdci_channel)(u32 clnt_hdl, enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_deinit_teth_prot)(enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_xdci_suspend)(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot); int (*ipa_usb_xdci_resume)(u32 ul_clnt_hdl, u32 dl_clnt_hdl); }; #ifdef CONFIG_IPA Loading
drivers/platform/msm/ipa/ipa_v3/Makefile +1 −1 Original line number Diff line number Diff line Loading @@ -2,6 +2,6 @@ obj-$(CONFIG_IPA3) += ipat.o ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o odu_bridge.o \ ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_usb.o obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
drivers/platform/msm/ipa/ipa_v3/ipa.c +6 −0 Original line number Diff line number Diff line Loading @@ -3529,6 +3529,12 @@ static int ipa3_init(const struct ipa3_plat_drv_res *resource_p, else IPADBG(":wdi init ok\n"); result = ipa3_usb_init(); if (result) IPAERR(":ipa_usb init failed (%d)\n", -result); else IPADBG(":ipa_usb init ok\n"); ipa3_ctx->q6_proxy_clk_vote_valid = true; ipa3_register_panic_hdlr(); Loading
drivers/platform/msm/ipa/ipa_v3/ipa_client.c +694 −0 Original line number Diff line number Diff line Loading @@ -27,6 +27,16 @@ #define IPA_PKT_FLUSH_TO_US 100 #define IPA_POLL_FOR_EMPTINESS_NUM 50 #define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20 #define IPA_POLL_FOR_CHANNEL_STOP_NUM 10 /* xfer_rsc_idx should be 7 bits */ #define IPA_XFER_RSC_IDX_MAX 127 static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info); int ipa3_enable_data_path(u32 clnt_hdl) { struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl]; Loading Loading @@ -989,3 +999,687 @@ reset_chan_fail: ipa3_dec_client_disable_clks(); return result; } int ipa3_reset_gsi_event_ring(u32 clnt_hdl) { struct ipa3_ep_context *ep; int result = -EFAULT; enum gsi_status gsi_res; IPADBG("ipa3_reset_gsi_event_ring: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); /* Reset event ring */ gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error resetting event: %d\n", gsi_res); result = -EFAULT; goto reset_evt_fail; } if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_reset_gsi_event_ring: exit\n"); return 0; reset_evt_fail: if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params) { if (params->client >= IPA_CLIENT_MAX) return false; else return true; } int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, struct ipa_req_chan_out_params *out_params) { int ipa_ep_idx; int result = -EFAULT; struct ipa3_ep_context *ep; struct ipa3_ep_cfg_status ep_status; unsigned long gsi_dev_hdl; enum gsi_status gsi_res; struct ipa_gsi_ep_config gsi_ep_cfg; struct ipa_gsi_ep_config *gsi_ep_cfg_ptr = &gsi_ep_cfg; IPADBG("ipa3_request_gsi_channel: 
entry\n"); if (params == NULL || out_params == NULL || !ipa3_is_legal_params(params)) { IPAERR("bad parameters\n"); return -EINVAL; } ipa_ep_idx = ipa3_get_ep_mapping(params->client); if (ipa_ep_idx == -1) { IPAERR("fail to alloc EP.\n"); goto fail; } ep = &ipa3_ctx->ep[ipa_ep_idx]; if (ep->valid) { IPAERR("EP already allocated.\n"); goto fail; } memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); ipa3_inc_client_enable_clks(); ep->skip_ep_cfg = params->skip_ep_cfg; ep->valid = 1; ep->client = params->client; ep->client_notify = params->notify; ep->priv = params->priv; ep->keep_ipa_awake = params->keep_ipa_awake; if (!ep->skip_ep_cfg) { if (ipa3_cfg_ep(ipa_ep_idx, ¶ms->ipa_ep_cfg)) { IPAERR("fail to configure EP.\n"); goto ipa_cfg_ep_fail; } /* Setting EP status 0 */ memset(&ep_status, 0, sizeof(ep_status)); if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) { IPAERR("fail to configure status of EP.\n"); goto ipa_cfg_ep_fail; } IPADBG("ep configuration successful\n"); } else { IPADBG("Skipping endpoint configuration.\n"); } out_params->clnt_hdl = ipa_ep_idx; result = ipa3_enable_data_path(out_params->clnt_hdl); if (result) { IPAERR("enable data path failed res=%d clnt=%d.\n", result, out_params->clnt_hdl); goto ipa_cfg_ep_fail; } gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl; gsi_res = gsi_alloc_evt_ring(¶ms->evt_ring_params, gsi_dev_hdl, &ep->gsi_evt_ring_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error allocating event ring: %d\n", gsi_res); result = -EFAULT; goto ipa_cfg_ep_fail; } gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, params->evt_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing event ring scratch: %d\n", gsi_res); result = -EFAULT; goto write_evt_scratch_fail; } memset(gsi_ep_cfg_ptr, 0, sizeof(struct ipa_gsi_ep_config)); gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx); params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl; params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num; gsi_res = 
gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl, &ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res, params->chan_params.ch_id); result = -EFAULT; goto write_evt_scratch_fail; } memcpy(&ep->chan_scratch, ¶ms->chan_scratch, sizeof(union __packed gsi_channel_scratch)); ep->chan_scratch.xdci.max_outstanding_tre = gsi_ep_cfg_ptr->ipa_if_aos; gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, params->chan_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing channel scratch: %d\n", gsi_res); result = -EFAULT; goto write_chan_scratch_fail; } gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl, &out_params->db_reg_phs_addr_lsb, &out_params->db_reg_phs_addr_msb); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error querying channel DB registers addresses: %d\n", gsi_res); result = -EFAULT; goto write_chan_scratch_fail; } ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len; ep->gsi_mem_info.evt_ring_base_addr = params->evt_ring_params.ring_base_addr; ep->gsi_mem_info.evt_ring_base_vaddr = params->evt_ring_params.ring_base_vaddr; ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len; ep->gsi_mem_info.chan_ring_base_addr = params->chan_params.ring_base_addr; ep->gsi_mem_info.chan_ring_base_vaddr = params->chan_params.ring_base_vaddr; ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client)) ipa3_install_dflt_flt_rules(ipa_ep_idx); ipa3_dec_client_disable_clks(); IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx); IPADBG("ipa3_request_gsi_channel: exit\n"); return 0; write_chan_scratch_fail: gsi_dealloc_channel(ep->gsi_chan_hdl); write_evt_scratch_fail: gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); ipa_cfg_ep_fail: memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); ipa3_dec_client_disable_clks(); fail: return result; } int ipa3_set_usb_max_packet_size( enum 
ipa_usb_max_usb_packet_size usb_max_packet_size) { struct gsi_device_scratch dev_scratch; enum gsi_status gsi_res; IPADBG("ipa3_set_usb_max_packet_size: entry\n"); ipa3_inc_client_enable_clks(); memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch)); dev_scratch.mhi_base_chan_idx_valid = false; dev_scratch.max_usb_pkt_size_valid = true; dev_scratch.max_usb_pkt_size = usb_max_packet_size; gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl, &dev_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing device scratch: %d\n", gsi_res); return -EFAULT; } ipa3_dec_client_disable_clks(); IPADBG("ipa3_set_usb_max_packet_size: exit\n"); return 0; } int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid) { struct ipa3_ep_context *ep; int result = -EFAULT; enum gsi_status gsi_res; IPADBG("ipa3_xdci_connect: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0 || xferrscidx < 0 || xferrscidx > IPA_XFER_RSC_IDX_MAX) { IPAERR("Bad parameters.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; ipa3_inc_client_enable_clks(); if (xferrscidx_valid) { ep->chan_scratch.xdci.xferrscidx = xferrscidx; gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, ep->chan_scratch); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error writing channel scratch: %d\n", gsi_res); goto write_chan_scratch_fail; } } gsi_res = gsi_start_channel(ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error starting channel: %d\n", gsi_res); goto write_chan_scratch_fail; } if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_xdci_connect: exit\n"); return 0; write_chan_scratch_fail: ipa3_dec_client_disable_clks(); return result; } static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info) { bool is_empty = false; if (!IPA_CLIENT_IS_CONS(ep->client)) { /* For UL channel: chan.RP == chan.WP */ is_empty = (chan_info->rp == chan_info->wp); } else { /* For DL channel: */ if 
(chan_info->wp != (ep->gsi_mem_info.chan_ring_len - 1)) {
			/* if chan.WP != LINK TRB: chan.WP == evt.RP */
			is_empty = (chan_info->wp == chan_info->evt_rp);
		} else {
			/*
			 * if chan.WP == LINK TRB: chan.base_xfer_ring_addr
			 * == evt.RP
			 */
			is_empty = (ep->gsi_mem_info.chan_ring_base_addr ==
				chan_info->evt_rp);
		}
	}

	return is_empty;
}

/*
 * ipa3_enable_force_clear() - request (over QMI) that the modem force-clear
 * the data path for the pipes in @source_pipe_bitmask.
 * When @throttle_source is true the request also asks that the source be
 * throttled. Returns 0 on success or the QMI send error code.
 */
static int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
	u32 source_pipe_bitmask)
{
	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
	int result;

	memset(&req, 0, sizeof(req));
	req.request_id = request_id;
	req.source_pipe_bitmask = source_pipe_bitmask;
	if (throttle_source) {
		req.throttle_source_valid = 1;
		req.throttle_source = 1;
	}
	result = ipa3_qmi_enable_force_clear_datapath_send(&req);
	if (result) {
		IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
			result);
		return result;
	}
	return 0;
}

/*
 * ipa3_disable_force_clear() - revoke a previous force-clear request with
 * the same @request_id. Returns 0 on success or the QMI send error code.
 */
static int ipa3_disable_force_clear(u32 request_id)
{
	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
	int result;

	memset(&req, 0, sizeof(req));
	req.request_id = request_id;
	result = ipa3_qmi_disable_force_clear_datapath_send(&req);
	if (result) {
		IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
			result);
		return result;
	}
	return 0;
}

/*
 * ipa3_get_gsi_chan_info() - snapshot the GSI channel state (ring pointers)
 * for @chan_hdl into @gsi_chan_info.
 * Fails with -EFAULT if the query fails or the associated event-ring info
 * is not valid (the emptiness checks rely on evt_rp).
 */
static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
	unsigned long chan_hdl)
{
	enum gsi_status gsi_res;

	memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
	gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error querying channel info: %d\n", gsi_res);
		return -EFAULT;
	}
	if (!gsi_chan_info->evt_valid) {
		IPAERR("Event info invalid\n");
		return -EFAULT;
	}
	return 0;
}

/* Clocks should be voted for before invoking this function */
static int ipa3_drain_ul_chan_data(struct ipa3_ep_context *ep, u32 qmi_req_id,
	u32 source_pipe_bitmask, bool should_force_clear)
{
	int i;
	bool is_empty = false;
	int result;
	struct gsi_chan_info gsi_chan_info;

	/* Snapshot the channel state once; the polls below re-check
	 * emptiness against this info.
	 */
	result = ipa3_get_gsi_chan_info(&gsi_chan_info, ep->gsi_chan_hdl);
	if
(result)
		return -EFAULT;

	do {
		/* First drain attempt: poll for channel emptiness without
		 * any force-clear intervention.
		 */
		for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
			is_empty = ipa3_is_xdci_channel_empty(ep,
				&gsi_chan_info);
			if (is_empty)
				break;
			udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
		}
		if (is_empty)
			break;
		/* Channel did not drain on its own; optionally ask the modem
		 * to force-clear the data path, then poll again.
		 */
		if (should_force_clear) {
			result = ipa3_enable_force_clear(qmi_req_id, true,
				source_pipe_bitmask);
			if (result)
				return -EFAULT;
		}
		for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
			is_empty = ipa3_is_xdci_channel_empty(ep,
				&gsi_chan_info);
			if (is_empty)
				break;
			udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
		}
		if (should_force_clear) {
			result = ipa3_disable_force_clear(qmi_req_id);
			if (result)
				return -EFAULT;
		}
		if (is_empty)
			break;
		/* NOTE(review): BUG() crashes the system if the UL channel
		 * never drains even after force-clear — deliberate
		 * fail-stop, since stopping a non-empty channel is unsafe.
		 */
		IPAERR("UL channel is not empty after draining it!\n");
		BUG();
	} while (0);

	return 0;
}

/*
 * ipa3_xdci_disconnect() - disable the endpoint's data path, drain a UL
 * channel if needed, and stop its GSI channel.
 *
 * @clnt_hdl: endpoint handle (index into ipa3_ctx->ep[])
 * @should_force_clear: allow a QMI force-clear while draining UL data
 * @qmi_req_id: request id used for the force-clear enable/disable pair
 *
 * Returns 0 on success, -EINVAL on bad parameters, or the channel-stop
 * error code.
 */
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
{
	struct ipa3_ep_context *ep;
	int result;
	u32 source_pipe_bitmask = 0;

	IPADBG("ipa3_xdci_disconnect: entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}
	ep = &ipa3_ctx->ep[clnt_hdl];
	if (!ep->keep_ipa_awake)
		ipa3_inc_client_enable_clks();
	ipa3_disable_data_path(clnt_hdl);
	/* Drain UL channel before stopping it */
	if (!IPA_CLIENT_IS_CONS(ep->client)) {
		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ep->client);
		result = ipa3_drain_ul_chan_data(ep, qmi_req_id,
			source_pipe_bitmask, should_force_clear);
		if (result)
			IPAERR("Error draining UL channel data: %d\n", result);
	}
	result = ipa3_stop_gsi_channel(clnt_hdl);
	if (result) {
		IPAERR("Error stopping channel: %d\n", result);
		goto stop_chan_fail;
	}
	/* NOTE(review): the success path drops the clock vote
	 * unconditionally, while the vote above was only taken when
	 * !keep_ipa_awake — presumably this releases a keep-awake vote
	 * held since connect; confirm against ipa3_xdci_connect().
	 */
	ipa3_dec_client_disable_clks();
	IPADBG("ipa3_xdci_disconnect: exit\n");
	return 0;

stop_chan_fail:
	if (!ep->keep_ipa_awake)
		ipa3_dec_client_disable_clks();
	return result;
}

/*
 * ipa3_release_gsi_channel() - deallocate the endpoint's GSI channel and
 * event ring, remove default filter rules for producer endpoints, and clear
 * the endpoint context. Returns 0 on success, -EINVAL on a bad handle, or
 * -EFAULT on GSI deallocation errors.
 */
int ipa3_release_gsi_channel(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;

	IPADBG("ipa3_release_gsi_channel: entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}
	ep = &ipa3_ctx->ep[clnt_hdl];
	if (!ep->keep_ipa_awake)
		ipa3_inc_client_enable_clks();
	/* Channel must be deallocated before its event ring */
	gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error deallocating channel: %d\n", gsi_res);
		goto dealloc_chan_fail;
	}
	gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error deallocating event: %d\n", gsi_res);
		goto dealloc_chan_fail;
	}
	/* Undo the default filter rules installed at request time for
	 * configured producer endpoints.
	 */
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client))
		ipa3_delete_dflt_flt_rules(clnt_hdl);
	if (!ep->keep_ipa_awake)
		ipa3_dec_client_disable_clks();
	/* Invalidate the endpoint context (ep->valid becomes 0) */
	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
	IPADBG("ipa3_release_gsi_channel: exit\n");
	return 0;

dealloc_chan_fail:
	if (!ep->keep_ipa_awake)
		ipa3_dec_client_disable_clks();
	return result;
}

/*
 * ipa3_xdci_suspend() - suspend an xDCI UL/DL channel pair: verify both
 * channels are empty (draining the UL side if needed), suspend the DL IPA
 * endpoint, and stop the UL GSI channel.
 *
 * @ul_clnt_hdl: UL endpoint handle
 * @dl_clnt_hdl: DL endpoint handle
 * @should_force_clear: allow a QMI force-clear while draining UL data
 * @qmi_req_id: request id for the force-clear enable/disable pair
 *
 * Fails with -EFAULT (without suspending) if DL data is still pending.
 */
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
	bool should_force_clear, u32 qmi_req_id)
{
	struct ipa3_ep_context *ul_ep, *dl_ep;
	int result = -EFAULT;
	u32 source_pipe_bitmask = 0;
	bool dl_data_pending = true;
	bool ul_data_pending = true;
	int i;
	bool is_empty = false;
	struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info;
	int aggr_active_bitmap = 0;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	IPADBG("ipa3_xdci_suspend: entry\n");
	if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[ul_clnt_hdl].valid == 0 ||
		dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[dl_clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}
	ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
	if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
		ipa3_inc_client_enable_clks();
	result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
		ul_ep->gsi_chan_hdl);
	if (result)
		goto query_chan_info_fail;
	result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
		dl_ep->gsi_chan_hdl);
	if (result)
		goto query_chan_info_fail;
	/* Poll both channels for emptiness; stop early once neither has
	 * pending data. A non-empty DL channel aborts the poll immediately.
	 */
	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
		if (!dl_data_pending && !ul_data_pending)
			break;
		is_empty = ipa3_is_xdci_channel_empty(dl_ep,
			&dl_gsi_chan_info);
		if (!is_empty) {
			dl_data_pending = true;
			break;
		}
		dl_data_pending = false;
		is_empty = ipa3_is_xdci_channel_empty(ul_ep,
			&ul_gsi_chan_info);
		ul_data_pending = is_empty ? false : true;
		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
	}
	/* DL also counts as busy if IPA aggregation is still active on it */
	if (!dl_data_pending) {
		aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
			IPA_STATE_AGGR_ACTIVE_OFST);
		if (aggr_active_bitmap & (1 << dl_clnt_hdl))
			dl_data_pending = true;
	}
	if (dl_data_pending) {
		IPADBG("DL data pending, can't suspend\n");
		result = -EFAULT;
		goto query_chan_info_fail;
	}
	/* Drain UL channel before stopping it */
	if (ul_data_pending) {
		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
		result = ipa3_drain_ul_chan_data(ul_ep, qmi_req_id,
			source_pipe_bitmask, should_force_clear);
		if (result)
			IPAERR("Error draining UL channel data: %d\n", result);
	}
	/* Suspend the DL EP */
	memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
	ep_cfg_ctrl.ipa_ep_suspend = true;
	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
	/*
	 * Check if DL channel is empty again, data could enter the channel
	 * before its IPA EP was suspended
	 */
	is_empty = ipa3_is_xdci_channel_empty(dl_ep, &dl_gsi_chan_info);
	if (!is_empty) {
		IPADBG("DL data pending, can't suspend\n");
		/* Unsuspend the DL EP */
		memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_suspend = false;
		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
		result = -EFAULT;
		goto query_chan_info_fail;
	}
	/* NOTE(review): if stopping the UL channel fails below, the DL EP
	 * is left suspended on the error path — confirm whether the caller
	 * is expected to resume it.
	 */
	result = ipa3_stop_gsi_channel(ul_clnt_hdl);
	if (result) {
		IPAERR("Error stopping UL channel: %d\n", result);
		goto query_chan_info_fail;
	}
	if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
		ipa3_dec_client_disable_clks();
	IPADBG("ipa3_xdci_suspend: exit\n");
	return 0;

query_chan_info_fail:
	if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
		ipa3_dec_client_disable_clks();
	return result;
}

/*
 * ipa3_start_gsi_channel() - start the GSI channel of an allocated endpoint.
 * Returns 0 on success, -EINVAL on a bad handle, -EFAULT on GSI errors.
 */
int ipa3_start_gsi_channel(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;
IPADBG("ipa3_start_gsi_channel: entry\n"); if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[clnt_hdl].valid == 0) { IPAERR("Bad parameters.\n"); return -EINVAL; } ep = &ipa3_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); gsi_res = gsi_start_channel(ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) { IPAERR("Error starting channel: %d\n", gsi_res); goto start_chan_fail; } if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_start_gsi_channel: exit\n"); return 0; start_chan_fail: if (!ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); return result; } int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl) { struct ipa3_ep_context *ul_ep, *dl_ep; enum gsi_status gsi_res; struct ipa_ep_cfg_ctrl ep_cfg_ctrl; IPADBG("ipa3_xdci_resume: entry\n"); if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[ul_clnt_hdl].valid == 0 || dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || ipa3_ctx->ep[dl_clnt_hdl].valid == 0) { IPAERR("Bad parameter.\n"); return -EINVAL; } ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) ipa3_inc_client_enable_clks(); /* Unsuspend the DL EP */ memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl)); ep_cfg_ctrl.ipa_ep_suspend = true; ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); /* Start UL channel */ gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl); if (gsi_res != GSI_STATUS_SUCCESS) IPAERR("Error starting UL channel: %d\n", gsi_res); if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake) ipa3_dec_client_disable_clks(); IPADBG("ipa3_xdci_resume: exit\n"); return 0; }