drivers/bus/mhi/core/mhi_init.c (+15 −3)

```diff
@@ -924,11 +924,22 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
 	mhi_chan->chan = chan;
 
 	ret = of_property_read_u32(child, "mhi,num-elements",
-				   (u32 *)&mhi_chan->buf_ring.elements);
-	if (!ret && !mhi_chan->buf_ring.elements)
+				   (u32 *)&mhi_chan->tre_ring.elements);
+	if (!ret && !mhi_chan->tre_ring.elements)
 		goto error_chan_cfg;
 
-	mhi_chan->tre_ring.elements = mhi_chan->buf_ring.elements;
+	/*
+	 * For some channels, the local ring length should be bigger than
+	 * the transfer ring length due to internal logical channels in the
+	 * device, so the host can queue many more buffers than the transfer
+	 * ring length allows. For example, RSC channels should have a
+	 * larger local ring than the transfer ring.
+	 */
+	ret = of_property_read_u32(child, "mhi,local-elements",
+				   (u32 *)&mhi_chan->buf_ring.elements);
+	if (ret)
+		mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
 
 	ret = of_property_read_u32(child, "mhi,event-ring",
 				   &mhi_chan->er_index);
@@ -966,6 +977,7 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
 		mhi_chan->gen_tre = mhi_gen_tre;
 		mhi_chan->queue_xfer = mhi_queue_buf;
 		break;
+	case MHI_XFER_RSC_SKB:
 	case MHI_XFER_SKB:
 		mhi_chan->queue_xfer = mhi_queue_skb;
 		break;
```

drivers/bus/mhi/core/mhi_internal.h (+11 −0)

```diff
@@ -314,6 +314,8 @@ enum mhi_cmd_type {
 #define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
 #define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
 #define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
 
 /* transfer descriptor macros */
 #define MHI_TRE_DATA_PTR(ptr) (ptr)
@@ -321,6 +323,11 @@ enum mhi_cmd_type {
 #define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
 	| (ieot << 9) | (ieob << 8) | chain)
 
+/* rsc transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) ((len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
 enum MHI_CMD {
 	MHI_CMD_RESET_CHAN,
 	MHI_CMD_START_CHAN,
@@ -331,12 +338,14 @@ enum MHI_PKT_TYPE {
 	MHI_PKT_TYPE_INVALID = 0x0,
 	MHI_PKT_TYPE_NOOP_CMD = 0x1,
 	MHI_PKT_TYPE_TRANSFER = 0x2,
+	MHI_PKT_TYPE_COALESCING = 0x8,
 	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
 	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
 	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
 	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
 	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
 	MHI_PKT_TYPE_TX_EVENT = 0x22,
+	MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
 	MHI_PKT_TYPE_EE_EVENT = 0x40,
 	MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
 	MHI_PKT_TYPE_STALE_EVENT, /* internal event */
@@ -454,6 +463,7 @@ enum MHI_XFER_TYPE {
 	MHI_XFER_SKB,
 	MHI_XFER_SCLIST,
 	MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */
+	MHI_XFER_RSC_SKB, /* RSC type, accept skb from client */
 };
 
 #define NR_OF_CMD_RINGS (1)
@@ -548,6 +558,7 @@ struct mhi_buf_info {
 	void *wp;
 	size_t len;
 	void *cb_buf;
+	bool used; /* indicates whether the element is in use */
 	enum dma_data_direction dir;
 };
```
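To make the new macros concrete: an RSC TRE carries the buffer length in bits [63:48] of the pointer word, the DMA address below it, a cookie in dword[0], and the COALESCING packet type in dword[1]; MHI_TRE_GET_EV_COOKIE later recovers the cookie from a completion event's ptr word. Below is a minimal standalone sketch (plain C, not kernel code) of that packing; the struct mirrors struct mhi_tre and all values are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>

/* mirrors struct mhi_tre: one 64-bit pointer word plus two 32-bit dwords */
struct mhi_tre {
	uint64_t ptr;
	uint32_t dword[2];
};

#define MHI_PKT_TYPE_COALESCING 0x8

/* same packing as the MHI_RSCTRE_DATA_* macros */
static struct mhi_tre build_rsc_tre(uint64_t dma_addr, uint64_t len,
				    uint32_t cookie)
{
	struct mhi_tre tre;

	tre.ptr = (len << 48) | dma_addr;             /* MHI_RSCTRE_DATA_PTR */
	tre.dword[0] = cookie;                        /* MHI_RSCTRE_DATA_DWORD0 */
	tre.dword[1] = MHI_PKT_TYPE_COALESCING << 16; /* MHI_RSCTRE_DATA_DWORD1 */
	return tre;
}

int main(void)
{
	/* illustrative values: a 2 KB buffer, descriptor at byte offset 80 */
	struct mhi_tre tre = build_rsc_tre(0xba980000ULL, 2048, 80);

	printf("ptr=0x%016llx dword0=0x%08x dword1=0x%08x\n",
	       (unsigned long long)tre.ptr,
	       (unsigned)tre.dword[0], (unsigned)tre.dword[1]);
	return 0;
}
```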
drivers/bus/mhi/core/mhi_main.c (+157 −40)

```diff
@@ -380,6 +380,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
 	/* generate the tre */
 	buf_info = buf_ring->wp;
+	MHI_ASSERT(buf_info->used, "TRE Not Freed\n");
 	buf_info->v_addr = skb->data;
 	buf_info->cb_buf = skb;
 	buf_info->wp = tre_ring->wp;
@@ -390,9 +391,19 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
 		goto map_error;
 
 	mhi_tre = tre_ring->wp;
-	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
-	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
-	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_SKB) {
+		buf_info->used = true;
+		mhi_tre->ptr = MHI_RSCTRE_DATA_PTR(buf_info->p_addr,
+						   buf_info->len);
+		mhi_tre->dword[0] = MHI_RSCTRE_DATA_DWORD0(buf_ring->wp -
+							   buf_ring->base);
+		mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
+	} else {
+		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+		mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+	}
 
 	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n",
 		 mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre),
 		 mhi_tre->ptr,
```
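The cookie written into dword[0] above is simply the byte offset of the local descriptor, buf_ring->wp - buf_ring->base; the completion path reverses that mapping. Here is a standalone sketch of the round trip, assuming byte-addressed base/wp pointers as in struct mhi_ring (names simplified, not the kernel API):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct buf_info {
	bool used;
	void *cb_buf;
};

/* simplified stand-in for struct mhi_ring */
struct ring {
	void *base; /* start of descriptor array */
	void *wp;   /* next free descriptor */
	size_t len; /* ring size in bytes */
};

/* queue side: cookie = byte offset of the descriptor being queued */
static uint32_t make_cookie(struct ring *r)
{
	return (uint32_t)((char *)r->wp - (char *)r->base);
}

/* completion side: recover the descriptor, rejecting an out-of-bound
 * cookie the way parse_rsc_event() asserts cookie < buf_ring->len */
static struct buf_info *from_cookie(struct ring *r, uint32_t cookie)
{
	if (cookie >= r->len)
		return NULL;
	return (struct buf_info *)((char *)r->base + cookie);
}

int main(void)
{
	struct buf_info bufs[4] = { 0 };
	struct ring r = { bufs, &bufs[2], sizeof(bufs) };
	uint32_t cookie = make_cookie(&r);

	printf("cookie=%u -> element %td\n", cookie,
	       from_cookie(&r, cookie) - bufs);
	return 0;
}
```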
drivers/bus/mhi/core/mhi_main.c (continued)

```diff
@@ -913,6 +924,70 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 	return 0;
 }
 
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+			   struct mhi_tre *event,
+			   struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_buf_info *buf_info;
+	struct mhi_result result;
+	int ev_code;
+	u32 cookie; /* offset to local descriptor */
+	u16 xfer_len;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	ev_code = MHI_TRE_GET_EV_CODE(event);
+	cookie = MHI_TRE_GET_EV_COOKIE(event);
+	xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+	/* received out-of-bound cookie */
+	MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n");
+
+	buf_info = buf_ring->base + cookie;
+
+	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+		-EOVERFLOW : 0;
+	result.bytes_xferd = xfer_len;
+	result.buf_addr = buf_info->cb_buf;
+	result.dir = mhi_chan->dir;
+
+	read_lock_bh(&mhi_chan->lock);
+
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+		goto end_process_rsc_event;
+
+	MHI_ASSERT(!buf_info->used, "TRE already freed\n");
+
+	mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+	/* notify the client */
+	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+	/*
+	 * Note: we advance RP here even though the completion event we just
+	 * processed may not correspond to this descriptor. We can do this
+	 * because the device is guaranteed to cache descriptors in the order
+	 * it receives them, so even if the completion event is for a
+	 * different descriptor, all descriptors in between can be reused.
+	 * Example:
+	 * The transfer ring has descriptors A, B, C, D.
+	 * The last descriptor the host queued is D (WP) and the first is
+	 * A (RP). If the completion event we just serviced is for C, we can
+	 * safely queue replacements for A, B, and C even though the host
+	 * received no completions for A and B.
+	 */
+	mhi_del_ring_element(mhi_cntrl, tre_ring);
+	buf_info->used = false;
+
+end_process_rsc_event:
+	read_unlock_bh(&mhi_chan->lock);
+
+	return 0;
+}
+
 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 				       struct mhi_tre *tre)
 {
```
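The comment in parse_rsc_event() carries the key invariant: the device consumes queued descriptors in order, so a completion for descriptor C implicitly retires every descriptor from RP through C, even if their individual completions were coalesced away. A toy calculation of the reusable-element count under that invariant, using ring indices instead of pointers (names illustrative):

```c
#include <stdio.h>

/*
 * Number of transfer-ring elements that can be recycled when a
 * completion arrives for the element at index 'completed', given the
 * current read pointer 'rp', in a ring of 'nr_el' elements: everything
 * from rp through completed (inclusive) is retired, even if no
 * completion was ever delivered for the earlier elements.
 */
static unsigned recyclable(unsigned rp, unsigned completed, unsigned nr_el)
{
	return (completed - rp + nr_el) % nr_el + 1;
}

int main(void)
{
	/*
	 * Ring A, B, C, D = indices 0..3; RP at A, WP past D.
	 * A completion for C (index 2) lets the host reuse A, B, and C.
	 */
	printf("recyclable: %u\n", recyclable(0, 2, 4)); /* prints 3 */
	return 0;
}
```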
drivers/bus/mhi/core/mhi_main.c (continued)

```diff
@@ -1103,11 +1178,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
 			 local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
 
-		if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
-			chan = MHI_TRE_GET_EV_CHID(local_rp);
-			mhi_chan = &mhi_cntrl->mhi_chan[chan];
+		chan = MHI_TRE_GET_EV_CHID(local_rp);
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+		if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
 			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 			event_quota--;
+		} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+			parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+			event_quota--;
 		}
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
@@ -1522,31 +1601,23 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 	return ret;
 }
 
-void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+				  struct mhi_event *mhi_event,
+				  struct mhi_event_ctxt *er_ctxt,
+				  int chan)
 {
 	struct mhi_tre *dev_rp, *local_rp;
-	struct mhi_event_ctxt *er_ctxt;
-	struct mhi_event *mhi_event;
-	struct mhi_ring *ev_ring, *buf_ring, *tre_ring;
+	struct mhi_ring *ev_ring;
 	unsigned long flags;
-	int chan = mhi_chan->chan;
-	struct mhi_result result;
-
-	/* nothing to reset, client doesn't queue buffers */
-	if (mhi_chan->offload_ch)
-		return;
+
+	MHI_LOG("Marking all events for chan:%d as stale\n", chan);
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
 	ev_ring = &mhi_event->ring;
-	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
-
-	MHI_LOG("Marking all events for chan:%d as stale\n", chan);
 
 	/* mark all stale events related to channel as STALE event */
 	spin_lock_irqsave(&mhi_event->lock, flags);
 	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 	if (!mhi_event->mhi_chan) {
 		local_rp = ev_ring->rp;
 		while (dev_rp != local_rp) {
 			if (MHI_TRE_GET_EV_TYPE(local_rp) ==
@@ -1558,19 +1629,17 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 			if (local_rp == (ev_ring->base + ev_ring->len))
 				local_rp = ev_ring->base;
 		}
 	} else {
 		/* dedicated event ring so move the ptr to end */
 		ev_ring->rp = dev_rp;
 		ev_ring->wp = ev_ring->rp - ev_ring->el_size;
 		if (ev_ring->wp < ev_ring->base)
 			ev_ring->wp = ev_ring->base + ev_ring->len -
 				ev_ring->el_size;
 		if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
 			mhi_ring_er_db(mhi_event);
 	}
 
 	MHI_LOG("Finished marking events as stale events\n");
 	spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+				struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_result result;
 
 	/* reset any pending buffers */
 	buf_ring = &mhi_chan->buf_ring;
@@ -1594,6 +1663,54 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 		}
 	}
 }
+
+static void mhi_reset_rsc_chan(struct mhi_controller *mhi_cntrl,
+			       struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_result result;
+	struct mhi_buf_info *buf_info;
+
+	/* reset any pending buffers */
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	result.transaction_status = -ENOTCONN;
+	result.bytes_xferd = 0;
+
+	buf_info = buf_ring->base;
+	for (; (void *)buf_info < buf_ring->base + buf_ring->len; buf_info++) {
+		if (!buf_info->used)
+			continue;
+
+		mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+		result.buf_addr = buf_info->cb_buf;
+		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		buf_info->used = false;
+	}
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+	struct mhi_event *mhi_event;
+	struct mhi_event_ctxt *er_ctxt;
+	int chan = mhi_chan->chan;
+
+	/* nothing to reset, client doesn't queue buffers */
+	if (mhi_chan->offload_ch)
+		return;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_SKB)
+		mhi_reset_rsc_chan(mhi_cntrl, mhi_chan);
+	else
+		mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_LOG("Reset complete.\n");
```
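The used flag added to struct mhi_buf_info ties all of this together: it is set when the RSC TRE is queued, cleared on completion in parse_rsc_event(), and anything still set when the channel resets must be unmapped and returned to the client with -ENOTCONN. A standalone sketch of that reset walk (the callback and names are stand-ins, not the kernel API):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf_info {
	bool used;          /* queued but not yet completed */
	const char *cb_buf; /* stand-in for the client's skb */
};

/* stand-in for mhi_chan->xfer_cb(): return the buffer to the client */
static void xfer_cb(const char *buf, int status)
{
	printf("returning %s to client, status=%d\n", buf, status);
}

/* mirrors the loop in mhi_reset_rsc_chan(): only descriptors still
 * marked used get unmapped and returned */
static void reset_walk(struct buf_info *base, size_t nr)
{
	for (size_t i = 0; i < nr; i++) {
		if (!base[i].used)
			continue;
		/* kernel code unmaps the DMA buffer here */
		xfer_cb(base[i].cb_buf, -107 /* -ENOTCONN on Linux */);
		base[i].used = false;
	}
}

int main(void)
{
	struct buf_info ring[3] = {
		{ true, "skb0" }, { false, "skb1" }, { true, "skb2" },
	};

	reset_walk(ring, 3); /* returns skb0 and skb2 only */
	return 0;
}
```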