drivers/platform/msm/mhi_dev/mhi.c  +65 −23

@@ -176,6 +176,14 @@ void mhi_dev_write_to_host_ipa(struct mhi_dev *mhi, struct mhi_addr *transfer,
 		(uint64_t) mhi->cache_dma_handle, host_addr_pa,
 		(int) transfer->size);
 	if (tr_type == MHI_DEV_DMA_ASYNC) {
+		/*
+		 * Event read pointer memory is dma_alloc_coherent memory
+		 * don't need to dma_map. Assigns the physical address in
+		 * phy_addr.
+		 */
+		if (transfer->phy_addr)
+			dma = transfer->phy_addr;
+		else
 			dma = dma_map_single(&mhi->pdev->dev,
 				transfer->virt_addr, transfer->size,
 				DMA_TO_DEVICE);

@@ -183,6 +191,13 @@ void mhi_dev_write_to_host_ipa(struct mhi_dev *mhi, struct mhi_addr *transfer,
 			ereq->dma = dma;
 			ereq->dma_len = transfer->size;
 		} else if (ereq->event_type == SEND_EVENT_RD_OFFSET) {
+			/*
+			 * Event read pointer memory is dma_alloc_coherent
+			 * memory. Don't need to dma_unmap.
+			 */
+			if (transfer->phy_addr)
+				ereq->event_rd_dma = 0;
+			else
 				ereq->event_rd_dma = dma;
 		}
 		rc = ipa_dma_async_memcpy(host_addr_pa, (uint64_t) dma,

@@ -379,6 +394,14 @@ void mhi_dev_write_to_host_edma(struct mhi_dev *mhi, struct mhi_addr *transfer,
 		mhi->cache_dma_handle, host_addr_pa,
 		(int) transfer->size);
 	if (tr_type == MHI_DEV_DMA_ASYNC) {
+		/*
+		 * Event read pointer memory is dma_alloc_coherent memory
+		 * don't need to dma_map. Assigns the physical address in
+		 * phy_addr.
+		 */
+		if (transfer->phy_addr) {
+			dma = transfer->phy_addr;
+		} else {
 			dma = dma_map_single(&mhi->pdev->dev,
 				transfer->virt_addr, transfer->size,
 				DMA_TO_DEVICE);

@@ -386,11 +409,19 @@ void mhi_dev_write_to_host_edma(struct mhi_dev *mhi, struct mhi_addr *transfer,
 			pr_err("%s(): dma mapping failed\n", __func__);
 			return;
 		}
+	}
 	if (ereq->event_type == SEND_EVENT_BUFFER) {
 		ereq->dma = dma;
 		ereq->dma_len = transfer->size;
 	} else {
+		/*
+		 * Event read pointer memory is dma_alloc_coherent
+		 * memory. Don't need to dma_unmap.
+		 */
+		if (transfer->phy_addr)
+			ereq->event_rd_dma = 0;
+		else
 			ereq->event_rd_dma = dma;
 	}

@@ -922,16 +953,17 @@ int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
 	if (MHI_USE_DMA(mhi))
 		transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
 			sizeof(struct mhi_dev_ev_ctx) *
-			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
-			(uint32_t) ring->ring_ctx;
+			evnt_ring) + (size_t) &ring->ring_ctx->ev.rp -
+			(size_t) ring->ring_ctx;
 	else
 		transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
 			sizeof(struct mhi_dev_ev_ctx) *
-			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
-			(uint32_t) ring->ring_ctx;
+			evnt_ring) + (size_t) &ring->ring_ctx->ev.rp -
+			(size_t) ring->ring_ctx;
 
 	transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
 	transfer_addr.size = sizeof(uint64_t);
+	transfer_addr.phy_addr = 0;
 
 	mhi_ctx->write_to_host(mhi, &transfer_addr, NULL, MHI_DEV_DMA_SYNC);
 
 	/*

@@ -984,6 +1016,7 @@ static void mhi_dev_event_rd_offset_completion_cb(void *req)
 	struct mhi_dev *mhi = ch->ring->mhi_dev;
 	unsigned long flags;
 
+	if (ereq->event_rd_dma)
 		dma_unmap_single(&mhi_ctx->pdev->dev, ereq->event_rd_dma,
 			sizeof(uint64_t), DMA_TO_DEVICE);
 	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring];

@@ -1042,16 +1075,25 @@ static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
 	mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
 			ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);
 
-	if (MHI_USE_DMA(mhi))
+	if (MHI_USE_DMA(mhi)) {
 		transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
 			sizeof(struct mhi_dev_ev_ctx) *
-			evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp -
-			(uint32_t)ring->ring_ctx;
-	else
+			evnt_ring) + (size_t)&ring->ring_ctx->ev.rp -
+			(size_t)ring->ring_ctx;
+		/*
+		 * As ev_ctx_cache memory is dma_alloc_coherent, dma_map_single
+		 * should not be called. Pass physical address to write to host.
+		 */
+		transfer_addr.phy_addr = (mhi->ev_ctx_cache_dma_handle +
+			sizeof(struct mhi_dev_ev_ctx) * evnt_ring) +
+			(size_t)&ring->ring_ctx->ev.rp - (size_t)ring->ring_ctx;
+	} else {
 		transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
 			sizeof(struct mhi_dev_ev_ctx) *
-			evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp -
-			(uint32_t)ring->ring_ctx;
+			evnt_ring) + (size_t)&ring->ring_ctx->ev.rp -
+			(size_t)ring->ring_ctx;
+	}
 
 	transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
 	transfer_addr.size = sizeof(uint64_t);

@@ -1063,7 +1105,7 @@ static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
 }
 
 static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
-			uint32_t rd_ofst, uint32_t len,
+			size_t rd_ofst, uint32_t len,
 			enum mhi_dev_cmd_completion_code code)
 {
 	union mhi_dev_ring_element_type compl_event;

@@ -1143,7 +1185,7 @@ static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi,
 		+ (mhi->ring[MHI_RING_CMD_ID].rd_offset *
 		(sizeof(union mhi_dev_ring_element_type)));
 	mhi_log(MHI_MSG_VERBOSE, "evt cmd comp ptr :%d\n",
-			(uint32_t) event.evt_cmd_comp.ptr);
+			(size_t) event.evt_cmd_comp.ptr);
 	event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
 	event.evt_cmd_comp.code = code;
 	return mhi_dev_send_event(mhi, 0, &event);

@@ -1703,7 +1745,7 @@ static void mhi_dev_transfer_completion_cb(void *mreq)
 	unsigned long flags;
 	size_t transfer_len;
 	u32 snd_cmpl;
-	uint32_t rd_offset;
+	size_t rd_offset;
 
 	client = req->client;
 	ch = client->channel;

@@ -2402,7 +2444,7 @@ int mhi_dev_read_channel(struct mhi_req *mreq)
 			bytes_read += bytes_to_read;
 			addr_offset = ch->tre_size - ch->tre_bytes_left;
 			read_from_loc = ch->tre_loc + addr_offset;
-			write_to_loc = (uint32_t) mreq->buf +
+			write_to_loc = (size_t) mreq->buf +
 				(mreq->len - usr_buf_remaining);
 			ch->tre_bytes_left -= bytes_to_read;
 			mreq->el = el;

@@ -2566,7 +2608,7 @@ int mhi_dev_write_channel(struct mhi_req *wreq)
 		bytes_to_write = min(usr_buf_remaining, tre_len);
 		usr_buf_offset = wreq->len - bytes_to_write;
-		read_from_loc = (uint32_t) wreq->buf + usr_buf_offset;
+		read_from_loc = (size_t) wreq->buf + usr_buf_offset;
 		write_to_loc = el->tre.data_buf_ptr;
 		wreq->rd_offset = ring->rd_offset;
 		wreq->el = el;
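Note on the mhi.c changes: both write-to-host paths now apply one rule. A source buffer backed by dma_alloc_coherent already carries a device-usable handle, so it is passed through mhi_addr.phy_addr and never run through dma_map_single(); only streaming buffers get mapped, and only those are later unmapped (the ereq->event_rd_dma == 0 convention checked in the completion callback). A minimal sketch of that decision, using a hypothetical xfer_desc/pick_dma_addr pair that is not part of the driver; only dma_map_single(), dma_mapping_error() and DMA_TO_DEVICE are real kernel APIs:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical stand-in for the struct mhi_addr fields used here. */
    struct xfer_desc {
            void *virt_addr;
            size_t size;
            dma_addr_t phy_addr;   /* non-zero => dma_alloc_coherent backed */
    };

    static int pick_dma_addr(struct device *dev, struct xfer_desc *xfer,
                             dma_addr_t *out, bool *needs_unmap)
    {
            if (xfer->phy_addr) {
                    /* Coherent buffer: reuse its handle, never unmap it. */
                    *out = xfer->phy_addr;
                    *needs_unmap = false;
                    return 0;
            }

            /* Streaming buffer: one-shot mapping for the device copy. */
            *out = dma_map_single(dev, xfer->virt_addr, xfer->size,
                                  DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *out))
                    return -ENOMEM;

            *needs_unmap = true;
            return 0;
    }

A caller that took the mapping path would remember the handle (the driver keeps it in ereq->event_rd_dma) and unmap it in its completion callback; the coherent path leaves nothing to tear down, which is why the callback in mhi.c now skips dma_unmap_single() when the stored handle is zero.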
drivers/platform/msm/mhi_dev/mhi.h  +11 −11

@@ -327,8 +327,8 @@ struct mhi_meminfo {
 struct mhi_addr {
 	uint64_t host_pa;
-	uintptr_t device_pa;
-	uintptr_t device_va;
+	size_t device_pa;
+	size_t device_va;
 	size_t size;
 	dma_addr_t phy_addr;
 	void *virt_addr;

@@ -374,9 +374,9 @@ struct mhi_dev_ring {
 	struct mhi_dev *mhi_dev;
 	uint32_t id;
-	uint32_t rd_offset;
-	uint32_t wr_offset;
-	uint32_t ring_size;
+	size_t rd_offset;
+	size_t wr_offset;
+	size_t ring_size;
 
 	enum mhi_dev_ring_type type;
 	enum mhi_dev_ring_state state;

@@ -397,7 +397,7 @@ struct mhi_dev_ring {
 };
 
 static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
-						uint32_t rd_offset)
+						size_t rd_offset)
 {
 	ring->rd_offset++;
 	if (ring->rd_offset == ring->ring_size)

@@ -516,9 +516,9 @@ struct mhi_dev {
 	struct list_head event_ring_list;
 	struct list_head process_ring_list;
-	uint32_t cmd_ring_idx;
-	uint32_t ev_ring_start;
-	uint32_t ch_ring_start;
+	size_t cmd_ring_idx;
+	size_t ev_ring_start;
+	size_t ch_ring_start;
 
 	/* IPA Handles */
 	u32 ipa_clnt_hndl[4];

@@ -672,7 +672,7 @@ int mhi_ring_start(struct mhi_dev_ring *ring,
  * @ring: Ring for the respective context - Channel/Event/Command.
  * @wr_offset: Cache the TRE's upto the write offset value.
  */
-int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, size_t wr_offset);
 
 /**
  * mhi_dev_update_wr_offset() - Check for any updates in the write offset.

@@ -693,7 +693,7 @@ int mhi_dev_process_ring(struct mhi_dev_ring *ring);
  * @ring: Ring for the respective context - Channel/Event/Command.
  * @offset: Offset index into the respective ring's cache element.
 */
-int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, size_t offset);
 
 /**
  * mhi_dev_add_element() - Copy the element to the respective transfer rings
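Note on the mhi.h changes: the uint32_t to size_t conversions in these structures and prototypes are about pointer width. Offsets and locations derived from pointers were being narrowed to 32 bits, which silently truncates on a 64-bit kernel; size_t (like uintptr_t) is pointer-sized on both 32- and 64-bit builds. A small standalone illustration, ordinary userspace C rather than driver code, with a hypothetical malloc'd buffer standing in for the real objects:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *buf = malloc(16);

            size_t ok = (size_t)buf;                   /* pointer-sized, round-trips */
            uint32_t bad = (uint32_t)(uintptr_t)buf;   /* keeps only the low 32 bits */

            printf("orig=%p size_t=0x%zx uint32_t=0x%x\n",
                   (void *)buf, ok, (unsigned int)bad);

            /* Rebuilding an address from 'ok' is safe; rebuilding it from
             * 'bad' can point anywhere once allocations sit above 4 GiB. */
            free(buf);
            return 0;
    }

That is exactly the situation of write_to_loc and read_from_loc in mhi.c, which hold addresses cast from mreq->buf and wreq->buf.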
drivers/platform/msm/mhi_dev/mhi_ring.c  +17 −15

@@ -26,7 +26,7 @@
 #include "mhi.h"
 
-static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
+static size_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
 {
 	uint64_t rbase;

@@ -42,7 +42,7 @@ static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring)
 }
 
 int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
-					uint32_t start, uint32_t end)
+					size_t start, size_t end)
 {
 	struct mhi_addr host_addr;
 	struct mhi_dev *mhi_ctx;

@@ -84,9 +84,9 @@ int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
 	return 0;
 }
 
-int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset)
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, size_t wr_offset)
 {
-	uint32_t old_offset = 0;
+	size_t old_offset = 0;
 	struct mhi_dev *mhi_ctx;
 
 	if (WARN_ON(!ring))

@@ -135,7 +135,7 @@ EXPORT_SYMBOL(mhi_dev_cache_ring);
 int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
 {
 	uint64_t wr_offset = 0;
-	uint32_t new_wr_offset = 0;
+	size_t new_wr_offset = 0;
 	int32_t rc = 0;
 
 	if (WARN_ON(!ring))

@@ -150,7 +150,7 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
 		}
 		mhi_log(MHI_MSG_VERBOSE,
 			"ring %d wr_offset from db 0x%x\n",
-			ring->id, (uint32_t) wr_offset);
+			ring->id, (size_t) wr_offset);
 		break;
 	case RING_TYPE_ER:
 		rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);

@@ -167,7 +167,7 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
 		}
 		mhi_log(MHI_MSG_VERBOSE,
 			"ring %d wr_offset from db 0x%x\n",
-			ring->id, (uint32_t) wr_offset);
+			ring->id, (size_t) wr_offset);
 		break;
 	default:
 		mhi_log(MHI_MSG_ERROR, "invalid ring type\n");

@@ -184,7 +184,7 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
 }
 EXPORT_SYMBOL(mhi_dev_update_wr_offset);
 
-int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset)
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, size_t offset)
 {
 	union mhi_dev_ring_element_type *el;

@@ -276,7 +276,7 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
 		union mhi_dev_ring_element_type *element,
 		struct event_req *ereq, int size)
 {
-	uint32_t old_offset = 0;
+	size_t old_offset = 0;
 	struct mhi_addr host_addr;
 	uint32_t num_elem = 1;
 	uint32_t num_free_elem;

@@ -347,6 +347,7 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
 		/* No wrap-around case */
 		host_addr.virt_addr = element;
 		host_addr.size = size;
+		host_addr.phy_addr = 0;
 		mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
 				ereq, MHI_DEV_DMA_ASYNC);
 	} else {

@@ -365,6 +366,7 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
 		host_addr.virt_addr = element + (ring->ring_size - old_offset);
 		host_addr.size = ring->rd_offset *
			sizeof(union mhi_dev_ring_element_type);
+		host_addr.phy_addr = 0;
 		mhi_ctx->write_to_host(ring->mhi_dev,
 				&host_addr, ereq, MHI_DEV_DMA_ASYNC);
 	}

@@ -376,8 +378,8 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
 		struct mhi_dev *mhi)
 {
 	int rc = 0;
-	uint32_t wr_offset = 0;
-	uint32_t offset = 0;
+	size_t wr_offset = 0;
+	size_t offset = 0;
 
 	if (WARN_ON(!ring || !ctx || !mhi))
 		return -EINVAL;

@@ -403,7 +405,7 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
 	if (!ring->ring_cache)
 		return -ENOMEM;
 
-	offset = (uint32_t)(ring->ring_ctx->generic.rbase -
+	offset = (size_t)(ring->ring_ctx->generic.rbase -
 					mhi->ctrl_base.host_pa);
 
 	ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset;

@@ -432,9 +434,9 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
 	}
 
 	mhi_log(MHI_MSG_VERBOSE, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n",
-			(uint32_t)ring->ring_ctx->generic.rbase,
-			(uint32_t)ring->ring_ctx->generic.rp,
-			(uint32_t)ring->ring_ctx->generic.wp);
+			(size_t)ring->ring_ctx->generic.rbase,
+			(size_t)ring->ring_ctx->generic.rp,
+			(size_t)ring->ring_ctx->generic.wp);
 
 	ring->wr_offset = wr_offset;
 	return rc;
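Note on the mhi_ring.c changes: because write_to_host() now treats a non-zero phy_addr as "already coherent, do not map", every call site that hands over a streaming buffer must clear the field, which is what the added host_addr.phy_addr = 0 lines in mhi_dev_add_element() (and the matching one in mhi_dev_send_event()) do. A caller-side sketch of the two cases, using hypothetical helpers and a stand-in struct that mirrors only the relevant mhi_addr fields:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical stand-in for the mhi_addr fields involved. */
    struct xfer_desc {
            void *virt_addr;
            size_t size;
            dma_addr_t phy_addr;
    };

    /* Hypothetical: stage a write whose source is ordinary kernel memory.
     * phy_addr must be 0 so the write path takes dma_map_single(). */
    static void stage_streaming_write(struct xfer_desc *x, void *elem, size_t len)
    {
            memset(x, 0, sizeof(*x));
            x->virt_addr = elem;
            x->size = len;
            x->phy_addr = 0;
    }

    /* Hypothetical: stage a write whose source came from dma_alloc_coherent();
     * the handle returned by the allocation is reused, nothing gets mapped. */
    static void stage_coherent_write(struct xfer_desc *x, void *cpu_addr,
                                     dma_addr_t handle, size_t len)
    {
            memset(x, 0, sizeof(*x));
            x->virt_addr = cpu_addr;
            x->size = len;
            x->phy_addr = handle;
    }

Leaving phy_addr uninitialized on the streaming path would risk a stale value being mistaken for a coherent handle and handed straight to the DMA engine.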