drivers/platform/msm/mhi_dev/mhi.c +18 −8

@@ -2137,25 +2137,34 @@ static void mhi_dev_transfer_completion_cb(void *mreq)
 {
     int rc = 0;
     struct mhi_req *req = mreq;
-    struct mhi_dev_channel *ch = req->client->channel;
+    struct mhi_dev_channel *ch;
     u32 snd_cmpl = req->snd_cmpl;
+    bool inbound = false;
 
-    if (mhi_ctx->ch_ctx_cache[ch->ch_id].ch_type ==
-            MHI_DEV_CH_TYPE_INBOUND_CHANNEL)
-        ch->pend_wr_count--;
+    ch = &mhi_ctx->ch[req->chan];
 
     dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
             req->len, DMA_FROM_DEVICE);
 
+    if (mhi_ctx->ch_ctx_cache[ch->ch_id].ch_type ==
+            MHI_DEV_CH_TYPE_INBOUND_CHANNEL) {
+        inbound = true;
+        ch->pend_wr_count--;
+    }
+
     /*
-     * Channel got stopped or closed with transfers pending
+     * Channel got closed with transfers pending
      * Do not trigger callback or send cmpl to host
      */
     if (ch->state == MHI_DEV_CH_CLOSED || ch->state == MHI_DEV_CH_STOPPED) {
+        if (inbound)
             mhi_log(MHI_MSG_DBG,
-                "Ch %d not in started state, %d writes pending\n",
+                "Ch %d closed with %d writes pending\n",
                 ch->ch_id, ch->pend_wr_count + 1);
+        else
+            mhi_log(MHI_MSG_DBG,
+                "Ch %d closed with read pending\n",
+                ch->ch_id);
         return;
     }

@@ -2707,6 +2716,7 @@ int mhi_dev_open_channel(uint32_t chan_id,
     ch->active_client = (*handle_client);
     (*handle_client)->channel = ch;
     (*handle_client)->event_trigger = mhi_dev_client_cb_reason;
+    ch->pend_wr_count = 0;
 
     if (ch->state == MHI_DEV_CH_UNINT) {
         ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
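Reviewer note: the completion callback now resolves the channel from the driver-owned channel table via the index carried in the request (req->chan), instead of chasing req->client->channel, which can dangle once the client has detached at close. A minimal userspace sketch of that lookup-by-index pattern; the types here (xfer_req, chan_table) are illustrative stand-ins, not the driver's structures:

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's structures. */
    struct channel { int id; int state; };
    struct client  { struct channel *channel; };
    struct xfer_req {
        struct client *client;  /* may be stale after the channel closes */
        int chan;               /* index: always valid for a table lookup */
    };

    static struct channel chan_table[8];

    static void completion_cb(struct xfer_req *req)
    {
        /*
         * Unsafe: req->client->channel chases a pointer that close()
         * may already have cleared. Safe: index into the table the
         * driver owns for the whole device lifetime.
         */
        struct channel *ch = &chan_table[req->chan];

        printf("completion on ch %d (state %d)\n", ch->id, ch->state);
    }

    int main(void)
    {
        struct xfer_req req = { .client = NULL, .chan = 3 };

        chan_table[3].id = 3;
        completion_cb(&req);  /* works even with req.client == NULL */
        return 0;
    }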
drivers/platform/msm/mhi_dev/mhi_uci.c +192 −93

@@ -24,8 +24,8 @@
 #define MHI_SOFTWARE_CLIENT_LIMIT    (MHI_MAX_SOFTWARE_CHANNELS/2)
 #define MHI_UCI_IPC_LOG_PAGES        (100)
 
-/* Max number of MHI write request structures (used in async writes) */
-#define MHI_UCI_NUM_WR_REQ_DEFAULT    10
+/* Max number of MHI read/write request structs (used in async transfers) */
+#define MHI_UCI_NUM_REQ_DEFAULT        10
 #define MAX_NR_TRBS_PER_CHAN        9
 #define MHI_QTI_IFACE_ID        4
 #define MHI_ADPL_IFACE_ID        5

@@ -42,6 +42,8 @@
 #define MHI_UCI_RELEASE_TIMEOUT_MAX    5100
 #define MHI_UCI_RELEASE_TIMEOUT_COUNT    30
 
+#define MHI_UCI_IS_CHAN_DIR_IN(n) ((n % 2) ? true : false)
+
 enum uci_dbg_level {
     UCI_DBG_VERBOSE = 0x0,
     UCI_DBG_INFO = 0x1,

@@ -86,8 +88,7 @@ struct chan_attr {
     /* Skip node creation if not needed */
     bool skip_node;
-    /* Number of write request structs to allocate */
-    u32 num_wr_reqs;
+    u32 num_reqs;
 };
 
 static void mhi_uci_generic_client_cb(struct mhi_dev_client_cb_data *cb_data);

@@ -348,15 +349,19 @@ struct uci_client {
     struct mhi_uci_ctxt_t *uci_ctxt;
     struct mutex in_chan_lock;
     struct mutex out_chan_lock;
-    spinlock_t wr_req_lock;
+    spinlock_t req_lock;
     unsigned int f_flags;
-    struct mhi_req *wreqs;
-    struct list_head wr_req_list;
+    /* Pointer to dynamically allocated mhi_req structs */
+    struct mhi_req *reqs;
+    /* List of available (free) reqs */
+    struct list_head req_list;
+    /* List of in-use reqs */
+    struct list_head in_use_list;
     struct completion read_done;
     struct completion at_ctrl_read_done;
     struct completion *write_done;
     int (*send)(struct uci_client *h, void *data_loc, u32 size);
-    int (*read)(struct uci_client *h, struct mhi_req *ureq, int *bytes);
+    int (*read)(struct uci_client *h, int *bytes);
     unsigned int tiocm;
     unsigned int at_ctrl_mask;
 };

@@ -490,23 +495,75 @@ static int mhi_init_read_chan(struct uci_client *client_handle,
     return rc;
 }
 
+static struct mhi_req *mhi_uci_get_req(struct uci_client *uci_handle)
+{
+    struct mhi_req *req;
+    unsigned long flags;
+
+    spin_lock_irqsave(&uci_handle->req_lock, flags);
+    if (list_empty(&uci_handle->req_list)) {
+        uci_log(UCI_DBG_ERROR, "Request pool empty for chans %d, %d\n",
+            uci_handle->in_chan, uci_handle->out_chan);
+        spin_unlock_irqrestore(&uci_handle->req_lock, flags);
+        return NULL;
+    }
+
+    /* Remove from free list and add to in-use list */
+    req = container_of(uci_handle->req_list.next, struct mhi_req, list);
+    list_del_init(&req->list);
+
+    /*
+     * If req is marked stale and if it was used for the write channel
+     * to host, free the previously allocated input buffer before the
+     * req is re-used
+     */
+    if (req->is_stale && req->buf && MHI_UCI_IS_CHAN_DIR_IN(req->chan)) {
+        uci_log(UCI_DBG_VERBOSE,
+            "Freeing write buf for chan %d\n", req->chan);
+        kfree(req->buf);
+    }
+    req->is_stale = false;
+
+    uci_log(UCI_DBG_VERBOSE, "Adding req to in-use list\n");
+    list_add_tail(&req->list, &uci_handle->in_use_list);
+    spin_unlock_irqrestore(&uci_handle->req_lock, flags);
+
+    return req;
+}
+
+static void mhi_uci_put_req(struct uci_client *uci_handle, struct mhi_req *req)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&uci_handle->req_lock, flags);
+    /* Remove from in-use list and add back to free list */
+    list_del_init(&req->list);
+    list_add_tail(&req->list, &uci_handle->req_list);
+    spin_unlock_irqrestore(&uci_handle->req_lock, flags);
+}
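Reviewer note: mhi_uci_get_req()/mhi_uci_put_req() turn the old single write-request list into a free-list/in-use-list pool, so the release path can later walk exactly the requests that are still outstanding. A compilable sketch of the same two-list pool, assuming userspace pthread locking and BSD sys/queue.h lists in place of the kernel spinlock and list_head; all names are illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/queue.h>

    #define NUM_REQS 4

    struct req {
        int id;
        bool is_stale;
        TAILQ_ENTRY(req) link;
    };

    TAILQ_HEAD(req_list, req);

    static struct req pool[NUM_REQS];
    static struct req_list free_list = TAILQ_HEAD_INITIALIZER(free_list);
    static struct req_list in_use_list = TAILQ_HEAD_INITIALIZER(in_use_list);
    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take a request from the free list and track it on the in-use list. */
    static struct req *get_req(void)
    {
        struct req *r = NULL;

        pthread_mutex_lock(&req_lock);
        if (!TAILQ_EMPTY(&free_list)) {
            r = TAILQ_FIRST(&free_list);
            TAILQ_REMOVE(&free_list, r, link);
            r->is_stale = false;
            TAILQ_INSERT_TAIL(&in_use_list, r, link);
        }
        pthread_mutex_unlock(&req_lock);
        return r;          /* NULL maps to the driver's -EBUSY */
    }

    /* Return a completed request to the free list. */
    static void put_req(struct req *r)
    {
        pthread_mutex_lock(&req_lock);
        TAILQ_REMOVE(&in_use_list, r, link);
        TAILQ_INSERT_TAIL(&free_list, r, link);
        pthread_mutex_unlock(&req_lock);
    }

    int main(void)
    {
        for (int i = 0; i < NUM_REQS; i++) {
            pool[i].id = i;
            TAILQ_INSERT_TAIL(&free_list, &pool[i], link);
        }

        struct req *a = get_req(), *b = get_req();

        printf("got req %d and req %d\n", a->id, b->id);
        put_req(a);
        put_req(b);
        return 0;
    }

Keeping in-flight requests on their own list (rather than simply deleting them from the free list) is what makes the later drain-on-release possible.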
 static void mhi_uci_write_completion_cb(void *req)
 {
     struct mhi_req *ureq = req;
-    struct uci_client *uci_handle;
-    unsigned long flags;
+    struct uci_client *uci_handle = (struct uci_client *)ureq->context;
 
-    uci_handle = (struct uci_client *)ureq->context;
     kfree(ureq->buf);
     ureq->buf = NULL;
 
-    spin_lock_irqsave(&uci_handle->wr_req_lock, flags);
-    list_add_tail(&ureq->list, &uci_handle->wr_req_list);
-    spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags);
+    /*
+     * If this is a delayed write completion, just clear
+     * the stale flag and return. The ureq was added to
+     * the free list when client called release function.
+     */
+    if (ureq->is_stale) {
+        uci_log(UCI_DBG_VERBOSE,
+            "Got stale completion for ch %d\n", ureq->chan);
+        ureq->is_stale = false;
+        return;
+    }
 
     if (uci_handle->write_done)
         complete(uci_handle->write_done);
 
+    mhi_uci_put_req(uci_handle, ureq);
+
     /* Write queue may be waiting for write request structs */
     wake_up(&uci_handle->write_wq);
 }

@@ -516,7 +573,19 @@ static void mhi_uci_read_completion_cb(void *req)
     struct mhi_req *ureq = req;
     struct uci_client *uci_handle;
 
+    if (ureq->is_stale) {
+        uci_log(UCI_DBG_VERBOSE,
+            "Got stale completion for ch %d, ignoring\n",
+            ureq->chan);
+        return;
+    }
+
     uci_handle = (struct uci_client *)ureq->context;
+    uci_handle->pkt_loc = (void *)ureq->buf;
+    uci_handle->pkt_size = ureq->transfer_len;
+
+    mhi_uci_put_req(uci_handle, ureq);
+
     complete(&uci_handle->read_done);
 }

@@ -526,6 +595,9 @@ static int mhi_uci_send_sync(struct uci_client *uci_handle,
     struct mhi_req ureq;
     int ret_val;
 
+    uci_log(UCI_DBG_VERBOSE,
+        "Sync write for ch %d size %d\n",
+        uci_handle->out_chan, size);
+
     ureq.client = uci_handle->out_handle;
     ureq.buf = data_loc;
     ureq.len = size;

@@ -546,19 +618,12 @@ static int mhi_uci_send_async(struct uci_client *uci_handle,
     struct mhi_req *ureq;
 
     uci_log(UCI_DBG_VERBOSE,
-        "Got async write for ch %d of size %d\n",
+        "Async write for ch %d size %d\n",
         uci_handle->out_chan, size);
 
-    spin_lock_irq(&uci_handle->wr_req_lock);
-    if (list_empty(&uci_handle->wr_req_list)) {
-        uci_log(UCI_DBG_ERROR, "Write request pool empty\n");
-        spin_unlock_irq(&uci_handle->wr_req_lock);
+    ureq = mhi_uci_get_req(uci_handle);
+    if (!ureq)
         return -EBUSY;
-    }
-    ureq = container_of(uci_handle->wr_req_list.next, struct mhi_req, list);
-    list_del_init(&ureq->list);
-    spin_unlock_irq(&uci_handle->wr_req_lock);
 
     ureq->client = uci_handle->out_handle;
     ureq->context = uci_handle;

@@ -577,9 +642,7 @@
 error_async_transfer:
     ureq->buf = NULL;
-    spin_lock_irq(&uci_handle->wr_req_lock);
-    list_add_tail(&ureq->list, &uci_handle->wr_req_list);
-    spin_unlock_irq(&uci_handle->wr_req_lock);
+    mhi_uci_put_req(uci_handle, ureq);
 
     return bytes_to_write;
 }

@@ -621,7 +684,7 @@ static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc,
         return -EAGAIN;
 
     ret_val = wait_event_interruptible_timeout(
         uci_handle->write_wq,
-        !list_empty(&uci_handle->wr_req_list),
+        !list_empty(&uci_handle->req_list),
         MHI_UCI_WRITE_REQ_AVAIL_TIMEOUT);
     if (ret_val > 0) {
         /*
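Reviewer note: the write-completion callback must now tolerate completions that arrive after the client has already released the device. Such requests were parked back on the free list at release time, so the callback only clears is_stale and returns without touching per-client state. A small sketch of that guard, with a hypothetical wreq type standing in for struct mhi_req:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative request type; mirrors only the fields the guard needs. */
    struct wreq {
        void *buf;
        bool is_stale;
        int chan;
    };

    /*
     * Completion callback guard: a request whose owner already closed
     * the device must not signal waiters or be re-queued here, since
     * it is already back on the free list.
     */
    static void write_completion_cb(struct wreq *r)
    {
        free(r->buf);
        r->buf = NULL;

        if (r->is_stale) {
            printf("stale completion on ch %d, ignored\n", r->chan);
            r->is_stale = false;
            return;
        }

        printf("completion on ch %d delivered to client\n", r->chan);
        /* ...complete(write_done) and put_req(r) would go here... */
    }

    int main(void)
    {
        struct wreq r = { .buf = malloc(16), .is_stale = true, .chan = 2 };

        write_completion_cb(&r);  /* takes the stale path */
        return 0;
    }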
@@ -722,42 +785,62 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
     return mask;
 }
 
-static int mhi_uci_alloc_write_reqs(struct uci_client *client)
+static int mhi_uci_alloc_reqs(struct uci_client *client)
 {
     int i;
-    u32 num_wr_reqs;
+    u32 num_reqs;
 
-    num_wr_reqs = client->in_chan_attr->num_wr_reqs;
-    if (!num_wr_reqs)
-        num_wr_reqs = MHI_UCI_NUM_WR_REQ_DEFAULT;
+    if (client->reqs) {
+        uci_log(UCI_DBG_VERBOSE, "Reqs already allocated\n");
+        return 0;
+    }
+
+    num_reqs = client->in_chan_attr->num_reqs;
+    if (!num_reqs)
+        num_reqs = MHI_UCI_NUM_REQ_DEFAULT;
 
-    client->wreqs = kcalloc(num_wr_reqs,
+    client->reqs = kcalloc(num_reqs,
             sizeof(struct mhi_req),
             GFP_KERNEL);
-    if (!client->wreqs) {
-        uci_log(UCI_DBG_ERROR, "Write reqs alloc failed\n");
+    if (!client->reqs) {
+        uci_log(UCI_DBG_ERROR, "Reqs alloc failed\n");
         return -ENOMEM;
     }
 
-    INIT_LIST_HEAD(&client->wr_req_list);
-    for (i = 0; i < num_wr_reqs; ++i)
-        list_add_tail(&client->wreqs[i].list,
-                &client->wr_req_list);
+    INIT_LIST_HEAD(&client->req_list);
+    INIT_LIST_HEAD(&client->in_use_list);
+    for (i = 0; i < num_reqs; ++i)
+        list_add_tail(&client->reqs[i].list,
                &client->req_list);
 
     uci_log(UCI_DBG_INFO,
         "Allocated %d write reqs for chan %d\n",
-        num_wr_reqs, client->out_chan);
+        num_reqs, client->out_chan);
     return 0;
 }
 
-static int mhi_uci_read_async(struct uci_client *uci_handle,
-            struct mhi_req *ureq, int *bytes_avail)
+static int mhi_uci_read_async(struct uci_client *uci_handle, int *bytes_avail)
 {
     int ret_val = 0;
     unsigned long compl_ret;
+    struct mhi_req *ureq;
+    struct mhi_dev_client *client_handle;
+
+    uci_log(UCI_DBG_ERROR,
+        "Async read for ch %d\n", uci_handle->in_chan);
+
+    ureq = mhi_uci_get_req(uci_handle);
+    if (!ureq) {
+        uci_log(UCI_DBG_ERROR,
+            "Out of reqs for chan %d\n", uci_handle->in_chan);
+        return -EBUSY;
+    }
 
+    client_handle = uci_handle->in_handle;
+    ureq->chan = uci_handle->in_chan;
+    ureq->client = client_handle;
+    ureq->buf = uci_handle->in_buf_list[0].addr;
+    ureq->len = uci_handle->in_buf_list[0].buf_size;
     ureq->mode = DMA_ASYNC;
     ureq->client_cb = mhi_uci_read_completion_cb;
     ureq->snd_cmpl = 1;

@@ -766,14 +849,14 @@
     reinit_completion(&uci_handle->read_done);
 
     *bytes_avail = mhi_dev_read_channel(ureq);
-    uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%lx bytes_read = 0x%x\n",
-        ureq->len, *bytes_avail);
     if (*bytes_avail < 0) {
         uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
             *bytes_avail);
+        mhi_uci_put_req(uci_handle, ureq);
         return -EIO;
     }
 
+    uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%lx bytes_read = 0x%x\n",
+        ureq->len, *bytes_avail);
     if (*bytes_avail > 0) {
         uci_log(UCI_DBG_VERBOSE,
             "Waiting for async read completion!\n");

@@ -781,7 +864,6 @@
         compl_ret =
             wait_for_completion_interruptible_timeout(
                 &uci_handle->read_done,
                 MHI_UCI_ASYNC_READ_TIMEOUT);
-
         if (compl_ret == -ERESTARTSYS) {
             uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
             return compl_ret;

@@ -791,33 +873,41 @@
             return -EIO;
         }
 
         uci_log(UCI_DBG_VERBOSE,
-            "wk up Read completed on ch %d\n", ureq->chan);
-
-        uci_handle->pkt_loc = (void *)ureq->buf;
-        uci_handle->pkt_size = ureq->transfer_len;
+            "wk up Read completed on ch %d\n", uci_handle->in_chan);
 
         uci_log(UCI_DBG_VERBOSE,
             "Got pkt of sz 0x%lx at adr %pK, ch %d\n",
-            uci_handle->pkt_size, ureq->buf, ureq->chan);
+            uci_handle->pkt_size,
+            uci_handle->pkt_loc, uci_handle->in_chan);
     } else {
         uci_handle->pkt_loc = NULL;
         uci_handle->pkt_size = 0;
+        uci_log(UCI_DBG_VERBOSE,
+            "No read data available, return req to free list\n");
+        mhi_uci_put_req(uci_handle, ureq);
     }
 
     return ret_val;
 }
 
-static int mhi_uci_read_sync(struct uci_client *uci_handle,
-            struct mhi_req *ureq, int *bytes_avail)
+static int mhi_uci_read_sync(struct uci_client *uci_handle, int *bytes_avail)
 {
     int ret_val = 0;
+    struct mhi_req ureq;
+    struct mhi_dev_client *client_handle;
+
+    uci_log(UCI_DBG_ERROR,
+        "Sync read for ch %d\n", uci_handle->in_chan);
 
-    ureq->mode = DMA_SYNC;
-    *bytes_avail = mhi_dev_read_channel(ureq);
+    client_handle = uci_handle->in_handle;
+    ureq.chan = uci_handle->in_chan;
+    ureq.client = client_handle;
+    ureq.buf = uci_handle->in_buf_list[0].addr;
+    ureq.len = uci_handle->in_buf_list[0].buf_size;
+    ureq.mode = DMA_SYNC;
+
+    *bytes_avail = mhi_dev_read_channel(&ureq);
     uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%lx bytes_read = 0x%x\n",
-        ureq->len, *bytes_avail);
+        ureq.len, *bytes_avail);
 
     if (*bytes_avail < 0) {
         uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
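Reviewer note: the async read path submits the request and then blocks on read_done until mhi_uci_read_completion_cb() fires and publishes pkt_loc/pkt_size. A self-contained userspace analogue of that submit/complete/wait handshake, using a pthread condition variable as a stand-in for the kernel completion (timeout handling omitted for brevity):

    #include <pthread.h>
    #include <stdio.h>

    /* Minimal "completion" in userspace: condition variable + done flag. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static struct completion read_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        c->done = 0;    /* analogous to reinit_completion() */
        pthread_mutex_unlock(&c->lock);
    }

    /* Plays the role of the read-completion callback ("IRQ" context). */
    static void *completion_thread(void *arg)
    {
        (void)arg;
        printf("transfer finished, signalling reader\n");
        complete(&read_done);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        /* Submit the "read", then block until the callback signals. */
        pthread_create(&t, NULL, completion_thread, NULL);
        wait_for_completion(&read_done);
        printf("reader woke up with data\n");
        pthread_join(t, NULL);
        return 0;
    }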
@@ -826,13 +916,13 @@
     }
 
     if (*bytes_avail > 0) {
-        uci_handle->pkt_loc = (void *)ureq->buf;
-        uci_handle->pkt_size = ureq->transfer_len;
+        uci_handle->pkt_loc = (void *)ureq.buf;
+        uci_handle->pkt_size = ureq.transfer_len;
 
         uci_log(UCI_DBG_VERBOSE,
             "Got pkt of sz 0x%lx at adr %pK, ch %d\n",
             uci_handle->pkt_size,
-            ureq->buf, ureq->chan);
+            ureq.buf, ureq.chan);
     } else {
         uci_handle->pkt_loc = NULL;
         uci_handle->pkt_size = 0;

@@ -857,7 +947,7 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
     /* Allocate write requests for async operations */
     if (!(uci_client->f_flags & O_SYNC)) {
-        rc = mhi_uci_alloc_write_reqs(uci_client);
+        rc = mhi_uci_alloc_reqs(uci_client);
         if (rc)
             goto handle_not_rdy_err;
         uci_client->send = mhi_uci_send_async;

@@ -978,6 +1068,7 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
 {
     struct uci_client *uci_handle = file_handle->private_data;
     int count = 0;
+    struct mhi_req *ureq;
 
     if (!uci_handle)
         return -EINVAL;

@@ -1008,9 +1099,6 @@
     if (atomic_read(&uci_handle->mhi_chans_open)) {
         atomic_set(&uci_handle->mhi_chans_open, 0);
 
-        if (!(uci_handle->f_flags & O_SYNC))
-            kfree(uci_handle->wreqs);
-
         mutex_lock(&uci_handle->out_chan_lock);
         mhi_dev_close_channel(uci_handle->out_handle);
         wake_up(&uci_handle->write_wq);

@@ -1020,6 +1108,27 @@
         mhi_dev_close_channel(uci_handle->in_handle);
         wake_up(&uci_handle->read_wq);
         mutex_unlock(&uci_handle->in_chan_lock);
+
+        /*
+         * Add back reqs from the in-use list, if any, to the free list.
+         * Mark the ureq stale to avoid returning stale data to the
+         * client if the transfer completes later.
+         */
+        count = 0;
+        while (!(list_empty(&uci_handle->in_use_list))) {
+            ureq = container_of(uci_handle->in_use_list.next,
+                    struct mhi_req, list);
+            list_del_init(&ureq->list);
+            ureq->is_stale = true;
+            uci_log(UCI_DBG_VERBOSE,
+                "Adding back req for chan %d to free list\n",
+                ureq->chan);
+            list_add_tail(&ureq->list, &uci_handle->req_list);
+            count++;
+        }
+        if (count)
+            uci_log(UCI_DBG_DBG,
+                "Client %d closed with %d transfers pending\n",
+                iminor(mhi_inode), count);
     }
 
     atomic_set(&uci_handle->read_data_ready, 0);

@@ -1136,20 +1245,11 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle,
     int *bytes_avail)
 {
     int ret_val = 0;
-    struct mhi_dev_client *client_handle;
-    struct mhi_req ureq;
-
-    client_handle = uci_handle->in_handle;
-    ureq.chan = uci_handle->in_chan;
-    ureq.client = client_handle;
-    ureq.buf = uci_handle->in_buf_list[0].addr;
-    ureq.len = uci_handle->in_buf_list[0].buf_size;
 
     do {
         if (!uci_handle->pkt_loc &&
                 !atomic_read(&uci_ctxt.mhi_disabled)) {
-            ret_val = uci_handle->read(uci_handle, &ureq,
-                            bytes_avail);
+            ret_val = uci_handle->read(uci_handle, bytes_avail);
             if (ret_val)
                 return ret_val;
         }

@@ -1159,12 +1259,13 @@
             uci_log(UCI_DBG_VERBOSE,
                 "No data read_data_ready %d, chan %d\n",
                 atomic_read(&uci_handle->read_data_ready),
-                ureq.chan);
+                uci_handle->in_chan);
             if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
                 return -EAGAIN;
             ret_val = wait_event_interruptible(uci_handle->read_wq,
-                (!mhi_dev_channel_isempty(client_handle)));
+                (!mhi_dev_channel_isempty(
+                    uci_handle->in_handle)));
             if (ret_val == -ERESTARTSYS) {
                 uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
@@ -1173,26 +1274,16 @@
             uci_log(UCI_DBG_VERBOSE,
                 "wk up Got data on ch %d read_data_ready %d\n",
-                ureq.chan,
+                uci_handle->in_chan,
                 atomic_read(&uci_handle->read_data_ready));
         } else if (*bytes_avail > 0) {
             /* A valid packet was returned from MHI */
             uci_log(UCI_DBG_VERBOSE,
                 "Got packet: avail pkts %d phy_adr %pK, ch %d\n",
                 atomic_read(&uci_handle->read_data_ready),
-                ureq.buf, ureq.chan);
-        } else {
-            /*
-             * MHI did not return a valid packet, but we have one
-             * which we did not finish returning to user
-             */
-            uci_log(UCI_DBG_CRITICAL,
-                "chan %d err: avail pkts %d phy_adr %pK",
-                ureq.chan,
-                atomic_read(&uci_handle->read_data_ready),
-                ureq.buf);
-            return -EIO;
+                uci_handle->pkt_loc, uci_handle->in_chan);
+            break;
         }
     } while (!uci_handle->pkt_loc);

@@ -1454,7 +1545,7 @@ static int mhi_register_client(struct uci_client *mhi_client, int index)
     mutex_init(&mhi_client->in_chan_lock);
     mutex_init(&mhi_client->out_chan_lock);
-    spin_lock_init(&mhi_client->wr_req_lock);
+    spin_lock_init(&mhi_client->req_lock);
 
     /* Init the completion event for AT ctrl read */
     init_completion(&mhi_client->at_ctrl_read_done);

@@ -1822,6 +1913,7 @@ static void mhi_uci_at_ctrl_client_cb(struct mhi_dev_client_cb_data *cb_data)
 {
     struct uci_client *client = cb_data->user_data;
     int rc;
+    struct mhi_req *ureq;
 
     uci_log(UCI_DBG_VERBOSE,
         " Rcvd MHI cb for channel %d, state %d\n",
         cb_data->channel, cb_data->ctrl_info);

@@ -1848,10 +1940,17 @@
         }
         destroy_workqueue(uci_ctxt.at_ctrl_wq);
         uci_ctxt.at_ctrl_wq = NULL;
-        if (!(client->f_flags & O_SYNC))
-            kfree(client->wreqs);
         mhi_dev_close_channel(client->out_handle);
         mhi_dev_close_channel(client->in_handle);
+
+        /* Add back reqs from the in-use list, if any, to the free list */
+        while (!(list_empty(&client->in_use_list))) {
+            ureq = container_of(client->in_use_list.next,
+                    struct mhi_req, list);
+            list_del_init(&ureq->list);
+            list_add_tail(&ureq->list, &client->req_list);
+        }
     }
 }

include/linux/msm_mhi_dev.h +1 −0

@@ -64,6 +64,7 @@ struct mhi_req {
     struct list_head list;
     union mhi_dev_ring_element_type *el;
     void (*client_cb)(void *req);
+    bool is_stale;
 };
 
 /* SW channel client list */
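Reviewer note: the core of this patch is the release-time drain. Requests still in flight when the client closes are moved back to the free list and marked stale, so a late completion can neither signal a vanished waiter nor leak the request struct. A compilable sketch of the drain, again using sys/queue.h lists as stand-ins for the kernel list_head (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/queue.h>

    struct req {
        int chan;
        bool is_stale;
        TAILQ_ENTRY(req) link;
    };

    TAILQ_HEAD(req_list, req);

    /*
     * Release-time drain: every request still in flight is marked stale
     * and parked on the free list. If its completion fires later, the
     * callback sees is_stale and returns without touching client state;
     * if the req is re-used first, get_req() clears the flag (and frees
     * any leftover write buffer).
     */
    static int drain_in_use(struct req_list *in_use, struct req_list *free_l)
    {
        int count = 0;
        struct req *r;

        while (!TAILQ_EMPTY(in_use)) {
            r = TAILQ_FIRST(in_use);
            TAILQ_REMOVE(in_use, r, link);
            r->is_stale = true;
            TAILQ_INSERT_TAIL(free_l, r, link);
            count++;
        }
        return count;
    }

    int main(void)
    {
        struct req_list in_use = TAILQ_HEAD_INITIALIZER(in_use);
        struct req_list free_l = TAILQ_HEAD_INITIALIZER(free_l);
        struct req pending = { .chan = 4 };

        TAILQ_INSERT_TAIL(&in_use, &pending, link);
        printf("drained %d pending reqs\n", drain_in_use(&in_use, &free_l));
        printf("req on ch %d stale=%d\n", pending.chan, pending.is_stale);
        return 0;
    }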