drivers/rpmsg/qcom_glink_native.c (+42 −4)

@@ -112,6 +112,8 @@ struct glink_core_rx_intent {
  * @in_reset:	reset status of this edge
  * @features:	remote features
  * @intentless:	flag to indicate that there is no intent
+ * @tx_avail_notify: Waitqueue for pending tx tasks
+ * @sent_read_notify: flag to check cmd sent or not
  * @ilc:	ipc logging context reference
  */
 struct qcom_glink {
@@ -146,6 +148,9 @@ struct qcom_glink {
 	bool intentless;
 
+	wait_queue_head_t tx_avail_notify;
+	bool sent_read_notify;
+
 	void *ilc;
 };
@@ -357,6 +362,22 @@ static void qcom_glink_pipe_reset(struct qcom_glink *glink)
 	glink->rx_pipe->reset(glink->rx_pipe);
 }
 
+static void qcom_glink_send_read_notify(struct qcom_glink *glink)
+{
+	struct glink_msg msg;
+
+	msg.cmd = cpu_to_le16(RPM_CMD_READ_NOTIF);
+	msg.param1 = 0;
+	msg.param2 = 0;
+
+	GLINK_INFO(glink->ilc, "send READ NOTIFY cmd\n");
+
+	qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);
+
+	mbox_send_message(glink->mbox_chan, NULL);
+	mbox_client_txdone(glink->mbox_chan, 0);
+}
+
 static int qcom_glink_tx(struct qcom_glink *glink,
 			 const void *hdr, size_t hlen,
 			 const void *data, size_t dlen, bool wait)
@@ -380,17 +401,27 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			goto out;
 		}
 
-		if (atomic_read(&glink->in_reset)) {
-			ret = -ECONNRESET;
-			goto out;
+		if (!glink->sent_read_notify) {
+			glink->sent_read_notify = true;
+			qcom_glink_send_read_notify(glink);
 		}
 
 		/* Wait without holding the tx_lock */
 		spin_unlock_irqrestore(&glink->tx_lock, flags);
 
-		usleep_range(10000, 15000);
+		wait_event_timeout(glink->tx_avail_notify,
+				   (qcom_glink_tx_avail(glink) >= tlen ||
+				   atomic_read(&glink->in_reset)), 10 * HZ);
 
 		spin_lock_irqsave(&glink->tx_lock, flags);
+
+		if (atomic_read(&glink->in_reset)) {
+			ret = -ECONNRESET;
+			goto out;
+		}
+
+		if (qcom_glink_tx_avail(glink) >= tlen)
+			glink->sent_read_notify = false;
 	}
 
 	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
@@ -1198,6 +1229,9 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data)
 	unsigned int cmd;
 	int ret = 0;
 
+	/* To wakeup any blocking writers */
+	wake_up_all(&glink->tx_avail_notify);
+
 	for (;;) {
 		avail = qcom_glink_rx_avail(glink);
 		if (avail < sizeof(msg))
@@ -1981,6 +2015,9 @@ static void qcom_glink_notif_reset(void *data)
 		return;
 	atomic_inc(&glink->in_reset);
 
+	/* To wakeup any blocking writers */
+	wake_up_all(&glink->tx_avail_notify);
+
 	spin_lock_irqsave(&glink->idr_lock, flags);
 	idr_for_each_entry(&glink->lcids, channel, cid) {
 		wake_up(&channel->intent_req_event);
@@ -2029,6 +2066,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 	spin_lock_init(&glink->rx_lock);
 	INIT_LIST_HEAD(&glink->rx_queue);
 	INIT_WORK(&glink->rx_work, qcom_glink_work);
+	init_waitqueue_head(&glink->tx_avail_notify);
 
 	spin_lock_init(&glink->idr_lock);
 	idr_init(&glink->lcids);
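The patch replaces the fixed 10–15 ms polling sleep in qcom_glink_tx() with event-driven flow control: when the TX FIFO is full, the writer sends a single RPM_CMD_READ_NOTIF so the remote will signal once it has drained data, then blocks on the new tx_avail_notify waitqueue until space is available, the edge goes into reset, or a 10-second timeout expires; the RX interrupt handler and the reset notifier wake all blocked writers. Below is a minimal userspace sketch of the same producer/consumer pattern, using pthread condition variables instead of the kernel waitqueue API; all names here (fifo_ctx, fifo_tx, fifo_rx_drained) are illustrative and not part of the driver.

```c
/*
 * Userspace analogue (not the kernel code) of the blocking-writer pattern
 * added by this patch: a writer that finds the FIFO full asks the reader
 * side to notify it, then sleeps until space is available, a reset is
 * flagged, or a timeout expires.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

struct fifo_ctx {
	pthread_mutex_t lock;
	pthread_cond_t tx_avail_notify;   /* mirrors glink->tx_avail_notify */
	size_t avail;                     /* free space in the TX FIFO */
	bool in_reset;                    /* mirrors glink->in_reset */
	bool sent_read_notify;            /* mirrors glink->sent_read_notify */
};

/* Writer side: returns 0 on success, -1 on reset or timeout. */
int fifo_tx(struct fifo_ctx *ctx, size_t tlen)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 10;            /* mirrors the 10 * HZ timeout */

	pthread_mutex_lock(&ctx->lock);
	while (ctx->avail < tlen && !ctx->in_reset) {
		if (!ctx->sent_read_notify) {
			ctx->sent_read_notify = true;
			/* stands in for qcom_glink_send_read_notify() */
			printf("send READ NOTIFY cmd\n");
		}
		if (pthread_cond_timedwait(&ctx->tx_avail_notify, &ctx->lock,
					   &deadline) != 0)
			break;            /* timed out */
	}
	if (ctx->in_reset || ctx->avail < tlen) {
		pthread_mutex_unlock(&ctx->lock);
		return -1;                /* like the -ECONNRESET path */
	}
	ctx->sent_read_notify = false;    /* remote drained the FIFO; re-arm */
	ctx->avail -= tlen;               /* stands in for qcom_glink_tx_write() */
	pthread_mutex_unlock(&ctx->lock);
	return 0;
}

/* Reader/IRQ side: after draining data, wake any blocked writers. */
void fifo_rx_drained(struct fifo_ctx *ctx, size_t freed)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->avail += freed;
	pthread_mutex_unlock(&ctx->lock);
	pthread_cond_broadcast(&ctx->tx_avail_notify);  /* mirrors wake_up_all() */
}
```

The sent_read_notify flag plays the same role in both versions: only one READ_NOTIFY request is issued per full-FIFO episode, and it is re-armed once space becomes available again so the next stall can request another notification.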