
Commit f62c2004 authored by Sai Chaitanya Kaveti, committed by Gerrit - the friendly Code Review server

msm: mhi_dev: Using channel mutex lock for channel specific lists



Using the channel mutex lock while accessing the channel-specific lists
event_req_buffers and flush_event_req_buffers. This avoids any
synchronization issues for these lists. Previously, two different locks,
the global MHI lock and the channel lock, were used while accessing
these lists.
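
For context, the scheme described above amounts to guarding both per-channel
lists with the channel's own mutex. The sketch below is only an illustration
of that pattern, not the driver's code: struct mhi_dev_channel is reduced to
the fields relevant here, and the queue_for_flush() helper is a hypothetical
stand-in for the paths that move event requests between the two lists.

/*
 * Simplified sketch of the locking scheme: both per-channel lists are
 * protected by the per-channel mutex ch_lock rather than the global MHI
 * lock. Types and fields are cut-down stand-ins for the real structures.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct event_req {
	struct list_head list;
	bool is_stale;
};

struct mhi_dev_channel {
	struct mutex ch_lock;			/* guards the two lists below */
	struct list_head event_req_buffers;	/* free event requests */
	struct list_head flush_event_req_buffers; /* requests pending flush */
	struct event_req *curr_ereq;
};

/* Hypothetical helper: move the current event request to the flush list. */
static void queue_for_flush(struct mhi_dev_channel *ch)
{
	mutex_lock(&ch->ch_lock);
	if (ch->curr_ereq)
		list_add_tail(&ch->curr_ereq->list,
			      &ch->flush_event_req_buffers);
	ch->curr_ereq = NULL;
	mutex_unlock(&ch->ch_lock);
}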

Removing the MHI spin lock around accesses to the above lists in the
mhi_dev_process_reset_cmd, mhi_dev_queue_transfer_completion and
mhi_dev_flush_transfer_completion_events APIs, as those accesses are
already protected by the channel mutex lock acquired by the callers of
these three APIs.

Removing the MHI spin lock in mhi_dev_event_msi_cb and taking the channel
mutex lock there instead. All other places where these lists are accessed
are already protected by the channel mutex lock.
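
The other half of the change is the "lock is held by the caller" convention
the two paragraphs above rely on. The snippet below is a hypothetical sketch
of that convention, reusing the stand-in types from the earlier sketch;
flush_completion_events() and channel_write_path() are illustrative names,
not functions from the driver.

/*
 * Hypothetical sketch of the caller-held-lock convention: the outer path
 * takes ch_lock once, and the inner helper only asserts that it is held
 * instead of taking the global MHI spinlock again.
 */
#include <linux/lockdep.h>

static void flush_completion_events(struct mhi_dev_channel *ch)
{
	struct event_req *flush_ereq;

	/* Caller must already hold ch->ch_lock; lockdep catches misuse. */
	lockdep_assert_held(&ch->ch_lock);

	while (!list_empty(&ch->flush_event_req_buffers)) {
		flush_ereq = list_first_entry(&ch->flush_event_req_buffers,
					      struct event_req, list);
		list_del_init(&flush_ereq->list);
		/* ... generate/send completion events for flush_ereq ... */
	}
}

static void channel_write_path(struct mhi_dev_channel *ch)
{
	mutex_lock(&ch->ch_lock);	/* one lock covers all list accesses */
	flush_completion_events(ch);
	mutex_unlock(&ch->ch_lock);
}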

Change-Id: I58b673be452a942ff946e07f17c96a0715805621
Signed-off-by: Sai Chaitanya Kaveti <quic_skaveti@quicinc.com>
parent 77221bae
+2 −9
@@ -380,7 +380,7 @@ static void mhi_dev_event_msi_cb(void *req)
 	if (ch->evt_buf_wp == ch->evt_buf_size)
 		ch->evt_buf_wp = 0;
 	/* Return the event req to the list */
-	spin_lock_irqsave(&mhi->lock, flags);
+	mutex_lock(&ch->ch_lock);
 	if (ch->curr_ereq == NULL)
 		ch->curr_ereq = ereq;
 	else {
@@ -388,7 +388,7 @@ static void mhi_dev_event_msi_cb(void *req)
 			ereq->is_stale = false;
 		list_add_tail(&ereq->list, &ch->event_req_buffers);
 	}
-	spin_unlock_irqrestore(&mhi->lock, flags);
+	mutex_unlock(&ch->ch_lock);
 }
 
 static void msi_trigger_completion_cb(void *data)
@@ -646,15 +646,12 @@ static int mhi_dev_flush_transfer_completion_events(struct mhi_dev *mhi,
 			break;
 		}
 
-		spin_lock_irqsave(&mhi->lock, flags);
 		if (list_empty(&ch->flush_event_req_buffers)) {
-			spin_unlock_irqrestore(&mhi->lock, flags);
 			break;
 		}
 		flush_ereq = container_of(ch->flush_event_req_buffers.next,
 					struct event_req, list);
 		list_del_init(&flush_ereq->list);
-		spin_unlock_irqrestore(&mhi->lock, flags);
 
 		if (ch->flush_req_cnt++ >= U32_MAX)
 			ch->flush_req_cnt = 0;
@@ -792,7 +789,6 @@ static int mhi_dev_queue_transfer_completion(struct mhi_req *mreq, bool *flush)
 			ch->curr_ereq->context = ch;
 
 			/* Move current event req to flush list*/
-			spin_lock_irqsave(&mhi_ctx->lock, flags);
 			list_add_tail(&ch->curr_ereq->list,
 				&ch->flush_event_req_buffers);
 
@@ -809,7 +805,6 @@ static int mhi_dev_queue_transfer_completion(struct mhi_req *mreq, bool *flush)
						"evt req buffers empty\n");
				ch->curr_ereq = NULL;
			}
			spin_unlock_irqrestore(&mhi_ctx->lock, flags);
		}
		return 0;
	}
@@ -1872,14 +1867,12 @@ static void mhi_dev_process_reset_cmd(struct mhi_dev *mhi, int ch_id)
 	 * reset. Otherwise, those stale events may get flushed along with a
 	 * valid event in the next flush operation.
 	 */
-	spin_lock_irqsave(&mhi_ctx->lock, flags);
 	if (!list_empty(&ch->flush_event_req_buffers)) {
 		list_for_each_entry_safe(itr, tmp, &ch->flush_event_req_buffers, list) {
 			list_del(&itr->list);
 			list_add_tail(&itr->list, &ch->event_req_buffers);
 		}
 	}
-	spin_unlock_irqrestore(&mhi_ctx->lock, flags);
 
 	/* hard stop and set the channel to stop */
 	mhi->ch_ctx_cache[ch_id].ch_state =