
Commit cdbb7ba5 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "msm: mhi_dev: Add asynchronous read/write support in UCI driver"

parents 44a16435 6c694d32
+45 −35
@@ -1889,6 +1889,7 @@ int mhi_dev_open_channel(uint32_t chan_id,
			(struct mhi_dev_client_cb_reason *cb))
{
	int rc = 0;
	int i = 0;
	struct mhi_dev_channel *ch;
	struct platform_device *pdev;

@@ -1912,6 +1913,38 @@ int mhi_dev_open_channel(uint32_t chan_id,
		goto exit;
	}

	/* Pre allocate event requests */
	ch->ereqs = kcalloc(MHI_MAX_EVT_REQ, sizeof(*ch->ereqs), GFP_KERNEL);
	if (!ch->ereqs) {
		rc = -ENOMEM;
		goto free_client;
	}
	/* pre allocate buffers to queue transfer completion events */
	ch->tr_events = kcalloc(MHI_MAX_EVT_REQ,
				MAX_TR_EVENTS * sizeof(*ch->tr_events),
				GFP_KERNEL);
	if (!ch->tr_events) {
		rc = -ENOMEM;
		goto free_ereqs;
	}

	/*
	 * Organize the above allocated event request block and
	 * completion event block into linked lists. Each event
	 * request includes a pointer to a block of MAX_TR_EVENTS
	 * completion events.
	 */
	INIT_LIST_HEAD(&mhi_ctx->ch[chan_id].event_req_buffers);
	for (i = 0; i < MHI_MAX_EVT_REQ; ++i) {
		ch->ereqs[i].tr_events = ch->tr_events + i * MAX_TR_EVENTS;
		list_add_tail(&ch->ereqs[i].list,
				&mhi_ctx->ch[chan_id].event_req_buffers);
	}
	mhi_ctx->ch[chan_id].curr_ereq =
		container_of(mhi_ctx->ch[chan_id].event_req_buffers.next,
				struct event_req, list);
	list_del_init(&mhi_ctx->ch[chan_id].curr_ereq->list);

	ch->active_client = (*handle_client);
	(*handle_client)->channel = ch;
	(*handle_client)->event_trigger = mhi_dev_client_cb_reason;
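
The block above trades the per-request kzalloc() pairs (removed from mhi_init() further down) for two bulk allocations that are carved into slices and threaded onto a free list. A minimal sketch of the same pattern, with illustrative names standing in for event_req and mhi_dev_ring_element_type:

struct demo_req {
	struct list_head list;
	void *tr_events;		/* this request's slice of the shared block */
};

static int demo_pool_init(struct list_head *free_list, struct demo_req **reqs,
		void **events, int nreq, size_t slice_sz)
{
	int i;

	*reqs = kcalloc(nreq, sizeof(**reqs), GFP_KERNEL);
	if (!*reqs)
		return -ENOMEM;
	*events = kcalloc(nreq, slice_sz, GFP_KERNEL);
	if (!*events) {
		kfree(*reqs);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(free_list);
	for (i = 0; i < nreq; i++) {
		/* point each request at its own slice of the event block */
		(*reqs)[i].tr_events = (char *)*events + i * slice_sz;
		list_add_tail(&(*reqs)[i].list, free_list);
	}
	return 0;
}

A failed allocation unwinds with a single kfree(), and teardown needs no list walk, which is exactly what the new close path relies on.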
@@ -1924,6 +1957,13 @@ int mhi_dev_open_channel(uint32_t chan_id,
	else if (ch->state == MHI_DEV_CH_STOPPED)
		ch->state = MHI_DEV_CH_PENDING_START;

	goto exit;

free_ereqs:
	kfree(ch->ereqs);
	ch->ereqs = NULL;
free_client:
	kfree(*handle_client);
exit:
	mutex_unlock(&ch->ch_lock);
	return rc;
@@ -1957,14 +1997,12 @@ int mhi_dev_close_channel(struct mhi_dev_client *handle)
			mhi_log(MHI_MSG_ERROR,
				"Trying to close an active channel (%d)\n",
				ch->ch_id);
-			mutex_unlock(&ch->ch_lock);
			rc = -EAGAIN;
			goto exit;
		} else if (ch->tre_loc) {
			mhi_log(MHI_MSG_ERROR,
				"Trying to close channel (%d) when a TRE is active",
				ch->ch_id);
-			mutex_unlock(&ch->ch_lock);
			rc = -EAGAIN;
			goto exit;
		}
@@ -1972,6 +2010,10 @@ int mhi_dev_close_channel(struct mhi_dev_client *handle)

	ch->state = MHI_DEV_CH_CLOSED;
	ch->active_client = NULL;
	kfree(ch->ereqs);
	kfree(ch->tr_events);
	ch->ereqs = NULL;
	ch->tr_events = NULL;
	kfree(handle);
exit:
	mutex_unlock(&ch->ch_lock);
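
Clearing the pointers after freeing keeps this teardown safe to reach more than once, since kfree(NULL) is a no-op. The same idiom as a stand-alone helper (illustrative, not in the driver):

static void demo_free_ch_bufs(struct mhi_dev_channel *ch)
{
	kfree(ch->ereqs);		/* kfree(NULL) is a harmless no-op */
	kfree(ch->tr_events);
	ch->ereqs = NULL;		/* a second pass cannot double free */
	ch->tr_events = NULL;
}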
@@ -2678,40 +2720,8 @@ static int mhi_init(struct mhi_dev *mhi)
	if (!mhi->ch)
		return -ENOMEM;


-	for (i = 0; i < mhi->cfg.channels; i++) {
	for (i = 0; i < mhi->cfg.channels; i++)
		mutex_init(&mhi->ch[i].ch_lock);
-		if (i == MHI_CLIENT_IP_SW_4_OUT || i == MHI_CLIENT_IP_SW_4_IN) {
-			int nreq = 0;
-
-			INIT_LIST_HEAD(&mhi->ch[i].event_req_buffers);
-			while (nreq < MHI_MAX_EVT_REQ) {
-				struct event_req *ereq;
-				/* Pre allocate event requests */
-				ereq = kzalloc(sizeof(struct event_req),
-						GFP_KERNEL);
-				if (!ereq)
-					return -ENOMEM;
-
-				/* pre allocate buffers to queue
-				 * transfer completion events
-				 */
-				ereq->tr_events = kzalloc(RING_ELEMENT_TYPE_SZ*
-						MAX_TR_EVENTS, GFP_KERNEL);
-				if (!ereq->tr_events) {
-					kfree(ereq);
-					return -ENOMEM;
-				}
-				list_add_tail(&ereq->list,
-						&mhi->ch[i].event_req_buffers);
-				nreq++;
-			}
-			mhi->ch[i].curr_ereq =
-				container_of(mhi->ch[i].event_req_buffers.next,
-						struct event_req, list);
-			list_del_init(&mhi->ch[i].curr_ereq->list);
-		}
-	}

	spin_lock_init(&mhi->lock);
	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
+7 −1
@@ -484,7 +484,13 @@ struct mhi_dev_channel {
	struct mutex			ch_lock;
	/* client which the current inbound/outbound message is for */
	struct mhi_dev_client		*active_client;

	/*
	 * Pointer to event request structs used to temporarily store
	 * completion events and meta data before sending them to host
	 */
	struct event_req		*ereqs;
	/* Pointer to completion event buffers */
	union mhi_dev_ring_element_type *tr_events;
	struct list_head		event_req_buffers;
	struct event_req		*curr_ereq;

+270 −69
@@ -35,10 +35,14 @@
#define MHI_SOFTWARE_CLIENT_LIMIT	(MHI_MAX_SOFTWARE_CHANNELS/2)
#define MHI_UCI_IPC_LOG_PAGES		(100)

/* Max number of MHI write request structures (used in async writes) */
#define MAX_UCI_WR_REQ			10
#define MAX_NR_TRBS_PER_CHAN		9
#define MHI_QTI_IFACE_ID		4
#define DEVICE_NAME			"mhi"

#define MHI_UCI_ASYNC_READ_TIMEOUT	msecs_to_jiffies(100)

enum uci_dbg_level {
	UCI_DBG_VERBOSE = 0x0,
	UCI_DBG_INFO = 0x1,
@@ -227,6 +231,13 @@ struct uci_client {
	struct mhi_uci_ctxt_t *uci_ctxt;
	struct mutex in_chan_lock;
	struct mutex out_chan_lock;
	spinlock_t wr_req_lock;
	unsigned int f_flags;
	struct mhi_req *wreqs;
	struct list_head wr_req_list;
	struct completion read_done;
	int (*send)(struct uci_client*, void*, u32);
	int (*read)(struct uci_client*, struct mhi_req*, int*);
};
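
The new send and read members give every client a strategy bound once at open time, so the data path never re-tests O_SYNC. A hedged sketch of the binding that open_client_mhi_channels() performs further down (the real code also allocates the write-request pool in the async case):

static void demo_bind_ops(struct uci_client *c)
{
	if (c->f_flags & O_SYNC) {
		c->send = mhi_uci_send_sync;	/* blocking, no request pool */
		c->read = mhi_uci_read_sync;
	} else {
		c->send = mhi_uci_send_async;	/* draws on the wr_req pool */
		c->read = mhi_uci_read_async;
	}
}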

struct mhi_uci_ctxt_t {
@@ -301,8 +312,11 @@ static int mhi_init_read_chan(struct uci_client *client_handle,
				client_handle->in_chan);
		return -EINVAL;
	}
-	buf_size = in_chan_attr->max_packet_size;

	/* Init the completion event for read */
	init_completion(&client_handle->read_done);

	buf_size = in_chan_attr->max_packet_size;
	for (i = 0; i < (in_chan_attr->nr_trbs); i++) {
		data_loc = kmalloc(buf_size, GFP_KERNEL);
		if (!data_loc) {
@@ -322,48 +336,129 @@ static int mhi_init_read_chan(struct uci_client *client_handle,
	return rc;
}

-static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf,
-		u32 size, u32 is_uspace_buf)
static void mhi_uci_write_completion_cb(void *req)
{
	struct mhi_req *ureq = req;
	struct uci_client *uci_handle;
	unsigned long flags;

	uci_handle = (struct uci_client *)ureq->context;
	kfree(ureq->buf);
	ureq->buf = NULL;

	spin_lock_irqsave(&uci_handle->wr_req_lock, flags);
	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
	spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags);
}

static void mhi_uci_read_completion_cb(void *req)
{
-	void *data_loc = NULL;
-	uintptr_t memcpy_result = 0;
-	u32 data_inserted_so_far = 0;
	struct mhi_req *ureq = req;
	struct uci_client *uci_handle;

	uci_handle = (struct uci_client *)ureq->context;
	complete(&uci_handle->read_done);
}
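
The read callback's only job is to wake the waiter. The handshake is the standard completion pattern: init_completion() once in mhi_init_read_chan(), reinit_completion() before each submission, complete() from the callback, and a timed interruptible wait on the reader side. A condensed sketch using the 100 ms timeout defined above:

static int demo_wait_read(struct completion *done)
{
	long ret;

	reinit_completion(done);	/* arm before submitting the read */
	/* ... submit the asynchronous read request here ... */
	ret = wait_for_completion_interruptible_timeout(done,
			MHI_UCI_ASYNC_READ_TIMEOUT);
	if (ret == -ERESTARTSYS)	/* interrupted by a signal */
		return ret;
	if (ret == 0)			/* timed out */
		return -EIO;
	return 0;			/* > 0 means completed with time to spare */
}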

static int mhi_uci_send_sync(struct uci_client *uci_handle,
			void *data_loc, u32 size)
{
	struct mhi_req ureq;
	int ret_val;

	ureq.client = uci_handle->out_handle;
	ureq.buf = data_loc;
	ureq.len = size;
	ureq.chan = uci_handle->out_chan;
	ureq.mode = IPA_DMA_SYNC;

	ret_val = mhi_dev_write_channel(&ureq);

-	uci_handle = container_of(client_handle, struct uci_client,
-					out_handle);
	kfree(data_loc);
	return ret_val;
}

static int mhi_uci_send_async(struct uci_client *uci_handle,
			void *data_loc, u32 size)
{
	int bytes_to_write;
	struct mhi_req *ureq;

	uci_log(UCI_DBG_VERBOSE,
		"Got async write for ch %d of size %d\n",
		uci_handle->out_chan, size);

	spin_lock_irq(&uci_handle->wr_req_lock);
	if (list_empty(&uci_handle->wr_req_list)) {
		uci_log(UCI_DBG_ERROR, "Write request pool empty\n");
		spin_unlock_irq(&uci_handle->wr_req_lock);
		return -ENOMEM;
	}
	ureq = container_of(uci_handle->wr_req_list.next,
						struct mhi_req, list);
	list_del_init(&ureq->list);
	spin_unlock_irq(&uci_handle->wr_req_lock);

	ureq->client = uci_handle->out_handle;
	ureq->context = uci_handle;
	ureq->buf = data_loc;
	ureq->len = size;
	ureq->chan = uci_handle->out_chan;
	ureq->mode = IPA_DMA_ASYNC;
	ureq->client_cb = mhi_uci_write_completion_cb;
	ureq->snd_cmpl = 1;

	bytes_to_write = mhi_dev_write_channel(ureq);
	if (bytes_to_write != size)
		goto error_async_transfer;

	return bytes_to_write;

error_async_transfer:
	kfree(data_loc);
	ureq->buf = NULL;
	spin_lock_irq(&uci_handle->wr_req_lock);
	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
	spin_unlock_irq(&uci_handle->wr_req_lock);

-	if (!client_handle || !buf ||
-		!size || !uci_handle)
	return bytes_to_write;
}
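
The pool discipline above, restated as helpers (illustrative; the driver open-codes both halves): a request is borrowed under the spinlock, and exactly one of the completion callback or the submit-failure path returns it.

static struct mhi_req *demo_wr_req_get(struct uci_client *c)
{
	struct mhi_req *r = NULL;

	spin_lock_irq(&c->wr_req_lock);
	if (!list_empty(&c->wr_req_list)) {
		r = list_first_entry(&c->wr_req_list, struct mhi_req, list);
		list_del_init(&r->list);
	}
	spin_unlock_irq(&c->wr_req_lock);
	return r;
}

static void demo_wr_req_put(struct uci_client *c, struct mhi_req *r)
{
	unsigned long flags;

	/* irqsave form: the completion callback may run in atomic context */
	spin_lock_irqsave(&c->wr_req_lock, flags);
	list_add_tail(&r->list, &c->wr_req_list);
	spin_unlock_irqrestore(&c->wr_req_lock, flags);
}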

static int mhi_uci_send_packet(struct mhi_dev_client **client_handle,
		const char __user *buf, u32 size)
{
	void *data_loc;
	unsigned long memcpy_result;
	struct uci_client *uci_handle;

	if (!client_handle || !buf || !size)
		return -EINVAL;

-	if (is_uspace_buf) {
	if (size > TRB_MAX_DATA_SIZE) {
		uci_log(UCI_DBG_ERROR,
			"Too big write size: %d, max supported size is %d\n",
			size, TRB_MAX_DATA_SIZE);
		return -EFBIG;
	}

	uci_handle = container_of(client_handle, struct uci_client,
					out_handle);
	data_loc = kmalloc(size, GFP_KERNEL);
	if (!data_loc) {
		uci_log(UCI_DBG_ERROR,
-				"Failed to allocate memory 0x%x\n",
		"Failed to allocate kernel buf for user requested size 0x%x\n",
			size);
		return -ENOMEM;
	}
	memcpy_result = copy_from_user(data_loc, buf, size);
	if (memcpy_result)
		goto error_memcpy;
-	} else {
-		data_loc = buf;
-	}
-	ureq.client = *client_handle;
-	ureq.buf = data_loc;
-	ureq.len = size;
-	ureq.chan = uci_handle->out_chan;
-	ureq.mode = IPA_DMA_SYNC;
-
-	data_inserted_so_far = mhi_dev_write_channel(&ureq);
	return uci_handle->send(uci_handle, data_loc, size);

error_memcpy:
	kfree(data_loc);
-	return data_inserted_so_far;
	return -EFAULT;
}

static unsigned int mhi_uci_ctrl_poll(struct file *file, poll_table *wait)
@@ -421,6 +516,119 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
	return mask;
}

static int mhi_uci_alloc_write_reqs(struct uci_client *client)
{
	int i;

	client->wreqs = kcalloc(MAX_UCI_WR_REQ,
				sizeof(struct mhi_req),
				GFP_KERNEL);
	if (!client->wreqs) {
		uci_log(UCI_DBG_ERROR, "Write reqs alloc failed\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&client->wr_req_list);
	for (i = 0; i < MAX_UCI_WR_REQ; ++i)
		list_add_tail(&client->wreqs[i].list, &client->wr_req_list);

	uci_log(UCI_DBG_INFO,
		"UCI write reqs allocation successful\n");
	return 0;
}
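
Because each request is an element of the single wreqs array, nothing on the list is freed individually; mhi_uci_client_release() later drops the whole pool with one kfree(). A sketch of that teardown, assuming no writes are still in flight when it runs:

static void demo_free_write_reqs(struct uci_client *client)
{
	/* only valid once all asynchronous writes have completed */
	INIT_LIST_HEAD(&client->wr_req_list);	/* drop stale links */
	kfree(client->wreqs);
	client->wreqs = NULL;
}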

static int mhi_uci_read_async(struct uci_client *uci_handle,
			struct mhi_req *ureq, int *bytes_avail)
{
	int ret_val = 0;
	unsigned long compl_ret;

	uci_log(UCI_DBG_ERROR,
		"Async read for ch %d\n", uci_handle->in_chan);

	ureq->mode = IPA_DMA_ASYNC;
	ureq->client_cb = mhi_uci_read_completion_cb;
	ureq->snd_cmpl = 1;
	ureq->context = uci_handle;

	reinit_completion(&uci_handle->read_done);

	*bytes_avail = mhi_dev_read_channel(ureq);
	uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n",
		ureq->len, *bytes_avail);
	if (*bytes_avail < 0) {
		uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
			*bytes_avail);
		return -EIO;
	}

	if (*bytes_avail > 0) {
		uci_log(UCI_DBG_VERBOSE,
			"Waiting for async read completion!\n");
		compl_ret =
			wait_for_completion_interruptible_timeout(
			&uci_handle->read_done,
			MHI_UCI_ASYNC_READ_TIMEOUT);

		if (compl_ret == -ERESTARTSYS) {
			uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
			return compl_ret;
		} else if (compl_ret == 0) {
			uci_log(UCI_DBG_ERROR, "Read timed out for ch %d\n",
				uci_handle->in_chan);
			return -EIO;
		}
		uci_log(UCI_DBG_VERBOSE,
			"wk up Read completed on ch %d\n", ureq->chan);

		uci_handle->pkt_loc = (void *)ureq->buf;
		uci_handle->pkt_size = ureq->actual_len;

		uci_log(UCI_DBG_VERBOSE,
			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
			uci_handle->pkt_size,
			ureq->buf, ureq->chan);
	} else {
		uci_handle->pkt_loc = NULL;
		uci_handle->pkt_size = 0;
	}

	return ret_val;
}
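
Both read variants hand the packet back through pkt_loc and pkt_size instead of copying it out themselves. A hedged sketch of how the read() path then drains the cached packet via copy_to_user() (the driver tracks the offset in addr_offset):

static ssize_t demo_drain_pkt(struct uci_client *c, char __user *ubuf,
		size_t len, u32 *off)
{
	size_t n = min_t(size_t, len, c->pkt_size - *off);

	if (copy_to_user(ubuf, (char *)c->pkt_loc + *off, n))
		return -EFAULT;
	*off += n;
	if (*off == c->pkt_size) {	/* packet fully consumed */
		c->pkt_loc = NULL;
		c->pkt_size = 0;
	}
	return n;
}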

static int mhi_uci_read_sync(struct uci_client *uci_handle,
			struct mhi_req *ureq, int *bytes_avail)
{
	int ret_val = 0;

	ureq->mode = IPA_DMA_SYNC;
	*bytes_avail = mhi_dev_read_channel(ureq);

	uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n",
		ureq->len, *bytes_avail);

	if (*bytes_avail < 0) {
		uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
			*bytes_avail);
		return -EIO;
	}

	if (*bytes_avail > 0) {
		uci_handle->pkt_loc = (void *)ureq->buf;
		uci_handle->pkt_size = ureq->actual_len;

		uci_log(UCI_DBG_VERBOSE,
			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
			uci_handle->pkt_size,
			ureq->buf, ureq->chan);
	} else {
		uci_handle->pkt_loc = NULL;
		uci_handle->pkt_size = 0;
	}

	return ret_val;
}

static int open_client_mhi_channels(struct uci_client *uci_client)
{
	int rc = 0;
@@ -431,16 +639,27 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
			uci_client->in_chan);
	mutex_lock(&uci_client->out_chan_lock);
	mutex_lock(&uci_client->in_chan_lock);

	/* Allocate write requests for async operations */
	if (!(uci_client->f_flags & O_SYNC)) {
		rc = mhi_uci_alloc_write_reqs(uci_client);
		if (rc)
			goto handle_not_rdy_err;
		uci_client->send = mhi_uci_send_async;
		uci_client->read = mhi_uci_read_async;
	} else {
		uci_client->send = mhi_uci_send_sync;
		uci_client->read = mhi_uci_read_sync;
	}

	uci_log(UCI_DBG_DBG,
			"Initializing inbound chan %d.\n",
			uci_client->in_chan);

	rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
-	if (rc < 0) {
	if (rc < 0)
		uci_log(UCI_DBG_ERROR,
			"Failed to init inbound 0x%x, ret 0x%x\n",
			uci_client->in_chan, rc);
-	}

	rc = mhi_dev_open_channel(uci_client->out_chan,
			&uci_client->out_handle,
@@ -451,7 +670,6 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
	rc = mhi_dev_open_channel(uci_client->in_chan,
			&uci_client->in_handle,
			uci_ctxt.event_notifier);

	if (rc < 0) {
		uci_log(UCI_DBG_ERROR,
			"Failed to open chan %d, ret 0x%x\n",
@@ -506,6 +724,7 @@ static int mhi_uci_client_open(struct inode *mhi_inode,
			return -ENOMEM;
		}
		uci_handle->uci_ctxt = &uci_ctxt;
		uci_handle->f_flags = file_handle->f_flags;
		if (!atomic_read(&uci_handle->mhi_chans_open)) {
			uci_log(UCI_DBG_INFO,
				"Opening channels client %d\n",
@@ -540,6 +759,8 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
		if (atomic_read(&uci_handle->mhi_chans_open)) {
			atomic_set(&uci_handle->mhi_chans_open, 0);

			if (!(uci_handle->f_flags & O_SYNC))
				kfree(uci_handle->wreqs);
			mutex_lock(&uci_handle->out_chan_lock);
			rc = mhi_dev_close_channel(uci_handle->out_handle);
			wake_up(&uci_handle->write_wq);
@@ -675,7 +896,6 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
	struct mutex *mutex;
	ssize_t bytes_copied = 0;
	u32 addr_offset = 0;
-	void *local_buf = NULL;
	struct mhi_req ureq;

	if (!file || !ubuf || !uspace_buf_size ||
@@ -691,44 +911,19 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
	ureq.client = client_handle;
	ureq.buf = uci_handle->in_buf_list[0].addr;
	ureq.len = uci_handle->in_buf_list[0].buf_size;
-	ureq.mode = IPA_DMA_SYNC;

	uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n",
			ureq.chan);
	do {
		if (!uci_handle->pkt_loc &&
			!atomic_read(&uci_ctxt.mhi_disabled)) {

-			bytes_avail = mhi_dev_read_channel(&ureq);
-
-			uci_log(UCI_DBG_VERBOSE,
-				"reading from mhi_core local_buf = %p",
-				local_buf);
-			uci_log(UCI_DBG_VERBOSE,
-					"buf_size = 0x%x bytes_read = 0x%x\n",
-					 ureq.len, bytes_avail);
-
-			if (bytes_avail < 0) {
-				uci_log(UCI_DBG_ERROR,
-				"Failed to read channel ret %d\n",
-					bytes_avail);
-				ret_val =  -EIO;
			ret_val = uci_handle->read(uci_handle, &ureq,
							&bytes_avail);
			if (ret_val)
				goto error;
-			}

-			if (bytes_avail > 0) {
-				uci_handle->pkt_loc = (void *) ureq.buf;
-				uci_handle->pkt_size = ureq.actual_len;
-
			if (bytes_avail > 0)
				*bytes_pending = (loff_t)uci_handle->pkt_size;
-				uci_log(UCI_DBG_VERBOSE,
-					"Got pkt of sz 0x%x at adr %p, ch %d\n",
-					uci_handle->pkt_size,
-					ureq.buf, ureq.chan);
-			} else {
-				uci_handle->pkt_loc = 0;
-				uci_handle->pkt_size = 0;
-			}
		}
		if (bytes_avail == 0) {

@@ -737,7 +932,10 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
				"No data read_data_ready %d, chan %d\n",
				atomic_read(&uci_handle->read_data_ready),
				ureq.chan);

			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) {
				ret_val = -EAGAIN;
				goto error;
			}
			ret_val = wait_event_interruptible(uci_handle->read_wq,
				(!mhi_dev_channel_isempty(client_handle)));

@@ -841,10 +1039,10 @@ static ssize_t mhi_uci_client_write(struct file *file,
	mutex_lock(&uci_handle->out_chan_lock);
	while (!ret_val) {
		ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
-				(void *)buf, count, 1);
						buf, count);
		if (ret_val < 0) {
			uci_log(UCI_DBG_ERROR,
-				"Error while writing data to MHI, chan %d, buf %p, size %d\n",
				"Error while writing data to MHI, chan %d, buf %pK, size %d\n",
				chan, (void *)buf, count);
			ret_val = -EIO;
			break;
@@ -854,6 +1052,8 @@ static ssize_t mhi_uci_client_write(struct file *file,
				"No descriptors available, did we poll, chan %d?\n",
				chan);
			mutex_unlock(&uci_handle->out_chan_lock);
			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
				return -EAGAIN;
			ret_val = wait_event_interruptible(uci_handle->write_wq,
				!mhi_dev_channel_isempty(
					uci_handle->out_handle));
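
With these two checks in place, a client that opens the node with O_NONBLOCK gets -EAGAIN instead of sleeping on either the read or the write path. Userspace sketch (the device node name is hypothetical and platform dependent):

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int demo_nonblock_read(char *buf, size_t len)
{
	int fd = open("/dev/mhi_uci_demo", O_RDONLY | O_NONBLOCK);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	ssize_t n;

	if (fd < 0)
		return -1;
	/* -EAGAIN from the driver surfaces as errno == EAGAIN here */
	while ((n = read(fd, buf, len)) < 0 && errno == EAGAIN)
		poll(&pfd, 1, -1);	/* block until data is ready */
	close(fd);
	return n < 0 ? -1 : (int)n;
}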
@@ -945,6 +1145,7 @@ static int mhi_register_client(struct uci_client *mhi_client, int index)

	mutex_init(&mhi_client->in_chan_lock);
	mutex_init(&mhi_client->out_chan_lock);
	spin_lock_init(&mhi_client->wr_req_lock);

	uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
	return 0;