Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a7053e57 authored by Ravi Aravamudhan
Browse files

diag: Make fixes to diag-MHI interaction



Diag driver was handling the close of MHI channels asynchronously.
Also, buffers that were mapped to DMA were not unmapped at the right
points. This patch fixes those issues.

Change-Id: I285fc901c730ef9e3a89cc7211289f362f7eb2e4
Signed-off-by: Ravi Aravamudhan <aravamud@codeaurora.org>
parent cdecac51
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -785,9 +785,9 @@ static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
			mhi_info->name,
			DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
			DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
			mhi_info->read_ch.opened,
			atomic_read(&mhi_info->read_ch.opened),
			mhi_info->read_ch.hdl,
			mhi_info->write_ch.opened,
			atomic_read(&mhi_info->write_ch.opened),
			mhi_info->write_ch.hdl,
			work_pending(&mhi_info->read_work),
			work_pending(&mhi_info->read_done_work),
+2 −0
Original line number Diff line number Diff line
@@ -23,6 +23,8 @@
#define DIAG_DEBUG_PERIPHERALS	0x0008
#define DIAG_DEBUG_MASKS	0x0010
#define DIAG_DEBUG_POWER	0x0020
#define DIAG_DEBUG_BRIDGE	0x0040

#define DIAG_DEBUG

#ifdef DIAG_DEBUG
+161 −83
Original line number Diff line number Diff line
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -24,9 +24,11 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/current.h>
#include <linux/atomic.h>
#include "diagmem.h"
#include "diagfwd_bridge.h"
#include "diagfwd_mhi.h"
#include "diag_ipc_logging.h"

#define SET_CH_CTXT(index, type)	(((index & 0xFF) << 8) | (type & 0xFF))
#define GET_INFO_INDEX(val)		((val & 0xFF00) >> 8)
@@ -34,6 +36,8 @@

#define CHANNELS_OPENED			0
#define OPEN_CHANNELS			1
#define CHANNELS_CLOSED			0
#define CLOSE_CHANNELS			1

#define DIAG_MHI_STRING_SZ		11

@@ -49,13 +53,11 @@ struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
		.read_ch = {
			.chan = MHI_CLIENT_DIAG_IN,
			.type = TYPE_MHI_READ_CH,
			.opened = 0,
			.hdl = NULL,
		},
		.write_ch = {
			.chan = MHI_CLIENT_DIAG_OUT,
			.type = TYPE_MHI_WRITE_CH,
			.opened = 0,
			.hdl = NULL,
		}
	},
@@ -70,13 +72,11 @@ struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
		.read_ch = {
			.chan = MHI_CLIENT_DCI_IN,
			.type = TYPE_MHI_READ_CH,
			.opened = 0,
			.hdl = NULL,
		},
		.write_ch = {
			.chan = MHI_CLIENT_DCI_OUT,
			.type = TYPE_MHI_WRITE_CH,
			.opened = 0,
			.hdl = NULL,
		}
	}
@@ -85,12 +85,11 @@ struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
static int mhi_ch_open(struct diag_mhi_ch_t *ch)
{
	int err = 0;
	unsigned long flags;

	if (!ch)
		return -EINVAL;

	if (ch->opened) {
	if (atomic_read(&ch->opened)) {
		pr_debug("diag: In %s, channel is already opened, id: %d\n",
			 __func__, ch->type);
		return 0;
@@ -101,15 +100,14 @@ static int mhi_ch_open(struct diag_mhi_ch_t *ch)
		       __func__, ch->type, err);
		return err;
	}
	spin_lock_irqsave(&ch->lock, flags);
	ch->opened = 1;
	spin_unlock_irqrestore(&ch->lock, flags);

	atomic_set(&ch->opened, 1);
	INIT_LIST_HEAD(&ch->buf_tbl);
	return 0;
}

static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
			   void *buf, int len)
			   void *buf, dma_addr_t dma_addr, int len)
{
	unsigned long flags;
	struct diag_mhi_buf_tbl_t *item;
@@ -142,6 +140,7 @@ static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
	spin_lock_irqsave(&ch->lock, flags);
	item->buf = buf;
	item->len = len;
	item->dma_addr = dma_addr;
	list_add_tail(&item->link, &ch->buf_tbl);
	spin_unlock_irqrestore(&ch->lock, flags);

@@ -199,18 +198,20 @@ static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
	struct diag_mhi_buf_tbl_t *item = NULL;
	struct diag_mhi_ch_t *ch = NULL;

	if (!mhi_info)
	if (!mhi_info || !mhi_info->enabled)
		return;

	/* Clear all the pending reads */
	ch = &mhi_info->read_ch;
	/* At this point, the channel should already be closed */
	if (!ch->opened) {
	if (!(atomic_read(&ch->opened))) {
		spin_lock_irqsave(&ch->lock, flags);
		list_for_each_safe(start, temp, &ch->buf_tbl) {
			item = list_entry(start, struct diag_mhi_buf_tbl_t,
					  link);
			list_del(&item->link);
			dma_unmap_single(NULL, item->dma_addr,
					 DIAG_MDM_BUF_SIZE, DMA_FROM_DEVICE);
			diagmem_free(driver, item->buf, mhi_info->mempool);
			kfree(item);

@@ -221,12 +222,14 @@ static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
	/* Clear all the pending writes */
	ch = &mhi_info->write_ch;
	/* At this point, the channel should already be closed */
	if (!ch->opened) {
	if (!(atomic_read(&ch->opened))) {
		spin_lock_irqsave(&ch->lock, flags);
		list_for_each_safe(start, temp, &ch->buf_tbl) {
			item = list_entry(start, struct diag_mhi_buf_tbl_t,
					  link);
			list_del(&item->link);
			dma_unmap_single(NULL, item->dma_addr, item->len,
					 DMA_TO_DEVICE);
			diag_remote_dev_write_done(mhi_info->dev_id, item->buf,
						   item->len, mhi_info->id);
			kfree(item);
@@ -236,69 +239,97 @@ static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
	}
}

static int mhi_close(int id)
static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag)
{
	struct diag_mhi_info *mhi_info = NULL;
	unsigned long flags;
	if (!mhi_info)
		return -EIO;

	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err("diag: In %s, invalid index %d\n", __func__, id);
		return -EINVAL;
	if (!mhi_info->enabled)
		return -ENODEV;

	if (close_flag == CLOSE_CHANNELS) {
		atomic_set(&(mhi_info->read_ch.opened), 0);
		atomic_set(&(mhi_info->write_ch.opened), 0);
	}

	mhi_info = &diag_mhi[id];
	if (mhi_info->read_ch.opened) {
		spin_lock_irqsave(&mhi_info->read_ch.lock, flags);
		mhi_info->read_ch.opened = 0;
		spin_unlock_irqrestore(&mhi_info->read_ch.lock, flags);
	if (!(atomic_read(&(mhi_info->read_ch.opened)))) {
		flush_workqueue(mhi_info->mhi_wq);
		mhi_close_channel(mhi_info->read_ch.hdl);
	}
	if (mhi_info->write_ch.opened) {
		spin_lock_irqsave(&mhi_info->write_ch.lock, flags);
		mhi_info->write_ch.opened = 0;
		spin_unlock_irqrestore(&mhi_info->write_ch.lock, flags);

	if (!(atomic_read(&(mhi_info->write_ch.opened)))) {
		flush_workqueue(mhi_info->mhi_wq);
		mhi_close_channel(mhi_info->write_ch.hdl);
	}

	mhi_buf_tbl_clear(mhi_info);
	diag_remote_dev_close(mhi_info->dev_id);
	return 0;
}

static int mhi_close(int id)
{
	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err("diag: In %s, invalid index %d\n", __func__, id);
		return -EINVAL;
	}

	if (!diag_mhi[id].enabled)
		return -ENODEV;
	/*
	 * This function is called whenever the channel needs to be closed
	 * explicitly by Diag. Close both the read and write channels (denoted
	 * by CLOSE_CHANNELS flag)
	 */
	return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
}

static void mhi_close_work_fn(struct work_struct *work)
{
	struct diag_mhi_info *mhi_info = container_of(work,
						      struct diag_mhi_info,
						      close_work);
	/*
	 * This is a part of work function which is queued after the channels
	 * are explicitly closed. Do not close channels again (denoted by
	 * CHANNELS_CLOSED flag)
	 */
	if (mhi_info)
		mhi_close(mhi_info->id);
		__mhi_close(mhi_info, CHANNELS_CLOSED);
}

static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
{
	int err = 0;
	int tbl_size = 0;
	unsigned long flags;

	if (!mhi_info)
		return -EIO;

	if (open_flag == OPEN_CHANNELS) {
		if (!mhi_info->read_ch.opened) {
		if (!atomic_read(&mhi_info->read_ch.opened)) {
			err = mhi_ch_open(&mhi_info->read_ch);
			if (err)
				goto fail;
			DIAG_LOG(DIAG_DEBUG_BRIDGE,
				 "opened mhi read channel, port: %d\n",
				 mhi_info->id);
		}
		if (!mhi_info->write_ch.opened) {
		if (!atomic_read(&mhi_info->write_ch.opened)) {
			err = mhi_ch_open(&mhi_info->write_ch);
			if (err)
				goto fail;
			DIAG_LOG(DIAG_DEBUG_BRIDGE,
				 "opened mhi write channel, port: %d\n",
				 mhi_info->id);
		}
	} else if (open_flag == CHANNELS_OPENED) {
		if (!mhi_info->read_ch.opened || !mhi_info->write_ch.opened)
		if (!atomic_read(&(mhi_info->read_ch.opened)) ||
		    !atomic_read(&(mhi_info->write_ch.opened))) {
			return -ENODEV;
		}
	}

	tbl_size = sizeof(void *) * diag_mempools[mhi_info->mempool].poolsize;
	spin_lock_irqsave(&mhi_info->lock, flags);
	mhi_info->enabled = 1;
	spin_unlock_irqrestore(&mhi_info->lock, flags);
@@ -307,6 +338,7 @@ static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
	return 0;

fail:
	pr_err("diag: Failed to open mhi channels, err: %d\n", err);
	mhi_close(mhi_info->id);
	return err;
}
@@ -325,7 +357,11 @@ static int mhi_open(int id)
	 * explicitly by Diag. Open both the read and write channels (denoted by
	 * OPEN_CHANNELS flag)
	 */
	return __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
	__mhi_open(&diag_mhi[id], OPEN_CHANNELS);
	diag_remote_dev_open(diag_mhi[id].dev_id);
	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));

	return 0;
}

static void mhi_open_work_fn(struct work_struct *work)
@@ -338,8 +374,10 @@ static void mhi_open_work_fn(struct work_struct *work)
	 * are explicitly opened. Do not open channels again (denoted by
	 * CHANNELS_OPENED flag)
	 */
	if (mhi_info)
		__mhi_open(mhi_info, CHANNELS_OPENED);
	if (mhi_info) {
		diag_remote_dev_open(mhi_info->dev_id);
		queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
	}
}

static void mhi_read_done_work_fn(struct work_struct *work)
@@ -355,6 +393,8 @@ static void mhi_read_done_work_fn(struct work_struct *work)
		return;

	do {
		if (!(atomic_read(&(mhi_info->read_ch.opened))))
			break;
		err = mhi_poll_inbound(mhi_info->read_ch.hdl, &result);
		if (err) {
			pr_debug("diag: In %s, err %d\n", __func__, err);
@@ -366,8 +406,21 @@ static void mhi_read_done_work_fn(struct work_struct *work)
		dma_unmap_single(NULL, result.payload_buf, result.bytes_xferd,
				 DMA_FROM_DEVICE);
		buf = dma_to_virt(NULL, result.payload_buf);
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "read from mhi port %d buf %p unmapped from %u\n",
			 mhi_info->id, buf, result.payload_buf);
		/*
		 * The read buffers can come after the MHI channels are closed.
		 * If the channels are closed at the time of read, discard the
		 * buffers here and do not forward them to the mux layer.
		 */
		if ((atomic_read(&(mhi_info->read_ch.opened)))) {
			diag_remote_dev_read_done(mhi_info->dev_id, buf,
						  result.bytes_xferd);
		} else {
			mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
					   result.bytes_xferd);
		}
	} while (phy_buf);
}

@@ -376,44 +429,54 @@ static void mhi_read_work_fn(struct work_struct *work)
	int err = 0;
	unsigned char *buf = NULL;
	dma_addr_t dma_addr;
	enum MHI_FLAGS flags = MHI_EOT;
	enum MHI_FLAGS mhi_flags = MHI_EOT;
	struct diag_mhi_ch_t *read_ch = NULL;
	unsigned long flags;
	struct diag_mhi_info *mhi_info = container_of(work,
						      struct diag_mhi_info,
						      read_work);
	if (!mhi_info || !mhi_info->read_ch.opened)
	if (!mhi_info)
		return;

	read_ch = &mhi_info->read_ch;
	if (!read_ch->opened)
		return;
	do {
		if (!(atomic_read(&(read_ch->opened))))
			break;

	buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, mhi_info->mempool);
		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
				    mhi_info->mempool);
		if (!buf)
		return;
			break;

	err = mhi_buf_tbl_add(mhi_info, TYPE_MHI_READ_CH, buf,
		dma_addr = dma_map_single(NULL, buf, DIAG_MDM_BUF_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_addr))
			panic("ASSERT");

		err = mhi_buf_tbl_add(mhi_info, TYPE_MHI_READ_CH, buf, dma_addr,
				      DIAG_MDM_BUF_SIZE);
		if (err)
			goto fail;

	dma_addr = dma_map_single(NULL, buf, DIAG_MDM_BUF_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(NULL, dma_addr))
		goto fail;

	err = mhi_queue_xfer(read_ch->hdl, dma_addr, DIAG_MDM_BUF_SIZE, flags);
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "queueing a read buf %p mapped to 0x%x, ch: %s\n",
			 buf, dma_addr, mhi_info->name);
		spin_lock_irqsave(&read_ch->lock, flags);
		err = mhi_queue_xfer(read_ch->hdl, dma_addr, DIAG_MDM_BUF_SIZE,
				     mhi_flags);
		spin_unlock_irqrestore(&read_ch->lock, flags);
		if (err) {
			pr_err_ratelimited("diag: Unable to read from MHI channel %s, err: %d\n",
					   mhi_info->name, err);
		dma_unmap_single(NULL, dma_addr, DIAG_MDM_BUF_SIZE,
				 DMA_TO_DEVICE);
		buf = dma_to_virt(NULL, dma_addr);
			goto fail;
		}
	} while (buf);

	queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
	return;
fail:
	dma_unmap_single(NULL, dma_addr, DIAG_MDM_BUF_SIZE,
			 DMA_FROM_DEVICE);
	buf = dma_to_virt(NULL, dma_addr);
	mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf, DIAG_MDM_BUF_SIZE);
	queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
}
@@ -432,7 +495,8 @@ static int mhi_queue_read(int id)
static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	enum MHI_FLAGS flags = MHI_EOT;
	enum MHI_FLAGS mhi_flags = MHI_EOT;
	unsigned long flags;
	dma_addr_t dma_addr = 0;
	struct diag_mhi_ch_t *ch = NULL;

@@ -455,31 +519,35 @@ static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
	}

	ch = &diag_mhi[id].write_ch;
	if (!ch->opened) {
	if (!(atomic_read(&(ch->opened)))) {
		pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
				   __func__, diag_mhi[id].name);
		return -EIO;
	}

	err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
	if (err)
		return err;

	dma_addr = dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(NULL, dma_addr)) {
		mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
		return -ENOMEM;
	}
	if (dma_mapping_error(NULL, dma_addr))
		panic("ASSERT");

	err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, dma_addr,
			      len);
	if (err)
		goto fail;

	err = mhi_queue_xfer(ch->hdl, dma_addr, len, flags);
	DIAG_LOG(DIAG_DEBUG_BRIDGE, "buf %p mapped to %u\n", buf, dma_addr);
	spin_lock_irqsave(&ch->lock, flags);
	err = mhi_queue_xfer(ch->hdl, dma_addr, len, mhi_flags);
	spin_unlock_irqrestore(&ch->lock, flags);
	if (err) {
		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %p, len %d, err: %d\n",
				   __func__, diag_mhi[id].name, len, err);
		dma_unmap_single(NULL, (dma_addr_t)dma_addr, len,
				 DMA_TO_DEVICE);
		mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
		goto fail;
	}

	return 0;
fail:
	dma_unmap_single(NULL, dma_addr, len, DMA_TO_DEVICE);
	return err;
}

@@ -540,6 +608,9 @@ static void mhi_notifier(struct mhi_cb_info *cb_info)

	switch (cb_info->cb_reason) {
	case MHI_CB_MHI_ENABLED:
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "received mhi enabled notification port: %d ch: %d\n",
			 index, ch->type);
		err = mhi_ch_open(ch);
		if (err)
			break;
@@ -550,17 +621,17 @@ static void mhi_notifier(struct mhi_cb_info *cb_info)
				       __func__, diag_mhi[index].num_read);
				break;
			}
			diagmem_init(driver, diag_mhi[index].mempool);
		}
		__mhi_open(&diag_mhi[index], CHANNELS_OPENED);
		queue_work(diag_mhi[index].mhi_wq,
			   &(diag_mhi[index].open_work));
		break;
	case MHI_CB_MHI_DISABLED:
		ch->opened = 0;
		if (ch->type == TYPE_MHI_READ_CH)
			diagmem_exit(driver, diag_mhi[index].mempool);
		queue_work(diag_mhi[index].mhi_wq,
			   &(diag_mhi[index].close_work));
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "received mhi disabled notification port: %d ch: %d\n",
			 index, ch->type);
		atomic_set(&(ch->opened), 0);
		__mhi_close(&diag_mhi[index], CHANNELS_CLOSED);
		break;
	case MHI_CB_XFER:
		/*
@@ -569,6 +640,9 @@ static void mhi_notifier(struct mhi_cb_info *cb_info)
		 * a write channel.
		 */
		if (type == TYPE_MHI_READ_CH) {
			if (!atomic_read(&(diag_mhi[index].read_ch.opened)))
				break;

			queue_work(diag_mhi[index].mhi_wq,
				   &(diag_mhi[index].read_done_work));
			break;
@@ -612,6 +686,7 @@ static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
	if (id < 0 || id >= NUM_MHI_DEV)
		return -EINVAL;
	spin_lock_init(&ch->lock);
	atomic_set(&(ch->opened), 0);
	ctxt = SET_CH_CTXT(id, ch->type);
	ch->client_info.mhi_client_cb = mhi_notifier;
	return mhi_register_channel(&ch->hdl, ch->chan, 0, &ch->client_info,
@@ -634,6 +709,7 @@ int diag_mhi_init()
		INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
		strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ);
		strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name));
		diagmem_init(driver, mhi_info->mempool);
		mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
		if (!mhi_info->mhi_wq)
			goto fail;
@@ -656,6 +732,7 @@ int diag_mhi_init()
			       i, err);
			goto fail;
		}
		DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initialized\n", i);
	}

	return 0;
@@ -674,6 +751,7 @@ void diag_mhi_exit()
		if (mhi_info->mhi_wq)
			destroy_workqueue(mhi_info->mhi_wq);
		mhi_close(mhi_info->id);
		diagmem_exit(driver, mhi_info->mempool);
	}
}
+4 −3
Original line number Diff line number Diff line
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -45,16 +45,17 @@
#define DIAG_MHI_NAME_SZ	24

struct diag_mhi_buf_tbl_t {
	struct list_head link;
	unsigned char *buf;
	dma_addr_t dma_addr;
	int len;
	struct list_head link;
};

struct diag_mhi_ch_t {
	uint8_t type;
	u32 channel;
	enum MHI_CLIENT_CHANNEL chan;
	uint8_t opened;
	atomic_t opened;
	spinlock_t lock;
	struct mhi_client_info_t client_info;
	struct mhi_client_handle *hdl;