Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a6f60958 authored by Hemant Kumar
Browse files

mhi: uci: Add channel mutex lock for read and write file ops



In the case of two readers trying to read a buffer, one reader acquires the spin
lock and populates the uci_chan cur_buf and rx_size along with a local uci_buf.
After the first reader releases the spin lock, the other reader populates its
local uci_buf with the same cur_buf used by the first reader. The second reader
queues the buffer to a TRE and releases the spinlock. Now the first reader gets
the spin lock and ends up copying 0 bytes to the user space buffer, as rx_size
becomes zero once the second reader queues the buffer to the transfer
ring. The first reader then queues the same buffer to a different TRE.
When dl xfer completion comes for both TREs, the same buffer gets freed
twice, causing a double free. In the case of a write, it is possible that
two writers can race when only one TRE is available to transfer the
request, although this does not result in any invalid access issue as in
the case of a read. Hence, acquire the channel mutex lock for both read and write
file operations to make sure reads and writes are done atomically.

Change-Id: I63bccedb28e1bfd59801ccdf642e4f70eb81021d
Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
parent 8543fd31
Loading
Loading
Loading
Loading
+28 −8
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@ struct uci_chan {
	struct list_head pending; /* user space waiting to read */
	struct uci_buf *cur_buf; /* current buffer user space reading */
	size_t rx_size;
	struct mutex chan_lock;
};

struct uci_buf {
@@ -205,6 +206,8 @@ static int mhi_uci_release(struct inode *inode, struct file *file)
			MSG_LOG("Node is deleted, freeing dev node\n");
			mutex_unlock(&uci_dev->mutex);
			mutex_destroy(&uci_dev->mutex);
			mutex_destroy(&uci_dev->dl_chan.chan_lock);
			mutex_destroy(&uci_dev->ul_chan.chan_lock);
			clear_bit(MINOR(uci_dev->devt), uci_minors);
			kfree(uci_dev);
			return 0;
@@ -274,11 +277,14 @@ static ssize_t mhi_uci_write(struct file *file,
	if (!buf || !count)
		return -EINVAL;

	mutex_lock(&uci_chan->chan_lock);

	/* confirm channel is active */
	spin_lock_bh(&uci_chan->lock);
	if (!uci_dev->enabled) {
		spin_unlock_bh(&uci_chan->lock);
		return -ERESTARTSYS;
		ret = -ERESTARTSYS;
		goto err_mtx_unlock;
	}

	MSG_VERB("Enter: to xfer:%lu bytes\n", count);
@@ -298,20 +304,22 @@ static ssize_t mhi_uci_write(struct file *file,

		if (ret == -ERESTARTSYS || !uci_dev->enabled) {
			MSG_LOG("Exit signal caught for node or not enabled\n");
			return -ERESTARTSYS;
			ret = -ERESTARTSYS;
			goto err_mtx_unlock;
		}

		xfer_size = min_t(size_t, count, uci_dev->mtu);
		kbuf = kmalloc(xfer_size, GFP_KERNEL);
		if (!kbuf) {
			MSG_ERR("Failed to allocate memory %lu\n", xfer_size);
			return -ENOMEM;
			ret = -ENOMEM;
			goto err_mtx_unlock;
		}

		ret = copy_from_user(kbuf, buf, xfer_size);
		if (unlikely(ret)) {
			kfree(kbuf);
			return ret;
			goto err_mtx_unlock;
		}

		spin_lock_bh(&uci_chan->lock);
@@ -339,12 +347,15 @@ static ssize_t mhi_uci_write(struct file *file,
	}

	spin_unlock_bh(&uci_chan->lock);
	mutex_unlock(&uci_chan->chan_lock);
	MSG_VERB("Exit: Number of bytes xferred:%lu\n", bytes_xfered);

	return bytes_xfered;

sys_interrupt:
	spin_unlock_bh(&uci_chan->lock);
err_mtx_unlock:
	mutex_unlock(&uci_chan->chan_lock);

	return ret;
}
@@ -367,11 +378,14 @@ static ssize_t mhi_uci_read(struct file *file,

	MSG_VERB("Client provided buf len:%lu\n", count);

	mutex_lock(&uci_chan->chan_lock);

	/* confirm channel is active */
	spin_lock_bh(&uci_chan->lock);
	if (!uci_dev->enabled) {
		spin_unlock_bh(&uci_chan->lock);
		return -ERESTARTSYS;
		ret = -ERESTARTSYS;
		goto err_mtx_unlock;
	}

	/* No data available to read, wait */
@@ -384,7 +398,8 @@ static ssize_t mhi_uci_read(struct file *file,
				 !list_empty(&uci_chan->pending)));
		if (ret == -ERESTARTSYS) {
			MSG_LOG("Exit signal caught for node\n");
			return -ERESTARTSYS;
			ret = -ERESTARTSYS;
			goto err_mtx_unlock;
		}

		spin_lock_bh(&uci_chan->lock);
@@ -418,7 +433,7 @@ static ssize_t mhi_uci_read(struct file *file,
	ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
	ret = copy_to_user(buf, ptr, to_copy);
	if (ret)
		return ret;
		goto err_mtx_unlock;

	MSG_VERB("Copied %lu of %lu bytes\n", to_copy, uci_chan->rx_size);
	uci_chan->rx_size -= to_copy;
@@ -445,12 +460,14 @@ static ssize_t mhi_uci_read(struct file *file,
	}

	MSG_VERB("Returning %lu bytes\n", to_copy);
	mutex_unlock(&uci_chan->chan_lock);

	return to_copy;

read_error:
	spin_unlock_bh(&uci_chan->lock);

err_mtx_unlock:
	mutex_unlock(&uci_chan->chan_lock);
	return ret;
}

@@ -557,6 +574,8 @@ static void mhi_uci_remove(struct mhi_device *mhi_dev)
	if (!uci_dev->ref_count) {
		mutex_unlock(&uci_dev->mutex);
		mutex_destroy(&uci_dev->mutex);
		mutex_destroy(&uci_dev->dl_chan.chan_lock);
		mutex_destroy(&uci_dev->ul_chan.chan_lock);
		clear_bit(MINOR(uci_dev->devt), uci_minors);
		kfree(uci_dev);
		mutex_unlock(&mhi_uci_drv.lock);
@@ -615,6 +634,7 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
		struct uci_chan *uci_chan = (dir) ?
			&uci_dev->ul_chan : &uci_dev->dl_chan;
		spin_lock_init(&uci_chan->lock);
		mutex_init(&uci_chan->chan_lock);
		init_waitqueue_head(&uci_chan->wq);
		INIT_LIST_HEAD(&uci_chan->pending);
	}