Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a9bed610 authored by Tomas Winkler, committed by Greg Kroah-Hartman
Browse files

mei: allow read concurrency



Replace the clunky read state machine with a read stack
implemented as a per-client read list; this matters
mostly for mei drivers with unsolicited reads.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 928fa666
Loading
Loading
Loading
Loading
+19 −17
Original line number Diff line number Diff line
@@ -288,19 +288,20 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)

	mutex_lock(&dev->device_lock);

	if (!cl->read_cb) {
	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
		if (rets < 0)
	if (rets && rets != -EBUSY)
		goto out;
	}

	if (cl->reading_state != MEI_READ_COMPLETE &&
	    !waitqueue_active(&cl->rx_wait)) {
	if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				cl->reading_state == MEI_READ_COMPLETE  ||
				(!list_empty(&cl->rd_completed)) ||
				mei_cl_is_transitioning(cl))) {

			if (signal_pending(current))
@@ -309,15 +310,20 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
		}

		mutex_lock(&dev->device_lock);
	}

		if (mei_cl_is_transitioning(cl)) {
			rets = -EBUSY;
			goto out;
		}
	}

	if (cl->reading_state != MEI_READ_COMPLETE) {
	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

	cb = cl->read_cb;
copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
@@ -329,9 +335,6 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)

free:
	mei_io_cb_free(cb);
	cl->read_cb = NULL;
	cl->reading_state = MEI_IDLE;

out:
	mutex_unlock(&dev->device_lock);

@@ -443,7 +446,7 @@ int mei_cl_enable_device(struct mei_cl_device *device)

	mutex_unlock(&dev->device_lock);

	if (device->event_cb && !cl->read_cb)
	if (device->event_cb)
		mei_cl_read_start(device->cl, 0, NULL);

	if (!device->ops || !device->ops->enable)
@@ -485,8 +488,7 @@ int mei_cl_disable_device(struct mei_cl_device *device)
	}

	/* Flush queues and remove any pending read */
	mei_cl_flush_queues(cl);
	mei_io_cb_free(cl->read_cb);
	mei_cl_flush_queues(cl, NULL);

	device->event_cb = NULL;

+54 −30
Original line number Diff line number Diff line
@@ -457,14 +457,56 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->file_object)
			return cb;

	return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 *   for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
		if (!fp || fp == cb->file_object)
			mei_io_cb_free(cb);


	list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
		if (!fp || fp == cb->file_object)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

@@ -474,13 +516,15 @@ int mei_cl_flush_queues(struct mei_cl *cl)
	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);

	mei_cl_read_cb_flush(cl, fp);

	return 0;
}

@@ -497,9 +541,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}
@@ -523,24 +568,6 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * Return: cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
@@ -1006,10 +1033,10 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
	/* HW currently supports only one pending read */
	if (!list_empty(&cl->rd_pending))
		return -EBUSY;
	}

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
@@ -1036,13 +1063,11 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
		list_add_tail(&cb->list, &cl->rd_pending);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
@@ -1268,9 +1293,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
	} else if (cb->fop_type == MEI_FOP_READ) {
		list_add_tail(&cb->list, &cl->rd_completed);
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
+4 −3
Original line number Diff line number Diff line
@@ -77,11 +77,12 @@ int mei_cl_unlink(struct mei_cl *cl);

struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);

int mei_cl_flush_queues(struct mei_cl *cl);
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);

struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
				 const struct file *fp);
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops type, struct file *fp);
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);

int mei_cl_flow_ctrl_creds(struct mei_cl *cl);

+1 −1
Original line number Diff line number Diff line
@@ -117,7 +117,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
		pos += scnprintf(buf + pos, bufsz - pos,
			"%2d|%2d|%4d|%5d|%2d|%2d|\n",
			i, cl->me_client_id, cl->host_client_id, cl->state,
			cl->reading_state, cl->writing_state);
			!list_empty(&cl->rd_completed), cl->writing_state);
		i++;
	}
out:
+0 −1
Original line number Diff line number Diff line
@@ -395,7 +395,6 @@ void mei_device_init(struct mei_device *dev,
	dev->dev_state = MEI_DEV_INITIALIZING;
	dev->reset_count = 0;

	mei_io_list_init(&dev->read_list);
	mei_io_list_init(&dev->write_list);
	mei_io_list_init(&dev->write_waiting_list);
	mei_io_list_init(&dev->ctrl_wr_list);
Loading