Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 37bc52a5 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "slimbus: Support multiple message transactions per call"

parents cf1933da 71598bfd
Loading
Loading
Loading
Loading
+156 −14
Original line number Diff line number Diff line
@@ -92,6 +92,8 @@ static irqreturn_t ngd_slim_interrupt(int irq, void *d)
	u32 stat = readl_relaxed(ngd + NGD_INT_STAT);
	u32 pstat;

	if (dev->bulk.in_progress)
		SLIM_INFO(dev, "Interrupt in bulk:stat:0x%x", stat);
	if ((stat & NGD_INT_MSG_BUF_CONTE) ||
		(stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
		(stat & NGD_INT_TX_NACKED_2)) {
@@ -651,32 +653,41 @@ ngd_xfer_err:
	return ret ? ret : dev->err;
}

static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
				struct slim_ele_access *msg, u8 *buf, u8 len)
/*
 * ngd_get_ec: Build the 16-bit element code (EC) for a value-element access.
 * @start_offset: starting offset within the value-element map
 * @len: number of bytes to transfer (1-4, even 6/8, or 4-byte multiples
 *	up to SLIM_MAX_VE_SLC_BYTES)
 * @ec: out-parameter receiving the encoded element code
 *
 * Encodes the slice size in the low nibble and the 12-bit offset across
 * the remaining bits, with bit 3 set to mark a value-element access.
 * Returns 0 on success, -EINVAL for an unsupported length/offset.
 * (Reconstructed new-side function: the diff view had interleaved the old
 * ngd_user_msg lines into this span.)
 */
static int ngd_get_ec(u16 start_offset, u8 len, u16 *ec)
{
	if (len > SLIM_MAX_VE_SLC_BYTES ||
		start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
		return -EINVAL;
	if (len <= 4) {
		*ec = len - 1;
	} else if (len <= 8) {
		/* 5-8 byte slices must be an even number of bytes */
		if (len & 0x1)
			return -EINVAL;
		*ec = ((len >> 1) + 1);
	} else {
		/* larger slices must be a multiple of 4 bytes */
		if (len & 0x3)
			return -EINVAL;
		*ec = ((len >> 2) + 3);
	}
	*ec |= (0x8 | ((start_offset & 0xF) << 4));
	*ec |= ((start_offset & 0xFF0) << 4);
	return 0;
}

static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
				struct slim_ele_access *msg, u8 *buf, u8 len)
{
	int ret;
	struct slim_msg_txn txn;

	if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
		mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
		return -EPROTONOSUPPORT;
	}

	ret = ngd_get_ec(msg->start_offset, len, &txn.ec);
	if (ret)
		return ret;
	txn.la = la;
	txn.mt = mt;
	txn.mc = mc;
@@ -689,6 +700,132 @@ static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
	return ngd_xfer_msg(ctrl, &txn);
}

/*
 * ngd_bulk_cb: Default completion callback for blocking bulk writes.
 * @ctx: on-stack completion to signal, or NULL
 * @err: transfer status, passed straight back to the caller
 *
 * Signals the waiter (if any) and propagates @err unchanged.
 */
static int ngd_bulk_cb(void *ctx, int err)
{
	struct completion *done = ctx;

	if (done)
		complete(done);
	return err;
}

/*
 * ngd_bulk_wr: Pack multiple write messages into one DMA buffer and push
 * it through the TX message-queue pipe in a single SPS transfer.
 * @ctrl: controller performing the transfer
 * @la: destination logical address applied to every message
 * @mt: message type applied to every message
 * @mc: message code applied to every message
 * @msgs: array of @n write transactions to pack back-to-back
 * @n: number of entries in @msgs
 * @comp_cb: optional callback for non-blocking use; when NULL the call
 *	blocks on an on-stack completion (signalled via ngd_bulk_cb from the
 *	TX-return path) with a 1-second timeout
 * @ctx: opaque context handed to @comp_cb
 * Returns 0 on success or a negative errno.
 */
static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
			struct slim_val_inf msgs[], int n,
			int (*comp_cb)(void *ctx, int err), void *ctx)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	int i, ret;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_pipe *pipe = endpoint->sps;
	u32 *header;
	DECLARE_COMPLETION_ONSTACK(done);

	msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* Resume the controller first; drop tx_lock since resume may sleep. */
	if (dev->state == MSM_CTRL_ASLEEP) {
		mutex_unlock(&dev->tx_lock);
		ret = ngd_slim_runtime_resume(dev->dev);

		if (ret) {
			SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
					ret, dev->state);
			/*
			 * NOTE(review): this return skips msm_slim_put_ctrl(),
			 * unlike every other exit path — looks like a runtime-PM
			 * reference leak; confirm against msm_slim_get_ctrl().
			 */
			return -EREMOTEIO;
		}
		mutex_lock(&dev->tx_lock);
	}
	/* Bulk mode requires the TX message queue (DMA) path. */
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
		SLIM_WARN(dev, "bulk wr not supported");
		ret = -EPROTONOSUPPORT;
		goto ret_async;
	}
	/* Only one bulk transfer may be outstanding at a time. */
	if (dev->bulk.in_progress) {
		SLIM_WARN(dev, "bulk wr in progress:");
		ret = -EAGAIN;
		goto ret_async;
	}
	/* Free the buffer left behind by a previous async transfer. */
	if (dev->bulk.size) {
		dma_free_coherent(dev->dev, dev->bulk.size, dev->bulk.base,
					dev->bulk.phys);
		memset(&dev->bulk, 0, sizeof(dev->bulk));
	}
	/* every txn has 5 bytes of overhead: la, mc, mt, ec, len */
	dev->bulk.size = n * 5;
	for (i = 0; i < n; i++) {
		dev->bulk.size += msgs[i].num_bytes;
		/*
		 * Pad each message up to a 4-byte (word) boundary.
		 * NOTE(review): when (num_bytes + 5) is already word-aligned
		 * this still adds 4 pad bytes (no "& 0x3" mask) — harmless
		 * over-allocation, but confirm it is intentional.
		 */
		dev->bulk.size += (4 - ((msgs[i].num_bytes + 1) & 0x3));
	}

	/* SPS descriptor length is 16 bits. */
	if (dev->bulk.size > 0xffff) {
		SLIM_WARN(dev, "len exceeds limit, split bulk and retry");
		ret = -EDQUOT;
		goto ret_async;
	}
	header = dma_alloc_coherent(dev->dev, dev->bulk.size, &dev->bulk.phys,
					GFP_KERNEL);
	if (!header) {
		ret = -ENOMEM;
		goto ret_async;
	}

	dev->bulk.base = header;
	dev->bulk.in_progress = true;
	/* Pack each message: 4-byte header word, 2-byte EC, payload, pad. */
	for (i = 0; i < n; i++) {
		u8 *buf = (u8 *)header;
		int rl = msgs[i].num_bytes + 5;
		u16 ec;

		*header = SLIM_MSG_ASM_FIRST_WORD(rl, mt, mc, 0, la);
		buf += 3;
		ret = ngd_get_ec(msgs[i].start_offset, msgs[i].num_bytes, &ec);
		if (ret)
			goto retpath;
		*(buf++) = (ec & 0xFF);
		*(buf++) = (ec >> 8) & 0xFF;
		memcpy(buf, msgs[i].wbuf, msgs[i].num_bytes);
		buf += msgs[i].num_bytes;
		header += (rl >> 2);
		/* Zero the pad bytes up to the next word boundary. */
		if (rl & 3) {
			header++;
			memset(buf, 0, ((u8 *)header - buf));
		}
	}
	header = dev->bulk.base;
	/* SLIM_INFO only prints to internal buffer log, does not do pr_info */
	/*
	 * NOTE(review): header is u32*, so "header += 4" advances 16 bytes
	 * per iteration while i advances only 4 — the loop appears to read
	 * well past the end of the buffer. Verify the intended stride
	 * (likely header += 4 with i += 16, or header++ with i += 4).
	 */
	for (i = 0; i < (dev->bulk.size); i += 4, header += 4)
		SLIM_INFO(dev, "bulk sz:%d:0x%x, 0x%x, 0x%x, 0x%x",
			  dev->bulk.size, *header, *(header+1), *(header+2),
			  *(header+3));
	/* No caller callback means blocking mode via the on-stack completion. */
	if (comp_cb) {
		dev->bulk.cb = comp_cb;
		dev->bulk.ctx = ctx;
	} else {
		dev->bulk.cb = ngd_bulk_cb;
		dev->bulk.ctx = &done;
	}
	ret = sps_transfer_one(pipe, dev->bulk.phys, dev->bulk.size, NULL,
				SPS_IOVEC_FLAG_EOT);
	if (ret) {
		SLIM_WARN(dev, "sps transfer one returned error:%d", ret);
		goto retpath;
	}
	/* Blocking mode: wait here; async mode keeps the buffer until done. */
	if (dev->bulk.cb == ngd_bulk_cb) {
		int timeout = wait_for_completion_timeout(&done, HZ);

		if (!timeout) {
			SLIM_WARN(dev, "timeout for bulk wr");
			ret = -ETIMEDOUT;
			/*
			 * NOTE(review): on timeout we fall through and free the
			 * DMA buffer below while the HW may still own it —
			 * potential use-after-free; confirm SPS pipe state here.
			 */
		}
	} else {
		goto ret_async;
	}
retpath:
	dma_free_coherent(dev->dev, dev->bulk.size, dev->bulk.base,
				dev->bulk.phys);
	memset(&dev->bulk, 0, sizeof(dev->bulk));
ret_async:
	mutex_unlock(&dev->tx_lock);
	msm_slim_put_ctrl(dev);
	return ret;
}

static int ngd_xferandwait_ack(struct slim_controller *ctrl,
				struct slim_msg_txn *txn)
{
@@ -1421,6 +1558,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
	dev->ctrl.allocbw = ngd_allocbw;
	dev->ctrl.xfer_msg = ngd_xfer_msg;
	dev->ctrl.xfer_user_msg = ngd_user_msg;
	dev->ctrl.xfer_bulk_wr = ngd_bulk_wr;
	dev->ctrl.wakeup = NULL;
	dev->ctrl.alloc_port = msm_alloc_port;
	dev->ctrl.dealloc_port = msm_dealloc_port;
@@ -1562,6 +1700,10 @@ static int ngd_slim_remove(struct platform_device *pdev)
	if (!IS_ERR_OR_NULL(dev->ext_mdm.ssr))
		subsys_notif_unregister_notifier(dev->ext_mdm.ssr,
						&dev->ext_mdm.nb);
	if (dev->bulk.size)
		dma_free_coherent(dev->dev, dev->bulk.size, dev->bulk.base,
					dev->bulk.phys);

	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	kthread_stop(dev->rx_msgq_thread);
+10 −0
Original line number Diff line number Diff line
@@ -446,6 +446,16 @@ void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev, int err)
				pr_err("SLIM TX get IOVEC failed:%d", ret);
			return;
		}
		if (addr == dev->bulk.phys) {
			SLIM_INFO(dev, "BULK WR complete");
			dev->bulk.in_progress = false;
			if (!dev->bulk.cb)
				SLIM_WARN(dev, "no callback for bulk WR?");
			else
				dev->bulk.cb(dev->bulk.ctx, err);
			pm_runtime_mark_last_busy(dev->dev);
			return;
		}
		idx = (int) ((addr - mem->phys_base)
			/ SLIM_MSGQ_BUF_LEN);
		if (idx < MSM_TX_BUFS && dev->wr_comp[idx]) {
+10 −0
Original line number Diff line number Diff line
@@ -227,6 +227,15 @@ struct msm_slim_pdata {
	u32 eapc;
};

/* Book-keeping for an in-flight bulk write (used by ngd_bulk_wr). */
struct msm_slim_bulk_wr {
	phys_addr_t	phys;		/* DMA address of the packed buffer */
	void		*base;		/* CPU address of the packed buffer */
	int		size;		/* total packed-buffer size in bytes */
	int		(*cb)(void *ctx, int err);	/* completion callback */
	void		*ctx;		/* opaque context passed to @cb */
	bool		in_progress;	/* true while the transfer is queued */
};

struct msm_slim_ctrl {
	struct slim_controller  ctrl;
	struct slim_framer	framer;
@@ -273,6 +282,7 @@ struct msm_slim_ctrl {
	struct msm_slim_pdata	pdata;
	struct msm_slim_ss	ext_mdm;
	struct msm_slim_ss	dsp;
	struct msm_slim_bulk_wr	bulk;
	int			default_ipc_log_mask;
	int			ipc_log_mask;
	bool			sysfs_created;
+44 −0
Original line number Diff line number Diff line
@@ -1104,6 +1104,50 @@ int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
}
EXPORT_SYMBOL(slim_user_msg);

/*
 * Queue bulk of message writes:
 * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
 * @sb: Client handle sending these messages; the destination is @sb's
 *	logical address
 * @mt: Message Type
 * @mc: Message Code
 * @msgs: List of messages to be written in bulk
 * @n: Number of messages in the list
 * @comp_cb: Callback if client needs this to be non-blocking
 * @ctx: Context for this callback
 * If supported by controller, this message list will be sent in bulk to the HW
 * If the client specifies this to be non-blocking, the callback will be
 * called from atomic context.
 */
/*
 * slim_bulk_msg_write: Write a list of messages, in bulk when supported.
 * @sb: client handle; the destination is @sb's logical address
 * @mt: message type applied to every message
 * @mc: message code applied to every message
 * @msgs: list of @n messages to be written
 * @n: number of messages in the list
 * @comp_cb: callback if the client needs this to be non-blocking
 * @ctx: context for @comp_cb
 *
 * If the controller supports bulk writes the whole list is handed to it in
 * one call; otherwise each message is serialized through slim_xfer_msg().
 * Returns 0 on success or a negative errno.
 */
int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
			struct slim_val_inf msgs[], int n,
			int (*comp_cb)(void *ctx, int err), void *ctx)
{
	/*
	 * Fix: ret was previously uninitialized and returned verbatim when
	 * the serialized fallback loop ran zero iterations (n == 0).
	 */
	int i, ret = 0;

	if (!sb || !sb->ctrl || !msgs || n <= 0)
		return -EINVAL;
	if (!sb->ctrl->xfer_bulk_wr) {
		pr_warn("controller does not support bulk WR, serializing");
		for (i = 0; i < n; i++) {
			struct slim_ele_access ele;

			ele.comp = NULL;
			ele.start_offset = msgs[i].start_offset;
			ele.num_bytes = msgs[i].num_bytes;
			ret = slim_xfer_msg(sb->ctrl, sb, &ele, mc,
					msgs[i].rbuf, msgs[i].wbuf,
					ele.num_bytes);
			if (ret)
				return ret;
		}
		return ret;
	}
	return sb->ctrl->xfer_bulk_wr(sb->ctrl, sb->laddr, mt, mc, msgs, n,
					comp_cb, ctx);
}
EXPORT_SYMBOL(slim_bulk_msg_write);

/*
 * slim_alloc_mgrports: Allocate port on manager side.
 * @sb: device/client handle.
+41 −0
Original line number Diff line number Diff line
@@ -155,6 +155,20 @@ struct slim_addrt {
	u8	laddr;
};

/*
 * struct slim_val_inf: slimbus value/information element transaction
 * @start_offset: Specifies starting offset in information/value element map
 * @num_bytes: number of bytes to be read/written
 * @wbuf: buffer if this transaction has 'write' component in it
 * @rbuf: buffer if this transaction has 'read' component in it
 * NOTE(review): presumably the buffer for the unused direction may be NULL
 * (bulk writes only consume @wbuf) — confirm against callers.
 */
struct slim_val_inf {
	u16 start_offset;
	u8 num_bytes;
	u8 *wbuf;
	u8 *rbuf;
};

/*
 * struct slim_msg_txn: Message to be sent by the controller.
 * Linux framework uses this structure with drivers implementing controller.
@@ -530,6 +544,10 @@ enum slim_clk_state {
 *	errors (e.g. overflow/underflow) if any.
 * @xfer_user_msg: Send user message to specified logical address. Underlying
 *	controller has to support sending user messages. Returns error if any.
 * @xfer_bulk_wr: Send bulk of write messages to specified logical address.
 *	Underlying controller has to support this. Typically useful to transfer
 *	messages to download firmware, or messages where strict ordering for
 *	slave is necessary
 */
struct slim_controller {
	struct device		dev;
@@ -580,6 +598,10 @@ struct slim_controller {
	int			(*xfer_user_msg)(struct slim_controller *ctrl,
				u8 la, u8 mt, u8 mc,
				struct slim_ele_access *msg, u8 *buf, u8 len);
	int			(*xfer_bulk_wr)(struct slim_controller *ctrl,
				u8 la, u8 mt, u8 mc, struct slim_val_inf msgs[],
				int n, int (*comp_cb)(void *ctx, int err),
				void *ctx);
};
#define to_slim_controller(d) container_of(d, struct slim_controller, dev)

@@ -767,6 +789,25 @@ extern int slim_xfer_msg(struct slim_controller *ctrl,
 */
extern int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
				struct slim_ele_access *msg, u8 *buf, u8 len);

/*
 * Queue bulk of message writes:
 * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
 * @sb: Client handle sending these messages; the destination is @sb's
 *	logical address
 * @mt: Message Type
 * @mc: Message Code
 * @msgs: List of messages to be written in bulk
 * @n: Number of messages in the list
 * @comp_cb: Callback if client needs this to be non-blocking
 * @ctx: Context for this callback
 * If supported by controller, this message list will be sent in bulk to the HW
 * If the client specifies this to be non-blocking, the callback will be
 * called from atomic context.
 */
extern int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
			struct slim_val_inf msgs[], int n,
			int (*comp_cb)(void *ctx, int err), void *ctx);
/* end of message apis */

/* Port management for manager device APIs */