Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 28b12c5b authored by qctecmdr; committed by Gerrit (the friendly Code Review server)
Browse files

Merge "spi: spi-msm-geni: Add lock/unlock tre support in SPI GSI mode"

parents 64b7fed4 67640229
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -223,7 +223,8 @@ CONFIG_I2C_MSM_GENI=y
CONFIG_SPI=y
CONFIG_SPI_MEM=y
CONFIG_SPI_PL022=y
CONFIG_SPI_SPIDEV=m
CONFIG_SPI_MSM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_GPIO_GENERIC_PLATFORM=y
+388 −124
Original line number Diff line number Diff line
@@ -109,6 +109,8 @@ struct gsi_desc_cb {
};

struct spi_geni_gsi {
	struct msm_gpi_tre lock_t;
	struct msm_gpi_tre unlock_t;
	struct msm_gpi_tre config0_tre;
	struct msm_gpi_tre go_tre;
	struct msm_gpi_tre tx_dma_tre;
@@ -144,7 +146,7 @@ struct spi_geni_master {
	struct completion xfer_done;
	struct device *wrapper_dev;
	int oversampling;
	struct spi_geni_gsi *gsi;
	struct spi_geni_gsi *gsi, *gsi_lock_unlock;
	struct dma_chan *tx;
	struct dma_chan *rx;
	struct msm_gpi_ctrl tx_event;
@@ -157,13 +159,15 @@ struct spi_geni_master {
	int num_rx_eot;
	int num_xfers;
	void *ipc;
	bool shared_se; /* GSI Mode */
	bool gsi_mode; /* GSI Mode */
	bool shared_ee; /* Dual EE use case */
	bool shared_se; /* True Multi EE use case */
	bool is_le_vm;	/* LE VM usecase */
	bool dis_autosuspend;
	bool cmd_done;
	bool set_miso_sampling;
	u32 miso_sampling_ctrl_val;
	bool is_le_vm;	/* LE VM usecase */
	bool gpi_reset; /* GPI channel reset*/
};

static struct spi_master *get_spi_master(struct device *dev)
@@ -205,11 +209,13 @@ static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
		__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);

	ret = clk_set_rate(rsc->se_clk, sclk_freq);
	if (ret)
	if (ret) {
		dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
							__func__, ret);
		return ret;
	}
	return 0;
}

static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
						int bits_per_word)
@@ -346,6 +352,19 @@ static int select_xfer_mode(struct spi_master *spi,
	return mode;
}

/* Populate and return the GSI lock TRE kept in gsi_lock_unlock. */
static struct msm_gpi_tre *setup_lock_tre(struct spi_geni_master *mas)
{
	struct msm_gpi_tre *tre = &mas->gsi_lock_unlock->lock_t;

	tre->dword[0] = MSM_GPI_LOCK_TRE_DWORD0;
	tre->dword[1] = MSM_GPI_LOCK_TRE_DWORD1;
	tre->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
	/* IEOB is set so completion of the lock tre is signalled */
	tre->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 1, 0);

	return tre;
}

static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
				struct spi_geni_master *mas, u16 mode,
				u32 cs_clk_delay, u32 inter_words_delay)
@@ -357,6 +376,7 @@ static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
	int div = 0;
	int idx = 0;
	int ret = 0;
	int m_clk_cfg;

	if (IS_ERR_OR_NULL(c0_tre))
		return c0_tre;
@@ -372,11 +392,19 @@ static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,

	word_len = xfer->bits_per_word - MIN_WORD_LEN;
	pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
	if (mas->is_le_vm) {
		idx = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
		m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
		div = (m_clk_cfg & CLK_DIV_MSK) >> CLK_DIV_SHFT;
	} else {
		ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
		if (ret) {
		dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
			dev_err(mas->dev, "%s:Err setting clks:%d\n",
				__func__, ret);
			return ERR_PTR(ret);
		}
	}

	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
								word_len);
	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
@@ -441,6 +469,19 @@ static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
	return tre;
}

/* Populate and return the GSI unlock TRE kept in gsi_lock_unlock. */
static struct msm_gpi_tre *setup_unlock_tre(struct spi_geni_master *mas)
{
	struct msm_gpi_tre *tre = &mas->gsi_lock_unlock->unlock_t;

	tre->dword[0] = MSM_GPI_UNLOCK_TRE_DWORD0;
	tre->dword[1] = MSM_GPI_UNLOCK_TRE_DWORD1;
	tre->dword[2] = MSM_GPI_UNLOCK_TRE_DWORD2;
	/* IEOB is set so completion of the unlock tre is signalled */
	tre->dword[3] = MSM_GPI_UNLOCK_TRE_DWORD3(0, 0, 0, 1, 0);

	return tre;
}

static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
				void *ptr)
{
@@ -513,6 +554,20 @@ static void spi_gsi_tx_callback(void *cb)
	struct spi_transfer *xfer = desc_cb->xfer;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	/*
	 * Case when lock/unlock support is required:
	 * The callback comes on tx channel as lock/unlock
	 * tres are submitted on tx channel. Check if there's
	 * no xfer scheduled, that specifies a gsi completion
	 * callback for lock/unlock tre being submitted.
	 */
	if (!xfer) {
		GENI_SE_DBG(mas->ipc, false, mas->dev,
		"Lock/unlock IEOB received %s\n", __func__);
		complete(&mas->tx_cb);
		return;
	}

	if (xfer->tx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
@@ -531,6 +586,125 @@ static void spi_gsi_tx_callback(void *cb)
	}
}

/*
 * Locking the GPII:
 * For a shared_se usecase, lock the bus per message.
 * Lock bus is done in prepare_message and unlock bus
 * is done in unprepare_message.
 * For an LE-VM usecase, lock the bus per session.
 * Lock bus is done in runtime_resume and unlock
 * bus is done in runtime_suspend.
 */
static int spi_geni_lock_bus(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct msm_gpi_tre *lock_t = NULL;
	int ret = 0, timeout = 0;
	struct scatterlist *xfer_tx_sg = mas->gsi_lock_unlock->tx_sg;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;

	reinit_completion(&mas->tx_cb);

	GENI_SE_DBG(mas->ipc, false, mas->dev, "%s\n", __func__);

	lock_t = setup_lock_tre(mas);
	sg_init_table(xfer_tx_sg, 1);
	sg_set_buf(xfer_tx_sg, lock_t, sizeof(*lock_t));
	mas->gsi_lock_unlock->desc_cb.spi = spi;

	mas->gsi_lock_unlock->tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi_lock_unlock->tx_sg, 1,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi_lock_unlock->tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		ret = -EIO;
		goto err_spi_geni_lock_bus;
	}

	mas->gsi_lock_unlock->tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi_lock_unlock->tx_desc->callback_param =
					&mas->gsi_lock_unlock->tx_cb_param;
	mas->gsi_lock_unlock->tx_cb_param.userdata =
					&mas->gsi_lock_unlock->desc_cb;
	/* Issue TX */
	mas->gsi_lock_unlock->tx_cookie =
			dmaengine_submit(mas->gsi_lock_unlock->tx_desc);
	dma_async_issue_pending(mas->tx);

	timeout = wait_for_completion_timeout(&mas->tx_cb,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
	if (timeout <= 0) {
		GENI_SE_ERR(mas->ipc, true, mas->dev,
		"%s failed\n", __func__);
		geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
		ret = -ETIMEDOUT;
		goto err_spi_geni_lock_bus;
	}
	return ret;
err_spi_geni_lock_bus:
	if (ret)
		dmaengine_terminate_all(mas->tx);
	return ret;
}

/*
 * spi_geni_unlock_bus() - release the GPII lock taken by
 * spi_geni_lock_bus() by submitting an unlock tre on the tx channel.
 *
 * Counterpart of spi_geni_lock_bus(): called from unprepare_message
 * for the shared_se case and from runtime_suspend for the LE VM case.
 * Errors are not propagated; on timeout/setup failure (or a pending
 * GPI reset) the tx channel is terminated instead.
 */
static void spi_geni_unlock_bus(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct msm_gpi_tre *unlock_t = NULL;
	int ret = 0, timeout = 0;
	struct scatterlist *xfer_tx_sg = mas->gsi_lock_unlock->tx_sg;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;

	reinit_completion(&mas->tx_cb);

	GENI_SE_DBG(mas->ipc, false, mas->dev, "%s\n", __func__);

	/*
	 * If a previous transfer error flagged the channel for reset
	 * (see transfer_one's LE-VM error path), skip submitting the
	 * unlock tre and go straight to the teardown below.
	 */
	if (mas->gpi_reset) {
		GENI_SE_DBG(mas->ipc, false, mas->dev, "GPI Reset required\n");
		goto err_spi_geni_unlock_bus;
	}

	/* Map the unlock tre into a single-entry sg list for the tx channel */
	unlock_t = setup_unlock_tre(mas);
	sg_init_table(xfer_tx_sg, 1);
	sg_set_buf(xfer_tx_sg, unlock_t, sizeof(*unlock_t));
	mas->gsi_lock_unlock->desc_cb.spi = spi;

	mas->gsi_lock_unlock->tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi_lock_unlock->tx_sg, 1,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi_lock_unlock->tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		ret = -EIO;
		goto err_spi_geni_unlock_bus;
	}

	mas->gsi_lock_unlock->tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi_lock_unlock->tx_desc->callback_param =
					&mas->gsi_lock_unlock->tx_cb_param;
	mas->gsi_lock_unlock->tx_cb_param.userdata =
					&mas->gsi_lock_unlock->desc_cb;
	/* Issue TX */
	mas->gsi_lock_unlock->tx_cookie =
			dmaengine_submit(mas->gsi_lock_unlock->tx_desc);
	dma_async_issue_pending(mas->tx);

	/* Wait for the unlock tre IEOB completion from spi_gsi_tx_callback */
	timeout = wait_for_completion_timeout(&mas->tx_cb,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
	if (timeout <= 0) {
		GENI_SE_ERR(mas->ipc, true, mas->dev,
			"%s failed\n", __func__);
		geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
		ret = -ETIMEDOUT;
		goto err_spi_geni_unlock_bus;
	}

	/* Success also falls through here; the check below is then a no-op. */
err_spi_geni_unlock_bus:
	if (mas->gpi_reset || ret) {
		dmaengine_terminate_all(mas->tx);
		mas->gpi_reset = false;
	}
}

static int setup_gsi_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				struct spi_device *spi_slv,
@@ -755,6 +929,15 @@ static int spi_geni_prepare_message(struct spi_master *spi,
		} else {
			mas->setup = true;
		}

		if (mas->shared_se) {
			ret = spi_geni_lock_bus(spi);
			if (ret) {
				GENI_SE_ERR(mas->ipc, true, NULL,
					"%s failed: %d\n", __func__, ret);
				return ret;
			}
		}
	}

	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
@@ -789,6 +972,9 @@ static int spi_geni_unprepare_message(struct spi_master *spi_mas,
		spi_geni_unmap_buf(mas, spi_msg);

	if (mas->shared_ee) {
		if (mas->shared_se)
			spi_geni_unlock_bus(spi_mas);

		if (mas->dis_autosuspend) {
			pm_runtime_put_sync(mas->dev);
			count = atomic_read(&mas->dev->power.usage_count);
@@ -855,46 +1041,16 @@ static void spi_geni_set_sampling_rate(struct spi_geni_master *mas,
		__func__, cfg_reg108, cfg_reg109, cfg_seq_start);
}

static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
/*
 * spi_geni_mas_setup is done once per spi session.
 * In LA, it is called in prepare_transfer_hardware whereas
 * in LE, it is called in runtime_resume. Make sure this api
 * is called before any actual transfer begins as it involves
 * generic SW/HW intializations required for a spi transfer.
 */
static int spi_geni_mas_setup(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret = 0, count = 0;
	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
	struct se_geni_rsc *rsc = &mas->spi_rsc;

	/* Adjust the IB based on the max speed of the slave in kHz.*/
	rsc->ib = (max_speed * DEFAULT_BUS_WIDTH) / 1000;
	if (mas->shared_se && !mas->shared_ee) {
		struct se_geni_rsc *rsc;
		int ret = 0;

		rsc = &mas->spi_rsc;
		ret = pinctrl_select_state(rsc->geni_pinctrl,
						rsc->geni_gpio_active);
		if (ret)
			GENI_SE_ERR(mas->ipc, false, NULL,
			"%s: Error %d pinctrl_select_state\n", __func__, ret);
	}

	if (!mas->setup || !mas->shared_ee) {
		ret = pm_runtime_get_sync(mas->dev);
		if (ret < 0) {
			dev_err(mas->dev,
				"%s:pm_runtime_get_sync failed %d\n",
							__func__, ret);
			pm_runtime_put_noidle(mas->dev);
			goto exit_prepare_transfer_hardware;
		}
		ret = 0;

		if (mas->dis_autosuspend) {
			count = atomic_read(&mas->dev->power.usage_count);
			if (count <= 0)
				GENI_SE_ERR(mas->ipc, false, NULL,
				"resume usage count mismatch:%d", count);
		}
	}
	if (unlikely(!mas->setup)) {
	int proto = get_se_proto(mas->base);
	unsigned int major;
	unsigned int minor;
@@ -936,6 +1092,19 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
			mas->rx = NULL;
			goto setup_ipc;
		}
		if (mas->shared_se || mas->is_le_vm) {
			mas->gsi_lock_unlock = devm_kzalloc(mas->dev,
				(sizeof(struct spi_geni_gsi)),
				GFP_KERNEL);
			if (IS_ERR_OR_NULL(mas->gsi_lock_unlock)) {
				dev_err(mas->dev, "Failed to get GSI lock mem\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
		}
		mas->tx_event.init.callback = spi_gsi_ch_cb;
		mas->tx_event.init.cb_param = spi;
		mas->tx_event.cmd = MSM_GPI_INIT;
@@ -960,7 +1129,6 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
			mas->rx = NULL;
			goto setup_ipc;
		}

	}
setup_ipc:
	mas->ipc = ipc_log_context_create(4, dev_name(mas->dev), 0);
@@ -984,14 +1152,72 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
	if (mas->set_miso_sampling)
		spi_geni_set_sampling_rate(mas, major, minor);

		mas->shared_se =
	mas->gsi_mode =
		(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
						FIFO_IF_DISABLE);
	if (mas->dis_autosuspend)
		GENI_SE_DBG(mas->ipc, false, mas->dev,
				"Auto Suspend is disabled\n");
	return 0;
}
exit_prepare_transfer_hardware:

/*
 * Per-message hardware-prepare callback (LA usecases).
 * Votes IB bandwidth from the slave's max speed, selects active
 * pinctrl state for standalone GSI mode, takes a runtime PM vote
 * and performs the one-time spi_geni_mas_setup() on first use.
 * Returns 0 on success or a negative errno.
 */
static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret = 0, count = 0;
	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
	struct se_geni_rsc *rsc = &mas->spi_rsc;

	/* Adjust the IB based on the max speed of the slave in kHz.*/
	rsc->ib = (max_speed * DEFAULT_BUS_WIDTH) / 1000;

	/*
	 * Not required for LE as below intializations are specific
	 * to usecases. For LE, client takes care of get_sync.
	 */
	if (mas->is_le_vm)
		return 0;

	if (mas->gsi_mode && !mas->shared_ee) {
		/*
		 * Best effort: a pinctrl failure is logged but does not
		 * abort the prepare. Use a dedicated variable so the outer
		 * ret is not clobbered (the previous inner declarations
		 * shadowed both rsc and ret).
		 */
		int pin_ret = pinctrl_select_state(rsc->geni_pinctrl,
					rsc->geni_gpio_active);
		if (pin_ret)
			GENI_SE_ERR(mas->ipc, false, NULL,
			"%s: Error %d pinctrl_select_state\n", __func__,
								pin_ret);
	}

	if (!mas->setup || !mas->shared_ee) {
		ret = pm_runtime_get_sync(mas->dev);
		if (ret < 0) {
			dev_err(mas->dev,
				"%s:pm_runtime_get_sync failed %d\n",
							__func__, ret);
			pm_runtime_put_noidle(mas->dev);
			return ret;
		}

		if (!mas->setup) {
			ret = spi_geni_mas_setup(spi);
			if (ret) {
				GENI_SE_ERR(mas->ipc, true, NULL,
				"%s mas_setup failed: %d\n", __func__, ret);
				/* Balance the usage count taken above */
				pm_runtime_put_sync(mas->dev);
				return ret;
			}
		}
		ret = 0;

		if (mas->dis_autosuspend) {
			count = atomic_read(&mas->dev->power.usage_count);
			if (count <= 0)
				GENI_SE_ERR(mas->ipc, false, NULL,
				"resume usage count mismatch:%d", count);
		}
	}

	return ret;
}

@@ -1000,10 +1226,10 @@ static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int count = 0;

	if (mas->shared_ee)
	if (mas->shared_ee || mas->is_le_vm)
		return 0;

	if (mas->shared_se) {
	if (mas->gsi_mode) {
		struct se_geni_rsc *rsc;
		int ret = 0;

@@ -1291,7 +1517,17 @@ static int spi_geni_transfer_one(struct spi_master *spi,
	return ret;
err_gsi_geni_transfer_one:
	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	if (!mas->is_le_vm) {
		dmaengine_terminate_all(mas->tx);
	} else {
		/* Stop channel in case of error in LE-VM */
		ret = dmaengine_pause(mas->tx);
		if (ret) {
			mas->gpi_reset = true;
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"Channel cancel failed\n");
		}
	}
	return ret;
err_fifo_geni_transfer_one:
	handle_fifo_timeout(mas, xfer);
@@ -1630,14 +1866,24 @@ static int spi_geni_probe(struct platform_device *pdev)
	geni_mas->dis_autosuspend =
		of_property_read_bool(pdev->dev.of_node,
				"qcom,disable-autosuspend");

	/*
	 * shared_se property is set when spi is being used simultaneously
	 * from two Execution Environments.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,shared_se")) {
		geni_mas->shared_se = true;
		geni_mas->shared_ee = true;
	} else {

		/*
	 * This property will be set when spi is being used from
	 * dual Execution Environments unlike shared_se flag
		 * shared_ee property will be set when spi is being used from
		 * dual Execution Environments unlike gsi_mode flag
		 * which is set if SE is in GSI mode.
		 */
		geni_mas->shared_ee =
		of_property_read_bool(pdev->dev.of_node,
				"qcom,shared_ee");
		of_property_read_bool(pdev->dev.of_node, "qcom,shared_ee");
	}

	geni_mas->set_miso_sampling = of_property_read_bool(pdev->dev.of_node,
				"qcom,set-miso-sampling");
@@ -1711,13 +1957,16 @@ static int spi_geni_runtime_suspend(struct device *dev)
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	if (geni_mas->is_le_vm)
	if (geni_mas->is_le_vm) {
		spi_geni_unlock_bus(spi);
		return 0;
	}

	if (geni_mas->shared_ee)
	/* Do not unconfigure the GPIOs for a shared_se usecase */
	if (geni_mas->shared_ee && !geni_mas->shared_se)
		goto exit_rt_suspend;

	if (geni_mas->shared_se) {
	if (geni_mas->gsi_mode) {
		ret = se_geni_clks_off(&geni_mas->spi_rsc);
		if (ret)
			GENI_SE_ERR(geni_mas->ipc, false, NULL,
@@ -1736,13 +1985,28 @@ static int spi_geni_runtime_resume(struct device *dev)
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	if (geni_mas->is_le_vm)
		return 0;
	if (geni_mas->is_le_vm) {
		if (!geni_mas->setup) {
			ret = spi_geni_mas_setup(spi);
			if (ret) {
				GENI_SE_ERR(geni_mas->ipc, true, NULL,
				"%s mas_setup failed: %d\n", __func__, ret);
				return ret;
			}
		}

		ret = spi_geni_lock_bus(spi);
		if (ret) {
			GENI_SE_ERR(geni_mas->ipc, true, NULL,
				"%s lock_bus failed: %d\n", __func__, ret);
			return ret;
		}
	}

	if (geni_mas->shared_ee)
		goto exit_rt_resume;

	if (geni_mas->shared_se) {
	if (geni_mas->gsi_mode) {
		ret = se_geni_clks_on(&geni_mas->spi_rsc);
		if (ret)
			GENI_SE_ERR(geni_mas->ipc, false, NULL,