Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d5636316 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "i2c: i2c-qcom-geni: Correct the IEOB/IEOT sequence for I2C GSI tres"

parents b35ddc40 d06ac016
Loading
Loading
Loading
Loading
+77 −26
Original line number Diff line number Diff line
@@ -582,7 +582,7 @@ struct gpii {
	struct gpi_reg_table dbg_reg_table;
	bool reg_table_dump;
	u32 dbg_gpi_irq_cnt;
	bool ieob_set;
	bool unlock_tre_set;
};

struct gpi_desc {
@@ -1449,6 +1449,22 @@ static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
			      client_info->cb_param);
}

/*
 * Free the gpi_desc at the head of the specified channel's issued list.
 *
 * Pops the next virt-dma descriptor under the channel's vc lock, unlinks
 * it, and frees the containing gpi_desc outside the lock.  Safe to call
 * when no descriptor is pending: vchan_next_desc() returns NULL for an
 * empty list, in which case this is a no-op (the original code would
 * have dereferenced NULL in list_del()).
 */
static void gpi_free_chan_desc(struct gpii_chan *gpii_chan)
{
	struct virt_dma_desc *vd;
	struct gpi_desc *gpi_desc;
	unsigned long flags;

	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vd = vchan_next_desc(&gpii_chan->vc);
	if (!vd) {
		/* Nothing issued on this channel; nothing to free */
		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		return;
	}
	gpi_desc = to_gpi_desc(vd);
	list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);

	/*
	 * kfree outside the spinlock; the local pointer goes out of scope
	 * immediately, so the old "gpi_desc = NULL" dead store is dropped.
	 */
	kfree(gpi_desc);
}

/* process DMA Immediate completion data events */
static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
					struct immediate_data_event *imed_event)
@@ -1462,6 +1478,7 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
	unsigned long flags;
	u32 chid;
	struct gpii_chan *gpii_tx_chan = &gpii->gpii_chan[GPI_TX_CHAN];

	/*
	 * If channel not active don't process event but let
@@ -1514,12 +1531,33 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
	/* make sure rp updates are immediately visible to all cores */
	smp_wmb();

	/*
	 * If unlock tre is present, don't send transfer callback
	 * on IEOT, wait for unlock IEOB. Free the respective channel
	 * descriptors.
	 * If unlock is not present, IEOB indicates freeing the descriptor
	 * and IEOT indicates channel transfer completion.
	 */
	chid = imed_event->chid;
	if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
		if (chid == GPI_RX_CHAN)
	if (gpii->unlock_tre_set) {
		if (chid == GPI_RX_CHAN) {
			if (imed_event->code == MSM_GPI_TCE_EOT)
				goto gpi_free_desc;
			else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
				/*
				 * In case of an error in a read transfer on a
				 * shared se, unlock tre will not be processed
				 * as channels go to bad state so tx desc should
				 * be freed manually.
				 */
				gpi_free_chan_desc(gpii_tx_chan);
			else
				return;
		} else if (imed_event->code == MSM_GPI_TCE_EOT) {
			return;
		}
	} else if (imed_event->code == MSM_GPI_TCE_EOB) {
		goto gpi_free_desc;
	}

	tx_cb_param = vd->tx.callback_param;
@@ -1539,11 +1577,7 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
	}

gpi_free_desc:
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
	kfree(gpi_desc);
	gpi_desc = NULL;
	gpi_free_chan_desc(gpii_chan);
}

/* processing transfer completion events */
@@ -1558,6 +1592,7 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
	struct gpi_desc *gpi_desc;
	unsigned long flags;
	u32 chid;
	struct gpii_chan *gpii_tx_chan = &gpii->gpii_chan[GPI_TX_CHAN];

	/* only process events on active channel */
	if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
@@ -1602,12 +1637,33 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
	/* update must be visible to other cores */
	smp_wmb();

	/*
	 * If unlock tre is present, don't send transfer callback
	 * on IEOT, wait for unlock IEOB. Free the respective channel
	 * descriptors.
	 * If unlock is not present, IEOB indicates freeing the descriptor
	 * and IEOT indicates channel transfer completion.
	 */
	chid = compl_event->chid;
	if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
		if (chid == GPI_RX_CHAN)
	if (gpii->unlock_tre_set) {
		if (chid == GPI_RX_CHAN) {
			if (compl_event->code == MSM_GPI_TCE_EOT)
				goto gpi_free_desc;
			else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR)
				/*
				 * In case of an error in a read transfer on a
				 * shared se, unlock tre will not be processed
				 * as channels go to bad state so tx desc should
				 * be freed manually.
				 */
				gpi_free_chan_desc(gpii_tx_chan);
			else
				return;
		} else if (compl_event->code == MSM_GPI_TCE_EOT) {
			return;
		}
	} else if (compl_event->code == MSM_GPI_TCE_EOB) {
		goto gpi_free_desc;
	}

	tx_cb_param = vd->tx.callback_param;
@@ -1623,11 +1679,7 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
	}

gpi_free_desc:
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
	kfree(gpi_desc);
	gpi_desc = NULL;
	gpi_free_chan_desc(gpii_chan);

}

@@ -2325,7 +2377,7 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
	void *tre, *wp = NULL;
	const gfp_t gfp = GFP_ATOMIC;
	struct gpi_desc *gpi_desc;
	gpii->ieob_set = false;
	u32 tre_type;

	GPII_VERB(gpii, gpii_chan->chid, "enter\n");

@@ -2362,13 +2414,12 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
	for_each_sg(sgl, sg, sg_len, i) {
		tre = sg_virt(sg);

		/* Check if last tre has ieob set */
		/* Check if last tre is an unlock tre */
		if (i == sg_len - 1) {
			if ((((struct msm_gpi_tre *)tre)->dword[3] &
					GPI_IEOB_BMSK) >> GPI_IEOB_BMSK_SHIFT)
				gpii->ieob_set = true;
			else
				gpii->ieob_set = false;
			tre_type =
			MSM_GPI_TRE_TYPE(((struct msm_gpi_tre *)tre));
			gpii->unlock_tre_set =
			tre_type == MSM_GPI_TRE_UNLOCK ? true : false;
		}

		for (j = 0; j < sg->length;
+27 −10
Original line number Diff line number Diff line
@@ -405,13 +405,9 @@ static void gi2c_gsi_tx_cb(void *ptr)
	struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr;
	struct geni_i2c_dev *gi2c = tx_cb->userdata;

	if (tx_cb->completion_code == MSM_GPI_TCE_EOB) {
		complete(&gi2c->xfer);
	} else if (!(gi2c->cur->flags & I2C_M_RD)) {
	gi2c_gsi_cb_err(tx_cb, "TX");
	complete(&gi2c->xfer);
}
}

static void gi2c_gsi_rx_cb(void *ptr)
{
@@ -480,7 +476,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
		lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
		lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 0, 1);

		/* unlock */
		/* unlock tre: ieob set */
		unlock_t->dword[0] = MSM_GPI_UNLOCK_TRE_DWORD0;
		unlock_t->dword[1] = MSM_GPI_UNLOCK_TRE_DWORD1;
		unlock_t->dword[2] = MSM_GPI_UNLOCK_TRE_DWORD2;
@@ -535,12 +531,14 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
				segs++;
			sg_init_table(gi2c->tx_sg, segs);
			if (i == 0)
				/* Send lock tre for first transfer in a msg */
				sg_set_buf(&gi2c->tx_sg[index++], &gi2c->lock_t,
					sizeof(gi2c->lock_t));
		} else {
			sg_init_table(gi2c->tx_sg, segs);
		}

		/* Send cfg tre when cfg not sent already */
		if (!gi2c->cfg_sent) {
			sg_set_buf(&gi2c->tx_sg[index++], &gi2c->cfg0_t,
						sizeof(gi2c->cfg0_t));
@@ -553,9 +551,18 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],

		if (msgs[i].flags & I2C_M_RD) {
			go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(msgs[i].len);
			go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, 0, 0,
									0);
			/*
			 * For Rx Go tre: Set ieob for non-shared se and for all
			 * but last transfer in shared se
			 */
			if (!gi2c->is_shared || (gi2c->is_shared && i != num-1))
				go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0,
								0, 1, 0);
			else
				go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0,
								0, 0, 0);
		} else {
			/* For Tx Go tre: ieob is not set, chain bit is set */
			go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(0);
			go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(0, 0, 0, 0,
								1);
@@ -591,6 +598,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
				MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(gi2c->rx_ph);
			gi2c->rx_t.dword[2] =
				MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len);
			/* Set ieot for all Rx/Tx DMA tres */
			gi2c->rx_t.dword[3] =
				MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 0);

@@ -641,6 +649,10 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
			gi2c->tx_t.dword[2] =
				MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len);
			if (gi2c->is_shared && i == num-1)
				/*
				 * For Tx: unlock tre is sent for last transfer
				 * so set chain bit for last transfer DMA tre.
				 */
				gi2c->tx_t.dword[3] =
				MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 1);
			else
@@ -652,6 +664,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
		}

		if (gi2c->is_shared && i == num-1) {
			/* Send unlock tre at the end of last transfer */
			sg_set_buf(&gi2c->tx_sg[index++],
				&gi2c->unlock_t, sizeof(gi2c->unlock_t));
		}
@@ -689,6 +702,10 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
			dmaengine_terminate_all(gi2c->tx_c);
			gi2c->cfg_sent = 0;
		}
		if (gi2c->is_shared)
			/* Resend cfg tre for every new message on shared se */
			gi2c->cfg_sent = 0;

		if (msgs[i].flags & I2C_M_RD)
			geni_se_iommu_unmap_buf(rx_dev, &gi2c->rx_ph,
				msgs[i].len, DMA_FROM_DEVICE);