Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 824ab37d authored by Ezequiel Garcia, committed by Mark Brown
Browse files

spi: img-spfi: Implement a handle_err() callback



The driver can be greatly simplified by moving the transfer timeout
handling to a handle_err() callback.

Signed-off-by: Ezequiel Garcia <ezequiel.garcia@imgtec.com>
Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 89cda06d
Loading
Loading
Loading
Loading
+24 −20
Original line number Diff line number Diff line
@@ -270,7 +270,6 @@ static int img_spfi_start_pio(struct spi_master *master,

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		spfi_reset(spfi);
		return -ETIMEDOUT;
	}

@@ -396,6 +395,29 @@ static int img_spfi_start_dma(struct spi_master *master,
	return -EIO;
}

/*
 * img_spfi_handle_err() - error/timeout recovery callback for the SPI core.
 * @master: SPI master whose transfer failed
 * @msg:    the message that was in flight (unused here; required by the
 *          handle_err() callback signature)
 *
 * Invoked by the SPI core when a transfer fails or times out. Tears down
 * any in-flight DMA on both channels and resets the controller so the
 * next transfer starts from a clean state.
 */
static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed-out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		/*
		 * Clear the busy flags under the lock so a concurrent DMA
		 * completion callback won't race with this teardown.
		 */
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);

	/* Reset the controller unconditionally; done outside the lock. */
	spfi_reset(spfi);
}

static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
@@ -462,8 +484,6 @@ static int img_spfi_transfer_one(struct spi_master *master,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	bool dma_reset = false;
	unsigned long flags;
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
@@ -473,23 +493,6 @@ static int img_spfi_transfer_one(struct spi_master *master,
		return -EINVAL;
	}

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed-out and never completed it's DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		dev_err(spfi->dev, "SPI DMA still busy\n");
		dma_reset = true;
	}
	spin_unlock_irqrestore(&spfi->lock, flags);

	if (dma_reset) {
		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
		spfi_reset(spfi);
	}

	img_spfi_config(master, spi, xfer);
	if (master->can_dma && master->can_dma(master, spi, xfer))
		ret = img_spfi_start_dma(master, spi, xfer);
@@ -607,6 +610,7 @@ static int img_spfi_probe(struct platform_device *pdev)
	master->set_cs = img_spfi_set_cs;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->handle_err = img_spfi_handle_err;

	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");