Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87467bd9 authored by Brian Niebuhr's avatar Brian Niebuhr Committed by Sekhar Nori
Browse files

spi: davinci: let DMA operation be specified on per-device basis



Let DMA operation be specified on a per-device basis instead
of selecting it once during probe.

A side effect of this is the need to combine the PIO and DMA
txrx_bufs routines into one. This is beneficial since they share some
common functionality anyway.

Signed-off-by: default avatarBrian Niebuhr <bniebuhr@efjohnson.com>
Tested-By: default avatarMichael Williamson <michael.williamson@criticallink.com>
Signed-off-by: default avatarSekhar Nori <nsekhar@ti.com>
parent 6dbd29b2
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ struct davinci_spi_config {
	u8	parity_enable;
#define SPI_IO_TYPE_INTR	0
#define SPI_IO_TYPE_POLL	1
#define SPI_IO_TYPE_DMA		2
	u8	io_type;
	u8	timer_disable;
	u8	c2tdelay;
+155 −187
Original line number Diff line number Diff line
@@ -500,6 +500,25 @@ static int davinci_spi_process_events(struct davinci_spi *davinci_spi)
	return errors;
}

/*
 * EDMA completion callback shared by the RX and TX channels.
 * Clears the finished direction's outstanding word count and wakes the
 * transfer path once both directions are drained, or immediately on a
 * DMA error so the error can be reported.
 */
static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
{
	struct davinci_spi *davinci_spi = data;
	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;

	/* Halt the channel that raised this event. */
	edma_stop(lch);

	if (status == DMA_COMPLETE) {
		/* Mark whichever direction just finished as fully drained. */
		if (lch == davinci_spi_dma->dma_rx_channel)
			davinci_spi->rcount = 0;
		if (lch == davinci_spi_dma->dma_tx_channel)
			davinci_spi->wcount = 0;
	}

	/*
	 * Complete when both TX and RX are done, or right away on any
	 * non-complete status (error) so the waiter is not left hanging.
	 */
	if ((!davinci_spi->wcount && !davinci_spi->rcount) ||
	    (status != DMA_COMPLETE))
		complete(&davinci_spi->done);
}

/**
 * davinci_spi_bufs - functions which will handle transfer data
 * @spi: spi device on which data transfer to be done
@@ -509,25 +528,30 @@ static int davinci_spi_process_events(struct davinci_spi *davinci_spi)
 * of SPI controller and then wait until the completion will be marked
 * by the IRQ Handler.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int ret;
	int data_type, ret;
	u32 tx_data, data1_reg_val;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;
	unsigned uninitialized_var(rx_buf_count);
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;
	sdev = davinci_spi->bitbang.master->dev.parent;

	/* convert len to words based on bits_per_word */
	data_type = davinci_spi->bytes_per_word[spi->chip_select];

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;
	davinci_spi->wcount = t->len /
				davinci_spi->bytes_per_word[spi->chip_select];
	davinci_spi->wcount = t->len / data_type;
	davinci_spi->rcount = davinci_spi->wcount;

	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
@@ -535,145 +559,47 @@ static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	if (spicfg->io_type == SPI_IO_TYPE_INTR) {
		set_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);
	INIT_COMPLETION(davinci_spi->done);
	}

	if (spicfg->io_type == SPI_IO_TYPE_INTR)
		set_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);

	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
		/* start the transfer */
		davinci_spi->wcount--;
		tx_data = davinci_spi->get_tx(davinci_spi);
		data1_reg_val &= 0xFFFF0000;
		data1_reg_val |= tx_data & 0xFFFF;
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

	/* Wait for the transfer to complete */
	if (spicfg->io_type == SPI_IO_TYPE_INTR) {
		wait_for_completion_interruptible(&(davinci_spi->done));
	} else {
		while (davinci_spi->rcount > 0 || davinci_spi->wcount > 0) {
			errors = davinci_spi_process_events(davinci_spi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(davinci_spi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	return t->len;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 *
 * ISR will determine that interrupt arrives either for READ or WRITE command.
 * According to command it will do the appropriate action. It will check
 * transfer length and if it is not zero then dispatch transfer command again.
 * If transfer length is zero then it will indicate the COMPLETION so that
 * davinci_spi_bufs function can go ahead.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
	struct davinci_spi *davinci_spi = context_data;
	int status;

	/* Service pending TX/RX events; non-zero status means an error. */
	status = davinci_spi_process_events(davinci_spi);
	/* On error, mask further SPI interrupts for this transfer. */
	if (unlikely(status != 0))
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);

	/* Wake the transfer once all words have moved, or on any error. */
	if ((!davinci_spi->rcount && !davinci_spi->wcount) || status)
		complete(&davinci_spi->done);

	return IRQ_HANDLED;
}

/*
 * EDMA completion callback for the SPI RX/TX channels (original location;
 * this copy is being relocated earlier in the file by this commit).
 * Zeroes the completed direction's word count and completes the transfer
 * when both directions finish or a DMA error is reported.
 */
static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
{
	struct davinci_spi *davinci_spi = data;
	struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;

	/* Stop the channel that triggered this callback. */
	edma_stop(lch);

	if (status == DMA_COMPLETE) {
		/* The matching direction has no words outstanding now. */
		if (lch == davinci_spi_dma->dma_rx_channel)
			davinci_spi->rcount = 0;
		if (lch == davinci_spi_dma->dma_tx_channel)
			davinci_spi->wcount = 0;
	}

	/* Signal completion when fully drained, or immediately on error. */
	if ((!davinci_spi->wcount && !davinci_spi->rcount) ||
	    (status != DMA_COMPLETE))
		complete(&davinci_spi->done);
}

static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	unsigned rx_buf_count;
		struct davinci_spi_dma *davinci_spi_dma;
	int data_type, ret;
		unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	void *rx_buf;
	struct device *sdev;
		struct edmacc_param param;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;
		void *rx_buf;

		davinci_spi_dma = &davinci_spi->dma_channels;

	/* convert len to words based on bits_per_word */
	data_type = davinci_spi->bytes_per_word[spi->chip_select];

		tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
		rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;
	davinci_spi->wcount = t->len / data_type;
	davinci_spi->rcount = davinci_spi->wcount;

	INIT_COMPLETION(davinci_spi->done);

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

		/*
		 * Transmit DMA setup
		 *
	 * If there is transmit data, map the transmit buffer, set it as the
	 * source of data and set the source B index to data size.
	 * If there is no transmit data, set the transmit register as the
	 * source of data, and set the source B index to zero.
		 * If there is transmit data, map the transmit buffer, set it
		 * as the source of data and set the source B index to data
		 * size. If there is no transmit data, set the transmit register
		 * as the source of data, and set the source B index to zero.
		 *
	 * The destination is always the transmit register itself. And the
	 * destination never increments.
		 * The destination is always the transmit register itself. And
		 * the destination never increments.
		 */

		if (t->tx_buf) {
			t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
					davinci_spi->wcount, DMA_TO_DEVICE);
			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map %d bytes TX buffer\n",
				dev_dbg(sdev, "Unable to DMA map %d bytes"
						"TX buffer\n",
						davinci_spi->wcount);
				return -ENOMEM;
			}
@@ -717,7 +643,8 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
								rx_buf_count);
			if (t->tx_buf)
			dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
				dma_unmap_single(NULL, t->tx_dma,
						davinci_spi->wcount,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
@@ -732,34 +659,50 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
		param.ccnt = 1;
		edma_write_slot(davinci_spi_dma->dma_rx_channel, &param);

	if (pdata->cshold_bug) {
		u16 spidat1 = ioread16(davinci_spi->base + SPIDAT1 + 2);
		iowrite16(spidat1, davinci_spi->base + SPIDAT1 + 2);
	}
		if (pdata->cshold_bug)
			iowrite16(data1_reg_val >> 16,
					davinci_spi->base + SPIDAT1 + 2);

		edma_start(davinci_spi_dma->dma_rx_channel);
		edma_start(davinci_spi_dma->dma_tx_channel);
		set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		wait_for_completion_interruptible(&(davinci_spi->done));
	} else {
		while (davinci_spi->rcount > 0 || davinci_spi->wcount > 0) {
			errors = davinci_spi_process_events(davinci_spi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	wait_for_completion_interruptible(&davinci_spi->done);
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	if (spicfg->io_type == SPI_IO_TYPE_DMA) {

		if (t->tx_buf)
			dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
								DMA_TO_DEVICE);

	dma_unmap_single(NULL, t->rx_dma, rx_buf_count, DMA_FROM_DEVICE);
		dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
							DMA_FROM_DEVICE);

		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
	if (errors) {
		ret = davinci_spi_check_error(davinci_spi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (davinci_spi->rcount != 0 || davinci_spi->wcount != 0) {
		dev_err(sdev, "SPI data transfer error\n");
@@ -769,6 +712,32 @@ static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
	return t->len;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 *
 * ISR will determine that interrupt arrives either for READ or WRITE command.
 * According to command it will do the appropriate action. It will check
 * transfer length and if it is not zero then dispatch transfer command again.
 * If transfer length is zero then it will indicate the COMPLETION so that
 * davinci_spi_bufs function can go ahead.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
	struct davinci_spi *davinci_spi = context_data;
	int status;

	/* Process queued SPI events; a non-zero result indicates an error. */
	status = davinci_spi_process_events(davinci_spi);
	/* Mask the interrupt on error so we stop being re-entered. */
	if (unlikely(status != 0))
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);

	/* Wake davinci_spi_bufs() when done transferring, or on error. */
	if ((!davinci_spi->rcount && !davinci_spi->wcount) || status)
		complete(&davinci_spi->done);

	return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *davinci_spi)
{
	int r;
@@ -918,7 +887,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
	if (r)
		dma_eventq = r->start;

	davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
	davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs;
	if (dma_rx_chan != SPI_NO_RESOURCE &&
	    dma_tx_chan != SPI_NO_RESOURCE &&
	    dma_eventq != SPI_NO_RESOURCE) {
@@ -930,10 +899,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
		if (ret)
			goto free_clk;

		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
		dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
				"Using RX channel = %d , TX channel = %d and "
				"event queue = %d", dma_rx_chan, dma_tx_chan,
		dev_info(&pdev->dev, "DMA: supported\n");
		dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
				"event queue: %d\n", dma_rx_chan, dma_tx_chan,
				dma_eventq);
	}