
Commit b45bbf07 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (21 commits)
  dmaengine: add slave-dma maintainer
  dma: ipu_idmac: do not lose valid received data in the irq handler
  dmaengine: imx-sdma: fix up param for the last BD in sdma_prep_slave_sg()
  dmaengine: imx-sdma: correct sdmac->status in sdma_handle_channel_loop()
  dmaengine: imx-sdma: return sdmac->status in sdma_tx_status()
  dmaengine: imx-sdma: set sdmac->status to DMA_ERROR in err_out of sdma_prep_slave_sg()
  dmaengine: imx-sdma: remove IMX_DMA_SG_LOOP handling in sdma_prep_slave_sg()
  dmaengine i.MX dma: initialize dma capabilities outside channel loop
  dmaengine i.MX DMA: do not initialize chan_id field
  dmaengine i.MX dma: check sg entries for valid addresses and lengths
  dmaengine i.MX dma: set maximum segment size for our device
  dmaengine i.MX SDMA: reserve channel 0 by not registering it
  dmaengine i.MX SDMA: initialize dma capabilities outside channel loop
  dmaengine i.MX SDMA: do not initialize chan_id field
  dmaengine i.MX sdma: check sg entries for valid addresses and lengths
  dmaengine i.MX sdma: set maximum segment size for our device
  DMA: PL08x: fix channel pausing to timeout rather than lockup
  DMA: PL08x: fix infinite wait when terminating transfers
  dmaengine: imx-sdma: fix inconsistent naming in sdma_assign_cookie()
  dmaengine: imx-sdma: propagate error in sdma_probe() instead of returning 0
  ...
parents f60c153d 4abed0af
MAINTAINERS +1 −0
@@ -2126,6 +2126,7 @@ S:	Supported
 F:	fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
+M:	Vinod Koul <vinod.koul@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
drivers/dma/amba-pl08x.c +33 −20
@@ -79,6 +79,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still timeout if the DMAC FIFO never drains of data.)
  *
- * Disable the peripheral DMA after disabling the DMAC in order to allow
- * the DMAC FIFO to drain, and hence allow the channel to show inactive
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
 	u32 val;
+	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
 	val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 
 	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		cpu_relax();
+	for (timeout = 1000; timeout; timeout--) {
+		if (!pl08x_phy_channel_busy(ch))
+			break;
+		udelay(1);
+	}
+	if (pl08x_phy_channel_busy(ch))
+		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
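Note: the change above turns an unbounded busy-wait into a bounded poll - at most 1000 iterations of udelay(1), then a loud error instead of a silent lockup. Later kernels express this idiom with the poll helpers from <linux/iopoll.h>; purely as a hedged illustration (the helper did not exist when this patch was written), the same wait could be sketched as:

#include <linux/iopoll.h>

/* Sketch only: poll PL080_CH_CONFIG until the ACTIVE bit clears, with
 * a 1us delay per read and a 1000us timeout. The _atomic variant
 * matches the udelay()-based loop above, since the pause path can run
 * with a spinlock held. */
static int pl08x_wait_phy_chan_inactive(struct pl08x_phy_chan *ch)
{
	u32 val;

	return readl_poll_timeout_atomic(ch->base + PL080_CH_CONFIG, val,
					 !(val & PL080_CONFIG_ACTIVE), 1, 1000);
}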
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+	struct pl08x_phy_chan *ch)
 {
-	u32 val;
+	u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-	pl08x_pause_phy_chan(ch);
+	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+	         PL080_CONFIG_TC_IRQ_MASK);
 
-	/* Disable channel */
-	val = readl(ch->base + PL080_CH_CONFIG);
-	val &= ~PL080_CONFIG_ENABLE;
-	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-	val &= ~PL080_CONFIG_TC_IRQ_MASK;
 	writel(val, ch->base + PL080_CH_CONFIG);
+
+	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
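Note: pl08x_terminate_phy_chan() is reached from consumers through the standard dmaengine terminate path; a minimal, hypothetical client-side call (channel setup and error handling omitted):

	/* Abort everything queued on the channel. This lands in
	 * pl08x_control(chan, DMA_TERMINATE_ALL, 0), which in turn calls
	 * pl08x_terminate_phy_chan() for the physical channel. */
	dmaengine_terminate_all(chan);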
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
 	unsigned long flags;
 
+	spin_lock_irqsave(&ch->lock, flags);
+
 	/* Stop the channel and clear its interrupts */
-	pl08x_stop_phy_chan(ch);
-	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+	pl08x_terminate_phy_chan(pl08x, ch);
 
 	/* Mark it as free */
-	spin_lock_irqsave(&ch->lock, flags);
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_stop_phy_chan(plchan->phychan);
+			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
 			/*
 			 * Mark physical channel as free and free any slave
drivers/dma/imx-dma.c +22 −4
@@ -49,6 +49,7 @@ struct imxdma_channel {
 
 struct imxdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
 };
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 	else
 		dmamode = DMA_MODE_WRITE;
 
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		if (sgl->length & 3 || sgl->dma_address & 3)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		if (sgl->length & 1 || sgl->dma_address & 1)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		break;
+	default:
+		return NULL;
+	}
+
 	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
 		 dma_length, imxdmac->per_address, dmamode);
 	if (ret)
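Note: the new switch rejects scatterlist entries whose length or DMA address is misaligned for the configured bus width; imxdmac->word_size itself arrives earlier via DMA_SLAVE_CONFIG. A hedged sketch of client code that configures a 32-bit word size so these checks pass (chan, fifo_phys and ret are placeholders, not from this patch):

	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,	/* peripheral to memory */
		.src_addr	= fifo_phys,		/* peripheral FIFO bus address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};

	/* Ends up in imxdma_control(DMA_SLAVE_CONFIG), which copies
	 * src_addr_width into imxdmac->word_size. */
	ret = dmaengine_slave_config(chan, &cfg);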
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 	INIT_LIST_HEAD(&imxdma->dma_device.channels);
 
+	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		imxdmac->imxdma = imxdma;
 		spin_lock_init(&imxdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
 		imxdmac->chan.device = &imxdma->dma_device;
-		imxdmac->chan.chan_id = i;
 		imxdmac->channel = i;
 
 		/* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, imxdma);
 
+	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
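Note: wiring up dev->dma_parms is what makes dma_set_max_seg_size() stick; the limit is stored there and read back by dma_get_max_seg_size(). A consumer-side sketch (chan is a placeholder):

	/* Largest contiguous segment this DMA device accepts; returns the
	 * 0xffffff programmed above now that dma_parms is non-NULL. */
	unsigned int max_seg = dma_get_max_seg_size(chan->device->dev);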
drivers/dma/imx-sdma.c +47 −41
@@ -230,7 +230,7 @@ struct sdma_engine;
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id
+ * @channel		the channel number, matches dmaengine chan_id + 1
  * @direction		transfer type. Needed for setting SDMA script
  * @peripheral_type	Peripheral type. Needed for setting SDMA script
  * @event_id0		aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
 
 struct sdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 		if (bd->mode.status & BD_RROR)
 			sdmac->status = DMA_ERROR;
 		else
-			sdmac->status = DMA_SUCCESS;
+			sdmac->status = DMA_IN_PROGRESS;
 
 		bd->mode.status |= BD_DONE;
 		sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 {
-	dma_cookie_t cookie = sdma->chan.cookie;
+	dma_cookie_t cookie = sdmac->chan.cookie;
 
 	if (++cookie < 0)
 		cookie = 1;
 
-	sdma->chan.cookie = cookie;
-	sdma->desc.cookie = cookie;
+	sdmac->chan.cookie = cookie;
+	sdmac->desc.cookie = cookie;
 
 	return cookie;
 }
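Note: the wrap back to 1 keeps cookies positive, because negative cookie values are reserved for errors. A sketch of how a client consumes that convention (tx and dev are placeholders):

	cookie = tx->tx_submit(tx);	/* ends up in sdma_tx_submit() */
	if (dma_submit_error(cookie))	/* true for cookie < 0 */
		dev_err(dev, "descriptor submission failed\n");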
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = sdma_assign_cookie(sdmac);
 
-	sdma_enable_channel(sdma, tx->chan->chan_id);
+	sdma_enable_channel(sdma, sdmac->channel);
 
 	spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	struct imx_dma_data *data = chan->private;
 	int prio, ret;
 
-	/* No need to execute this for internal channel 0 */
-	if (chan->chan_id == 0)
-		return 0;
-
 	if (!data)
 		return -EINVAL;
 
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	struct scatterlist *sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 			ret =  -EINVAL;
 			goto err_out;
 		}
-		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-		else
-			bd->mode.command = sdmac->word_size;
+			if (count & 3 || sg->dma_address & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || sg->dma_address & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP) {
+		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			if (i + 1 == sg_len)
-				param |= BD_WRAP;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
 		}
 
-		if (i + 1 == sg_len)
-			param |= BD_INTR;
-
 		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 				i, count, sg->dma_address,
 				param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 	return &sdmac->desc;
 err_out:
+	sdmac->status = DMA_ERROR;
 	return NULL;
 }
 
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	dma_cookie_t last_used;
-	enum dma_status ret;
 
 	last_used = chan->cookie;
 
-	ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
 	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-	return ret;
+	return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
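Note: sdma_tx_status() now reports the channel's actual state (e.g. DMA_IN_PROGRESS while a cyclic transfer keeps running) instead of a value computed from cookies. A minimal consumer-side poll, assuming cookie came from tx_submit() and chan and dev are placeholders:

	enum dma_status status;

	/* Wraps the driver's device_tx_status(), i.e. sdma_tx_status(). */
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status == DMA_ERROR)
		dev_err(dev, "SDMA transfer failed\n");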
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			header->ram_code_size,
-			sdma->script_addrs->ram_code_start_addr);
+			addr->ram_code_start_addr);
 	clk_disable(sdma->clk);
 
 	sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 	struct resource *iores;
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
-	dma_cap_mask_t mask;
 	struct sdma_engine *sdma;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
 
 	sdma->version = pdata->sdma_version;
 
+	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
 		sdmac->sdma = sdma;
 		spin_lock_init(&sdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
 		sdmac->chan.device = &sdma->dma_device;
-		sdmac->chan.chan_id = i;
 		sdmac->channel = i;
 
-		/* Add the channel to the DMAC list */
-		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+		/*
+		 * Add the channel to the DMAC list. Do not add channel 0 though
+		 * because we need it internally in the SDMA driver. This also means
+		 * that channel 0 in dmaengine counting matches sdma channel 1.
+		 */
+		if (i)
+			list_add_tail(&sdmac->chan.device_node,
+					&sdma->dma_device.channels);
 	}
 
 	ret = sdma_init(sdma);
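Note: with channel 0 kept off the dmaengine list, clients can no longer grab the SDMA control channel by accident; they request channels as before and only the driver-internal numbering shifts by one. A hedged sketch of that request path (the filter function and the imx_dma_data pointer are illustrative of platform code of this era, not part of the patch):

static bool sdma_chan_filter(struct dma_chan *chan, void *param)
{
	/* sdma_alloc_chan_resources() expects an imx_dma_data pointer in
	 * chan->private, so the filter stashes it there. */
	chan->private = param;
	return true;
}

/* ...and at the request site: */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sdma_chan_filter, &imx_dma_data);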
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_init;
 	}
 
-	/* request channel 0. This is an internal control channel
-	 * to the SDMA engine and not available to clients.
-	 */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_request_channel(mask, NULL, NULL);
-
 	dev_info(sdma->dev, "initialized\n");
 
 	return 0;
@@ -1348,7 +1354,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 err_request_region:
 err_irq:
 	kfree(sdma);
-	return 0;
+	return ret;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
drivers/dma/ipu/ipu_idmac.c +0 −50
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
 	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
-	/*
-	 * Problem (observed with channel DMAIC_7): after enabling the channel
-	 * and initialising buffers, there comes an interrupt with current still
-	 * pointing at buffer 0, whereas it should use buffer 0 first and only
-	 * generate an interrupt when it is done, then current should already
-	 * point to buffer 1. This spurious interrupt also comes on channel
-	 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
-	 * first interrupt, there comes the second with current correctly
-	 * pointing to buffer 1 this time. But sometimes this second interrupt
-	 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
-	 * the channel seems to prevent the channel from hanging, but it doesn't
-	 * prevent the spurious interrupt. This might also be unsafe. Think
-	 * about the IDMAC controller trying to switch to a buffer, when we
-	 * clear the ready bit, and re-enable it a moment later.
-	 */
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
-	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
-		     !list_is_last(ichan->queue.next, &ichan->queue))) {
-		int i = 100;
-
-		/* This doesn't help. See comment in ipu_disable_channel() */
-		while (--i) {
-			curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
-			if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
-				break;
-			cpu_relax();
-		}
-
-		if (!i) {
-			spin_unlock(&ichan->lock);
-			dev_dbg(dev,
-				"IRQ on active buffer on channel %x, active "
-				"%d, ready %x, %x, current %x!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf);
-			return IRQ_NONE;
-		} else
-			dev_dbg(dev,
-				"Buffer deactivated on channel %x, active "
-				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf, i);
-	}
-
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {