Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee1cdcda authored by Andy Shevchenko's avatar Andy Shevchenko Committed by Vinod Koul
Browse files

dmaengine: dw: disable BLOCK IRQs for non-cyclic xfer



The commit 2895b2ca ("dmaengine: dw: fix cyclic transfer callbacks")
re-enabled BLOCK interrupts in order to make cyclic transfers work. However,
this change introduced a regression for non-cyclic transfers, as interrupt counts
under a stress test grew enormously (approximately one interrupt per 4-5 bytes in the
UART loopback test).

Taking the above into consideration, enable BLOCK interrupts if and only if the
channel is programmed to perform a cyclic transfer.

Fixes: 2895b2ca ("dmaengine: dw: fix cyclic transfer callbacks")
Signed-off-by: default avatarAndy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: default avatarMans Rullgard <mans@mansr.com>
Tested-by: default avatarMans Rullgard <mans@mansr.com>
Acked-by: default avatarViresh Kumar <viresh.kumar@linaro.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: default avatarVinod Koul <vinod.koul@intel.com>
parent 4ac31d18
Loading
Loading
Loading
Loading
+10 −5
Original line number Original line Diff line number Diff line
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)


	/* Enable interrupts */
	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);


	dwc->initialized = true;
	dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,


		spin_unlock_irqrestore(&dwc->lock, flags);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}
}


/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
			dwc_scan_descriptors(dw, dwc);
			dwc_scan_descriptors(dw, dwc);
	}
	}


	/*
	/* Re-enable interrupts */
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
}


@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
int dw_dma_cyclic_start(struct dma_chan *chan)
int dw_dma_cyclic_start(struct dma_chan *chan)
{
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	unsigned long		flags;
	unsigned long		flags;


	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
	}
	}


	spin_lock_irqsave(&dwc->lock, flags);
	spin_lock_irqsave(&dwc->lock, flags);

	/* Enable interrupts to perform cyclic transfer */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);

	dwc_dostart(dwc, dwc->cdesc->desc[0]);
	dwc_dostart(dwc, dwc->cdesc->desc[0]);

	spin_unlock_irqrestore(&dwc->lock, flags);
	spin_unlock_irqrestore(&dwc->lock, flags);


	return 0;
	return 0;