
Commit eaadcfeb authored by Linus Torvalds

Merge tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine fixes from Dan Williams:

 - deprecation of net_dma to be removed in 3.14

 - crash regression fix in pl330 from the dmaengine_unmap rework

 - crash regression fix for any channel running raid ops without
   CONFIG_ASYNC_TX_DMA from dmaengine_unmap

 - memory leak regression in mv_xor from dmaengine_unmap

 - build warning regressions in mv_xor, fsldma, ppc4xx, txx9, and
   at_hdmac from dmaengine_unmap

 - sleep in atomic regression in dma_async_memcpy_pg_to_pg

 - new fix in mv_xor for handling channel initialization failures

* tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net_dma: mark broken
  dma: pl330: ensure DMA descriptors are zero-initialised
  dmaengine: fix sleep in atomic
  dmaengine: mv_xor: fix oops when channels fail to initialise
  dma: mv_xor: Use dmaengine_unmap_data for the self-tests
  dmaengine: fix enable for high order unmap pools
  dma: fix build warnings in txx9
  dmatest: fix build warning on mips
  dma: fix fsldma build warnings
  dma: fix build warnings in ppc4xx
  dmaengine: at_hdmac: remove unused function
  dma: mv_xor: remove mv_desc_get_dest_addr()
parents 46dd0835 77873803
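
Background note on the "sleep in atomic" fix in the dmaengine.c hunk below: dma_async_memcpy_pg_to_pg can be reached from contexts that must not sleep, so its unmap-data allocation flag moves from GFP_NOIO (which may block) to GFP_NOWAIT (which fails instead of sleeping). The following is only an illustrative sketch of that general pattern; the struct and function names are hypothetical and not part of this merge.

/* Sketch only: allocating memory while a spinlock is held (atomic
 * context).  A sleeping mask such as GFP_KERNEL or GFP_NOIO would
 * trigger a "sleeping function called from invalid context" splat;
 * GFP_NOWAIT never blocks, so the caller must handle failure. */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_dev {			/* hypothetical device state */
	spinlock_t lock;
	void *scratch;
};

static int example_atomic_alloc(struct example_dev *ed, size_t len)
{
	unsigned long flags;
	void *buf;

	spin_lock_irqsave(&ed->lock, flags);	/* atomic from here on */
	buf = kmalloc(len, GFP_NOWAIT);		/* must not sleep under the lock */
	if (!buf) {
		spin_unlock_irqrestore(&ed->lock, flags);
		return -ENOMEM;
	}
	ed->scratch = buf;
	spin_unlock_irqrestore(&ed->lock, flags);
	return 0;
}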
+7 −0
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.
 
+config DMA_ENGINE_RAID
+	bool
+
 endif
+0 −4
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
 
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
+2 −2
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;
 
+4 −4
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
 
 		um->len = params->buf_size;
 		for (i = 0; i < src_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->srcs[i];
+			void *buf = thread->srcs[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 		dsts = &um->addr[src_cnt];
 		for (i = 0; i < dst_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->dsts[i];
+			void *buf = thread->dsts[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 					       DMA_BIDIRECTIONAL);
+1 −30
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 				      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);
 
 	/* Run the link descriptor callback function */
 	if (txd->callback) {