
Commit 9c4d1e7b authored by Ira Snyder, committed by Dan Williams

fsldma: support async_tx dependencies and automatic unmapping



Prior to this patch, dma_run_dependencies() was called while holding
desc_lock. This function can call tx_submit() for other descriptors,
which may try to re-acquire the same lock, deadlocking. Avoid this by
moving the descriptors to be cleaned up onto a temporary list and
dropping the lock before cleanup.
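
This is the standard splice-then-unlock pattern. As a minimal sketch
(illustrative names, not code from this patch: cleanup_completed,
struct entry and handle() are placeholders): splice the completed
entries onto a private list under the lock, drop the lock, then run
the per-entry cleanup, so a callback or tx_submit() on a dependency
can safely take the lock again.

#include <linux/list.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head node;
};

static void handle(struct entry *e)
{
	/* callback, dma_run_dependencies(), unmap and free go here */
}

static void cleanup_completed(spinlock_t *lock, struct list_head *running)
{
	struct entry *e, *_e;
	LIST_HEAD(tmp);
	unsigned long flags;

	/* take ownership of all completed entries while locked */
	spin_lock_irqsave(lock, flags);
	list_splice_tail_init(running, &tmp);
	spin_unlock_irqrestore(lock, flags);

	/* clean up unlocked: a handler re-taking the lock cannot deadlock */
	list_for_each_entry_safe(e, _e, &tmp, node) {
		list_del(&e->node);
		handle(e);
	}
}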

At the same time, add support for automatic unmapping of src and dst
buffers, as offered by the DMAEngine API.
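
For reference, the consumer side of that API, as a hedged sketch (the
function name is illustrative; the DMA_COMPL_* flags are the same ones
this patch tests): a client that manages its own mappings can ask the
driver to skip the automatic unmap when preparing a transfer.

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
prep_copy_no_unmap(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		   size_t len)
{
	/* keep src and dst mapped after completion; the caller unmaps them */
	unsigned long flags = DMA_CTRL_ACK |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;

	return chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						    flags);
}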

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent f04cd407
+95 −36
@@ -83,6 +83,11 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+	return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -93,6 +98,16 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+			       struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
+
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -103,6 +118,16 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+			       struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
+
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -805,6 +830,57 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 	return 0;
 }
 
+/**
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
+ */
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+				      struct fsl_desc_sw *desc)
+{
+	struct dma_async_tx_descriptor *txd = &desc->async_tx;
+	struct device *dev = chan->common.device->dev;
+	dma_addr_t src = get_desc_src(chan, desc);
+	dma_addr_t dst = get_desc_dst(chan, desc);
+	u32 len = get_desc_cnt(chan, desc);
+
+	/* Run the link descriptor callback function */
+	if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+		chan_dbg(chan, "LD %p callback\n", desc);
+#endif
+		txd->callback(txd->callback_param);
+	}
+
+	/* Run any dependencies */
+	dma_run_dependencies(txd);
+
+	/* Unmap the dst buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+	}
+
+	/* Unmap the src buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
+	}
+
+#ifdef FSL_DMA_LD_DEBUG
+	chan_dbg(chan, "LD %p free\n", desc);
+#endif
+	dma_pool_free(chan->desc_pool, desc, txd->phys);
+}
+
 /**
  * fsl_chan_ld_cleanup - Clean up link descriptors
  * @chan : Freescale DMA channel
@@ -818,56 +894,39 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc, *_desc;
+	LIST_HEAD(ld_cleanup);
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	/* if the ld_running list is empty, there is nothing to do */
-	if (list_empty(&chan->ld_running)) {
-		chan_dbg(chan, "no descriptors to cleanup\n");
-		goto out_unlock;
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->ld_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_fsl_desc(chan->ld_running.prev);
+		cookie = desc->async_tx.cookie;
+
+		chan->completed_cookie = cookie;
+		chan_dbg(chan, "completed cookie=%d\n", cookie);
 	}
 
 	/*
-	 * Get the last descriptor, update the cookie to it
-	 *
-	 * This is done before callbacks run so that clients can check the
-	 * status of their DMA transfer inside the callback.
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
 	 */
-	desc = to_fsl_desc(chan->ld_running.prev);
-	chan->completed_cookie = desc->async_tx.cookie;
-	chan_dbg(chan, "completed_cookie = %d\n", chan->completed_cookie);
+	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
 	/* Run the callback for each descriptor, in order */
-	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
 
-		/* Remove from the list of running transactions */
+		/* Remove from the list of transactions */
 		list_del(&desc->node);
 
-		/* Run the link descriptor callback function */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
-			spin_unlock_irqrestore(&chan->desc_lock, flags);
-#ifdef FSL_DMA_LD_DEBUG
-			chan_dbg(chan, "LD %p callback\n", desc);
-#endif
-			callback(callback_param);
-			spin_lock_irqsave(&chan->desc_lock, flags);
-		}
-
-		/* Run any dependencies, then free the descriptor */
-		dma_run_dependencies(&desc->async_tx);
-#ifdef FSL_DMA_LD_DEBUG
-		chan_dbg(chan, "LD %p free\n", desc);
-#endif
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+		/* Run all cleanup for this descriptor */
+		fsldma_cleanup_descriptor(chan, desc);
	}
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**