
Commit 2a5ecb79 authored by Hongbo Zhang, committed by Vinod Koul

DMA: Freescale: move functions to avoid forward declarations



These functions will be modified in the next patch in the series. Moving them in a patch separate from the functional changes makes the review easier; a minimal illustration of the forward-declaration pattern follows below.

Signed-off-by: Hongbo Zhang <hongbo.zhang@freescale.com>
Signed-off-by: Qiang Liu <qiang.liu@freescale.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 86d19a54
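
For context, here is a minimal standalone sketch of the pattern this patch addresses (the function names are hypothetical and are not taken from fsldma.c): when a helper is defined below its first caller, the file needs a forward declaration; defining the helper earlier, as this commit does for fsl_chan_xfer_ld_queue() and fsldma_cleanup_descriptor(), removes that need.

/*
 * Minimal illustration with hypothetical names. If do_transfer() were
 * defined below issue_pending(), the file would need a prototype such as:
 *
 *     static void do_transfer(void);
 *
 * Placing the definition before its first caller makes that forward
 * declaration unnecessary.
 */
#include <stdio.h>

static void do_transfer(void)
{
	printf("transfer started\n");
}

static void issue_pending(void)
{
	/* definition already seen above, so no prototype is required */
	do_transfer();
}

int main(void)
{
	issue_pending();
	return 0;
}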
+95 −95
@@ -458,6 +458,101 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
	return desc;
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;

	/* Run the link descriptor callback function */
	if (txd->callback) {
		chan_dbg(chan, "LD %p callback\n", desc);
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	dma_descriptor_unmap(txd);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
@@ -802,101 +897,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
	return 0;
}

/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;

	/* Run the link descriptor callback function */
	if (txd->callback) {
		chan_dbg(chan, "LD %p callback\n", desc);
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	dma_descriptor_unmap(txd);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel