Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c678fa66 authored by Dave Jiang, committed by Vinod Koul
Browse files

dmaengine: remove DMA_SG as it is dead code in kernel



There are no in kernel consumers for DMA_SG op. Removing operation,
dead code, and test code in dmatest.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Cc: Gary Hook <gary.hook@amd.com>
Cc: Ludovic Desroches <ludovic.desroches@microchip.com>
Cc: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Cc: Li Yang <leoyang.li@nxp.com>
Cc: Michal Simek <michal.simek@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 61b5f54d
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -181,13 +181,6 @@ Currently, the types available are:
    - Used by the client drivers to register a callback that will be
      called on a regular basis through the DMA controller interrupt

  * DMA_SG
    - The device supports memory to memory scatter-gather
      transfers.
    - Even though a plain memcpy can look like a particular case of a
      scatter-gather transfer, with a single chunk to transfer, it's a
      distinct transaction type in the mem2mem transfers case

  * DMA_PRIVATE
    - The devices only supports slave transfers, and as such isn't
      available for async transfers.
+0 −23
Original line number Diff line number Diff line
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
+1 −139
Original line number Diff line number Diff line
@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
	return NULL;
}

/**
 * atc_prep_dma_sg - prepare memory to memory scather-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned int		src_width;
	unsigned int		dst_width;
	size_t			xfer_count;
	u32			ctrla;
	u32			ctrlb;
	size_t			dst_len = 0, src_len = 0;
	dma_addr_t		dst = 0, src = 0;
	size_t			len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers to set up refer to the source width
		 * that depends on the alignment.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);
+0 −2
Original line number Diff line number Diff line
@@ -937,8 +937,6 @@ int dma_async_device_register(struct dma_device *device)
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+2 −34
Original line number Diff line number Diff line
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int sg_buffers = 1;
module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
		"Number of scatter gather buffers (default: 1)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -471,9 +466,6 @@ static int dmatest_func(void *data)
		align = dev->fill_align;
		src_cnt = dst_cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_SG) {
		align = dev->copy_align;
		src_cnt = dst_cnt = sg_buffers;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -553,8 +545,6 @@ static int dmatest_func(void *data)
		dma_addr_t srcs[src_cnt];
		dma_addr_t *dsts;
		unsigned int src_off, dst_off, len;
		struct scatterlist tx_sg[src_cnt];
		struct scatterlist rx_sg[src_cnt];

		total_tests++;

@@ -650,15 +640,6 @@ static int dmatest_func(void *data)
			um->bidi_cnt++;
		}

		sg_init_table(tx_sg, src_cnt);
		sg_init_table(rx_sg, src_cnt);
		for (i = 0; i < src_cnt; i++) {
			sg_dma_address(&rx_sg[i]) = srcs[i];
			sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
			sg_dma_len(&tx_sg[i]) = len;
			sg_dma_len(&rx_sg[i]) = len;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst_off,
@@ -668,9 +649,6 @@ static int dmatest_func(void *data)
						dsts[0] + dst_off,
						*(thread->srcs[0] + src_off),
						len, flags);
		else if (thread->type == DMA_SG)
			tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
						     rx_sg, src_cnt, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst_off,
@@ -853,8 +831,6 @@ static int dmatest_add_threads(struct dmatest_info *info,
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_SG)
		op = "sg";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
@@ -916,15 +892,8 @@ static int dmatest_add_channel(struct dmatest_info *info,
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 2) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_SG);
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}
@@ -1002,7 +971,6 @@ static void run_threaded_test(struct dmatest_info *info)
	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_SG);
	request_channels(info, DMA_PQ);
}

Loading