Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 41bd0314 authored by Vinod Koul
Browse files

Merge branch 'topic/dmatest' into for-linus

parents 346ea25e 3eeb5156
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -181,13 +181,6 @@ Currently, the types available are:
    - Used by the client drivers to register a callback that will be
      called on a regular basis through the DMA controller interrupt

  * DMA_SG
    - The device supports memory to memory scatter-gather
      transfers.
    - Even though a plain memcpy can look like a particular case of a
      scatter-gather transfer, with a single chunk to transfer, it's a
      distinct transaction type in the mem2mem transfers case

  * DMA_PRIVATE
    - The devices only supports slave transfers, and as such isn't
      available for async transfers.
+0 −23
Original line number Diff line number Diff line
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
+1 −139
Original line number Diff line number Diff line
@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
	return NULL;
}

/**
 * atc_prep_dma_sg - prepare memory to memory scather-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned int		src_width;
	unsigned int		dst_width;
	size_t			xfer_count;
	u32			ctrla;
	u32			ctrlb;
	size_t			dst_len = 0, src_len = 0;
	dma_addr_t		dst = 0, src = 0;
	size_t			len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers to set up refer to the source width
		 * that depends on the alignment.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);
+79 −24
Original line number Diff line number Diff line
@@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);
	if (!device->dev) {
		pr_err("DMAdevice must have dev\n");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}


	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}


	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
+59 −51
Original line number Diff line number Diff line
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int sg_buffers = 1;
module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
		"Number of scatter gather buffers (default: 1)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-slave_sg (default: 0)");
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -158,6 +153,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01

struct dmatest_thread {
	struct list_head	node;
@@ -239,46 +235,62 @@ static unsigned long dmatest_random(void)
	return buf;
}

static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}

static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
			buf[i] = gen_src_value(i, is_memset);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
			buf[i] = gen_dst_value(i, is_memset);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	u8		expected = pattern | gen_inv_idx(counter, is_memset);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
@@ -298,7 +310,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
@@ -311,11 +323,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
@@ -435,6 +448,7 @@ static int dmatest_func(void *data)
	s64			runtime = 0;
	unsigned long long	total_len = 0;
	u8			align = 0;
	bool			is_memset = false;

	set_freezable();

@@ -448,9 +462,10 @@ static int dmatest_func(void *data)
	if (thread->type == DMA_MEMCPY) {
		align = dev->copy_align;
		src_cnt = dst_cnt = 1;
	} else if (thread->type == DMA_SG) {
		align = dev->copy_align;
		src_cnt = dst_cnt = sg_buffers;
	} else if (thread->type == DMA_MEMSET) {
		align = dev->fill_align;
		src_cnt = dst_cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -530,8 +545,6 @@ static int dmatest_func(void *data)
		dma_addr_t srcs[src_cnt];
		dma_addr_t *dsts;
		unsigned int src_off, dst_off, len;
		struct scatterlist tx_sg[src_cnt];
		struct scatterlist rx_sg[src_cnt];

		total_tests++;

@@ -571,9 +584,9 @@ static int dmatest_func(void *data)
			dst_off = (dst_off >> align) << align;

			dmatest_init_srcs(thread->srcs, src_off, len,
					  params->buf_size);
					  params->buf_size, is_memset);
			dmatest_init_dsts(thread->dsts, dst_off, len,
					  params->buf_size);
					  params->buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
@@ -627,22 +640,15 @@ static int dmatest_func(void *data)
			um->bidi_cnt++;
		}

		sg_init_table(tx_sg, src_cnt);
		sg_init_table(rx_sg, src_cnt);
		for (i = 0; i < src_cnt; i++) {
			sg_dma_address(&rx_sg[i]) = srcs[i];
			sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
			sg_dma_len(&tx_sg[i]) = len;
			sg_dma_len(&rx_sg[i]) = len;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst_off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_SG)
			tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
						     rx_sg, src_cnt, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
						dsts[0] + dst_off,
						*(thread->srcs[0] + src_off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst_off,
@@ -722,23 +728,25 @@ static int dmatest_func(void *data)
		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
				0, PATTERN_DST, false, is_memset);

		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
				PATTERN_SRC | PATTERN_COPY, false, is_memset);

		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);
@@ -821,8 +829,8 @@ static int dmatest_add_threads(struct dmatest_info *info,

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_SG)
		op = "sg";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
@@ -883,9 +891,9 @@ static int dmatest_add_channel(struct dmatest_info *info,
		}
	}

	if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_SG);
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}
@@ -961,8 +969,8 @@ static void run_threaded_test(struct dmatest_info *info)
	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_SG);
	request_channels(info, DMA_PQ);
}

Loading