
Commit 92153bb5 authored by Martin Sperl, committed by Vinod Koul

dmaengine: bcm2835: move controlblock chain generation into separate method



In preparation for adding slave_sg functionality, this patch moves the
generation/allocation of bcm2835_desc and the building of the
corresponding DMA control-block chain from bcm2835_dma_prep_dma_cyclic
into the newly created method bcm2835_dma_create_cb_chain.

Signed-off-by: Martin Sperl <kernel@martin.sperl.org>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent a4dcdd84
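
The commit message motivates the refactoring with upcoming slave_sg support. As a rough illustration of that direction only (a sketch, not code from this commit nor from the later upstream patch), a prep_slave_sg callback could be layered on top of the new helper along these lines. bcm2835_dma_create_cb_chain(), the BCM2835_DMA_* flags, the channel config fields and vchan_tx_prep() are taken from the diff below; the callback name and the elided scatterlist walk are hypothetical.

/*
 * Hypothetical sketch only -- not part of this commit.  Shows how a
 * slave_sg prep callback could sit on top of bcm2835_dma_create_cb_chain().
 */
static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg_sketch(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = BCM2835_DMA_INT_EN;
	dma_addr_t src = 0, dst = 0;
	struct bcm2835_desc *d;

	if (!is_slave_direction(direction))
		return NULL;

	/* device address on one side; the memory side gets incremented */
	if (direction == DMA_DEV_TO_MEM) {
		src = c->cfg.src_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		dst = c->cfg.dst_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
	}

	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	/*
	 * buf_len = 0 and period_len = 0: the helper then allocates and
	 * links the control blocks but leaves the lengths to the caller.
	 * One frame per sg entry assumes each entry fits into a single
	 * control block; longer entries would need
	 * bcm2835_dma_frames_for_length() and a per-entry split.
	 */
	d = bcm2835_dma_create_cb_chain(chan, direction, false,
					info, extra,
					sg_len, src, dst, 0, 0,
					GFP_NOWAIT);
	if (!d)
		return NULL;

	/* ... walk sgl here and fill each cb_list[].cb src/dst/length ... */

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}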
+198 −96

--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -88,12 +88,12 @@ struct bcm2835_desc {
 	struct virt_dma_desc vd;
 	enum dma_transfer_direction dir;
 
-	struct bcm2835_cb_entry *cb_list;
-
 	unsigned int frames;
 	size_t size;
 
 	bool cyclic;
+
+	struct bcm2835_cb_entry cb_list[];
 };
 
 #define BCM2835_DMA_CS		0x00
@@ -169,6 +169,13 @@ struct bcm2835_desc {
 #define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
 #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
 
+/* how many frames of max_len size do we need to transfer len bytes */
+static inline size_t bcm2835_dma_frames_for_length(size_t len,
+						   size_t max_len)
+{
+	return DIV_ROUND_UP(len, max_len);
+}
+
 static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
 {
 	return container_of(d, struct bcm2835_dmadev, ddev);
@@ -185,19 +192,161 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
 	return container_of(t, struct bcm2835_desc, vd.tx);
 }
 
-static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
 {
-	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-	int i;
+	size_t i;
 
 	for (i = 0; i < desc->frames; i++)
 		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
 			      desc->cb_list[i].paddr);
 
-	kfree(desc->cb_list);
 	kfree(desc);
 }
 
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+	bcm2835_dma_free_cb_chain(
+		container_of(vd, struct bcm2835_desc, vd));
+}
+
+static void bcm2835_dma_create_cb_set_length(
+	struct bcm2835_chan *chan,
+	struct bcm2835_dma_cb *control_block,
+	size_t len,
+	size_t period_len,
+	size_t *total_len,
+	u32 finalextrainfo)
+{
+	/* set the length */
+	control_block->length = len;
+
+	/* finished if we have no period_length */
+	if (!period_len)
+		return;
+
+	/*
+	 * period_len means: that we need to generate
+	 * transfers that are terminating at every
+	 * multiple of period_len - this is typically
+	 * used to set the interrupt flag in info
+	 * which is required during cyclic transfers
+	 */
+
+	/* have we filled in period_length yet? */
+	if (*total_len + control_block->length < period_len)
+		return;
+
+	/* calculate the length that remains to reach period_length */
+	control_block->length = period_len - *total_len;
+
+	/* reset total_length for next period */
+	*total_len = 0;
+
+	/* add extrainfo bits in info */
+	control_block->info |= finalextrainfo;
+}
+
+/**
+ * bcm2835_dma_create_cb_chain - create a control block and fills data in
+ *
+ * @chan:           the @dma_chan for which we run this
+ * @direction:      the direction in which we transfer
+ * @cyclic:         it is a cyclic transfer
+ * @info:           the default info bits to apply per controlblock
+ * @frames:         number of controlblocks to allocate
+ * @src:            the src address to assign (if the S_INC bit is set
+ *                  in @info, then it gets incremented)
+ * @dst:            the dst address to assign (if the D_INC bit is set
+ *                  in @info, then it gets incremented)
+ * @buf_len:        the full buffer length (may also be 0)
+ * @period_len:     the period length when to apply @finalextrainfo
+ *                  in addition to the last transfer
+ *                  this will also break some control-blocks early
+ * @finalextrainfo: additional bits in last controlblock
+ *                  (or when period_len is reached in case of cyclic)
+ * @gfp:            the GFP flag to use for allocation
+ */
+static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
+	struct dma_chan *chan, enum dma_transfer_direction direction,
+	bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
+	dma_addr_t src, dma_addr_t dst, size_t buf_len,
+	size_t period_len, gfp_t gfp)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	size_t len = buf_len, total_len;
+	size_t frame;
+	struct bcm2835_desc *d;
+	struct bcm2835_cb_entry *cb_entry;
+	struct bcm2835_dma_cb *control_block;
+
+	/* allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
+		    gfp);
+	if (!d)
+		return NULL;
+
+	d->c = c;
+	d->dir = direction;
+	d->cyclic = cyclic;
+
+	/*
+	 * Iterate over all frames, create a control block
+	 * for each frame and link them together.
+	 */
+	for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
+		cb_entry = &d->cb_list[frame];
+		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
+					      &cb_entry->paddr);
+		if (!cb_entry->cb)
+			goto error_cb;
+
+		/* fill in the control block */
+		control_block = cb_entry->cb;
+		control_block->info = info;
+		control_block->src = src;
+		control_block->dst = dst;
+		control_block->stride = 0;
+		control_block->next = 0;
+		/* set up length in control_block if requested */
+		if (buf_len) {
+			/* calculate length honoring period_length */
+			bcm2835_dma_create_cb_set_length(
+				c, control_block,
+				len, period_len, &total_len,
+				cyclic ? finalextrainfo : 0);
+
+			/* calculate new remaining length */
+			len -= control_block->length;
+		}
+
+		/* link this the last controlblock */
+		if (frame)
+			d->cb_list[frame - 1].cb->next = cb_entry->paddr;
+
+		/* update src and dst and length */
+		if (src && (info & BCM2835_DMA_S_INC))
+			src += control_block->length;
+		if (dst && (info & BCM2835_DMA_D_INC))
+			dst += control_block->length;
+
+		/* Length of total transfer */
+		d->size += control_block->length;
+	}
+
+	/* the last frame requires extra flags */
+	d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
+
+	/* detect a size missmatch */
+	if (buf_len && (d->size != buf_len))
+		goto error_cb;
+
+	return d;
+error_cb:
+	bcm2835_dma_free_cb_chain(d);
+
+	return NULL;
+}
+
 static int bcm2835_dma_abort(void __iomem *chan_base)
 {
 	unsigned long cs;
@@ -391,12 +540,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	unsigned long flags)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-	enum dma_slave_buswidth dev_width;
 	struct bcm2835_desc *d;
-	dma_addr_t dev_addr;
-	unsigned int es, sync_type;
-	unsigned int frame;
-	int i;
+	dma_addr_t src, dst;
+	u32 info = BCM2835_DMA_WAIT_RESP;
+	u32 extra = BCM2835_DMA_INT_EN;
+	size_t frames;
 
 	/* Grab configuration */
 	if (!is_slave_direction(direction)) {
@@ -404,104 +552,58 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 		return NULL;
 	}
 
-	if (direction == DMA_DEV_TO_MEM) {
-		dev_addr = c->cfg.src_addr;
-		dev_width = c->cfg.src_addr_width;
-		sync_type = BCM2835_DMA_S_DREQ;
-	} else {
-		dev_addr = c->cfg.dst_addr;
-		dev_width = c->cfg.dst_addr_width;
-		sync_type = BCM2835_DMA_D_DREQ;
-	}
-
-	/* Bus width translates to the element size (ES) */
-	switch (dev_width) {
-	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		es = BCM2835_DMA_DATA_TYPE_S32;
-		break;
-	default:
-		return NULL;
-	}
-
-	/* Now allocate and setup the descriptor. */
-	d = kzalloc(sizeof(*d), GFP_NOWAIT);
-	if (!d)
-		return NULL;
-
-	d->c = c;
-	d->dir = direction;
-	d->frames = buf_len / period_len;
-	d->cyclic = true;
-
-	d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
-	if (!d->cb_list) {
-		kfree(d);
+	if (!buf_len) {
+		dev_err(chan->device->dev,
+			"%s: bad buffer length (= 0)\n", __func__);
 		return NULL;
 	}
-	/* Allocate memory for control blocks */
-	for (i = 0; i < d->frames; i++) {
-		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
-
-		cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
-					       &cb_entry->paddr);
-		if (!cb_entry->cb)
-			goto error_cb;
-	}
 
 	/*
-	 * Iterate over all frames, create a control block
-	 * for each frame and link them together.
+	 * warn if buf_len is not a multiple of period_len - this may leed
+	 * to unexpected latencies for interrupts and thus audiable clicks
 	 */
-	for (frame = 0; frame < d->frames; frame++) {
-		struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
-
-		/* Setup adresses */
-		if (d->dir == DMA_DEV_TO_MEM) {
-			control_block->info = BCM2835_DMA_D_INC;
-			control_block->src = dev_addr;
-			control_block->dst = buf_addr + frame * period_len;
-		} else {
-			control_block->info = BCM2835_DMA_S_INC;
-			control_block->src = buf_addr + frame * period_len;
-			control_block->dst = dev_addr;
-		}
-
-		/* Enable interrupt */
-		control_block->info |= BCM2835_DMA_INT_EN;
-
-		/* Setup synchronization */
-		if (sync_type != 0)
-			control_block->info |= sync_type;
+	if (buf_len % period_len)
+		dev_warn_once(chan->device->dev,
+			      "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
+			      __func__, buf_len, period_len);
 
-		/* Setup DREQ channel */
-		if (c->dreq != 0)
-			control_block->info |=
-				BCM2835_DMA_PER_MAP(c->dreq);
+	/* Setup DREQ channel */
+	if (c->dreq != 0)
+		info |= BCM2835_DMA_PER_MAP(c->dreq);
 
-		/* Length of a frame */
-		control_block->length = period_len;
-		d->size += control_block->length;
+	if (direction == DMA_DEV_TO_MEM) {
+		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return NULL;
+		src = c->cfg.src_addr;
+		dst = buf_addr;
+		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
+	} else {
+		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return NULL;
+		dst = c->cfg.dst_addr;
+		src = buf_addr;
+		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+	}
+
+	/* calculate number of frames */
+	frames = DIV_ROUND_UP(buf_len, period_len);
 
-		/*
-		 * Next block is the next frame.
-		 * This DMA engine driver currently only supports cyclic DMA.
-		 * Therefore, wrap around at number of frames.
-		 */
-		control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
-	}
-
-	return vchan_tx_prep(&c->vc, &d->vd, flags);
-error_cb:
-	i--;
-	for (; i >= 0; i--) {
-		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+	/*
+	 * allocate the CB chain
+	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
+	 * implementation calls prep_dma_cyclic with interrupts disabled.
+	 */
+	d = bcm2835_dma_create_cb_chain(chan, direction, true,
+					info, extra,
+					frames, src, dst, buf_len,
+					period_len, GFP_NOWAIT);
+	if (!d)
+		return NULL;
 
-		dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
-	}
+	/* wrap around into a loop */
+	d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;
 
-	kfree(d->cb_list);
-	kfree(d);
-	return NULL;
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
 static int bcm2835_dma_slave_config(struct dma_chan *chan,