
Commit 3e40a784 authored by Vinod Koul, committed by Mark Brown

ASoC: Intel: Skylake: Add code loader DMA APIs



This patch adds the last piece of the code loader DMA support: the code
loader DMA APIs for the driver to use.

Signed-off-by: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
Signed-off-by: Jeeja KP <jeeja.kp@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 914426c8
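
For orientation, here is a minimal sketch (not part of this commit) of how a caller might drive the APIs the patch introduces. skl_cldma_prepare() and the cl_copy_to_dmabuf/cl_stop_dma ops come from the diff below; the firmware-load wrapper around them is an assumption for illustration only.

#include <linux/firmware.h>
#include "skl-sst-dsp.h"	/* declares skl_cldma_prepare() after this patch */

/* Illustrative wrapper, not part of the patch */
static int example_load_fw_via_cldma(struct sst_dsp *ctx,
		const struct firmware *fw)
{
	int ret;

	/* Allocate the CL DMA buffers and set up the BDL and controller */
	ret = skl_cldma_prepare(ctx);
	if (ret < 0)
		return ret;

	/* Stream the firmware image through the code loader DMA */
	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, fw->data, fw->size);
	if (ret < 0)
		ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}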
+3 −0
@@ -308,8 +308,11 @@ struct sst_dsp {

	/* SKL data */

	/* To allocate CL dma buffers */
	struct skl_dsp_loader_ops dsp_ops;
	struct skl_dsp_fw_ops fw_ops;
	int sst_state;
	struct skl_cl_dev cl_dev;
	u32 intr_status;
};

+1 −1
@@ -3,6 +3,6 @@ snd-soc-skl-objs := skl.o skl-pcm.o
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o

# Skylake IPC Support
-snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o
+snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o

obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
+133 −0
@@ -192,3 +192,136 @@ static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL DMA doesn't have any way to report transfer status until a BDL
 * buffer has been fully transferred.
 *
 * So copying is divided into two parts:
 * 1. Interrupt on buffer done, used while the size still to be transferred
 *    is larger than the ring buffer size.
 * 2. Polling on a FW register once the data left to transfer no longer
 *    fills the ring buffer. The caller takes care of polling the required
 *    status register to identify the transfer status.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
	int ret = 0;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	if (total_size <= 0)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * DMA transfers only up to the write pointer
			 * as updated in SPIB
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			start = false;
			ret = skl_cldma_wait_interruptible(ctx);
			if (ret < 0) {
				skl_cldma_stop(ctx);
				return ret;
			}

		} else {
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
							<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
	}

	return ret;
}

void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}

int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	u32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Set up the CL ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate buffer */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
		return ret;
	}
	/* Setup Code loader BDL */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for bdl failed: %x", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

	/* Allocate BDLs */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}
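
As a sanity check on the SPIB write-pointer arithmetic in skl_cldma_copy_to_buf() above, here is a small standalone sketch (not part of the patch) that isolates the update rule; the buffer size and positions used are hypothetical.

#include <stdio.h>

/* Mirrors the curr_spib_pos update in skl_cldma_copy_to_buf() */
static unsigned int spib_advance(unsigned int spib_pos, unsigned int bytes,
		unsigned int bufsize)
{
	if (spib_pos + bytes <= bufsize)
		return spib_pos + bytes;

	/* Wrap: only the bytes that spill past the end count from the start */
	return bytes - (bufsize - spib_pos);
}

int main(void)
{
	/* Hypothetical 4 KiB ring buffer with the write pointer at 3000 */
	printf("%u\n", spib_advance(3000, 500, 4096));	/* 3500: fits */
	printf("%u\n", spib_advance(3000, 2000, 4096));	/* 904: wraps past the end */
	return 0;
}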
+6 −0
@@ -17,7 +17,9 @@
#define __SKL_SST_DSP_H__

#include <sound/memalloc.h>
#include "skl-sst-cldma.h"

struct sst_dsp;
struct sst_dsp_device;

/* Intel HD Audio General DSP Registers */
@@ -113,6 +115,10 @@ struct skl_dsp_loader_ops {
		struct snd_dma_buffer *dmab);
};

void skl_cldma_process_intr(struct sst_dsp *ctx);
void skl_cldma_int_disable(struct sst_dsp *ctx);
int skl_cldma_prepare(struct sst_dsp *ctx);

void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
		struct sst_dsp_device *sst_dev, int irq);
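
The three skl_cldma_* prototypes above are the entry points the rest of the Skylake DSP code calls into. Here is a hedged sketch of how an interrupt thread could dispatch to them; the wiring is an assumption, not part of this commit, and the SKL_ADSPIS_CL_DMA mask in particular is a placeholder name. Only sst_dsp_shim_read_unlocked(), the SKL_ADSP_REG_ADSPIS status register, and the skl_cldma_* prototypes come from this driver.

#include <linux/interrupt.h>
#include "skl-sst-dsp.h"

/* Illustrative interrupt thread, not part of the patch */
static irqreturn_t example_dsp_irq_thread(int irq, void *dev_id)
{
	struct sst_dsp *ctx = dev_id;
	u32 status = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);

	if (status & SKL_ADSPIS_CL_DMA) {
		/* Mask further CL DMA interrupts, then wake the waiter */
		skl_cldma_int_disable(ctx);
		skl_cldma_process_intr(ctx);
	}

	return IRQ_HANDLED;
}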