
Commit b967aecf authored by Vinod Koul

Merge branch 'for-linus' into next



Conflicts:
	drivers/dma/edma.c
Moved the memory leak fix to after the merge

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parents 959f5854 2abd5f1b
Documentation/devicetree/bindings/dma/atmel-dma.txt +1 −1
@@ -28,7 +28,7 @@ The three cells in order are:
dependent:
  - bit 7-0: peripheral identifier for the hardware handshaking interface. The
  identifier can be different for tx and rx.
- - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
+ - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.

Example:

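The fix above makes the three FIFO configurations distinct (the old text listed 1 for both ALAP and ASAP). As a hedged illustration of the corrected encoding, with macro and helper names that are mine rather than from the binding, a consumer would pack the third cell like this:

#define AT_DMA_FIFO_HALF	0	/* half FIFO (default) */
#define AT_DMA_FIFO_ALAP	1	/* as late as possible */
#define AT_DMA_FIFO_ASAP	2	/* as soon as possible */

/* Third dma cell: bits 7-0 carry the peripheral handshake identifier,
 * bits 11-8 the FIFO configuration. */
static inline unsigned int at_dma_third_cell(unsigned int perid,
					     unsigned int fifo_cfg)
{
	return (perid & 0xff) | ((fifo_cfg & 0xf) << 8);
}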
drivers/dma/amba-pl08x.c +1 −2
@@ -2133,8 +2133,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

-	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-			  DRIVER_NAME, pl08x);
+	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
drivers/dma/coh901318.c +1 −1
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
	if (irq < 0)
		return irq;

-	err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+	err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
			       "coh901318", base);
	if (err)
		return err;
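This hunk and the amba-pl08x one above are the same kernel-wide cleanup: IRQF_DISABLED had become a no-op (the core always runs handlers with the requesting line disabled), so passing 0 changes no behavior. A minimal sketch of the resulting call shape, with hypothetical my_* names:

#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	/* acknowledge the hardware, then report the interrupt as handled */
	return IRQ_HANDLED;
}

static int my_request(int irq, void *my_dev)
{
	/* flags = 0 replaces the obsolete IRQF_DISABLED: nothing special
	 * is requested and nothing changes at run time */
	return request_irq(irq, my_irq_handler, 0, "my-driver", my_dev);
}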
drivers/dma/cppi41.c +62 −32
@@ -674,14 +674,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
	}
}

-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan;
	int i;
	int ret;
	u32 n_chans;

-	ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+	ret = of_property_read_u32(dev->of_node, "#dma-channels",
			&n_chans);
	if (ret)
		return ret;
@@ -719,7 +719,7 @@ static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
	return -ENOMEM;
}

-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int mem_decs;
	int i;
@@ -731,7 +731,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

-		dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+		dma_free_coherent(dev, mem_decs, cdd->cd,
				cdd->descs_phys);
	}
}
@@ -741,19 +741,19 @@ static void disable_sched(struct cppi41_dd *cdd)
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}

-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	disable_sched(cdd);

-	purge_descs(pdev, cdd);
+	purge_descs(dev, cdd);

	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-	dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+	dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
			cdd->scratch_phys);
}

-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_decs;
@@ -777,7 +777,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
		reg |= ilog2(ALLOC_DECS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
-		cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+		cdd->cd = dma_alloc_coherent(dev, mem_decs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;
@@ -813,12 +813,12 @@ static void init_sched(struct cppi41_dd *cdd)
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}

-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
-	cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;
@@ -827,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

-	ret = init_descs(pdev, cdd);
+	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

@@ -835,7 +835,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
	init_sched(cdd);
	return 0;
err_td:
-	deinit_cpii41(pdev, cdd);
+	deinit_cppi41(dev, cdd);
	return ret;
}

@@ -914,11 +914,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);

-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
	const struct of_device_id *of_id;

-	of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
	if (!of_id)
		return NULL;
	return of_id->data;
@@ -927,11 +927,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
static int cppi41_dma_probe(struct platform_device *pdev)
{
	struct cppi41_dd *cdd;
+	struct device *dev = &pdev->dev;
	const struct cppi_glue_infos *glue_info;
	int irq;
	int ret;

-	glue_info = get_glue_info(pdev);
+	glue_info = get_glue_info(dev);
	if (!glue_info)
		return -EINVAL;

@@ -946,14 +947,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
	cdd->ddev.device_control = cppi41_dma_control;
-	cdd->ddev.dev = &pdev->dev;
+	cdd->ddev.dev = dev;
	INIT_LIST_HEAD(&cdd->ddev.channels);
	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

-	cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
-	cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
-	cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
-	cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+	cdd->usbss_mem = of_iomap(dev->of_node, 0);
+	cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+	cdd->sched_mem = of_iomap(dev->of_node, 2);
+	cdd->qmgr_mem = of_iomap(dev->of_node, 3);

	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
			!cdd->qmgr_mem) {
@@ -961,8 +962,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
		goto err_remap;
	}

-	pm_runtime_enable(&pdev->dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
	if (ret)
		goto err_get_sync;

@@ -970,22 +971,22 @@ static int cppi41_dma_probe(struct platform_device *pdev)
	cdd->queues_tx = glue_info->queues_tx;
	cdd->td_queue = glue_info->td_queue;

-	ret = init_cppi41(pdev, cdd);
+	ret = init_cppi41(dev, cdd);
	if (ret)
		goto err_init_cppi;

-	ret = cppi41_add_chans(pdev, cdd);
+	ret = cppi41_add_chans(dev, cdd);
	if (ret)
		goto err_chans;

-	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq)
		goto err_irq;

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
-			dev_name(&pdev->dev), cdd);
+			dev_name(dev), cdd);
	if (ret)
		goto err_irq;
	cdd->irq = irq;
@@ -994,7 +995,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
	if (ret)
		goto err_dma_reg;

-	ret = of_dma_controller_register(pdev->dev.of_node,
+	ret = of_dma_controller_register(dev->of_node,
			cppi41_dma_xlate, &cpp41_dma_info);
	if (ret)
		goto err_of;
@@ -1009,11 +1010,11 @@ static int cppi41_dma_probe(struct platform_device *pdev)
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	cleanup_chans(cdd);
err_chans:
-	deinit_cpii41(pdev, cdd);
+	deinit_cppi41(dev, cdd);
err_init_cppi:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_put(dev);
err_get_sync:
-	pm_runtime_disable(&pdev->dev);
+	pm_runtime_disable(dev);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
@@ -1033,7 +1034,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	free_irq(cdd->irq, cdd);
	cleanup_chans(cdd);
-	deinit_cpii41(pdev, cdd);
+	deinit_cppi41(&pdev->dev, cdd);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
@@ -1044,12 +1045,41 @@ static int cppi41_dma_remove(struct platform_device *pdev)
	return 0;
}

+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+	disable_sched(cdd);
+
+	return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < DESCS_AREAS; i++)
+		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+	init_sched(cdd);
+	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
static struct platform_driver cpp41_dma_driver = {
	.probe  = cppi41_dma_probe,
	.remove = cppi41_dma_remove,
	.driver = {
		.name = "cppi41-dma-engine",
		.owner = THIS_MODULE,
+		.pm = &cppi41_pm_ops,
		.of_match_table = of_match_ptr(cppi41_dma_ids),
	},
};
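Two idioms in this file are worth noting. The helpers now take a plain struct device *, since all they used the platform_device for was its embedded device or of_node; probe caches dev = &pdev->dev once and passes it down. The new sleep hooks follow the stock SIMPLE_DEV_PM_OPS pattern, sketched below in stripped-down form with hypothetical foo_* names:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware before the system sleeps */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram whatever state the hardware lost while suspended */
	return 0;
}
#endif

/* Expands to a pm_ops table referencing foo_suspend/foo_resume only when
 * CONFIG_PM_SLEEP is set; otherwise the table is empty and the #ifdef'd
 * callbacks are compiled out, avoiding "defined but not used" warnings. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);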
drivers/dma/edma.c +136 −75
@@ -46,8 +46,14 @@
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG		16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be atleast the no.of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires atleast 17 slots, so we setup the default to 20.
+ */
+#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	return ret;
}

+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel who's PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and setup.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: How much is the dev_width
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+	enum dma_slave_buswidth dev_width, unsigned int dma_length,
+	enum dma_transfer_direction direction)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	int acnt, bcnt, ccnt, cidx;
+	int src_bidx, dst_bidx, src_cidx, dst_cidx;
+	int absync;
+
+	acnt = dev_width;
+	/*
+	 * If the maxburst is equal to the fifo width, use
+	 * A-synced transfers. This allows for large contiguous
+	 * buffer transfers using only one PaRAM set.
+	 */
+	if (burst == 1) {
+		/*
+		 * For the A-sync case, bcnt and ccnt are the remainder
+		 * and quotient respectively of the division of:
+		 * (dma_length / acnt) by (SZ_64K -1). This is so
+		 * that in case bcnt over flows, we have ccnt to use.
+		 * Note: In A-sync tranfer only, bcntrld is used, but it
+		 * only applies for sg_dma_len(sg) >= SZ_64K.
+		 * In this case, the best way adopted is- bccnt for the
+		 * first frame will be the remainder below. Then for
+		 * every successive frame, bcnt will be SZ_64K-1. This
+		 * is assured as bcntrld = 0xffff in end of function.
+		 */
+		absync = false;
+		ccnt = dma_length / acnt / (SZ_64K - 1);
+		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+		/*
+		 * If bcnt is non-zero, we have a remainder and hence an
+		 * extra frame to transfer, so increment ccnt.
+		 */
+		if (bcnt)
+			ccnt++;
+		else
+			bcnt = SZ_64K - 1;
+		cidx = acnt;
+	} else {
+		/*
+		 * If maxburst is greater than the fifo address_width,
+		 * use AB-synced transfers where A count is the fifo
+		 * address_width and B count is the maxburst. In this
+		 * case, we are limited to transfers of C count frames
+		 * of (address_width * maxburst) where C count is limited
+		 * to SZ_64K-1. This places an upper bound on the length
+		 * of an SG segment that can be handled.
+		 */
+		absync = true;
+		bcnt = burst;
+		ccnt = dma_length / (acnt * bcnt);
+		if (ccnt > (SZ_64K - 1)) {
+			dev_err(dev, "Exceeded max SG segment size\n");
+			return -EINVAL;
+		}
+		cidx = acnt * bcnt;
+	}
+
+	if (direction == DMA_MEM_TO_DEV) {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = 0;
+		dst_cidx = 0;
+	} else if (direction == DMA_DEV_TO_MEM)  {
+		src_bidx = 0;
+		src_cidx = 0;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
+	} else {
+		dev_err(dev, "%s: direction not implemented yet\n", __func__);
+		return -EINVAL;
+	}
+
+	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	/* Configure A or AB synchronized transfers */
+	if (absync)
+		pset->opt |= SYNCDIM;
+
+	pset->src = src_addr;
+	pset->dst = dst_addr;
+
+	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+	pset->a_b_cnt = bcnt << 16 | acnt;
+	pset->ccnt = ccnt;
+	/*
+	 * Only time when (bcntrld) auto reload is required is for
+	 * A-sync case, and in this case, a requirement of reload value
+	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
+	 * and then later will be populated by edma_execute.
+	 */
+	pset->link_bcntrld = 0xffffffff;
+	return absync;
+}
+
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
-	dma_addr_t dev_addr;
+	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
-	int acnt, bcnt, ccnt, src, dst, cidx;
-	int src_bidx, dst_bidx, src_cidx, dst_cidx;
-	int i, nslots;
+	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
-		dev_addr = echan->cfg.src_addr;
+		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
-		dev_addr = echan->cfg.dst_addr;
+		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
@@ -315,64 +430,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
-
-		acnt = dev_width;
-
-		/*
-		 * If the maxburst is equal to the fifo width, use
-		 * A-synced transfers. This allows for large contiguous
-		 * buffer transfers using only one PaRAM set.
-		 */
-		if (burst == 1) {
-			edesc->absync = false;
-			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-			if (bcnt)
-				ccnt++;
-			else
-				bcnt = SZ_64K - 1;
-			cidx = acnt;
-		/*
-		 * If maxburst is greater than the fifo address_width,
-		 * use AB-synced transfers where A count is the fifo
-		 * address_width and B count is the maxburst. In this
-		 * case, we are limited to transfers of C count frames
-		 * of (address_width * maxburst) where C count is limited
-		 * to SZ_64K-1. This places an upper bound on the length
-		 * of an SG segment that can be handled.
-		 */
-		} else {
-			edesc->absync = true;
-			bcnt = burst;
-			ccnt = sg_dma_len(sg) / (acnt * bcnt);
-			if (ccnt > (SZ_64K - 1)) {
-				dev_err(dev, "Exceeded max SG segment size\n");
-				kfree(edesc);
-				return NULL;
-			}
-			cidx = acnt * bcnt;
-		}
-
-		if (direction == DMA_MEM_TO_DEV) {
-			src = sg_dma_address(sg);
-			dst = dev_addr;
-			src_bidx = acnt;
-			src_cidx = cidx;
-			dst_bidx = 0;
-			dst_cidx = 0;
-		} else {
-			src = dev_addr;
-			dst = sg_dma_address(sg);
-			src_bidx = 0;
-			src_cidx = 0;
-			dst_bidx = acnt;
-			dst_cidx = cidx;
-		}
-
-		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-		/* Configure A or AB synchronized transfers */
-		if (edesc->absync)
-			edesc->pset[i].opt |= SYNCDIM;
+		/* Get address for each SG */
+		if (direction == DMA_DEV_TO_MEM)
+			dst_addr = sg_dma_address(sg);
+		else
+			src_addr = sg_dma_address(sg);
+
+		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+				       dst_addr, burst, dev_width,
+				       sg_dma_len(sg), direction);
+		if (ret < 0) {
+			kfree(edesc);
+			return NULL;
+		}
+
+		edesc->absync = ret;

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
@@ -382,17 +454,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;
-
-		edesc->pset[i].src = src;
-		edesc->pset[i].dst = dst;
-
-		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-		edesc->pset[i].ccnt = ccnt;
-		edesc->pset[i].link_bcntrld = 0xffffffff;
-
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
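The A-sync arithmetic in the new edma_config_pset is easiest to check with concrete numbers. A hedged worked example (the values are mine, not from the patch): with a 4-byte dev_width, burst == 1, and a 1 MiB scatterlist segment, there are 262144 elements to move, and BCNT is a 16-bit count:

#include <linux/sizes.h>	/* SZ_64K */

/* Mirrors the burst == 1 branch of edma_config_pset above. */
static void edma_async_split_example(void)
{
	unsigned int acnt  = 4;			/* dev_width in bytes */
	unsigned int elems = (1 << 20) / acnt;	/* 262144 elements */
	unsigned int ccnt, bcnt;

	ccnt = elems / (SZ_64K - 1);		/* 262144 / 65535 = 4 */
	bcnt = elems - ccnt * (SZ_64K - 1);	/* remainder = 4 */
	if (bcnt)
		ccnt++;				/* 5 frames in total */
	else
		bcnt = SZ_64K - 1;

	/* The first frame moves bcnt = 4 elements; BCNTRLD, loaded with
	 * 0xffff via link_bcntrld = 0xffffffff, reloads BCNT to 65535 for
	 * each remaining frame: 4 + 4 * 65535 = 262144, the whole segment. */
}

So any segment up to (SZ_64K - 1) * (SZ_64K - 1) elements fits in a single PaRAM set in the A-synced case, which is why large contiguous buffers need only one slot.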