
Commit 8328509c authored by Mark Brown

Merge remote-tracking branches 'spi/topic/img-spfi', 'spi/topic/imx', 'spi/topic/inline', 'spi/topic/meson' and 'spi/topic/mxs' into spi-next
drivers/spi/spi-img-spfi.c  +19 −30
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
 	unsigned int count = 0;
 	u32 status;
 
-	while (count < max) {
+	while (count < max / 4) {
 		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
 		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
 		if (status & SPFI_INTERRUPT_SDFUL)
 			break;
-		spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
-		count += 4;
+		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
+		count++;
 	}
 
-	return count;
+	return count * 4;
 }
 
 static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
 	unsigned int count = 0;
 	u32 status;
 
-	while (count < max) {
+	while (count < max / 4) {
 		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
 			    SPFI_INTERRUPT_CLEAR);
 		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
 		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
 			break;
-		buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
-		count += 4;
+		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+		count++;
 	}
 
-	return count;
+	return count * 4;
 }
 
 static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
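Note: both PIO hunks above make the same change. The helpers still take and return byte counts, but internally the loop now counts 32-bit words: the bound max / 4 rounds down, so a trailing partial word is simply left for the 8-bit path. The old byte-stepped loop was only safe because these helpers used to be called exclusively for 32 bits_per_word transfers, where the length is always a multiple of 4. A minimal compilable sketch of the pattern, with fifo_write_word() as a hypothetical stand-in for the SPFI register write:

	#include <stdint.h>

	/* Hypothetical stand-in for the FIFO write; 0 would mean "full". */
	static int fifo_write_word(uint32_t word)
	{
		(void)word;
		return 1;
	}

	/* Takes and returns byte counts, but walks the buffer in words. */
	static unsigned int pio_write32(const uint32_t *buf, unsigned int max)
	{
		unsigned int count = 0;		/* words, not bytes */

		while (count < max / 4) {	/* rounds down: no partial word */
			if (!fifo_write_word(buf[count]))
				break;		/* FIFO full, caller retries */
			count++;
		}

		return count * 4;		/* callers account in bytes */
	}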
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
 	       time_before(jiffies, timeout)) {
 		unsigned int tx_count, rx_count;
 
-		switch (xfer->bits_per_word) {
-		case 32:
+		if (tx_bytes >= 4)
 			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
-			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
-			break;
-		case 8:
-		default:
+		else
 			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+
+		if (rx_bytes >= 4)
+			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+		else
 			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
-			break;
-		}
 
 		tx_buf += tx_count;
 		rx_buf += rx_count;
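With the helpers converted as above, img_spfi_start_pio() can pick the FIFO width from how many bytes remain rather than from xfer->bits_per_word: whole words go through the 32-bit FIFO, and any sub-word tail drains through the 8-bit FIFO on a later pass of the loop. A compilable sketch of that dispatch, with simplified stand-ins that always succeed:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int pio_write32(const uint8_t *buf, unsigned int max)
	{
		(void)buf;
		return (max / 4) * 4;	/* pretend all whole words fit */
	}

	static unsigned int pio_write8(const uint8_t *buf, unsigned int max)
	{
		(void)buf;
		return max;		/* pretend every byte fits */
	}

	int main(void)
	{
		const uint8_t buf[7] = { 0 };
		unsigned int done = 0, len = sizeof(buf);

		while (done < len) {
			unsigned int left = len - done;

			done += (left >= 4) ? pio_write32(buf + done, left)
					    : pio_write8(buf + done, left);
		}
		printf("transferred %u bytes\n", done);	/* 4 + 3 */
		return 0;
	}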
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
 
 	if (xfer->rx_buf) {
 		rxconf.direction = DMA_DEV_TO_MEM;
-		switch (xfer->bits_per_word) {
-		case 32:
+		if (xfer->len % 4 == 0) {
 			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
 			rxconf.src_addr_width = 4;
 			rxconf.src_maxburst = 4;
-			break;
-		case 8:
-		default:
+		} else {
 			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
 			rxconf.src_addr_width = 1;
 			rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
 
 	if (xfer->tx_buf) {
 		txconf.direction = DMA_MEM_TO_DEV;
-		switch (xfer->bits_per_word) {
-		case 32:
+		if (xfer->len % 4 == 0) {
 			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
 			txconf.dst_addr_width = 4;
 			txconf.dst_maxburst = 4;
-			break;
-		case 8:
-		default:
+		} else {
 			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
 			txconf.dst_addr_width = 1;
 			txconf.dst_maxburst = 4;
-			break;
 		}
 		dmaengine_slave_config(spfi->tx_ch, &txconf);
 
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
 static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
 			     struct spi_transfer *xfer)
 {
-	if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE)
-		return true;
-	if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
+	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
 		return true;
 	return false;
 }
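Because the access width is now derived from the transfer length, a single threshold is enough in img_spfi_can_dma(): transfers longer than the 32-bit FIFO go to DMA, and anything shorter stays on PIO, which can now use the wide FIFO regardless of bits_per_word. The same shape in miniature (the depth below is an assumed value for illustration; the real constant is defined at the top of spi-img-spfi.c):

	#include <stdbool.h>

	#define FIFO_SIZE	64	/* assumed depth, illustration only */

	static bool can_dma(unsigned int len)
	{
		/* one length check replaces the per-word-size pair */
		return len > FIFO_SIZE;
	}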
drivers/spi/spi-imx.c  +14 −14
@@ -89,7 +89,6 @@ struct spi_imx_data {
 
 	struct completion xfer_done;
 	void __iomem *base;
-	int irq;
 	struct clk *clk_per;
 	struct clk *clk_ipg;
 	unsigned long spi_clk;
@@ -896,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 {
 	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 	int ret;
+	unsigned long timeout;
 	u32 dma;
 	int left;
 	struct spi_master *master = spi_imx->bitbang.master;
@@ -943,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	dma_async_issue_pending(master->dma_tx);
 	dma_async_issue_pending(master->dma_rx);
 	/* Wait SDMA to finish the data transfer.*/
-	ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
 						IMX_DMA_TIMEOUT);
-	if (!ret) {
+	if (!timeout) {
 		pr_warn("%s %s: I/O Error in DMA TX\n",
 			dev_driver_string(&master->dev),
 			dev_name(&master->dev));
 		dmaengine_terminate_all(master->dma_tx);
 	} else {
-		ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
-				IMX_DMA_TIMEOUT);
-		if (!ret) {
+		timeout = wait_for_completion_timeout(
+				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+		if (!timeout) {
 			pr_warn("%s %s: I/O Error in DMA RX\n",
 				dev_driver_string(&master->dev),
 				dev_name(&master->dev));
@@ -968,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	spi_imx->dma_finished = 1;
 	spi_imx->devtype_data->trigger(spi_imx);
 
-	if (!ret)
+	if (!timeout)
 		ret = -ETIMEDOUT;
-	else if (ret > 0)
+	else
 		ret = transfer->len;
 
 	return ret;
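The two hunks above are a type fix: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the jiffies remaining), so storing it in the int ret that elsewhere carries negative error codes mixed two unrelated value ranges in one variable; the old "else if (ret > 0)" guard existed only because ret was doing double duty. With a dedicated unsigned long, each variable has a single meaning. (The spi-mxs hunk further down fixes the same pattern by testing the call's result directly.) A compilable sketch, with wait_done() as a stand-in for wait_for_completion_timeout():

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in: returns 0 on timeout, else the (unsigned) time left. */
	static unsigned long wait_done(unsigned long timeout)
	{
		return timeout;	/* pretend it completed immediately */
	}

	static int do_transfer(int len)
	{
		unsigned long timeout;	/* jiffies remaining, never negative */
		int ret;		/* -errno or a byte count, never both */

		timeout = wait_done(100);
		if (!timeout)
			ret = -ETIMEDOUT;
		else
			ret = len;

		return ret;
	}

	int main(void)
	{
		printf("%d\n", do_transfer(16));	/* prints 16 */
		return 0;
	}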
@@ -1080,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
 	struct spi_master *master;
 	struct spi_imx_data *spi_imx;
 	struct resource *res;
-	int i, ret, num_cs;
+	int i, ret, num_cs, irq;
 
 	if (!np && !mxc_platform_info) {
 		dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1147,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
 		goto out_master_put;
 	}
 
-	spi_imx->irq = platform_get_irq(pdev, 0);
-	if (spi_imx->irq < 0) {
-		ret = spi_imx->irq;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
 		goto out_master_put;
 	}
 
-	ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
+	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
 			       dev_name(&pdev->dev), spi_imx);
 	if (ret) {
-		dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
+		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
 		goto out_master_put;
 	}
 
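The probe hunks are a scoping cleanup: once devm_request_irq() has installed the handler, nothing outside probe reads the IRQ number again, so it moves from struct spi_imx_data to a local. In sketch form, with stand-ins for the platform helpers:

	/* Stand-ins for platform_get_irq()/devm_request_irq(). */
	static int get_irq(void)
	{
		return 42;
	}

	static int request_irq_managed(int irq)
	{
		return irq >= 0 ? 0 : irq;
	}

	struct drv_state {
		void *base;	/* no irq member: it is probe-local now */
	};

	static int probe(struct drv_state *st)
	{
		int irq = get_irq();

		(void)st;
		if (irq < 0)
			return irq;	/* propagate the lookup's -errno */

		return request_irq_managed(irq);
	}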
drivers/spi/spi-meson-spifc.c  +1 −1
@@ -85,7 +85,7 @@ struct meson_spifc {
 	struct device *dev;
 };
 
-static struct regmap_config spifc_regmap_config = {
+static const struct regmap_config spifc_regmap_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
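A one-word change with a real effect: the regmap registration functions take a const struct regmap_config *, so a table that is never written can be declared const and end up in read-only memory. The same shape in miniature (my_regmap_config is a stand-in type):

	struct my_regmap_config {
		int reg_bits;
		int val_bits;
		int reg_stride;
	};

	/* const lets the linker place the table in .rodata, and any
	 * accidental store becomes an error instead of silent corruption. */
	static const struct my_regmap_config spifc_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
	};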
drivers/spi/spi-mxs.c  +2 −3
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
 	dmaengine_submit(desc);
 	dma_async_issue_pending(ssp->dmach);
 
-	ret = wait_for_completion_timeout(&spi->c,
-				msecs_to_jiffies(SSP_TIMEOUT));
-	if (!ret) {
+	if (!wait_for_completion_timeout(&spi->c,
+					 msecs_to_jiffies(SSP_TIMEOUT))) {
 		dev_err(ssp->dev, "DMA transfer timeout\n");
 		ret = -ETIMEDOUT;
 		dmaengine_terminate_all(ssp->dmach);
drivers/spi/spi.c  +93 −21
@@ -871,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
 /**
- * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * __spi_pump_messages - function which processes spi message queue
+ * @master: master to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
  * and transfer each message.
+ *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
  */
-static void spi_pump_messages(struct kthread_work *work)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 {
-	struct spi_master *master =
-		container_of(work, struct spi_master, pump_messages);
 	unsigned long flags;
 	bool was_busy = false;
 	int ret;
 
-	/* Lock queue and check for queue work */
+	/* Lock queue */
 	spin_lock_irqsave(&master->queue_lock, flags);
+
+	/* Make sure we are not already running a message */
+	if (master->cur_msg) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+
+	/* If another context is idling the device then defer */
+	if (master->idling) {
+		queue_kthread_work(&master->kworker, &master->pump_messages);
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+
 	/* Check if the queue is idle */
 	if (list_empty(&master->queue) || !master->running) {
 		if (!master->busy) {
 			spin_unlock_irqrestore(&master->queue_lock, flags);
 			return;
 		}
+
+		/* Only do teardown in the thread */
+		if (!in_kthread) {
+			queue_kthread_work(&master->kworker,
+					   &master->pump_messages);
+			spin_unlock_irqrestore(&master->queue_lock, flags);
+			return;
+		}
+
 		master->busy = false;
+		master->idling = true;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+
 		kfree(master->dummy_rx);
 		master->dummy_rx = NULL;
 		kfree(master->dummy_tx);
@@ -909,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
 			pm_runtime_put_autosuspend(master->dev.parent);
 		}
 		trace_spi_master_idle(master);
-		return;
-	}
 
-	/* Make sure we are not already running a message */
-	if (master->cur_msg) {
+		spin_lock_irqsave(&master->queue_lock, flags);
+		master->idling = false;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
 	}
+
 	/* Extract head of queue */
 	master->cur_msg =
 		list_first_entry(&master->queue, struct spi_message, queue);
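The two hunks above rework the idle path of the message pump. The cur_msg check moves ahead of the idle check, and the new master->idling flag covers the window where the queue lock is dropped to run the sleepable teardown: any other context that sees idling set defers back to the worker instead of racing the teardown, and the flag is cleared under the lock once teardown is done. A toy model of the handshake (no real locking, just the state transitions):

	#include <stdbool.h>

	struct pump_state {
		bool busy;
		bool idling;
	};

	/* Called with the queue lock held: try to claim the teardown. */
	static bool begin_idle(struct pump_state *s)
	{
		if (s->idling)
			return false;	/* someone else owns it: defer */
		s->busy = false;
		s->idling = true;	/* teardown owned by this context */
		return true;
	}

	/* Called with the queue lock re-taken after the teardown ran;
	 * the lock was dropped in between, which is why the flag exists. */
	static void end_idle(struct pump_state *s)
	{
		s->idling = false;
	}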
@@ -981,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
 	}
 }
 
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+	struct spi_master *master =
+		container_of(work, struct spi_master, pump_messages);
+
+	__spi_pump_messages(master, true);
+}
+
 static int spi_init_queue(struct spi_master *master)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 
-	INIT_LIST_HEAD(&master->queue);
-	spin_lock_init(&master->queue_lock);
-
 	master->running = false;
 	master->busy = false;
 
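Here the rename pays off: spi_pump_messages() survives as a thin kthread entry point while the body lives in __spi_pump_messages() with an explicit in_kthread flag, so the same code can also run from spi_sync()'s calling context. The queue and lock initialization dropped from spi_init_queue() reappears in spi_register_master() two hunks down, presumably so they are valid for every master, not only those using the standard queue. The wrapper split, as a compilable sketch:

	#include <stdbool.h>
	#include <stdio.h>

	struct master {
		const char *name;
	};

	/* The body takes its execution context explicitly... */
	static void __pump_messages(struct master *m, bool in_kthread)
	{
		/* anything unsafe outside the worker checks in_kthread */
		printf("%s: pump, in_kthread=%d\n", m->name, in_kthread);
	}

	/* ...and the work item becomes a one-line wrapper. */
	static void pump_messages(struct master *m)
	{
		__pump_messages(m, true);
	}

	int main(void)
	{
		struct master m = { "spi0" };

		pump_messages(&m);		/* worker-thread path */
		__pump_messages(&m, false);	/* spi_sync() calling context */
		return 0;
	}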
@@ -1157,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
 	return 0;
 }
 
-/**
- * spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- */
-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+static int __spi_queued_transfer(struct spi_device *spi,
+				 struct spi_message *msg,
+				 bool need_pump)
 {
 	struct spi_master *master = spi->master;
 	unsigned long flags;
@@ -1177,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &master->queue);
-	if (!master->busy)
+	if (!master->busy && need_pump)
 		queue_kthread_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 	return 0;
 }
 
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to handled is queued to driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	return __spi_queued_transfer(spi, msg, true);
+}
+
 static int spi_master_initialize_queue(struct spi_master *master)
 {
 	int ret;
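__spi_queued_transfer() gets the same treatment as the pump: the queueing logic takes a need_pump flag, and the spi_queued_transfer() wrapper preserves the old behaviour by passing true. A synchronous caller can then enqueue with need_pump false and run the pump itself instead of waking the worker only to sleep until it finishes. The idea in toy form:

	#include <stdbool.h>
	#include <stdio.h>

	static int queued;	/* stands in for the message list */
	static int wakeups;	/* stands in for queue_kthread_work() */

	static int queued_transfer(bool need_pump)
	{
		queued++;		/* always enqueue */
		if (need_pump)
			wakeups++;	/* wake the worker only if asked */
		return 0;
	}

	int main(void)
	{
		queued_transfer(true);	/* async path: the worker pumps */
		queued_transfer(false);	/* sync path: the caller pumps next */
		printf("queued=%d wakeups=%d\n", queued, wakeups);
		return 0;
	}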
@@ -1605,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
 		dynamic = 1;
 	}
 
+	INIT_LIST_HEAD(&master->queue);
+	spin_lock_init(&master->queue_lock);
 	spin_lock_init(&master->bus_lock_spinlock);
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
@@ -2110,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
 	struct spi_master *master = spi->master;
+	unsigned long flags;
 
+	status = __spi_validate(spi, message);
+	if (status != 0)
+		return status;
+
 	message->complete = spi_complete;
 	message->context = &done;
+	message->spi = spi;
 
 	if (!bus_locked)
 		mutex_lock(&master->bus_lock_mutex);
 
-	status = spi_async_locked(spi, message);
+	/* If we're not using the legacy transfer method then we will
+	 * try to transfer in the calling context so special case.
+	 * This code would be less tricky if we could remove the
+	 * support for driver implemented message queues.
+	 */
+	if (master->transfer == spi_queued_transfer) {
+		spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+		trace_spi_message_submit(message);
+
+		status = __spi_queued_transfer(spi, message, false);
+
+		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+	} else {
+		status = spi_async_locked(spi, message);
+	}
 
 	if (!bus_locked)
 		mutex_unlock(&master->bus_lock_mutex);
 
 	if (status == 0) {
+		/* Push out the messages in the calling context if we
+		 * can.
+		 */
+		if (master->transfer == spi_queued_transfer)
+			__spi_pump_messages(master, false);
+
 		wait_for_completion(&done);
 		status = message->status;
 	}
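Taken together, __spi_sync() on a controller using the standard queue now validates the message itself (the queued fast path no longer goes through spi_async(), which used to do the validation), enqueues it with need_pump false under the bus lock spinlock, pumps the queue once from the calling context, and only then sleeps on the completion. An idle controller doing a short transfer never context-switches to the pump kthread. Callers are unaffected; for reference, the kind of path this speeds up looks like:

	/* Kernel-style usage, unchanged by this patch: a short synchronous
	 * command write. With the fast path above it can reach the hardware
	 * without waking the message-pump kthread when the bus is idle. */
	static int send_cmd(struct spi_device *spi, const void *cmd, size_t len)
	{
		struct spi_transfer t = {
			.tx_buf = cmd,
			.len = len,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);

		return spi_sync(spi, &m);
	}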