
Commit 2ee7f385 authored by Alisha Thapaliya

Merge remote-tracking branch 'quic/display-kernel.lnx.4.19' into display-kernel.lnx.1.0



* quic/display-kernel.lnx.4.19:
  disp: msm: dsi: handle wait for dma cmd completion
  disp: msm: dsi: Config panel test pin to input mode when panel off
  disp: msm: adjust smmu detach sequence to include unmapping
  disp: msm: sde: Fix 32-bit compilation issues
  disp: msm: dsi: reject seamless commit with active changed
  disp: msm: update debug dump for ltm block
  disp: msm: remove runtime_pm support from rsc driver
  disp: msm: sde: avoid encoder power-collapse with pending frames
  disp: msm: sde: handle all error cases during sui transitions
  drm/msm/dsi: bypass dsi clock set during changing mode
  disp: msm: sde: remove dspp blocking
  disp: msm: sde: update avr mode config during commit prepare
  disp: msm: sde: add one-shot qsync mode support
  disp: msm: sde: update wr_ptr_success state post wait
  disp: msm: sde: allow qsync support along with VRR

Change-Id: Ib2a2a855a2fa49ed74789ed470ee669b21a95500
Signed-off-by: Alisha Thapaliya <athapali@codeaurora.org>
parents 90069901 8bc240b7
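
The central change in this merge is the asynchronous wait for DMA command completion visible in the dsi_ctrl and dsi_display diffs below: the DMA-done wait can be queued on a dedicated workqueue (dma_cmd_workq), IRQ arrival is recorded in the atomic dma_irq_trig flag, and a queued wait is flushed before the next transfer or before clocks are gated. What follows is a minimal userspace sketch of that pattern, not driver code: a pthread, a condition variable and C11 atomics stand in for the kernel workqueue, completion and atomic_t, and every identifier not named above is invented for the sketch.

/* Userspace analogue of the async DMA-done wait introduced by this merge.
 * The real driver queues dsi_ctrl_dma_cmd_wait_for_done() on dma_cmd_workq
 * and completes cmd_dma_done from the ISR; here a worker thread, a condition
 * variable and C11 atomics play those roles. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct dma_wait_ctx {
	pthread_mutex_t lock;
	pthread_cond_t  done_cond;      /* stands in for cmd_dma_done */
	bool            done;
	atomic_int      dma_irq_trig;   /* set by the "ISR" on DMA done */
	bool            dma_wait_queued;
};

/* Worker body, analogous to dsi_ctrl_dma_cmd_wait_for_done(). */
static void *dma_cmd_wait_work(void *arg)
{
	struct dma_wait_ctx *ctx = arg;

	/* If the IRQ already fired, there is nothing left to wait for. */
	if (!atomic_load(&ctx->dma_irq_trig)) {
		pthread_mutex_lock(&ctx->lock);
		while (!ctx->done)
			pthread_cond_wait(&ctx->done_cond, &ctx->lock);
		pthread_mutex_unlock(&ctx->lock);
	}
	ctx->dma_wait_queued = false;
	return NULL;
}

/* "ISR", analogous to the DSI_CMD_MODE_DMA_DONE branch in dsi_ctrl_isr(). */
static void *fake_dma_done_irq(void *arg)
{
	struct dma_wait_ctx *ctx = arg;

	usleep(2000);                   /* pretend the transfer took 2 ms */
	atomic_store(&ctx->dma_irq_trig, 1);
	pthread_mutex_lock(&ctx->lock);
	ctx->done = true;
	pthread_cond_signal(&ctx->done_cond);
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

int main(void)
{
	static struct dma_wait_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t waiter, irq;

	/* Trigger the "command", then queue the wait instead of blocking,
	 * as the driver does when DSI_CTRL_CMD_ASYNC_WAIT is set. */
	atomic_store(&ctx.dma_irq_trig, 0);
	ctx.dma_wait_queued = true;
	pthread_create(&irq, NULL, fake_dma_done_irq, &ctx);
	pthread_create(&waiter, NULL, dma_cmd_wait_work, &ctx);

	/* Before the next command or clock gating, flush the queued wait,
	 * mirroring dsi_ctrl_flush_cmd_dma_queue() and dsi_pre_clkoff_cb(). */
	pthread_join(waiter, NULL);
	pthread_join(irq, NULL);
	printf("dma wait flushed, queued=%d\n", ctx.dma_wait_queued);
	return 0;
}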
+173 −76
@@ -258,6 +258,95 @@ dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
	return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
}

static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
{
	u32 status;
	u32 mask = DSI_CMD_MODE_DMA_DONE;
	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;

	/*
	 * If a command is triggered right after another command,
	 * check if the previous command transfer is completed. If
	 * transfer is done, cancel any work that has been
	 * queued. Otherwise wait till the work is scheduled and
	 * completed before triggering the next command by
	 * flushing the workqueue.
	 */
	status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
	if (atomic_read(&dsi_ctrl->dma_irq_trig)) {
		cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
	} else if (status & mask) {
		atomic_set(&dsi_ctrl->dma_irq_trig, 1);
		status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
		dsi_hw_ops.clear_interrupt_status(
						&dsi_ctrl->hw,
						status);
		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
				DSI_SINT_CMD_MODE_DMA_DONE);
		complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
		cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
		DSI_CTRL_DEBUG(dsi_ctrl,
				"dma_tx done but irq not yet triggered\n");
	} else {
		flush_workqueue(dsi_ctrl->dma_cmd_workq);
	}
}

static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
{
	int ret = 0;
	struct dsi_ctrl *dsi_ctrl = NULL;
	u32 status;
	u32 mask = DSI_CMD_MODE_DMA_DONE;
	struct dsi_ctrl_hw_ops dsi_hw_ops;

	dsi_ctrl = container_of(work, struct dsi_ctrl, dma_cmd_wait);
	dsi_hw_ops = dsi_ctrl->hw.ops;
	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);

	/*
	 * This atomic state will be set if ISR has been triggered,
	 * so the wait is not needed.
	 */
	if (atomic_read(&dsi_ctrl->dma_irq_trig))
		goto done;
	/*
	 * If IRQ wasn't triggered check interrupt status register for
	 * transfer done before waiting.
	 */
	status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
	if (status & mask) {
		status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
		dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
				status);
		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
				DSI_SINT_CMD_MODE_DMA_DONE);
		goto done;
	}

	ret = wait_for_completion_timeout(
			&dsi_ctrl->irq_info.cmd_dma_done,
			msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
	if (ret == 0) {
		status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
		if (status & mask) {
			status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
			dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
					status);
			DSI_CTRL_WARN(dsi_ctrl,
					"dma_tx done but irq not triggered\n");
		} else {
			DSI_CTRL_ERR(dsi_ctrl,
					"Command transfer failed\n");
		}
		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
					DSI_SINT_CMD_MODE_DMA_DONE);
	}

done:
	dsi_ctrl->dma_wait_queued = false;
}

static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
				enum dsi_ctrl_driver_ops op,
				u32 op_state)
@@ -847,8 +936,8 @@ static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
		bit_rate = config->bit_clk_rate_hz_override * num_of_lanes;
	} else if (config->panel_mode == DSI_OP_CMD_MODE) {
		/* Calculate the bit rate needed to match dsi transfer time */
		bit_rate = mult_frac(min_dsi_clk_hz, frame_time_us,
				dsi_transfer_time_us);
		bit_rate = min_dsi_clk_hz * frame_time_us;
		do_div(bit_rate, dsi_transfer_time_us);
		bit_rate = bit_rate * num_of_lanes;
	} else {
		h_period = DSI_H_TOTAL_DSC(timing);
@@ -1106,12 +1195,12 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
				struct dsi_ctrl_cmd_dma_info *cmd_mem,
				u32 flags)
{
	int rc = 0, ret = 0;
	u32 hw_flags = 0;
	u32 line_no = 0x1;
	struct dsi_mode_info *timing;
	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;

	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
	/* check if custom dma scheduling line needed */
	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
		(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
@@ -1156,11 +1245,13 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,

	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
		dsi_ctrl_wait_for_video_done(dsi_ctrl);
		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
		if (dsi_hw_ops.mask_error_intr)
			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
					BIT(DSI_FIFO_OVERFLOW), true);

		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);

		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
@@ -1180,34 +1271,13 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
							      cmd,
							      hw_flags);
		}

		ret = wait_for_completion_timeout(
				&dsi_ctrl->irq_info.cmd_dma_done,
				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));

		if (ret == 0) {
			u32 status = dsi_hw_ops.get_interrupt_status(
								&dsi_ctrl->hw);
			u32 mask = DSI_CMD_MODE_DMA_DONE;

			if (status & mask) {
				status |= (DSI_CMD_MODE_DMA_DONE |
						DSI_BTA_DONE);
				dsi_hw_ops.clear_interrupt_status(
								&dsi_ctrl->hw,
								status);
				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
						DSI_SINT_CMD_MODE_DMA_DONE);
				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
				DSI_CTRL_WARN(dsi_ctrl,
					"dma_tx done but irq not triggered\n");
		if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
			dsi_ctrl->dma_wait_queued = true;
			queue_work(dsi_ctrl->dma_cmd_workq,
					&dsi_ctrl->dma_cmd_wait);
		} else {
				rc = -ETIMEDOUT;
				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
						DSI_SINT_CMD_MODE_DMA_DONE);
				DSI_CTRL_ERR(dsi_ctrl,
						"Command transfer failed\n");
			}
			dsi_ctrl->dma_wait_queued = false;
			dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
		}

		if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
@@ -1227,6 +1297,20 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
	}
}

static u32 dsi_ctrl_validate_msg_flags(const struct mipi_dsi_msg *msg,
				u32 flags)
{
	/*
	 * ASYNC command wait mode is not supported for FIFO commands.
	 * Waiting after a command is transferred cannot be guaranteed
	 * if DSI_CTRL_CMD_ASYNC_WAIT flag is set.
	 */
	if ((flags & DSI_CTRL_CMD_FIFO_STORE) ||
			msg->wait_ms)
		flags &= ~DSI_CTRL_CMD_ASYNC_WAIT;
	return flags;
}

static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
			  const struct mipi_dsi_msg *msg,
			  u32 flags)
@@ -1252,6 +1336,11 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
		goto error;
	}

	flags = dsi_ctrl_validate_msg_flags(msg, flags);

	if (dsi_ctrl->dma_wait_queued)
		dsi_ctrl_flush_cmd_dma_queue(dsi_ctrl);

	if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
		cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
@@ -1793,6 +1882,9 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
	dsi_ctrl->irq_info.irq_num = -1;
	dsi_ctrl->irq_info.irq_stat_mask = 0x0;

	INIT_WORK(&dsi_ctrl->dma_cmd_wait, dsi_ctrl_dma_cmd_wait_for_done);
	atomic_set(&dsi_ctrl->dma_irq_trig, 0);

	spin_lock_init(&dsi_ctrl->irq_info.irq_lock);

	rc = dsi_ctrl_dts_parse(dsi_ctrl, pdev->dev.of_node);
@@ -1896,6 +1988,7 @@ static int dsi_ctrl_dev_remove(struct platform_device *pdev)
		DSI_CTRL_ERR(dsi_ctrl,
				"failed to deinitialize clocks, rc=%d\n", rc);

	atomic_set(&dsi_ctrl->dma_irq_trig, 0);
	mutex_unlock(&dsi_ctrl->ctrl_lock);

	mutex_destroy(&dsi_ctrl->ctrl_lock);
@@ -2213,10 +2306,9 @@ int dsi_ctrl_timing_db_update(struct dsi_ctrl *dsi_ctrl,
	return rc;
}

int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
int dsi_ctrl_timing_setup(struct dsi_ctrl *dsi_ctrl)
{
	int rc = 0;

	if (!dsi_ctrl) {
		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
		return -EINVAL;
@@ -2224,12 +2316,6 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)

	mutex_lock(&dsi_ctrl->ctrl_lock);

	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
					&dsi_ctrl->host_config.lane_map);

	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
				    &dsi_ctrl->host_config.common_config);

	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
		dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
					&dsi_ctrl->host_config.common_config,
@@ -2250,8 +2336,29 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
		dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, true);
	}

	mutex_unlock(&dsi_ctrl->ctrl_lock);
	return rc;
}

int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
{
	int rc = 0;

	rc = dsi_ctrl_timing_setup(dsi_ctrl);
	if (rc)
		return -EINVAL;

	mutex_lock(&dsi_ctrl->ctrl_lock);

	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
					&dsi_ctrl->host_config.lane_map);

	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
				    &dsi_ctrl->host_config.common_config);

	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
	dsi_ctrl_enable_error_interrupts(dsi_ctrl);

	dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);

	mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2489,6 +2596,7 @@ static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
		dsi_ctrl_handle_error_status(dsi_ctrl, errors);

	if (status & DSI_CMD_MODE_DMA_DONE) {
		atomic_set(&dsi_ctrl->dma_irq_trig, 1);
		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
					DSI_SINT_CMD_MODE_DMA_DONE);
		complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
@@ -2603,6 +2711,7 @@ void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
		return;

	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);

	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
@@ -2632,6 +2741,7 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
		return;

	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);

	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
@@ -3070,15 +3180,17 @@ int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
 */
int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
{
	int rc = 0, ret = 0;
	u32 status = 0;
	u32 mask = (DSI_CMD_MODE_DMA_DONE);
	int rc = 0;
	struct dsi_ctrl_hw_ops dsi_hw_ops;

	if (!dsi_ctrl) {
		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
		return -EINVAL;
	}

	dsi_hw_ops = dsi_ctrl->hw.ops;

	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
	/* Don't trigger the command if this is not the last command */
	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
		return rc;
@@ -3086,52 +3198,37 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
	mutex_lock(&dsi_ctrl->ctrl_lock);

	if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);

	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
		(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
		dsi_ctrl_wait_for_video_done(dsi_ctrl);
		if (dsi_hw_ops.mask_error_intr)
			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
					BIT(DSI_FIFO_OVERFLOW), true);
		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
		if (dsi_ctrl->hw.ops.mask_error_intr)
			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
					BIT(DSI_FIFO_OVERFLOW), true);
		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);

		/* trigger command */
		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);

		ret = wait_for_completion_timeout(
				&dsi_ctrl->irq_info.cmd_dma_done,
				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));

		if (ret == 0) {
			status = dsi_ctrl->hw.ops.get_interrupt_status(
								&dsi_ctrl->hw);
			if (status & mask) {
				status |= (DSI_CMD_MODE_DMA_DONE |
						DSI_BTA_DONE);
				dsi_ctrl->hw.ops.clear_interrupt_status(
								&dsi_ctrl->hw,
								status);
				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
						DSI_SINT_CMD_MODE_DMA_DONE);
				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
				DSI_CTRL_WARN(dsi_ctrl, "dma_tx done but irq not triggered\n");
		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
		if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
			dsi_ctrl->dma_wait_queued = true;
			queue_work(dsi_ctrl->dma_cmd_workq,
					&dsi_ctrl->dma_cmd_wait);
		} else {
				rc = -ETIMEDOUT;
				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
						DSI_SINT_CMD_MODE_DMA_DONE);
				DSI_CTRL_ERR(dsi_ctrl, "Command transfer failed\n");
			}
			dsi_ctrl->dma_wait_queued = false;
			dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
		}
		if (dsi_ctrl->hw.ops.mask_error_intr &&

		if (dsi_hw_ops.mask_error_intr &&
				!dsi_ctrl->esd_check_underway)
			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
					BIT(DSI_FIFO_OVERFLOW), false);

		if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
			dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
			dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
			dsi_ctrl->cmd_len = 0;
		}
	}
+30 −4
@@ -31,6 +31,8 @@
 * @DSI_CTRL_CMD_NON_EMBEDDED_MODE:Transfer cmd packets in non embedded mode.
 * @DSI_CTRL_CMD_CUSTOM_DMA_SCHED: Use the dma scheduling line number defined in
 *				   display panel dtsi file instead of default.
 * @DSI_CTRL_CMD_ASYNC_WAIT: Command flag to indicate that the wait for done
 *			for this command is asynchronous and must be queued.
 */
#define DSI_CTRL_CMD_READ             0x1
#define DSI_CTRL_CMD_BROADCAST        0x2
@@ -41,6 +43,7 @@
#define DSI_CTRL_CMD_LAST_COMMAND     0x40
#define DSI_CTRL_CMD_NON_EMBEDDED_MODE 0x80
#define DSI_CTRL_CMD_CUSTOM_DMA_SCHED  0x100
#define DSI_CTRL_CMD_ASYNC_WAIT 0x200

/* DSI embedded mode fifo size
 * If the command is greater than 256 bytes it is sent in non-embedded mode.
@@ -217,6 +220,13 @@ struct dsi_ctrl_interrupts {
 * @vaddr:               CPU virtual address of cmd buffer.
 * @secure_mode:         Indicates if secure-session is in progress
 * @esd_check_underway:  Indicates if esd status check is in progress
 * @dma_cmd_wait:	Work object waiting on DMA command transfer done.
 * @dma_cmd_workq:	Pointer to the workqueue of DMA command transfer done
 *				wait sequence.
 * @dma_wait_queued:	Indicates if any DMA command transfer wait work
 *				is queued.
 * @dma_irq_trig:		 Atomic state to indicate DMA done IRQ
 *				triggered.
 * @debugfs_root:        Root for debugfs entries.
 * @misr_enable:         Frame MISR enable/disable
 * @misr_cache:          Cached Frame MISR value
@@ -267,6 +277,10 @@ struct dsi_ctrl {
	void *vaddr;
	bool secure_mode;
	bool esd_check_underway;
	struct work_struct dma_cmd_wait;
	struct workqueue_struct *dma_cmd_workq;
	bool dma_wait_queued;
	atomic_t dma_irq_trig;

	/* Debug Information */
	struct dentry *debugfs_root;
@@ -485,18 +499,30 @@ int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);

/**
 * dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
 * dsi_ctrl_timing_setup() - Setup DSI host config
 * @dsi_ctrl:        DSI controller handle.
 *
 * Initializes DSI controller hardware with host configuration provided by
 * dsi_ctrl_update_host_config(). Initialization can be performed only during
 * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
 * performed.
 * dsi_ctrl_update_host_config(). This is called while setting up DSI host
 * through dsi_ctrl_setup() and after any ROI change.
 *
 * Also used to program the video mode timing values.
 *
 * Return: error code.
 */
int dsi_ctrl_timing_setup(struct dsi_ctrl *dsi_ctrl);

/**
 * dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
 * @dsi_ctrl:        DSI controller handle.
 *
 * Initialization of DSI controller hardware with host configuration and
 * enabling required interrupts. Initialization can be performed only during
 * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
 * performed.
 *
 * Return: error code.
 */
int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl);

/**
+81 −14
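
The DSI_CTRL_CMD_ASYNC_WAIT flag documented in the dsi_ctrl.h hunk above is only honoured when the command can safely complete in the background: a caller such as dsi_host_transfer() in the dsi_display.c diff below ORs it in when queue_cmd_waits is set, and dsi_ctrl_validate_msg_flags() strips it again for FIFO-stored commands or messages that request a post-command delay. The snippet below is a standalone model of just that masking logic; only the 0x200 value comes from this header, while the FIFO_STORE value and the trimmed-down message struct are placeholders for the demo.

/* Standalone model of dsi_ctrl_validate_msg_flags(): the async-wait flag is
 * dropped for FIFO-stored commands and for messages that need a blocking
 * post-command delay. */
#include <stdint.h>
#include <stdio.h>

#define DSI_CTRL_CMD_FIFO_STORE   0x10     /* placeholder value for the demo */
#define DSI_CTRL_CMD_ASYNC_WAIT   0x200    /* new flag added by this merge */

struct fake_mipi_dsi_msg {
	uint16_t wait_ms;       /* post-command delay requested by the panel */
};

static uint32_t validate_msg_flags(const struct fake_mipi_dsi_msg *msg,
				   uint32_t flags)
{
	/* Async wait cannot be honoured if the command goes through the FIFO
	 * or if the caller must sleep wait_ms after the transfer. */
	if ((flags & DSI_CTRL_CMD_FIFO_STORE) || msg->wait_ms)
		flags &= ~DSI_CTRL_CMD_ASYNC_WAIT;
	return flags;
}

int main(void)
{
	struct fake_mipi_dsi_msg msg = { .wait_ms = 20 };
	uint32_t flags = DSI_CTRL_CMD_ASYNC_WAIT;

	flags = validate_msg_flags(&msg, flags);
	printf("async wait kept: %s\n",
	       (flags & DSI_CTRL_CMD_ASYNC_WAIT) ? "yes" : "no");
	return 0;
}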
@@ -2699,6 +2699,12 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
		flags |= DSI_CTRL_CMD_LAST_COMMAND;
		m_flags |= DSI_CTRL_CMD_LAST_COMMAND;
	}

	if (display->queue_cmd_waits) {
		flags |= DSI_CTRL_CMD_ASYNC_WAIT;
		m_flags |= DSI_CTRL_CMD_ASYNC_WAIT;
	}

	/*
	 * 1. Setup commands in FIFO
	 * 2. Trigger commands
@@ -2852,9 +2858,13 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
	} else {
		int ctrl_idx = (msg->flags & MIPI_DSI_MSG_UNICAST) ?
				msg->ctrl : 0;
		u32 cmd_flags = DSI_CTRL_CMD_FETCH_MEMORY;

		if (display->queue_cmd_waits)
			cmd_flags |= DSI_CTRL_CMD_ASYNC_WAIT;

		rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
					  DSI_CTRL_CMD_FETCH_MEMORY);
				cmd_flags);
		if (rc) {
			DSI_ERR("[%s] cmd transfer failed, rc=%d\n",
			       display->name, rc);
@@ -3151,6 +3161,22 @@ int dsi_pre_clkoff_cb(void *priv,
	struct dsi_display *display = priv;
	struct dsi_display_ctrl *ctrl;


	/*
	 * If Idle Power Collapse occurs immediately after a CMD
	 * transfer with an asynchronous wait for DMA done, ensure
	 * that the work queued is scheduled and completed before turning
	 * off the clocks and disabling interrupts to validate the command
	 * transfer.
	 */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || !ctrl->ctrl->dma_wait_queued)
			continue;
		flush_workqueue(display->dma_cmd_workq);
		cancel_work_sync(&ctrl->ctrl->dma_cmd_wait);
		ctrl->ctrl->dma_wait_queued = false;
	}
	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
		(l_type & DSI_LINK_LP_CLK)) {
		/*
@@ -4489,7 +4515,12 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,

	if ((mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) &&
			(display->panel->panel_mode == DSI_OP_CMD_MODE)) {
		u64 cur_bitclk = display->panel->cur_mode->timing.clk_rate_hz;
		u64 to_bitclk = mode->timing.clk_rate_hz;
		commit_phy_timing = true;

		/* No need to set clkrate pending flag if clocks are same */
		if (cur_bitclk != to_bitclk)
			atomic_set(&display->clkrate_change_pending, 1);

		dsi_display_validate_dms_fps(display->panel->cur_mode, mode);
@@ -4831,6 +4862,7 @@ static int dsi_display_bind(struct device *dev,
			goto error_ctrl_deinit;
		}

		display_ctrl->ctrl->dma_cmd_workq = display->dma_cmd_workq;
		memcpy(&info.c_clks[i],
				(&display_ctrl->ctrl->clk_info.core_clks),
				sizeof(struct dsi_core_clk_info));
@@ -5008,6 +5040,7 @@ static void dsi_display_unbind(struct device *dev,
			DSI_ERR("[%s] failed to deinit phy%d driver, rc=%d\n",
			       display->name, i, rc);

		display->ctrl->ctrl->dma_cmd_workq = NULL;
		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
		if (rc)
			DSI_ERR("[%s] failed to deinit ctrl%d driver, rc=%d\n",
@@ -5096,6 +5129,14 @@ int dsi_display_dev_probe(struct platform_device *pdev)
		goto end;
	}

	display->dma_cmd_workq = create_singlethread_workqueue(
			"dsi_dma_cmd_workq");
	if (!display->dma_cmd_workq)  {
		DSI_ERR("failed to create work queue\n");
		rc =  -EINVAL;
		goto end;
	}

	display->display_type = of_get_property(pdev->dev.of_node,
				"label", NULL);
	if (!display->display_type)
@@ -5159,8 +5200,9 @@ int dsi_display_dev_probe(struct platform_device *pdev)

int dsi_display_dev_remove(struct platform_device *pdev)
{
	int rc = 0;
	int rc = 0, i = 0;
	struct dsi_display *display;
	struct dsi_display_ctrl *ctrl;

	if (!pdev) {
		DSI_ERR("Invalid device\n");
@@ -5172,6 +5214,18 @@ int dsi_display_dev_remove(struct platform_device *pdev)
	/* decrement ref count */
	of_node_put(display->panel_node);

	if (display->dma_cmd_workq) {
		flush_workqueue(display->dma_cmd_workq);
		destroy_workqueue(display->dma_cmd_workq);
		display->dma_cmd_workq = NULL;
		display_for_each_ctrl(i, display) {
			ctrl = &display->ctrl[i];
			if (!ctrl->ctrl)
				continue;
			ctrl->ctrl->dma_cmd_workq = NULL;
		}
	}

	(void)_dsi_display_dev_deinit(display);

	platform_set_drvdata(pdev, NULL);
@@ -6998,7 +7052,7 @@ static int dsi_display_set_roi(struct dsi_display *display,
		}

		/* re-program the ctrl with the timing based on the new roi */
		rc = dsi_ctrl_setup(ctrl->ctrl);
		rc = dsi_ctrl_timing_setup(ctrl->ctrl);
		if (rc) {
			DSI_ERR("dsi_ctrl_setup failed rc %d\n", rc);
			return rc;
@@ -7014,21 +7068,11 @@ int dsi_display_pre_kickoff(struct drm_connector *connector,
{
	int rc = 0;
	int i;
	bool enable;

	/* check and setup MISR */
	if (display->misr_enable)
		_dsi_display_setup_misr(display);

	if (params->qsync_update) {
		enable = (params->qsync_mode > 0) ? true : false;
		rc = dsi_display_qsync(display, enable);
		if (rc)
			DSI_ERR("%s failed to send qsync commands\n",
				__func__);
		SDE_EVT32(params->qsync_mode, rc);
	}

	rc = dsi_display_set_roi(display, params->rois);

	/* dynamic DSI clock setting */
@@ -7109,6 +7153,29 @@ int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display)
	return rc;
}

int dsi_display_pre_commit(void *display,
		struct msm_display_conn_params *params)
{
	bool enable = false;
	int rc = 0;

	if (!display || !params) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	if (params->qsync_update) {
		enable = (params->qsync_mode > 0) ? true : false;
		rc = dsi_display_qsync(display, enable);
		if (rc)
			pr_err("%s failed to send qsync commands\n",
				__func__);
		SDE_EVT32(params->qsync_mode, rc);
	}

	return rc;
}

int dsi_display_enable(struct dsi_display *display)
{
	int rc = 0;
+15 −0
@@ -182,6 +182,9 @@ struct dsi_display_ext_bridge {
 * @esd_trigger       field indicating ESD trigger through debugfs
 * @te_source         vsync source pin information
 * @clk_gating_config Clocks for which clock gating needs to be enabled
 * @queue_cmd_waits   Indicates if wait for dma commands done has to be queued.
 * @dma_cmd_workq:	Pointer to the workqueue of DMA command transfer done
 *				wait sequence.
 */
struct dsi_display {
	struct platform_device *pdev;
@@ -266,6 +269,8 @@ struct dsi_display {

	u32 te_source;
	u32 clk_gating_config;
	bool queue_cmd_waits;
	struct workqueue_struct *dma_cmd_workq;
};

int dsi_display_dev_probe(struct platform_device *pdev);
@@ -679,6 +684,16 @@ int dsi_display_set_power(struct drm_connector *connector,
int dsi_display_pre_kickoff(struct drm_connector *connector,
		struct dsi_display *display,
		struct msm_display_kickoff_params *params);

/*
 * dsi_display_pre_commit - program pre commit features
 * @display: Pointer to private display structure
 * @params: Parameters for pre commit time programming
 * Returns: Zero on success
 */
int dsi_display_pre_commit(void *display,
		struct msm_display_conn_params *params);

/**
 * dsi_display_get_dst_format() - get dst_format from DSI display
 * @connector:        Pointer to drm connector structure
+29 −1
@@ -423,6 +423,15 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
	}

	/* Reject seamless transition when active changed */
	if (crtc_state->active_changed &&
		((dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR) ||
		(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK))) {
		DSI_ERR("seamless upon active changed 0x%x %d\n",
			dsi_mode.dsi_mode_flags, crtc_state->active_changed);
		return false;
	}

	/* convert back to drm mode, propagating the private info & flags */
	dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);

@@ -911,6 +920,17 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector,
	return dsi_display_pre_kickoff(connector, display, params);
}

int dsi_conn_prepare_commit(void *display,
		struct msm_display_conn_params *params)
{
	if (!display || !params) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	return dsi_display_pre_commit(display, params);
}

void dsi_conn_enable_event(struct drm_connector *connector,
		uint32_t event_idx, bool enable, void *display)
{
@@ -925,7 +945,8 @@ void dsi_conn_enable_event(struct drm_connector *connector,
			event_idx, &event_info, enable);
}

int dsi_conn_post_kickoff(struct drm_connector *connector)
int dsi_conn_post_kickoff(struct drm_connector *connector,
	struct msm_display_conn_params *params)
{
	struct drm_encoder *encoder;
	struct dsi_bridge *c_bridge;
@@ -933,6 +954,7 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
	struct dsi_display *display;
	struct dsi_display_ctrl *m_ctrl, *ctrl;
	int i, rc = 0;
	bool enable;

	if (!connector || !connector->state) {
		DSI_ERR("invalid connector or connector state\n");
@@ -978,6 +1000,12 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
	/* ensure dynamic clk switch flag is reset */
	c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;

	if (params->qsync_update) {
		enable = (params->qsync_mode > 0) ? true : false;
		display_for_each_ctrl(i, display)
			dsi_ctrl_setup_avr(display->ctrl[i].ctrl, enable);
	}

	return 0;
}
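
The last two files move qsync handling into a prepare-commit/post-kickoff pair: dsi_conn_prepare_commit() calls dsi_display_pre_commit() to send the qsync on/off commands before the frame, and dsi_conn_post_kickoff() then programs AVR on each controller. Below is a small compilable stand-in for that ordering; the msm_display_conn_params fields and the dsi_ctrl_setup_avr() name follow the diff, and everything else is stubbed for illustration.

/* Stand-in for the new qsync flow: pre-commit sends the qsync commands,
 * post-kickoff programs AVR on each controller. Stubs replace the real
 * dsi_display_qsync() and dsi_ctrl_setup_avr() calls. */
#include <stdbool.h>
#include <stdio.h>

struct msm_display_conn_params {
	bool qsync_update;      /* set when qsync mode changed this frame */
	int  qsync_mode;        /* >0 enables qsync */
};

#define NUM_CTRL 2

static int dsi_display_qsync_stub(bool enable)
{
	printf("send qsync %s commands\n", enable ? "on" : "off");
	return 0;
}

static void dsi_ctrl_setup_avr_stub(int ctrl_idx, bool enable)
{
	printf("ctrl%d: AVR %s\n", ctrl_idx, enable ? "enabled" : "disabled");
}

/* Mirrors dsi_display_pre_commit(): runs before the frame is committed. */
static int display_pre_commit(const struct msm_display_conn_params *params)
{
	if (params->qsync_update)
		return dsi_display_qsync_stub(params->qsync_mode > 0);
	return 0;
}

/* Mirrors the qsync branch added to dsi_conn_post_kickoff(). */
static void display_post_kickoff(const struct msm_display_conn_params *params)
{
	if (params->qsync_update)
		for (int i = 0; i < NUM_CTRL; i++)
			dsi_ctrl_setup_avr_stub(i, params->qsync_mode > 0);
}

int main(void)
{
	struct msm_display_conn_params params = {
		.qsync_update = true, .qsync_mode = 1,
	};

	display_pre_commit(&params);    /* commands before the frame */
	display_post_kickoff(&params);  /* AVR config after kickoff */
	return 0;
}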
