Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d209d0d6 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "drm/msm/sde: update kickoff sequence for command mode" into msm-4.9

parents 8b4cb6f4 0e558f4c
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -1765,9 +1765,9 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct sde_crtc *sde_crtc;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	u32 i;

	if (!crtc) {
		SDE_ERROR("invalid crtc\n");
@@ -1798,12 +1798,12 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* Reset flush mask from previous commit */
	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
		struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		if (ctl)
			ctl->ops.clear_pending_flush(ctl);
		/* encoder will trigger pending mask now */
		sde_encoder_trigger_kickoff_pending(encoder);
	}

	/*
+33 −0
Original line number Diff line number Diff line
@@ -1755,6 +1755,38 @@ static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
	}
}

void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct sde_encoder_virt *sde_enc;
	struct sde_encoder_phys *phys;
	unsigned int i;
	struct sde_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		SDE_ERROR("invalid encoder\n");
		return;
	}
	sde_enc = to_sde_encoder_virt(drm_enc);
	disp_info = &sde_enc->disp_info;

	for (i = 0; i < sde_enc->num_phys_encs; i++) {
		phys = sde_enc->phys_encs[i];

		if (phys && phys->hw_ctl) {
			ctl = phys->hw_ctl;
			if (ctl->ops.clear_pending_flush)
				ctl->ops.clear_pending_flush(ctl);

			/* update only for command mode primary ctl */
			if ((phys == sde_enc->cur_master) &&
			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
			    && ctl->ops.trigger_pending)
				ctl->ops.trigger_pending(ctl);
		}
	}
}

void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
		struct sde_encoder_kickoff_params *params)
{
@@ -1788,6 +1820,7 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_FUNC_CASE1);
		for (i = 0; i < sde_enc->num_phys_encs; i++) {
			phys = sde_enc->phys_encs[i];
			if (phys && phys->ops.hw_reset)
+7 −0
Original line number Diff line number Diff line
@@ -103,6 +103,13 @@ struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
		struct sde_encoder_kickoff_params *params);

/**
 * sde_encoder_trigger_kickoff_pending - Clear the flush bits from previous
 *        kickoff and trigger the ctl prepare for command-mode displays.
 * @encoder:	encoder pointer
 */
void sde_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);

/**
 * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
 *	(i.e. ctl flush and start) immediately.
+5 −0
Original line number Diff line number Diff line
@@ -172,6 +172,7 @@ enum sde_intr_idx {
	INTR_IDX_VSYNC,
	INTR_IDX_PINGPONG,
	INTR_IDX_UNDERRUN,
	INTR_IDX_CTL_START,
	INTR_IDX_RDPTR,
	INTR_IDX_MAX,
};
@@ -205,6 +206,8 @@ enum sde_intr_idx {
 *				vs. the number of done/vblank irqs. Should hover
 *				between 0-2 Incremented when a new kickoff is
 *				scheduled. Decremented in irq handler
 * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
 *                              pending.
 * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
 */
struct sde_encoder_phys {
@@ -228,12 +231,14 @@ struct sde_encoder_phys {
	atomic_t vblank_refcount;
	atomic_t vsync_cnt;
	atomic_t underrun_cnt;
	atomic_t pending_ctlstart_cnt;
	atomic_t pending_kickoff_cnt;
	wait_queue_head_t pending_kickoff_wq;
};

/*
 * Bump both per-phys pending counters; the ctl-start count is tracked
 * alongside the kickoff count, but only the new kickoff count is
 * returned to the caller.
 */
static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
{
	int kickoff_cnt;

	atomic_inc_return(&phys->pending_ctlstart_cnt);
	kickoff_cnt = atomic_inc_return(&phys->pending_kickoff_cnt);

	return kickoff_cnt;
}

+111 −15
Original line number Diff line number Diff line
@@ -33,6 +33,9 @@

#define PP_TIMEOUT_MAX_TRIALS	10

/* wait for 2 vsyncs only */
#define CTL_START_TIMEOUT_MS	32

/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels In the future, may want to allow panels to override
@@ -170,11 +173,35 @@ static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
	if (!cmd_enc)
		return;

	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
			phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);

	if (phys_enc->parent_ops.handle_vblank_virt)
		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
			phys_enc);
}

/*
 * CTL_START irq handler: log the event, decrement the pending ctl-start
 * counter (never below zero), and wake anyone blocked on the kickoff
 * wait queue.
 */
static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
	struct sde_encoder_phys_cmd *cmd_enc = arg;
	struct sde_encoder_phys *phys_enc;
	struct sde_hw_ctl *hw_ctl;

	if (!cmd_enc)
		return;

	phys_enc = &cmd_enc->base;
	hw_ctl = phys_enc->hw_ctl;
	if (!hw_ctl)
		return;

	SDE_EVT32_IRQ(DRMID(phys_enc->parent), hw_ctl->idx - CTL_0, 0xfff);

	/* guard against underflow if a spurious irq fires */
	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
}

static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
{
	enum sde_rm_topology_name topology;
@@ -280,7 +307,7 @@ static int _sde_encoder_phys_cmd_wait_for_idle(
	if (ret <= 0) {
		/* read and clear interrupt */
		irq_status = sde_core_irq_read(phys_enc->sde_kms,
				INTR_IDX_PINGPONG, true);
				cmd_enc->irq_idx[INTR_IDX_PINGPONG], true);
		if (irq_status) {
			unsigned long flags;
			SDE_EVT32(DRMID(phys_enc->parent),
@@ -335,8 +362,13 @@ static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
		return -EINVAL;
	}

	idx_lookup = (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN) ?
			phys_enc->intf_idx : phys_enc->hw_pp->idx;
	if (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN)
		idx_lookup = phys_enc->intf_idx;
	else if (intr_type == SDE_IRQ_TYPE_CTL_START)
		idx_lookup = phys_enc->hw_ctl ? phys_enc->hw_ctl->idx : -1;
	else
		idx_lookup = phys_enc->hw_pp->idx;

	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
			intr_type, idx_lookup);
	if (cmd_enc->irq_idx[idx] < 0) {
@@ -449,9 +481,13 @@ static int sde_encoder_phys_cmd_control_vblank_irq(
void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
		bool enable)
{
	struct sde_encoder_phys_cmd *cmd_enc;

	if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
		return;

	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);

	if (enable) {
		sde_encoder_phys_cmd_register_irq(phys_enc,
				SDE_IRQ_TYPE_PING_PONG_COMP,
@@ -466,7 +502,17 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
				INTR_IDX_UNDERRUN,
				sde_encoder_phys_cmd_underrun_irq,
				"underrun");

		if (sde_encoder_phys_cmd_is_master(phys_enc))
			sde_encoder_phys_cmd_register_irq(phys_enc,
				SDE_IRQ_TYPE_CTL_START,
				INTR_IDX_CTL_START,
				sde_encoder_phys_cmd_ctl_start_irq,
				"ctl_start");
	} else {
		if (sde_encoder_phys_cmd_is_master(phys_enc))
			sde_encoder_phys_cmd_unregister_irq(
				phys_enc, INTR_IDX_CTL_START);
		sde_encoder_phys_cmd_unregister_irq(
				phys_enc, INTR_IDX_UNDERRUN);
		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
@@ -713,24 +759,73 @@ static void sde_encoder_phys_cmd_prepare_for_kickoff(
	}
}

/*
 * _sde_encoder_phys_cmd_wait_for_ctl_start - block until the CTL_START
 *	interrupt fires for this encoder's ctl path.
 * @phys_enc: physical encoder to wait on
 *
 * Waits up to CTL_START_TIMEOUT_MS for pending_ctlstart_cnt to be cleared
 * by the irq handler. On timeout the interrupt status register is read and
 * cleared directly: if hardware reports the start actually completed, the
 * irq handler is invoked by hand and the wait is treated as a success.
 *
 * Return: 0 on success, -EINVAL on invalid input or wait failure.
 */
static int _sde_encoder_phys_cmd_wait_for_ctl_start(
		struct sde_encoder_phys *phys_enc)
{
	int rc = 0;
	struct sde_hw_ctl *ctl;
	u32 irq_status;
	struct sde_encoder_phys_cmd *cmd_enc;

	/* guard phys_enc itself, consistent with the other wait helpers */
	if (!phys_enc || !phys_enc->hw_ctl) {
		SDE_ERROR("invalid ctl\n");
		return -EINVAL;
	}

	ctl = phys_enc->hw_ctl;
	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
	rc = sde_encoder_helper_wait_event_timeout(DRMID(phys_enc->parent),
			ctl->idx - CTL_0,
			&phys_enc->pending_kickoff_wq,
			&phys_enc->pending_ctlstart_cnt,
			CTL_START_TIMEOUT_MS);
	if (rc <= 0) {
		/* read and clear interrupt */
		irq_status = sde_core_irq_read(phys_enc->sde_kms,
				cmd_enc->irq_idx[INTR_IDX_CTL_START], true);
		if (irq_status) {
			unsigned long flags;

			SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
			SDE_DEBUG_CMDENC(cmd_enc,
					"ctl:%d start done but irq not triggered\n",
					ctl->idx - CTL_0);
			/* run the handler manually with local irqs masked */
			local_irq_save(flags);
			sde_encoder_phys_cmd_ctl_start_irq(cmd_enc,
					INTR_IDX_CTL_START);
			local_irq_restore(flags);
			rc = 0;
		} else {
			SDE_ERROR("ctl start interrupt wait failed\n");
			rc = -EINVAL;
		}
	} else {
		rc = 0;
	}

	return rc;
}

static int sde_encoder_phys_cmd_wait_for_commit_done(
		struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_phys_cmd *cmd_enc =
			to_sde_encoder_phys_cmd(phys_enc);
	int rc = 0;
	struct sde_encoder_phys_cmd *cmd_enc;

	if (cmd_enc->serialize_wait4pp)
		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
	if (!phys_enc)
		return -EINVAL;

	/*
	 * The following statement holds when serialize_wait4pp is false.
	 *
	 * Since ctl_start "commits" the transaction to hardware, and the
	 * tearcheck block takes it from there, there is no need to have a
	 * separate wait for committed, a la wait-for-vsync in video mode
	 */
	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);

	return 0;
	/* only required for master controller */
	if (sde_encoder_phys_cmd_is_master(phys_enc))
		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);

	/* required for both controllers */
	if (!rc && cmd_enc->serialize_wait4pp)
		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);

	return rc;
}

static void sde_encoder_phys_cmd_update_split_role(
@@ -815,6 +910,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
	atomic_set(&phys_enc->vblank_refcount, 0);
	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
	init_waitqueue_head(&phys_enc->pending_kickoff_wq);

	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
Loading