Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 493fa952 authored by Lakshmi Narayana Kalavala's avatar Lakshmi Narayana Kalavala Committed by Gerrit - the friendly Code Review server
Browse files

drm/msm/sde: split the interrupt wait timeout in two halves



Certain modules may disable interrupts for a long duration on the same
CPU where the display software is scheduled to receive the interrupt.
It is also possible for a module to disable interrupts on all CPUs for
a long duration, which causes all subsystems to receive their
interrupts at the same time once interrupts are re-enabled. Handle
this by splitting the wait timeout into two halves: the first half may
return late because of a jiffies jump, while the second half gives the
system time to process all subsystem interrupts, avoiding a spurious
timeout being flagged in the display module.

Change-Id: Ic2183b13e37bff4a4b2a363a0f8c3ac5247ab5c0
Signed-off-by: default avatarLakshmi Narayana Kalavala <lkalaval@codeaurora.org>
parent 72af9a4d
Loading
Loading
Loading
Loading
+40 −52
Original line number Diff line number Diff line
@@ -72,9 +72,7 @@

#define IDLE_SHORT_TIMEOUT	1

#define FAULT_TOLERENCE_DELTA_IN_MS 2

#define FAULT_TOLERENCE_WAIT_IN_MS 5
#define EVT_TIME_OUT_SPLIT 2

/* Maximum number of VSYNC wait attempts for RSC state transition */
#define MAX_RSC_WAIT	5
@@ -415,6 +413,28 @@ static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
}

/*
 * _sde_encoder_wait_timeout - wait for info->atomic_cnt to drop to zero.
 * @drm_id: DRM object id, logged to the event trace only.
 * @hw_id: hardware block id, logged to the event trace only.
 * @timeout_ms: maximum time to wait, in milliseconds.
 * @info: wait descriptor holding the wait queue and pending-event counter.
 *
 * Returns the wait_event_timeout() result: 0 on timeout, otherwise the
 * remaining jiffies when the condition became true.
 */
static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
	s64 timeout_ms, struct sde_encoder_wait_info *info)
{
	s64 wait_jf = msecs_to_jiffies(timeout_ms);
	ktime_t deadline = ktime_add_ms(ktime_get(), timeout_ms);
	ktime_t now;
	int ret;

	/*
	 * Re-arm the wait while the counter is still non-zero, the wait
	 * reported a timeout, and the absolute wall-clock deadline has
	 * not passed yet (a jiffies jump can make the jiffies-based wait
	 * expire early relative to real time).
	 */
	for (;;) {
		ret = wait_event_timeout(*(info->wq),
			atomic_read(info->atomic_cnt) == 0, wait_jf);
		now = ktime_get();

		SDE_EVT32(drm_id, hw_id, ret, ktime_to_ms(now),
			timeout_ms, atomic_read(info->atomic_cnt));

		if (!atomic_read(info->atomic_cnt) || ret != 0 ||
				ktime_compare_safe(deadline, now) <= 0)
			break;
	}

	return ret;
}

bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
{
	enum sde_rm_topology_name topology;
@@ -513,7 +533,7 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
{
	struct sde_encoder_irq *irq;
	u32 irq_status;
	int ret;
	int ret, i;

	if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
		SDE_ERROR("invalid params\n");
@@ -545,10 +565,22 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
		irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
		atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);

	ret = sde_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
	/*
	 * Some module X may disable interrupts for a long duration and,
	 * when it re-enables them, trigger all pending interrupts at once
	 * (including the timer interrupt). That can cause the wait in
	 * this API to time out spuriously. Handle this by splitting the
	 * wait timeout into two halves.
	 */

	for (i = 0; i < EVT_TIME_OUT_SPLIT; i++) {
		ret = _sde_encoder_wait_timeout(DRMID(phys_enc->parent),
				irq->hw_idx,
				(wait_info->timeout_ms/EVT_TIME_OUT_SPLIT),
				wait_info);
		if (ret)
			break;
	}

	if (ret <= 0) {
		irq_status = sde_core_irq_read(phys_enc->sde_kms,
@@ -3641,50 +3673,6 @@ void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
	}
}

/*
 * _sde_encoder_wait_timeout - wait for info->atomic_cnt to drop to zero.
 * @drm_id: DRM object id, logged to the event trace only.
 * @hw_id: hardware block id, logged to the event trace only.
 * @timeout_ms: maximum time to wait, in milliseconds.
 * @info: wait descriptor holding the wait queue and pending-event counter.
 *
 * Returns the wait_event_timeout() result: 0 on timeout, otherwise the
 * remaining jiffies when the condition became true.
 */
static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
	s64 timeout_ms, struct sde_encoder_wait_info *info)
{
	int rc = 0;
	/* jiffies-based wait interval, converted once and reused each pass */
	s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
	ktime_t cur_ktime;
	/* absolute wall-clock deadline, independent of jiffies jumps */
	ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);

	do {
		rc = wait_event_timeout(*(info->wq),
			atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
		cur_ktime = ktime_get();

		SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
			timeout_ms, atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(ktime_compare_safe(exp_ktime, cur_ktime) > 0));

	return rc;
}

/*
 * sde_encoder_helper_wait_event_timeout - wait for info->atomic_cnt to
 * reach zero within info->timeout_ms, tolerating a delayed timer irq.
 * @drm_id: DRM object id, passed through for event tracing.
 * @hw_id: hardware block id, passed through for event tracing.
 * @info: wait descriptor (wait queue, pending counter, timeout).
 *
 * Returns the final _sde_encoder_wait_timeout() result: 0 on timeout,
 * non-zero remaining jiffies if the condition was met.
 */
int sde_encoder_helper_wait_event_timeout(int32_t drm_id, int32_t hw_id,
	struct sde_encoder_wait_info *info)
{
	int rc;
	ktime_t exp_ktime = ktime_add_ms(ktime_get(), info->timeout_ms);

	rc = _sde_encoder_wait_timeout(drm_id, hw_id, info->timeout_ms, info);

	/*
	 * Handle the case where interrupts (including the timer irq) were
	 * disabled for a long stretch: if the wait timed out but real time
	 * has run more than FAULT_TOLERENCE_DELTA_IN_MS past the deadline,
	 * the timeout likely fired late, so grant one extra grace wait of
	 * FAULT_TOLERENCE_WAIT_IN_MS before declaring failure.
	 */
	if (atomic_read(info->atomic_cnt) && (!rc) &&
	    (ktime_compare_safe(ktime_get(), ktime_add_ms(exp_ktime,
	     FAULT_TOLERENCE_DELTA_IN_MS)) > 0))
		rc = _sde_encoder_wait_timeout(drm_id, hw_id,
			FAULT_TOLERENCE_WAIT_IN_MS, info);

	return rc;
}

void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_virt *sde_enc;