drivers/gpu/drm/msm/sde/sde_encoder.c +40 −52

Summary: moves _sde_encoder_wait_timeout() ahead of its new caller, drops sde_encoder_helper_wait_event_timeout() and its FAULT_TOLERENCE_* fallback, and has sde_encoder_helper_wait_for_irq() split the wait into EVT_TIME_OUT_SPLIT slices so a delayed timer interrupt cannot consume the whole timeout in one shot.

@@ -72,9 +72,7 @@
 
 #define IDLE_SHORT_TIMEOUT	1
 
-#define FAULT_TOLERENCE_DELTA_IN_MS 2
-
-#define FAULT_TOLERENCE_WAIT_IN_MS 5
+#define EVT_TIME_OUT_SPLIT 2
 
 /* Maximum number of VSYNC wait attempts for RSC state transition */
 #define MAX_RSC_WAIT 5
@@ -415,6 +413,28 @@ static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
 
+static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
+	s64 timeout_ms, struct sde_encoder_wait_info *info)
+{
+	int rc = 0;
+	s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
+	ktime_t cur_ktime;
+	ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
+
+	do {
+		rc = wait_event_timeout(*(info->wq),
+			atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
+		cur_ktime = ktime_get();
+
+		SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
+			timeout_ms, atomic_read(info->atomic_cnt));
+	/* If we timed out, counter is valid and time is less, wait again */
+	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+			(ktime_compare_safe(exp_ktime, cur_ktime) > 0));
+
+	return rc;
+}
+
 bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
 {
 	enum sde_rm_topology_name topology;
@@ -505,7 +525,7 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
 {
 	struct sde_encoder_irq *irq;
 	u32 irq_status;
-	int ret;
+	int ret, i;
 
 	if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
 		SDE_ERROR("invalid params\n");
@@ -537,10 +557,22 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
 		irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
 		atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);
 
-	ret = sde_encoder_helper_wait_event_timeout(
-			DRMID(phys_enc->parent),
-			irq->hw_idx,
-			wait_info);
+	/*
+	 * Some module may keep interrupts disabled for a long
+	 * duration, and every pending interrupt, including the
+	 * timer interrupt, fires at once when that module enables
+	 * them again. That can make the wait in this API time out
+	 * spuriously. Handle it by splitting the wait into
+	 * EVT_TIME_OUT_SPLIT halves.
+	 */
+	for (i = 0; i < EVT_TIME_OUT_SPLIT; i++) {
+		ret = _sde_encoder_wait_timeout(DRMID(phys_enc->parent),
+				irq->hw_idx,
+				(wait_info->timeout_ms/EVT_TIME_OUT_SPLIT),
+				wait_info);
+		if (ret)
+			break;
+	}
 
 	if (ret <= 0) {
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
@@ -3655,50 +3687,6 @@ void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
 	}
 }
 
-static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
-	s64 timeout_ms, struct sde_encoder_wait_info *info)
-{
-	int rc = 0;
-	s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
-	ktime_t cur_ktime;
-	ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
-
-	do {
-		rc = wait_event_timeout(*(info->wq),
-			atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
-		cur_ktime = ktime_get();
-
-		SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
-			timeout_ms, atomic_read(info->atomic_cnt));
-	/* If we timed out, counter is valid and time is less, wait again */
-	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
-			(ktime_compare_safe(exp_ktime, cur_ktime) > 0));
-
-	return rc;
-}
-
-int sde_encoder_helper_wait_event_timeout(int32_t drm_id, int32_t hw_id,
-	struct sde_encoder_wait_info *info)
-{
-	int rc;
-	ktime_t exp_ktime = ktime_add_ms(ktime_get(), info->timeout_ms);
-
-	rc = _sde_encoder_wait_timeout(drm_id, hw_id, info->timeout_ms, info);
-
-	/**
-	 * handle disabled irq case where timer irq is also delayed.
-	 * wait for additional timeout of FAULT_TOLERENCE_WAIT_IN_MS
-	 * if it event_timeout expired late detected.
-	 */
-	if (atomic_read(info->atomic_cnt) && (!rc) &&
-	    (ktime_compare_safe(ktime_get(), ktime_add_ms(exp_ktime,
-	     FAULT_TOLERENCE_DELTA_IN_MS)) > 0))
-		rc = _sde_encoder_wait_timeout(drm_id, hw_id,
-			FAULT_TOLERENCE_WAIT_IN_MS, info);
-
-	return rc;
-}
-
 void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_virt *sde_enc;
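For readers outside the kernel tree, the following is a minimal userspace sketch of the split-wait pattern the patch introduces. It is an illustration only: the pthread mutex/condvar and the evt_wait_* names are hypothetical stand-ins for the kernel waitqueue, atomic counter, and driver helpers; only the retry structure mirrors the patch. Build with gcc -pthread.

/*
 * Illustration of the split-timeout wait used in
 * sde_encoder_helper_wait_for_irq(). Userspace stand-ins only:
 * a pthread condvar plays the role of the kernel waitqueue.
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define EVT_TIME_OUT_SPLIT 2

struct evt_wait_info {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	atomic_int pending;	/* mirrors wait_info->atomic_cnt */
};

/* Absolute deadline = now + timeout_ms on the condvar's clock. */
static struct timespec deadline_after_ms(long timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);	/* default condvar clock */
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	return ts;
}

/*
 * Like wait_event_timeout(): returns nonzero if the event fired, 0 on
 * timeout. Spurious wakeups re-wait against the same absolute deadline,
 * echoing the do/while loop in _sde_encoder_wait_timeout().
 */
static int evt_wait_timeout(struct evt_wait_info *info, long timeout_ms)
{
	struct timespec deadline = deadline_after_ms(timeout_ms);
	int rc = 1;

	pthread_mutex_lock(&info->lock);
	while (atomic_load(&info->pending) && rc) {
		if (pthread_cond_timedwait(&info->cond, &info->lock,
				&deadline) == ETIMEDOUT)
			rc = 0;
	}
	pthread_mutex_unlock(&info->lock);
	return rc;
}

/*
 * The pattern from the patch: slice the budget into EVT_TIME_OUT_SPLIT
 * waits so that one slice whose timer expiry was held off cannot burn
 * the whole budget in a single stretched-out wait.
 */
static int evt_wait_split(struct evt_wait_info *info, long timeout_ms)
{
	int ret = 0, i;

	for (i = 0; i < EVT_TIME_OUT_SPLIT; i++) {
		ret = evt_wait_timeout(info, timeout_ms / EVT_TIME_OUT_SPLIT);
		if (ret)
			break;
	}
	return ret;
}

/* Simulate the "irq" arriving 30 ms after the wait starts. */
static void *signaler(void *arg)
{
	struct evt_wait_info *info = arg;

	usleep(30 * 1000);
	pthread_mutex_lock(&info->lock);
	atomic_store(&info->pending, 0);
	pthread_cond_broadcast(&info->cond);
	pthread_mutex_unlock(&info->lock);
	return NULL;
}

int main(void)
{
	struct evt_wait_info info = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	atomic_init(&info.pending, 1);
	pthread_create(&tid, NULL, signaler, &info);
	printf("wait %s\n", evt_wait_split(&info, 100) ? "done" : "timed out");
	pthread_join(tid, NULL);
	return 0;
}

The design point the split buys: if the first slice's timer expiry is delivered late because interrupts were masked, it costs at most timeout_ms/EVT_TIME_OUT_SPLIT; the remaining slice then runs with timers live, so the event still gets a fair window before the API reports a timeout.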