drivers/gpu/drm/msm/sde/sde_color_processing.c +7 −11

-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by

@@ -835,7 +835,6 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
     bool set_dspp_flush = false, set_lm_flush = false;
     struct sde_cp_node *prop_node = NULL, *n = NULL;
     struct sde_hw_ctl *ctl;
-    uint32_t flush_mask = 0;
     u32 num_mixers = 0, i = 0;

     if (!crtc || !crtc->dev) {

@@ -892,18 +891,15 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
         ctl = sde_crtc->mixers[i].hw_ctl;
         if (!ctl)
             continue;
-        if (set_dspp_flush && ctl->ops.get_bitmask_dspp
+        if (set_dspp_flush && ctl->ops.update_bitmask_dspp
                 && sde_crtc->mixers[i].hw_dspp) {
-            ctl->ops.get_bitmask_dspp(ctl, &flush_mask,
-                    sde_crtc->mixers[i].hw_dspp->idx);
-            ctl->ops.update_pending_flush(ctl, flush_mask);
+            ctl->ops.update_bitmask_dspp(ctl,
+                    sde_crtc->mixers[i].hw_dspp->idx, 1);
         }
-        if (set_lm_flush && ctl->ops.get_bitmask_mixer
+        if (set_lm_flush && ctl->ops.update_bitmask_mixer
                 && sde_crtc->mixers[i].hw_lm) {
-            flush_mask = ctl->ops.get_bitmask_mixer(ctl,
-                    sde_crtc->mixers[i].hw_lm->idx);
-            ctl->ops.update_pending_flush(ctl, flush_mask);
+            ctl->ops.update_bitmask_mixer(ctl,
+                    sde_crtc->mixers[i].hw_lm->idx, 1);
         }
     }
 exit:
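These two hunks show the calling-convention change that the rest of the series follows: instead of asking the CTL for a block's flush bit (get_bitmask_dspp()/get_bitmask_mixer()) and pushing it back in with update_pending_flush(), callers now only name the block through update_bitmask_dspp()/update_bitmask_mixer() and the CTL layer maintains the pending mask internally. Below is a minimal stand-alone C model of that convention; the toy_ structs, the bit position, and the meaning of the third argument are assumptions made for illustration and do not reflect the real sde_hw_ctl register layout.

    #include <stdio.h>
    #include <stdint.h>

    /* toy stand-in for struct sde_hw_ctl: the pending mask is private to it */
    struct toy_ctl {
        uint32_t pending_flush_mask;
    };

    /* new-style helper: the caller names the block, the CTL computes the bit */
    static void toy_update_bitmask_mixer(struct toy_ctl *ctl, int lm_idx,
                                         int enable)
    {
        uint32_t bit = 1u << (6 + lm_idx);    /* invented bit mapping */

        if (enable)
            ctl->pending_flush_mask |= bit;
        else
            ctl->pending_flush_mask &= ~bit;
    }

    int main(void)
    {
        struct toy_ctl ctl = { 0 };

        toy_update_bitmask_mixer(&ctl, 0, 1);    /* stage LM 0 for flush */
        toy_update_bitmask_mixer(&ctl, 1, 1);    /* stage LM 1 for flush */
        toy_update_bitmask_mixer(&ctl, 1, 0);    /* un-stage LM 1 again */
        printf("pending flush mask: 0x%x\n",
               (unsigned int)ctl.pending_flush_mask);
        return 0;
    }

Keeping the mask inside the CTL, together with the enable/disable flag, is what lets the error paths un-stage a single block later (see _sde_crtc_remove_pipe_flush() in sde_crtc.c below) without the old read/clear/rewrite sequence.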
drivers/gpu/drm/msm/sde/sde_crtc.c +56 −62

@@ -1469,11 +1469,10 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
     struct sde_hw_mixer *lm;
     struct sde_hw_stage_cfg *stage_cfg;
     struct sde_rect plane_crtc_roi;
-    u32 flush_mask, flush_sbuf, prefill;
+    uint32_t prefill;
     uint32_t stage_idx, lm_idx;
     int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
-    int i;
+    int i, rot_id = 0;
     bool bg_alpha_enable = false;

     if (!sde_crtc || !crtc->state || !mixer) {

@@ -1487,9 +1486,9 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
     cstate = to_sde_crtc_state(crtc->state);

     cstate->sbuf_prefill_line = _sde_crtc_calc_inline_prefill(crtc);
-    sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask_all;
-    sde_crtc->sbuf_flush_mask_all = 0x0;
-    sde_crtc->sbuf_flush_mask_delta = 0x0;
+    sde_crtc->sbuf_rot_id_old = sde_crtc->sbuf_rot_id;
+    sde_crtc->sbuf_rot_id = 0x0;
+    sde_crtc->sbuf_rot_id_delta = 0x0;

     drm_atomic_crtc_for_each_plane(plane, crtc) {
         state = plane->state;

@@ -1509,13 +1508,16 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
         if (prefill)
             cstate->sbuf_prefill_line = prefill;

-        sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
+        sde_plane_ctl_flush(plane, ctl, true);
+        rot_id = sde_plane_get_sbuf_id(plane);

-        /* save sbuf flush value for later */
+        /* save sbuf id for later */
         if (old_state && drm_atomic_get_existing_plane_state(
-                    old_state->state, plane))
-            sde_crtc->sbuf_flush_mask_delta |= flush_sbuf;
-        sde_crtc->sbuf_flush_mask_all |= flush_sbuf;
+                    old_state->state, plane) &&
+                    !sde_crtc->sbuf_rot_id_old)
+            sde_crtc->sbuf_rot_id_delta = rot_id;
+
+        if (!sde_crtc->sbuf_rot_id)
+            sde_crtc->sbuf_rot_id = rot_id;

         SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
                 crtc->base.id,

@@ -1539,7 +1541,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
                 state->src_w >> 16, state->src_h >> 16,
                 state->crtc_x, state->crtc_y,
                 state->crtc_w, state->crtc_h,
-                flush_sbuf != 0);
+                rot_id != 0);

         stage_idx = zpos_cnt[pstate->stage]++;
         stage_cfg->stage[pstate->stage][stage_idx] =

@@ -1556,7 +1558,6 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
         for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
             _sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
-            mixer[lm_idx].flush_mask |= flush_mask;

             if (bg_alpha_enable && !format->alpha_enable)
                 mixer[lm_idx].mixer_op_mode = 0;

@@ -1653,6 +1654,7 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
     struct sde_crtc_mixer *mixer;
     struct sde_hw_ctl *ctl;
     struct sde_hw_mixer *lm;
+    struct sde_ctl_flush_cfg cfg = {0,};
     int i;

@@ -1676,7 +1678,6 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
             return;
         }
         mixer[i].mixer_op_mode = 0;
-        mixer[i].flush_mask = 0;
         if (mixer[i].hw_ctl->ops.clear_all_blendstages)
             mixer[i].hw_ctl->ops.clear_all_blendstages(
                     mixer[i].hw_ctl);

@@ -1711,18 +1712,15 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
         lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

-        mixer[i].pipe_mask = mixer[i].flush_mask;
-
-        mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
-            mixer[i].hw_lm->idx);
-
         /* stage config flush mask */
-        ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+        ctl->ops.update_bitmask_mixer(ctl, mixer[i].hw_lm->idx, 1);
+        ctl->ops.get_pending_flush(ctl, &cfg);

         SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
             mixer[i].hw_lm->idx - LM_0,
             mixer[i].mixer_op_mode,
             ctl->idx - CTL_0,
-            mixer[i].flush_mask);
+            cfg.pending_flush_mask);

         ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
             &sde_crtc->stage_cfg);

@@ -2199,7 +2197,7 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
     struct sde_hw_ds *hw_ds;
     struct sde_hw_ds_cfg *cfg;
     struct sde_kms *kms;
-    u32 flush_mask = 0, op_mode = 0;
+    u32 op_mode = 0;
     u32 lm_idx = 0, num_mixers = 0;
     int i, count = 0;
     bool ds_dirty = false;

@@ -2275,14 +2273,9 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
             /*
              * Dest scaler shares the flush bit of the LM in control
              */
-            if (hw_ctl->ops.get_bitmask_mixer) {
-                flush_mask = hw_ctl->ops.get_bitmask_mixer(
-                        hw_ctl, hw_lm->idx);
-                SDE_DEBUG("Set lm[%d] flush = %d",
-                        hw_lm->idx, flush_mask);
-                hw_ctl->ops.update_pending_flush(hw_ctl,
-                        flush_mask);
-            }
+            if (hw_ctl && hw_ctl->ops.update_bitmask_mixer)
+                hw_ctl->ops.update_bitmask_mixer(
+                        hw_ctl, hw_lm->idx, 1);
         }
     }
 }

@@ -3479,7 +3472,7 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
     struct drm_plane *plane;
     struct sde_crtc *sde_crtc;
     struct sde_hw_ctl *ctl, *master_ctl;
-    u32 flush_mask;
+    enum sde_rot rot_id = SDE_NONE;
     int i, rc = 0;

     if (!crtc || !cstate)

@@ -3488,29 +3481,28 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
     sde_crtc = to_sde_crtc(crtc);

     /*
-     * Update sbuf configuration and flush bits if either the rot_op_mode
+     * Update sbuf configuration and flush rotator if the rot_op_mode
      * is different or a rotator commit was performed.
      *
-     * In the case where the rot_op_mode has changed, further require that
-     * the transition is either to or from offline mode unless
-     * sbuf_flush_mask_delta is also non-zero (i.e., a corresponding plane
-     * update was provided to the current commit).
+     * In case where the rot_op_mode has changed, further require that
+     * the transition is either to or from offline mode unless corresponding
+     * plane update was provided to current commit.
      */
-    flush_mask = sde_crtc->sbuf_flush_mask_delta;
+    rot_id = sde_crtc->sbuf_rot_id_delta;
     if ((sde_crtc->sbuf_op_mode_old != cstate->sbuf_cfg.rot_op_mode) &&
             (sde_crtc->sbuf_op_mode_old == SDE_CTL_ROT_OP_MODE_OFFLINE ||
             cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE))
-        flush_mask |= sde_crtc->sbuf_flush_mask_all |
-                sde_crtc->sbuf_flush_mask_old;
+        rot_id |= sde_crtc->sbuf_rot_id | sde_crtc->sbuf_rot_id_old;

-    if (!flush_mask &&
+    if (!rot_id &&
             cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
         return 0;

     SDE_ATRACE_BEGIN("crtc_kickoff_rot");

     if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE &&
-            sde_crtc->sbuf_flush_mask_delta) {
+            sde_crtc->sbuf_rot_id_delta) {
         drm_atomic_crtc_for_each_plane(plane, crtc) {
             rc = sde_plane_kickoff_rot(plane);
             if (rc) {

@@ -3540,8 +3532,8 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
     }

     /* only update sbuf_cfg and flush for master ctl */
-    if (master_ctl && master_ctl->ops.update_pending_flush) {
-        master_ctl->ops.update_pending_flush(master_ctl, flush_mask);
+    if (master_ctl && master_ctl->ops.update_bitmask_rot) {
+        master_ctl->ops.update_bitmask_rot(master_ctl, rot_id, 1);

         /* explicitly trigger rotator for async modes */
         if (cstate->sbuf_cfg.rot_op_mode ==

@@ -3549,8 +3541,8 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
                 master_ctl->ops.trigger_rot_start)
             master_ctl->ops.trigger_rot_start(master_ctl);

         SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0,
-                sde_crtc->sbuf_flush_mask_all,
-                sde_crtc->sbuf_flush_mask_delta);
+                sde_crtc->sbuf_rot_id,
+                sde_crtc->sbuf_rot_id_delta);
     }

     /* save this in sde_crtc for next commit cycle */

@@ -3562,30 +3554,32 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
 /**
  * _sde_crtc_remove_pipe_flush - remove staged pipes from flush mask
- * @sde_crtc: Pointer to sde crtc structure
+ * @crtc: Pointer to crtc structure
  */
-static void _sde_crtc_remove_pipe_flush(struct sde_crtc *sde_crtc)
+static void _sde_crtc_remove_pipe_flush(struct drm_crtc *crtc)
 {
+    struct drm_plane *plane;
+    struct drm_plane_state *state;
+    struct sde_crtc *sde_crtc;
     struct sde_crtc_mixer *mixer;
     struct sde_hw_ctl *ctl;
-    u32 i, n, flush_mask;

-    if (!sde_crtc)
+    if (!crtc)
         return;

+    sde_crtc = to_sde_crtc(crtc);
     mixer = sde_crtc->mixers;
-    n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
-    for (i = 0; i < n; i++) {
-        ctl = mixer[i].hw_ctl;
-        if (!ctl || !ctl->ops.get_pending_flush ||
-                !ctl->ops.clear_pending_flush ||
-                !ctl->ops.update_pending_flush)
+    if (!mixer)
+        return;
+
+    ctl = mixer->hw_ctl;
+
+    drm_atomic_crtc_for_each_plane(plane, crtc) {
+        state = plane->state;
+        if (!state)
             continue;

-        flush_mask = ctl->ops.get_pending_flush(ctl);
-        flush_mask &= ~mixer[i].pipe_mask;
-        ctl->ops.clear_pending_flush(ctl);
-        ctl->ops.update_pending_flush(ctl, flush_mask);
+        /* clear plane flush bitmask */
+        sde_plane_ctl_flush(plane, ctl, false);
     }
 }

@@ -3696,7 +3690,7 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
     /* provide safe "border color only" commit configuration for later */
     cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
     _sde_crtc_commit_kickoff_rot(crtc, cstate);
-    _sde_crtc_remove_pipe_flush(sde_crtc);
+    _sde_crtc_remove_pipe_flush(crtc);
     _sde_crtc_blend_setup(crtc, old_state, false);

     /* take h/w components out of reset */

@@ -3753,7 +3747,7 @@ static bool _sde_crtc_prepare_for_kickoff_rot(struct drm_device *dev,
     cstate = to_sde_crtc_state(crtc->state);

     /* default to ASYNC mode for inline rotation */
-    cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask_all ?
+    cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_rot_id ?
         SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;

     if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)

@@ -3897,7 +3891,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
     sde_vbif_clear_errors(sde_kms);

     if (is_error) {
-        _sde_crtc_remove_pipe_flush(sde_crtc);
+        _sde_crtc_remove_pipe_flush(crtc);
         _sde_crtc_blend_setup(crtc, old_state, false);
     }
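A second convention change is visible in _sde_crtc_blend_setup() above: get_pending_flush() no longer returns a bare u32 but fills a struct sde_ctl_flush_cfg, and callers read cfg.pending_flush_mask from it. Below is a small stand-alone sketch of that out-parameter style; the toy_ structs are trimmed to the one field this diff actually uses and are not the driver's real definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* trimmed stand-ins for struct sde_ctl_flush_cfg and struct sde_hw_ctl */
    struct toy_flush_cfg {
        uint32_t pending_flush_mask;
    };

    struct toy_ctl {
        uint32_t pending_flush_mask;
    };

    /* new-style accessor: fill the caller's cfg instead of returning a u32 */
    static void toy_get_pending_flush(const struct toy_ctl *ctl,
                                      struct toy_flush_cfg *cfg)
    {
        cfg->pending_flush_mask = ctl->pending_flush_mask;
    }

    int main(void)
    {
        struct toy_ctl ctl = { .pending_flush_mask = 0x440 };
        struct toy_flush_cfg cfg = { 0 };

        toy_get_pending_flush(&ctl, &cfg);
        printf("pending flush mask: 0x%x\n",
               (unsigned int)cfg.pending_flush_mask);
        return 0;
    }

Handing out a struct instead of an integer gives the flush bookkeeping room to grow beyond a single 32-bit register without having to change every caller again.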
drivers/gpu/drm/msm/sde/sde_crtc.h +7 −11

@@ -110,8 +110,6 @@ struct sde_crtc_retire_event {
  * @hw_ds:          DS HW driver context
  * @encoder:        Encoder attached to this lm & ctl
  * @mixer_op_mode:  mixer blending operation mode
- * @flush_mask:     mixer flush mask for ctl, mixer and pipe
- * @pipe_mask:      mixer flush mask for pipe
  */
 struct sde_crtc_mixer {
     struct sde_hw_mixer *hw_lm;

@@ -120,8 +118,6 @@ struct sde_crtc_mixer {
     struct sde_hw_ds *hw_ds;
     struct drm_encoder *encoder;
     u32 mixer_op_mode;
-    u32 flush_mask;
-    u32 pipe_mask;
 };

@@ -216,9 +212,9 @@ struct sde_crtc_event {
  * @misr_frame_count  : misr frame count provided by client
  * @misr_data         : store misr data before turning off the clocks.
  * @sbuf_op_mode_old  : inline rotator op mode for previous commit cycle
- * @sbuf_flush_mask_old  : inline rotator flush mask for previous commit
- * @sbuf_flush_mask_all  : inline rotator flush mask for all attached planes
- * @sbuf_flush_mask_delta: inline rotator flush mask for current delta state
+ * @sbuf_rot_id       : inline rotator block id for attached planes
+ * @sbuf_rot_id_old   : inline rotator id for previous commit
+ * @sbuf_rot_id_delta : inline rotator id for current delta state
  * @idle_notify_work  : delayed worker to notify idle timeout to user space
  * @power_event       : registered power event handle
  * @cur_perf          : current performance committed to clock/bandwidth driver

@@ -287,9 +283,9 @@ struct sde_crtc {
     u32 misr_data[CRTC_DUAL_MIXERS];

     u32 sbuf_op_mode_old;
-    u32 sbuf_flush_mask_old;
-    u32 sbuf_flush_mask_all;
-    u32 sbuf_flush_mask_delta;
+    u32 sbuf_rot_id;
+    u32 sbuf_rot_id_old;
+    u32 sbuf_rot_id_delta;

     struct kthread_delayed_work idle_notify_work;
     struct sde_power_event *power_event;
drivers/gpu/drm/msm/sde/sde_encoder.c +174 −22

@@ -728,6 +728,65 @@ void sde_encoder_destroy(struct drm_encoder *drm_enc)
     kfree(sde_enc);
 }

+void sde_encoder_helper_update_intf_cfg(
+        struct sde_encoder_phys *phys_enc)
+{
+    struct sde_encoder_virt *sde_enc;
+    struct sde_hw_intf_cfg_v1 *intf_cfg;
+    enum sde_3d_blend_mode mode_3d;
+
+    if (!phys_enc) {
+        SDE_ERROR("invalid arg, encoder %d\n", phys_enc != 0);
+        return;
+    }
+
+    sde_enc = to_sde_encoder_virt(phys_enc->parent);
+    intf_cfg = &sde_enc->cur_master->intf_cfg_v1;
+
+    SDE_DEBUG_ENC(sde_enc, "intf_cfg updated for %d at idx %d\n",
+            phys_enc->intf_idx, intf_cfg->intf_count);
+
+    /* setup interface configuration */
+    if (intf_cfg->intf_count >= MAX_INTF_PER_CTL_V1) {
+        pr_err("invalid inf_count %d\n", intf_cfg->intf_count);
+        return;
+    }
+    intf_cfg->intf[intf_cfg->intf_count++] = phys_enc->intf_idx;
+    if (phys_enc == sde_enc->cur_master) {
+        if (sde_enc->cur_master->intf_mode == INTF_MODE_CMD)
+            intf_cfg->intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+        else
+            intf_cfg->intf_mode_sel = SDE_CTL_MODE_SEL_VID;
+    }
+
+    /* configure this interface as master for split display */
+    if (phys_enc->split_role == ENC_ROLE_MASTER)
+        intf_cfg->intf_master = phys_enc->hw_intf->idx;
+
+    /* setup which pp blk will connect to this intf */
+    if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+        phys_enc->hw_intf->ops.bind_pingpong_blk(phys_enc->hw_intf, true,
+                phys_enc->hw_pp->idx);
+
+    /*setup merge_3d configuration */
+    mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+    if (mode_3d && phys_enc->hw_pp->merge_3d &&
+            intf_cfg->merge_3d_count < MAX_MERGE_3D_PER_CTL_V1)
+        intf_cfg->merge_3d[intf_cfg->merge_3d_count++] =
+                phys_enc->hw_pp->merge_3d->idx;
+
+    if (phys_enc->hw_pp->ops.setup_3d_mode)
+        phys_enc->hw_pp->ops.setup_3d_mode(phys_enc->hw_pp, mode_3d);
+}
+
 void sde_encoder_helper_split_config(
         struct sde_encoder_phys *phys_enc,
         enum sde_intf interface)

@@ -1086,6 +1145,9 @@ static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
     if (hw_pp->ops.setup_dsc)
         hw_pp->ops.setup_dsc(hw_pp);

+    if (hw_dsc->ops.bind_pingpong_blk)
+        hw_dsc->ops.bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
+
     if (hw_pp->ops.enable_dsc)
         hw_pp->ops.enable_dsc(hw_pp);
 }

@@ -1121,6 +1183,8 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
     const struct sde_rect *roi = &sde_enc->cur_conn_roi;
     struct msm_mode_info mode_info;
     struct msm_display_dsc_info *dsc = NULL;
+    struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
+    struct sde_ctl_dsc_cfg cfg;
     int rc;

     if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {

@@ -1134,8 +1198,8 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
         return -EINVAL;
     }

+    memset(&cfg, 0, sizeof(cfg));
     dsc = &mode_info.comp_info.dsc_info;
-
     _sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);

     this_frame_slices = roi->w / dsc->slice_width;

@@ -1156,6 +1220,25 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
     _sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
             ich_res, true);

+    if (cfg.dsc_count >= MAX_DSC_PER_CTL_V1) {
+        pr_err("Invalid dsc count:%d\n", cfg.dsc_count);
+        return -EINVAL;
+    }
+
+    cfg.dsc[cfg.dsc_count++] = hw_dsc->idx;
+
+    /* setup dsc active configuration in the control path */
+    if (hw_ctl->ops.setup_dsc_cfg) {
+        hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
+        SDE_DEBUG_ENC(sde_enc,
+                "setup dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
+                hw_ctl->idx, cfg.dsc_count, cfg.dsc[0], cfg.dsc[1]);
+    }
+
+    if (hw_ctl->ops.update_bitmask_dsc)
+        hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc->idx, 1);
+
     return 0;
 }

@@ -1174,8 +1257,12 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
     struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
     struct msm_mode_info mode_info;
     bool half_panel_partial_update;
+    struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
+    struct sde_ctl_dsc_cfg cfg;
     int i, rc;

+    memset(&cfg, 0, sizeof(cfg));
+
     for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
         hw_pp[i] = sde_enc->hw_pp[i];
         hw_dsc[i] = sde_enc->hw_dsc[i];

@@ -1249,8 +1336,32 @@
                 dsc_common_mode, i, active);
         _sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
                 dsc_common_mode, ich_res, active);
+
+        if (active) {
+            if (cfg.dsc_count >= MAX_DSC_PER_CTL_V1) {
+                pr_err("Invalid dsc count:%d\n", cfg.dsc_count);
+                return -EINVAL;
+            }
+            cfg.dsc[i] = hw_dsc[i]->idx;
+            cfg.dsc_count++;
+
+            if (hw_ctl->ops.update_bitmask_dsc)
+                hw_ctl->ops.update_bitmask_dsc(hw_ctl,
+                        hw_dsc[i]->idx, 1);
+        }
     }

+    /* setup dsc active configuration in the control path */
+    if (hw_ctl->ops.setup_dsc_cfg) {
+        hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
+        SDE_DEBUG_ENC(sde_enc,
+                "setup dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
+                hw_ctl->idx, cfg.dsc_count, cfg.dsc[0], cfg.dsc[1]);
+    }
+
     return 0;
 }

@@ -1268,8 +1379,12 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
     struct msm_display_dsc_info *dsc = NULL;
     struct msm_mode_info mode_info;
     bool half_panel_partial_update;
+    struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
+    struct sde_ctl_dsc_cfg cfg;
     int i, rc;

+    memset(&cfg, 0, sizeof(cfg));
+
     for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
         hw_pp[i] = sde_enc->hw_pp[i];
         hw_dsc[i] = sde_enc->hw_dsc[i];

@@ -1320,9 +1435,31 @@
     _sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
             ich_res, true);
+    cfg.dsc[0] = hw_dsc[0]->idx;
+    cfg.dsc_count++;
+    if (hw_ctl->ops.update_bitmask_dsc)
+        hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc[0]->idx, 1);
+
     _sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
             ich_res, !half_panel_partial_update);
+    if (!half_panel_partial_update) {
+        cfg.dsc[1] = hw_dsc[1]->idx;
+        cfg.dsc_count++;
+        if (hw_ctl->ops.update_bitmask_dsc)
+            hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc[1]->idx,
+                    1);
+    }
+
+    /* setup dsc active configuration in the control path */
+    if (hw_ctl->ops.setup_dsc_cfg) {
+        hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
+        SDE_DEBUG_ENC(sde_enc,
+                "setup_dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
+                hw_ctl->idx, cfg.dsc_count, cfg.dsc[0], cfg.dsc[1]);
+    }

     return 0;
 }

@@ -2480,6 +2617,12 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
             sde_enc->cur_master->hw_mdptop,
             sde_kms->catalog);

+    if (sde_enc->cur_master->hw_ctl &&
+            sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1)
+        sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1(
+                sde_enc->cur_master->hw_ctl,
+                &sde_enc->cur_master->intf_cfg_v1);
+
     _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
     sde_encoder_control_te(drm_enc, true);

@@ -2497,6 +2640,8 @@ void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
         return;
     }
     sde_enc = to_sde_encoder_virt(drm_enc);
+    memset(&sde_enc->cur_master->intf_cfg_v1, 0,
+            sizeof(sde_enc->cur_master->intf_cfg_v1));

     for (i = 0; i < sde_enc->num_phys_encs; i++) {
         struct sde_encoder_phys *phys = sde_enc->phys_encs[i];

@@ -2575,6 +2720,9 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
         return;
     }

+    memset(&sde_enc->cur_master->intf_cfg_v1, 0,
+            sizeof(sde_enc->cur_master->intf_cfg_v1));
+
     for (i = 0; i < sde_enc->num_phys_encs; i++) {
         struct sde_encoder_phys *phys = sde_enc->phys_encs[i];

@@ -2904,10 +3052,11 @@ static void sde_encoder_off_work(struct kthread_work *work)
  * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  * drm_enc: Pointer to drm encoder structure
  * phys: Pointer to physical encoder structure
- * extra_flush_bits: Additional bit mask to include in flush trigger
+ * extra_flush: Additional bit mask to include in flush trigger
  */
 static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
-        struct sde_encoder_phys *phys, uint32_t extra_flush_bits)
+        struct sde_encoder_phys *phys,
+        struct sde_ctl_flush_cfg *extra_flush)
 {
     struct sde_hw_ctl *ctl;
     int pending_kickoff_cnt;

@@ -2942,18 +3091,24 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
     if (phys->ops.is_master && phys->ops.is_master(phys))
         atomic_inc(&phys->pending_retire_fence_cnt);

-    if (extra_flush_bits && ctl->ops.update_pending_flush)
-        ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+    if ((extra_flush && extra_flush->pending_flush_mask) &&
+            ctl->ops.update_pending_flush)
+        ctl->ops.update_pending_flush(ctl, extra_flush);

     phys->ops.trigger_flush(phys);

-    if (ctl->ops.get_pending_flush)
+    if (ctl->ops.get_pending_flush) {
+        struct sde_ctl_flush_cfg pending_flush = {0,};
+
+        ctl->ops.get_pending_flush(ctl, &pending_flush);
         SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
                 pending_kickoff_cnt, ctl->idx - CTL_0,
-                ctl->ops.get_pending_flush(ctl));
-    else
+                pending_flush.pending_flush_mask);
+    } else {
         SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
                 ctl->idx - CTL_0, pending_kickoff_cnt);
+    }
 }

@@ -3112,16 +3267,15 @@ void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
 static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
 {
     struct sde_hw_ctl *ctl;
-    uint32_t i, pending_flush;
+    uint32_t i;
     unsigned long lock_flags;
+    struct sde_ctl_flush_cfg pending_flush = {0,};

     if (!sde_enc) {
         SDE_ERROR("invalid encoder\n");
         return;
     }

-    pending_flush = 0x0;
-
     /*
      * Trigger LUT DMA flush, this might need a wait, so we need
      * to do this outside of the atomic context

@@ -3177,15 +3331,15 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
                 !phys->ops.needs_single_flush(phys))
             _sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
         else if (ctl->ops.get_pending_flush)
-            pending_flush |= ctl->ops.get_pending_flush(ctl);
+            ctl->ops.get_pending_flush(ctl, &pending_flush);
     }

     /* for split flush, combine pending flush masks and send to master */
-    if (pending_flush && sde_enc->cur_master) {
+    if (pending_flush.pending_flush_mask && sde_enc->cur_master) {
         _sde_encoder_trigger_flush(
                 &sde_enc->base,
                 sde_enc->cur_master,
-                pending_flush);
+                &pending_flush);
     }

     _sde_encoder_trigger_start(sde_enc->cur_master);

@@ -3912,12 +4066,10 @@ int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
             continue;

         /* need to flush LM to remove it */
-        if (phys_enc->hw_ctl->ops.get_bitmask_mixer &&
-                phys_enc->hw_ctl->ops.update_pending_flush)
-            phys_enc->hw_ctl->ops.update_pending_flush(
+        if (phys_enc->hw_ctl->ops.update_bitmask_mixer)
+            phys_enc->hw_ctl->ops.update_bitmask_mixer(
                     phys_enc->hw_ctl,
-                    phys_enc->hw_ctl->ops.get_bitmask_mixer(
-                    phys_enc->hw_ctl, hw_lm->idx));
+                    hw_lm->idx, 1);

         if (fb) {
             /* assume a single LM if targeting a frame buffer */
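The DSC hunks above all repeat one pattern: each active DSC block is appended to a struct sde_ctl_dsc_cfg (bounded by MAX_DSC_PER_CTL_V1), the collected set is programmed once through setup_dsc_cfg(), and each block is also staged for flush with update_bitmask_dsc(). The stand-alone sketch below models only the bounded-append step; the array size and index values are invented here, since the real MAX_DSC_PER_CTL_V1 value is not part of this diff.

    #include <stdio.h>

    #define TOY_MAX_DSC_PER_CTL 4    /* placeholder; real limit not in this diff */

    /* trimmed stand-in for struct sde_ctl_dsc_cfg */
    struct toy_dsc_cfg {
        int dsc[TOY_MAX_DSC_PER_CTL];
        int dsc_count;
    };

    /* bounded append, mirroring the check done before cfg.dsc[cfg.dsc_count++] */
    static int toy_dsc_cfg_add(struct toy_dsc_cfg *cfg, int dsc_idx)
    {
        if (cfg->dsc_count >= TOY_MAX_DSC_PER_CTL) {
            fprintf(stderr, "Invalid dsc count:%d\n", cfg->dsc_count);
            return -1;
        }
        cfg->dsc[cfg->dsc_count++] = dsc_idx;
        return 0;
    }

    int main(void)
    {
        struct toy_dsc_cfg cfg = { { 0 }, 0 };

        toy_dsc_cfg_add(&cfg, 1);    /* first DSC block active */
        toy_dsc_cfg_add(&cfg, 2);    /* second DSC block active */
        printf("count:%d dsc[0]:%d dsc[1]:%d\n",
               cfg.dsc_count, cfg.dsc[0], cfg.dsc[1]);
        return 0;
    }

In the driver the same overflow check guards the append in all three DSC topologies shown above (1 LM/1 enc, 2 LM/2 enc/2 intf, 2 LM/2 enc/1 intf).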
drivers/gpu/drm/msm/sde/sde_encoder_phys.h +15 −4

@@ -236,6 +236,7 @@ struct sde_encoder_irq {
  * @parent_ops:     Callbacks exposed by the parent to the phys_enc
  * @hw_mdptop:      Hardware interface to the top registers
  * @hw_ctl:         Hardware interface to the ctl registers
+ * @hw_intf:        Hardware interface to INTF registers
  * @hw_cdm:         Hardware interface to the cdm registers
  * @cdm_cfg:        Chroma-down hardware configuration
  * @hw_pp:          Hardware interface to the ping pong registers

@@ -245,6 +246,9 @@ struct sde_encoder_irq {
  * @split_role:     Role to play in a split-panel configuration
  * @intf_mode:      Interface mode
  * @intf_idx:       Interface index on sde hardware
+ * @intf_cfg:       Interface hardware configuration
+ * @intf_cfg_v1:    Interface hardware configuration to be used if control
+ *                  path supports SDE_CTL_ACTIVE_CFG
  * @comp_type:      Type of compression supported
  * @enc_spinlock:   Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @enable_state:   Enable state tracking

@@ -269,6 +273,7 @@
     struct sde_encoder_virt_ops parent_ops;
     struct sde_hw_mdp *hw_mdptop;
     struct sde_hw_ctl *hw_ctl;
+    struct sde_hw_intf *hw_intf;
     struct sde_hw_cdm *hw_cdm;
     struct sde_hw_cdm_cfg cdm_cfg;
     struct sde_hw_pingpong *hw_pp;

@@ -277,6 +282,8 @@ struct sde_encoder_phys {
     enum sde_enc_split_role split_role;
     enum sde_intf_mode intf_mode;
     enum sde_intf intf_idx;
+    struct sde_hw_intf_cfg intf_cfg;
+    struct sde_hw_intf_cfg_v1 intf_cfg_v1;
     enum msm_display_compression_type comp_type;
     spinlock_t *enc_spinlock;
     enum sde_enc_enable_state enable_state;

@@ -300,7 +307,6 @@ static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
  * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
  *     mode specific operations
  * @base:           Baseclass physical encoder structure
- * @hw_intf:        Hardware interface to the intf registers
  * @timing_params:  Current timing parameter
  * @rot_fetch:      Prefill for inline rotation
  * @error_count:    Number of consecutive kickoffs that experienced an error

@@ -308,7 +314,6 @@ static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
 struct sde_encoder_phys_vid {
     struct sde_encoder_phys base;
-    struct sde_hw_intf *hw_intf;
     struct intf_timing_params timing_params;
     struct intf_prog_fetch rot_fetch;
     int error_count;

@@ -366,7 +371,6 @@ struct sde_encoder_phys_cmd {
  * @wbdone_complete:    for wbdone irq synchronization
  * @wb_cfg:             Writeback hardware configuration
  * @cdp_cfg:            Writeback CDP configuration
- * @intf_cfg:           Interface hardware configuration
  * @wb_roi:             Writeback region-of-interest
  * @wb_fmt:             Writeback pixel format
  * @wb_fb:              Pointer to current writeback framebuffer

@@ -391,7 +395,6 @@ struct sde_encoder_phys_wb {
     struct completion wbdone_complete;
     struct sde_hw_wb_cfg wb_cfg;
     struct sde_hw_wb_cdp_cfg cdp_cfg;
-    struct sde_hw_intf_cfg intf_cfg;
     struct sde_rect wb_roi;
     const struct sde_format *wb_fmt;
     struct drm_framebuffer *wb_fb;

@@ -593,4 +596,12 @@ int sde_encoder_helper_register_irq(struct sde_encoder_phys *phys_enc,
 int sde_encoder_helper_unregister_irq(struct sde_encoder_phys *phys_enc,
         enum sde_intr_idx intr_idx);

+/**
+ * sde_encoder_helper_update_intf_cfg - update interface configuration for
+ *     single control path.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void sde_encoder_helper_update_intf_cfg(
+        struct sde_encoder_phys *phys_enc);
+
 #endif /* __sde_encoder_phys_H__ */
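sde_encoder_phys.h above is where the new interface state lands: sde_encoder_phys gains hw_intf, intf_cfg and intf_cfg_v1 members (moving hw_intf up from the video sub-class and intf_cfg up from the writeback sub-class), and sde_encoder_helper_update_intf_cfg() is exported so each physical encoder can register itself in the master's intf_cfg_v1 before the CTL is programmed once via setup_intf_cfg_v1() in _sde_encoder_virt_enable_helper() (the per-phys call sites are not part of this diff). The toy program below models only the bookkeeping the helper does; it collapses the two distinct master notions in the real helper (the virtual-encoder master, which chooses the command/video mode select, and the split-display master, which becomes intf_master) into one flag, and the interface limit is an assumed value.

    #include <stdio.h>

    #define TOY_MAX_INTF_PER_CTL 2    /* placeholder; real limit not in this diff */

    enum toy_mode_sel { TOY_MODE_SEL_VID, TOY_MODE_SEL_CMD };

    /* trimmed stand-in for struct sde_hw_intf_cfg_v1 */
    struct toy_intf_cfg {
        int intf[TOY_MAX_INTF_PER_CTL];
        int intf_count;
        int intf_master;
        enum toy_mode_sel intf_mode_sel;
    };

    static void toy_update_intf_cfg(struct toy_intf_cfg *cfg, int intf_idx,
                                    int is_master, int is_cmd_mode)
    {
        /* bounded append, as in the MAX_INTF_PER_CTL_V1 check above */
        if (cfg->intf_count >= TOY_MAX_INTF_PER_CTL)
            return;

        cfg->intf[cfg->intf_count++] = intf_idx;
        if (is_master) {
            cfg->intf_master = intf_idx;
            cfg->intf_mode_sel = is_cmd_mode ? TOY_MODE_SEL_CMD
                                             : TOY_MODE_SEL_VID;
        }
    }

    int main(void)
    {
        struct toy_intf_cfg cfg = { { 0 }, 0, 0, TOY_MODE_SEL_VID };

        toy_update_intf_cfg(&cfg, 1, 1, 1);    /* INTF_1: master, command mode */
        toy_update_intf_cfg(&cfg, 2, 0, 1);    /* INTF_2: slave */
        printf("count=%d master=INTF_%d cmd_mode=%d\n", cfg.intf_count,
               cfg.intf_master, cfg.intf_mode_sel == TOY_MODE_SEL_CMD);
        return 0;
    }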