drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c (+3 −2)

@@ -354,8 +354,6 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 		reg_ctrl |= (reg << offset);
 		reg_ctrl2 &= ~(0xFFFF << offset);
 		reg_ctrl2 |= (dsc.bytes_in_slice << offset);
-		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
-		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
 		pr_debug("ctrl %d reg_ctrl 0x%x reg_ctrl2 0x%x\n", ctrl->index,
 				reg_ctrl, reg_ctrl2);

@@ -373,6 +371,9 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 		stream_ctrl |= (vc_id & 0x3) << 8;
 		stream_ctrl |= 0x39; /* packet data type */

+		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+
 		DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, stream_ctrl);
 		DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, stream_ctrl);
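The change above defers the two compression-mode register writes so they are issued together with the MDP stream control writes instead of earlier in dsi_ctrl_hw_cmn_setup_cmd_stream(). A minimal, runnable sketch of the post-patch write order; DSI_W32 is mocked here as a logging macro and the values are dummies, so only the register names are taken from the diff:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's DSI_W32 MMIO accessor: it only logs, which is
 * enough to show the ordering of the four command-mode writes. */
#define DSI_W32(reg, val) printf("%-36s <= 0x%08x\n", #reg, (uint32_t)(val))

int main(void)
{
	uint32_t reg_ctrl = 0x100, reg_ctrl2 = 0x200, stream_ctrl = 0x39;

	/* Post-patch order: compression setup is committed immediately
	 * before the stream control registers, not during DSC setup. */
	DSI_W32(DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
	DSI_W32(DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
	DSI_W32(DSI_COMMAND_MODE_MDP_STREAM0_CTRL, stream_ctrl);
	DSI_W32(DSI_COMMAND_MODE_MDP_STREAM1_CTRL, stream_ctrl);
	return 0;
}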
drivers/gpu/drm/msm/sde/sde_encoder.c (+109 −0)

@@ -1237,6 +1237,102 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
 	}
 }

+static int _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
+{
+	enum sde_rm_topology_name topology;
+	struct drm_connector *drm_conn;
+	int i, ret = 0;
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC] = {NULL};
+	int pp_count = 0;
+	int dsc_count = 0;
+
+	if (!sde_enc || !sde_enc->phys_encs[0] ||
+			!sde_enc->phys_encs[0]->connector) {
+		SDE_ERROR("invalid params %d %d\n", !sde_enc,
+				sde_enc ? !sde_enc->phys_encs[0] : -1);
+		return -EINVAL;
+	}
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+	topology = sde_connector_get_topology_name(drm_conn);
+	if (topology == SDE_RM_TOPOLOGY_NONE) {
+		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
+		return -EINVAL;
+	}
+
+	switch (topology) {
+	case SDE_RM_TOPOLOGY_SINGLEPIPE:
+	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
+		/* single PP */
+		hw_pp[0] = sde_enc->hw_pp[0];
+		hw_dsc[0] = sde_enc->hw_dsc[0];
+		pp_count = 1;
+		dsc_count = 1;
+		break;
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
+	case SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
+		/* dual dsc */
+		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+			hw_dsc[i] = sde_enc->hw_dsc[i];
+			if (hw_dsc[i])
+				dsc_count++;
+		}
+		/* fall through */
+	case SDE_RM_TOPOLOGY_DUALPIPE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE:
+		/* dual pp */
+		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+			hw_pp[i] = sde_enc->hw_pp[i];
+			if (hw_pp[i])
+				pp_count++;
+		}
+		break;
+	default:
+		SDE_ERROR_ENC(sde_enc, "Unexpected topology:%d\n", topology);
+		return -EINVAL;
+	}
+
+	SDE_EVT32(DRMID(&sde_enc->base), topology, pp_count, dsc_count);
+
+	if (pp_count > MAX_CHANNELS_PER_ENC ||
+			dsc_count > MAX_CHANNELS_PER_ENC) {
+		SDE_ERROR_ENC(sde_enc, "Wrong count pp:%d dsc:%d top:%d\n",
+				pp_count, dsc_count, topology);
+		return -EINVAL;
+	}
+
+	/* Disable DSC for all the pp's present in this topology */
+	for (i = 0; i < pp_count; i++) {
+		if (!hw_pp[i]) {
+			SDE_ERROR_ENC(sde_enc, "null pp:%d top:%d cnt:%d\n",
+					i, topology, pp_count);
+			return -EINVAL;
+		}
+
+		if (hw_pp[i]->ops.disable_dsc)
+			hw_pp[i]->ops.disable_dsc(hw_pp[i]);
+	}
+
+	/* Disable DSC HW */
+	for (i = 0; i < dsc_count; i++) {
+		if (!hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "null dsc:%d top:%d cnt:%d\n",
+					i, topology, dsc_count);
+			return -EINVAL;
+		}
+
+		if (hw_dsc[i]->ops.dsc_disable)
+			hw_dsc[i]->ops.dsc_disable(hw_dsc[i]);
+	}
+
+	return ret;
+}
+
 static int _sde_encoder_update_rsc_client(
 		struct drm_encoder *drm_enc,
 		struct sde_encoder_rsc_config *config, bool enable)

@@ -1876,6 +1972,12 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 					ret);
 			return;
 		}
+
+		/*
+		 * Disable DSC before switching the mode and after pre_modeset,
+		 * to guarantee that the previous kickoff has finished.
+		 */
+		_sde_encoder_dsc_disable(sde_enc);
 	}

 	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */

@@ -2107,6 +2209,13 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 			phys->ops.disable(phys);
 	}

+	/*
+	 * Disable DSC after the transfer is complete (for command mode)
+	 * and after the physical encoder is disabled, to make sure the
+	 * timing engine is already disabled (for video mode).
+	 */
+	_sde_encoder_dsc_disable(sde_enc);
+
 	/* after phys waits for frame-done, should be no more frames pending */
 	if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
 		SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
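Note the deliberate fall-through in the switch above: the dual-DSC topologies first collect their DSC blocks, then drop into the plain dual-pipe cases so the pingpong blocks are collected by the same loop. A stripped-down, runnable sketch of that pattern (the enum and helper here are hypothetical, not the driver's types):

#include <stdio.h>

enum topology { SINGLE, DUAL, DUAL_DSC };

/* Counts resources the way the driver's switch does: DUAL_DSC picks up
 * the DSC blocks first, then falls through to DUAL for the pingpongs. */
static void count_blocks(enum topology t, int *pp, int *dsc)
{
	*pp = 0;
	*dsc = 0;

	switch (t) {
	case SINGLE:
		*pp = 1;
		*dsc = 1;
		break;
	case DUAL_DSC:
		*dsc = 2;
		/* fall through */
	case DUAL:
		*pp = 2;
		break;
	}
}

int main(void)
{
	int pp, dsc;

	count_blocks(DUAL_DSC, &pp, &dsc);
	printf("pp=%d dsc=%d\n", pp, dsc);	/* prints: pp=2 dsc=2 */
	return 0;
}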
drivers/gpu/drm/msm/sde/sde_hw_pingpong.c (+5 −0)

@@ -165,11 +165,16 @@ static void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)

 static void sde_hw_pp_dsc_disable(struct sde_hw_pingpong *pp)
 {
 	struct sde_hw_blk_reg_map *c;
+	u32 data;

 	if (!pp)
 		return;
 	c = &pp->hw;

+	data = SDE_REG_READ(c, PP_DCE_DATA_OUT_SWAP);
+	data &= ~BIT(18); /* disable endian flip */
+	SDE_REG_WRITE(c, PP_DCE_DATA_OUT_SWAP, data);
+
 	SDE_REG_WRITE(c, PP_DSC_MODE, 0);
 }
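sde_hw_pp_dsc_disable() now clears only bit 18 of PP_DCE_DATA_OUT_SWAP (the endian-flip enable, per the comment) with a read-modify-write, presumably because the register carries other swap controls that must be preserved, and then zeroes PP_DSC_MODE as before. A runnable sketch of the same pattern against a mocked two-register file; every name below is a stand-in, only the bit position and the write sequence come from the diff:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Mock MMIO: [0] stands in for PP_DCE_DATA_OUT_SWAP, [1] for PP_DSC_MODE. */
static uint32_t regs[2];

static uint32_t reg_read(int off)            { return regs[off]; }
static void reg_write(int off, uint32_t val) { regs[off] = val; }

/* Read-modify-write: clear just the endian-flip bit so any other swap
 * bits survive, then turn the DSC mode register off entirely. */
static void pp_dsc_teardown(void)
{
	uint32_t data = reg_read(0);

	data &= ~BIT(18);	/* disable endian flip only */
	reg_write(0, data);
	reg_write(1, 0);	/* PP_DSC_MODE = 0 */
}

int main(void)
{
	regs[0] = BIT(18) | 0x5;	/* flip enabled + unrelated swap bits */
	regs[1] = 0x1;

	pp_dsc_teardown();
	printf("swap=0x%x dsc_mode=0x%x\n", regs[0], regs[1]); /* swap=0x5 dsc_mode=0x0 */
	return 0;
}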