diff --git a/techpack/display/Makefile b/techpack/display/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b2829628ff1343c3b25122b0e3e42d44e1439b3e --- /dev/null +++ b/techpack/display/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# auto-detect subdirs +ifeq ($(CONFIG_ARCH_KONA), y) +include $(srctree)/techpack/display/config/konadisp.conf +endif + +ifeq ($(CONFIG_ARCH_KONA), y) +LINUXINCLUDE += -include $(srctree)/techpack/display/config/konadispconf.h +endif + +ifeq ($(CONFIG_ARCH_LITO), y) +include $(srctree)/techpack/display/config/saipdisp.conf +endif + +ifeq ($(CONFIG_ARCH_LITO), y) +LINUXINCLUDE += -include $(srctree)/techpack/display/config/saipdispconf.h +endif + +ifeq ($(CONFIG_ARCH_BENGAL), y) +include $(srctree)/techpack/display/config/bengaldisp.conf +endif + +ifeq ($(CONFIG_ARCH_BENGAL), y) +LINUXINCLUDE += -include $(srctree)/techpack/display/config/bengaldispconf.h +endif + +obj-$(CONFIG_DRM_MSM) += msm/ +obj-$(CONFIG_MSM_SDE_ROTATOR) += rotator/ +obj-$(CONFIG_QCOM_MDSS_PLL) += pll/ diff --git a/techpack/display/NOTICE b/techpack/display/NOTICE new file mode 100644 index 0000000000000000000000000000000000000000..987146bb6ddb370d39cbb30b250227b4c7f97c72 --- /dev/null +++ b/techpack/display/NOTICE @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. +*/ + +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Copyright (C) 2014 Red Hat + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . +*/ + + +/* + * Copyright © 2014 Red Hatt. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ diff --git a/techpack/display/config/bengaldisp.conf b/techpack/display/config/bengaldisp.conf new file mode 100644 index 0000000000000000000000000000000000000000..1ef288bd41827e888fe8280fefc510e6b5fa3998 --- /dev/null +++ b/techpack/display/config/bengaldisp.conf @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2019, The Linux Foundation. All rights reserved. + +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=n +export CONFIG_QCOM_MDSS_DP_PLL=n +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=n +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DRM_SDE_RSC=n diff --git a/techpack/display/config/bengaldispconf.h b/techpack/display/config/bengaldispconf.h new file mode 100644 index 0000000000000000000000000000000000000000..c76a073ecccd99fa18165a0c3e0df00592d61b71 --- /dev/null +++ b/techpack/display/config/bengaldispconf.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 diff --git a/techpack/display/config/konadisp.conf b/techpack/display/config/konadisp.conf new file mode 100644 index 0000000000000000000000000000000000000000..dbbf3c847dbb7f4d5688f02fc0030228fb9246e5 --- /dev/null +++ b/techpack/display/config/konadisp.conf @@ -0,0 +1,13 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_QCOM_MDSS_DP_PLL=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DRM_SDE_RSC=y diff --git a/techpack/display/config/konadispconf.h b/techpack/display/config/konadispconf.h new file mode 100644 index 0000000000000000000000000000000000000000..690d4ec79f41ae9cf7e4c1065a0e6c89f51d9fd7 --- /dev/null +++ b/techpack/display/config/konadispconf.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_QCOM_MDSS_DP_PLL 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 +#define CONFIG_DRM_SDE_RSC 1 + diff --git a/techpack/display/config/saipdisp.conf b/techpack/display/config/saipdisp.conf new file mode 100644 index 0000000000000000000000000000000000000000..dbbf3c847dbb7f4d5688f02fc0030228fb9246e5 --- /dev/null +++ b/techpack/display/config/saipdisp.conf @@ -0,0 +1,13 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_QCOM_MDSS_DP_PLL=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DRM_SDE_RSC=y diff --git a/techpack/display/config/saipdispconf.h b/techpack/display/config/saipdispconf.h new file mode 100644 index 0000000000000000000000000000000000000000..0490248397010294c922921b4cbf29f8128ccf4a --- /dev/null +++ b/techpack/display/config/saipdispconf.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_QCOM_MDSS_DP_PLL 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 +#define CONFIG_DRM_SDE_RSC 1 diff --git a/techpack/display/msm/Makefile b/techpack/display/msm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4236c5bd78a1c60b4b442de9cf2359571fff7011 --- /dev/null +++ b/techpack/display/msm/Makefile @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: GPL-2.0 +ccflags-y := -I$(srctree)/include/drm -I$(srctree)/techpack/display/msm -I$(srctree)/techpack/display/msm/dsi -I$(srctree)/techpack/display/msm/dp +ccflags-y += -I$(srctree)/techpack/display/msm/sde +ccflags-y += -I$(srctree)/techpack/display/rotator + +msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_usbpd.o \ + dp/dp_parser.o \ + dp/dp_power.o \ + dp/dp_catalog.o \ + dp/dp_catalog_v420.o \ + dp/dp_catalog_v200.o \ + dp/dp_aux.o \ + dp/dp_panel.o \ + dp/dp_link.o \ + dp/dp_ctrl.o \ + dp/dp_audio.o \ + dp/dp_debug.o \ + dp/dp_hpd.o \ + dp/dp_gpio_hpd.o \ + dp/dp_lphw_hpd.o \ + dp/dp_display.o \ + dp/dp_drm.o \ + dp/dp_hdcp2p2.o \ + dp/dp_mst_drm.o \ + +msm_drm-$(CONFIG_DRM_MSM_SDE) += sde/sde_crtc.o \ + sde/sde_encoder.o \ + sde/sde_encoder_phys_vid.o \ + sde/sde_encoder_phys_cmd.o \ + sde/sde_irq.o \ + sde/sde_core_irq.o \ + sde/sde_core_perf.o \ + sde/sde_rm.o \ + sde/sde_kms_utils.o \ + sde/sde_kms.o \ + sde/sde_plane.o \ + sde/sde_connector.o \ + sde/sde_color_processing.o \ + sde/sde_vbif.o \ + sde_io_util.o \ + sde/sde_hw_reg_dma_v1_color_proc.o \ + sde/sde_hw_color_proc_v4.o \ + sde/sde_hw_ad4.o \ + sde/sde_hw_uidle.o \ + sde_edid_parser.o \ + sde_hdcp_1x.o \ + sde_hdcp_2x.o \ + sde/sde_hw_catalog.o \ + sde/sde_hw_cdm.o \ + 
sde/sde_hw_dspp.o \ + sde/sde_hw_intf.o \ + sde/sde_hw_lm.o \ + sde/sde_hw_ctl.o \ + sde/sde_hw_util.o \ + sde/sde_hw_sspp.o \ + sde/sde_hw_wb.o \ + sde/sde_hw_pingpong.o \ + sde/sde_hw_top.o \ + sde/sde_hw_interrupts.o \ + sde/sde_hw_vbif.o \ + sde/sde_hw_blk.o \ + sde/sde_formats.o \ + sde_power_handle.o \ + sde/sde_hw_color_processing_v1_7.o \ + sde/sde_reg_dma.o \ + sde/sde_hw_reg_dma_v1.o \ + sde/sde_hw_dsc.o \ + sde/sde_hw_ds.o \ + sde/sde_fence.o \ + sde/sde_hw_qdss.o \ + sde_dbg.o \ + sde_dbg_evtlog.o \ + sde/sde_hw_rc.o \ + +msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \ + sde/sde_encoder_phys_wb.o \ + +msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \ + sde_rsc_hw.o \ + sde_rsc_hw_v3.o \ + +msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi_phy.o \ + dsi/dsi_pwr.o \ + dsi/dsi_phy.o \ + dsi/dsi_phy_hw_v2_0.o \ + dsi/dsi_phy_hw_v3_0.o \ + dsi/dsi_phy_hw_v4_0.o \ + dsi/dsi_phy_timing_calc.o \ + dsi/dsi_phy_timing_v2_0.o \ + dsi/dsi_phy_timing_v3_0.o \ + dsi/dsi_phy_timing_v4_0.o \ + dsi/dsi_ctrl_hw_cmn.o \ + dsi/dsi_ctrl_hw_1_4.o \ + dsi/dsi_ctrl_hw_2_0.o \ + dsi/dsi_ctrl_hw_2_2.o \ + dsi/dsi_ctrl.o \ + dsi/dsi_catalog.o \ + dsi/dsi_drm.o \ + dsi/dsi_display.o \ + dsi/dsi_panel.o \ + dsi/dsi_clk_manager.o \ + dsi/dsi_display_test.o \ + +ifeq ($(CONFIG_PXLW_IRIS),y) +msm_drm-$(CONFIG_PXLW_IRIS) += dsi/iris/dsi_iris6_ioctl.o \ + dsi/iris/dsi_iris6_lightup.o \ + dsi/iris/dsi_iris6_lightup_ocp.o \ + dsi/iris/dsi_iris6_lp.o \ + dsi/iris/dsi_iris6_lut.o \ + dsi/iris/dsi_iris6_pq.o \ + dsi/iris/dsi_iris6_cmds.o \ + dsi/iris/dsi_iris6_i3c.o \ + dsi/iris/dsi_iris6_gpio.o \ + dsi/iris/dsi_iris6_loopback.o \ + dsi/iris/dsi_iris6_dbg.o +ccflags-y += -DCONFIG_PXLW_IRIS +endif + +msm_drm-$(CONFIG_DSI_PARSER) += dsi/dsi_parser.o \ + +msm_drm-$(CONFIG_DRM_MSM) += \ + msm_atomic.o \ + msm_fb.o \ + msm_iommu.o \ + msm_drv.o \ + msm_gem.o \ + msm_gem_prime.o \ + msm_gem_vma.o \ + msm_smmu.o \ + msm_prop.o \ + msm_notifier.o\ + +obj-$(CONFIG_DRM_MSM) += msm_drm.o + +obj-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o diff --git a/techpack/display/msm/dp/dp_audio.c b/techpack/display/msm/dp/dp_audio.c new file mode 100644 index 0000000000000000000000000000000000000000..90c3ad16dba6245475fe600421d7fdac044cd5a8 --- /dev/null +++ b/techpack/display/msm/dp/dp_audio.c @@ -0,0 +1,889 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include + +#include + +#include "dp_catalog.h" +#include "dp_audio.h" +#include "dp_panel.h" +#include "dp_debug.h" + +struct dp_audio_private { + struct platform_device *ext_pdev; + struct platform_device *pdev; + struct dp_catalog_audio *catalog; + struct msm_ext_disp_init_data ext_audio_data; + struct dp_panel *panel; + + bool ack_enabled; + atomic_t session_on; + bool engine_on; + + u32 channels; + + struct completion hpd_comp; + struct workqueue_struct *notify_workqueue; + struct delayed_work notify_delayed_work; + struct mutex ops_lock; + + struct dp_audio dp_audio; + + atomic_t acked; +}; + +static u32 dp_audio_get_header(struct dp_catalog_audio *catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->get_header(catalog); + + return catalog->data; +} + +static void dp_audio_set_header(struct dp_catalog_audio *catalog, + u32 data, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->data = data; + catalog->set_header(catalog); +} + +static void dp_audio_stream_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x02; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + new_value = 0x0; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = audio->channels - 1; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x1; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + 
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x17; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x84; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x1b; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x05; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x0F; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + 
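+ /*
+  * Note: this and the other *_sdp helpers in this file follow the same
+  * pattern: read the current SDP header word from the catalog, clear the
+  * 16-bit half being updated, derive a parity byte for the new header
+  * byte with dp_header_get_parity(), then OR both values into their
+  * HEADER_BYTE_n_BIT / PARITY_BYTE_n_BIT positions and write the word back.
+  */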
DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = 0x0; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x06; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x0F; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); +} + +static void dp_audio_setup_sdp(struct dp_audio_private *audio) +{ + if (!atomic_read(&audio->session_on)) { + DP_WARN("session inactive\n"); + return; + } + + /* always program stream 0 first before actual stream cfg */ + audio->catalog->stream_id = DP_STREAM_0; + audio->catalog->config_sdp(audio->catalog); + + if (audio->panel->stream_id == DP_STREAM_1) { + audio->catalog->stream_id = DP_STREAM_1; + audio->catalog->config_sdp(audio->catalog); + } + + dp_audio_stream_sdp(audio); + dp_audio_timestamp_sdp(audio); + dp_audio_infoframe_sdp(audio); + dp_audio_copy_management_sdp(audio); + dp_audio_isrc_sdp(audio); +} + +static void dp_audio_setup_acr(struct dp_audio_private *audio) +{ + u32 select = 0; + struct dp_catalog_audio *catalog = audio->catalog; + + if (!atomic_read(&audio->session_on)) { + DP_WARN("session inactive\n"); + return; + } + + switch (audio->dp_audio.bw_code) { + case DP_LINK_BW_1_62: + select = 0; + break; + case DP_LINK_BW_2_7: + select = 1; + break; + case DP_LINK_BW_5_4: + select = 2; + break; + case DP_LINK_BW_8_1: + select = 3; + break; + default: + DP_DEBUG("Unknown link rate\n"); + select = 0; + break; + } + + catalog->data = select; + catalog->config_acr(catalog); +} + +static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +{ + struct dp_catalog_audio *catalog = audio->catalog; + + audio->engine_on = enable; + if (!atomic_read(&audio->session_on)) { + DP_WARN("session inactive. 
enable=%d\n", enable); + return; + } + catalog->data = enable; + catalog->enable(catalog); + +} + +static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +{ + struct msm_ext_disp_data *ext_data; + struct dp_audio *dp_audio; + + if (!pdev) { + DP_ERR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + ext_data = platform_get_drvdata(pdev); + if (!ext_data) { + DP_ERR("invalid ext disp data\n"); + return ERR_PTR(-EINVAL); + } + + dp_audio = ext_data->intf_data; + if (!dp_audio) { + DP_ERR("invalid intf data\n"); + return ERR_PTR(-EINVAL); + } + + return container_of(dp_audio, struct dp_audio_private, dp_audio); +} + +static int dp_audio_info_setup(struct platform_device *pdev, + struct msm_ext_disp_audio_setup_params *params) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + return rc; + } + + mutex_lock(&audio->ops_lock); + + audio->channels = params->num_of_channels; + + if (audio->panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id: %d\n", + audio->panel->stream_id); + rc = -EINVAL; + mutex_unlock(&audio->ops_lock); + return rc; + } + + dp_audio_setup_sdp(audio); + dp_audio_setup_acr(audio); + dp_audio_enable(audio, true); + + mutex_unlock(&audio->ops_lock); + + DP_DEBUG("audio stream configured\n"); + + return rc; +} + +static int dp_audio_get_edid_blk(struct platform_device *pdev, + struct msm_ext_disp_audio_edid_blk *blk) +{ + int rc = 0; + struct dp_audio_private *audio; + struct sde_edid_ctrl *edid; + + if (!blk) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + if (!audio->panel || !audio->panel->edid_ctrl) { + DP_ERR("invalid panel data\n"); + rc = -EINVAL; + goto end; + } + + edid = audio->panel->edid_ctrl; + + blk->audio_data_blk = edid->audio_data_block; + blk->audio_data_blk_size = edid->adb_size; + + blk->spk_alloc_data_blk = edid->spkr_alloc_data_block; + blk->spk_alloc_data_blk_size = edid->sadb_size; +end: + return rc; +} + +static int dp_audio_get_cable_status(struct platform_device *pdev, u32 vote) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + return atomic_read(&audio->session_on); +end: + return rc; +} + +static int dp_audio_get_intf_id(struct platform_device *pdev) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + return EXT_DISPLAY_TYPE_DP; +end: + return rc; +} + +static void dp_audio_teardown_done(struct platform_device *pdev) +{ + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) + return; + + mutex_lock(&audio->ops_lock); + dp_audio_enable(audio, false); + mutex_unlock(&audio->ops_lock); + + atomic_set(&audio->acked, 1); + complete_all(&audio->hpd_comp); + + DP_DEBUG("audio engine disabled\n"); +} + +static int dp_audio_ack_done(struct platform_device *pdev, u32 ack) +{ + int rc = 0, ack_hpd; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + if (ack & AUDIO_ACK_SET_ENABLE) { + audio->ack_enabled = ack & AUDIO_ACK_ENABLE ? + true : false; + + DP_DEBUG("audio ack feature %s\n", + audio->ack_enabled ? 
"enabled" : "disabled"); + goto end; + } + + if (!audio->ack_enabled) + goto end; + + ack_hpd = ack & AUDIO_ACK_CONNECT; + + DP_DEBUG("acknowledging audio (%d)\n", ack_hpd); + + if (!audio->engine_on) { + atomic_set(&audio->acked, 1); + complete_all(&audio->hpd_comp); + } +end: + return rc; +} + +static int dp_audio_codec_ready(struct platform_device *pdev) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + DP_ERR("invalid input\n"); + rc = PTR_ERR(audio); + goto end; + } + + queue_delayed_work(audio->notify_workqueue, + &audio->notify_delayed_work, HZ/4); +end: + return rc; +} + +static int dp_audio_register_ext_disp(struct dp_audio_private *audio) +{ + int rc = 0; + struct device_node *pd = NULL; + const char *phandle = "qcom,ext-disp"; + struct msm_ext_disp_init_data *ext; + struct msm_ext_disp_audio_codec_ops *ops; + + ext = &audio->ext_audio_data; + ops = &ext->codec_ops; + + ext->codec.type = EXT_DISPLAY_TYPE_DP; + ext->codec.ctrl_id = 0; + ext->codec.stream_id = audio->panel->stream_id; + ext->pdev = audio->pdev; + ext->intf_data = &audio->dp_audio; + + ops->audio_info_setup = dp_audio_info_setup; + ops->get_audio_edid_blk = dp_audio_get_edid_blk; + ops->cable_status = dp_audio_get_cable_status; + ops->get_intf_id = dp_audio_get_intf_id; + ops->teardown_done = dp_audio_teardown_done; + ops->acknowledge = dp_audio_ack_done; + ops->ready = dp_audio_codec_ready; + + if (!audio->pdev->dev.of_node) { + DP_ERR("cannot find audio dev.of_node\n"); + rc = -ENODEV; + goto end; + } + + pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0); + if (!pd) { + DP_ERR("cannot parse %s handle\n", phandle); + rc = -ENODEV; + goto end; + } + + audio->ext_pdev = of_find_device_by_node(pd); + if (!audio->ext_pdev) { + DP_ERR("cannot find %s pdev\n", phandle); + rc = -ENODEV; + goto end; + } +#if defined(CONFIG_MSM_EXT_DISPLAY) + rc = msm_ext_disp_register_intf(audio->ext_pdev, ext); + if (rc) + DP_ERR("failed to register disp\n"); +#endif +end: + if (pd) + of_node_put(pd); + + return rc; +} + +static int dp_audio_deregister_ext_disp(struct dp_audio_private *audio) +{ + int rc = 0; + struct device_node *pd = NULL; + const char *phandle = "qcom,ext-disp"; + struct msm_ext_disp_init_data *ext; + + ext = &audio->ext_audio_data; + + if (!audio->pdev->dev.of_node) { + DP_ERR("cannot find audio dev.of_node\n"); + rc = -ENODEV; + goto end; + } + + pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0); + if (!pd) { + DP_ERR("cannot parse %s handle\n", phandle); + rc = -ENODEV; + goto end; + } + + audio->ext_pdev = of_find_device_by_node(pd); + if (!audio->ext_pdev) { + DP_ERR("cannot find %s pdev\n", phandle); + rc = -ENODEV; + goto end; + } + +#if defined(CONFIG_MSM_EXT_DISPLAY) + rc = msm_ext_disp_deregister_intf(audio->ext_pdev, ext); + if (rc) + DP_ERR("failed to deregister disp\n"); +#endif + +end: + return rc; +} + +static int dp_audio_notify(struct dp_audio_private *audio, u32 state) +{ + int rc = 0; + struct msm_ext_disp_init_data *ext = &audio->ext_audio_data; + + atomic_set(&audio->acked, 0); + + if (!ext->intf_ops.audio_notify) { + DP_ERR("audio notify not defined\n"); + goto end; + } + + reinit_completion(&audio->hpd_comp); + rc = ext->intf_ops.audio_notify(audio->ext_pdev, + &ext->codec, state); + if (rc) + goto end; + + if (atomic_read(&audio->acked)) + goto end; + + if (state == EXT_DISPLAY_CABLE_DISCONNECT && !audio->engine_on) + goto end; + + if (state == EXT_DISPLAY_CABLE_CONNECT) + goto end; + + rc = 
wait_for_completion_timeout(&audio->hpd_comp, HZ * 4); + if (!rc) { + DP_ERR("timeout. state=%d err=%d\n", state, rc); + rc = -ETIMEDOUT; + goto end; + } + + DP_DEBUG("success\n"); +end: + return rc; +} + +static int dp_audio_config(struct dp_audio_private *audio, u32 state) +{ + int rc = 0; + struct msm_ext_disp_init_data *ext = &audio->ext_audio_data; + + if (!ext || !ext->intf_ops.audio_config) { + DP_ERR("audio_config not defined\n"); + goto end; + } + + /* + * DP Audio sets default STREAM_0 only, other streams are + * set by audio driver based on the hardware/software support. + */ + if (audio->panel->stream_id == DP_STREAM_0) { + rc = ext->intf_ops.audio_config(audio->ext_pdev, + &ext->codec, state); + if (rc) + DP_ERR("failed to config audio, err=%d\n", + rc); + } +end: + return rc; +} + +static int dp_audio_on(struct dp_audio *dp_audio) +{ + int rc = 0; + struct dp_audio_private *audio; + struct msm_ext_disp_init_data *ext; + + if (!dp_audio) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + if (IS_ERR(audio)) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_audio_register_ext_disp(audio); + + ext = &audio->ext_audio_data; + + atomic_set(&audio->session_on, 1); + + rc = dp_audio_config(audio, EXT_DISPLAY_CABLE_CONNECT); + if (rc) + goto end; + + rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT); + if (rc) + goto end; + + DP_DEBUG("success\n"); +end: + return rc; +} + +static int dp_audio_off(struct dp_audio *dp_audio) +{ + int rc = 0; + struct dp_audio_private *audio; + struct msm_ext_disp_init_data *ext; + bool work_pending = false; + + if (!dp_audio) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + if (!atomic_read(&audio->session_on)) { + DP_DEBUG("audio already off\n"); + return rc; + } + + ext = &audio->ext_audio_data; + + work_pending = cancel_delayed_work_sync(&audio->notify_delayed_work); + if (work_pending) + DP_DEBUG("pending notification work completed\n"); + + rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT); + if (rc) + goto end; + + DP_DEBUG("success\n"); +end: + dp_audio_config(audio, EXT_DISPLAY_CABLE_DISCONNECT); + + atomic_set(&audio->session_on, 0); + audio->engine_on = false; + + dp_audio_deregister_ext_disp(audio); + + return rc; +} + +static void dp_audio_notify_work_fn(struct work_struct *work) +{ + struct dp_audio_private *audio; + struct delayed_work *dw = to_delayed_work(work); + + audio = container_of(dw, struct dp_audio_private, notify_delayed_work); + + dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT); +} + +static int dp_audio_create_notify_workqueue(struct dp_audio_private *audio) +{ + audio->notify_workqueue = create_workqueue("sdm_dp_audio_notify"); + if (IS_ERR_OR_NULL(audio->notify_workqueue)) { + DP_ERR("Error creating notify_workqueue\n"); + return -EPERM; + } + + INIT_DELAYED_WORK(&audio->notify_delayed_work, dp_audio_notify_work_fn); + + return 0; +} + +static void dp_audio_destroy_notify_workqueue(struct dp_audio_private *audio) +{ + if (audio->notify_workqueue) + destroy_workqueue(audio->notify_workqueue); +} + +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog_audio *catalog) +{ + int rc = 0; + struct dp_audio_private *audio; + struct dp_audio *dp_audio; + + if (!pdev || !panel || !catalog) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + audio = devm_kzalloc(&pdev->dev, sizeof(*audio), 
GFP_KERNEL); + if (!audio) { + rc = -ENOMEM; + goto error; + } + + rc = dp_audio_create_notify_workqueue(audio); + if (rc) + goto error_notify_workqueue; + + init_completion(&audio->hpd_comp); + + audio->pdev = pdev; + audio->panel = panel; + audio->catalog = catalog; + + atomic_set(&audio->acked, 0); + + dp_audio = &audio->dp_audio; + + mutex_init(&audio->ops_lock); + + dp_audio->on = dp_audio_on; + dp_audio->off = dp_audio_off; + + catalog->init(catalog); + + return dp_audio; + +error_notify_workqueue: + devm_kfree(&pdev->dev, audio); +error: + return ERR_PTR(rc); +} + +void dp_audio_put(struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio; + + if (!dp_audio) + return; + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + mutex_destroy(&audio->ops_lock); + + dp_audio_destroy_notify_workqueue(audio); + + devm_kfree(&audio->pdev->dev, audio); +} diff --git a/techpack/display/msm/dp/dp_audio.h b/techpack/display/msm/dp/dp_audio.h new file mode 100644 index 0000000000000000000000000000000000000000..882551e0fefc970a473e27e275adb04b2a9c9c66 --- /dev/null +++ b/techpack/display/msm/dp/dp_audio.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUDIO_H_ +#define _DP_AUDIO_H_ + +#include + +#include "dp_panel.h" +#include "dp_catalog.h" + +/** + * struct dp_audio + * @lane_count: number of lanes configured in current session + * @bw_code: link rate's bandwidth code for current session + */ +struct dp_audio { + u32 lane_count; + u32 bw_code; + + /** + * on() + * + * Enables the audio by notifying the user module. + * + * @dp_audio: an instance of struct dp_audio. + * + * Returns the error code in case of failure, 0 in success case. + */ + int (*on)(struct dp_audio *dp_audio); + + /** + * off() + * + * Disables the audio by notifying the user module. + * + * @dp_audio: an instance of struct dp_audio. + * + * Returns the error code in case of failure, 0 in success case. + */ + int (*off)(struct dp_audio *dp_audio); +}; + +/** + * dp_audio_get() + * + * Creates and instance of dp audio. + * + * @pdev: caller's platform device instance. + * @panel: an instance of dp_panel module. + * @catalog: an instance of dp_catalog_audio module. + * + * Returns the error code in case of failure, otherwize + * an instance of newly created dp_module. + */ +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog_audio *catalog); + +/** + * dp_audio_put() + * + * Cleans the dp_audio instance. + * + * @dp_audio: an instance of dp_audio. + */ +void dp_audio_put(struct dp_audio *dp_audio); +#endif /* _DP_AUDIO_H_ */ diff --git a/techpack/display/msm/dp/dp_aux.c b/techpack/display/msm/dp/dp_aux.c new file mode 100644 index 0000000000000000000000000000000000000000..4a1f54eef5692d9f00fa36bc4cacd133f1632461 --- /dev/null +++ b/techpack/display/msm/dp/dp_aux.c @@ -0,0 +1,872 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include + +#include "dp_aux.h" +#include "dp_debug.h" + +#define DP_AUX_ENUM_STR(x) #x + +enum { + DP_AUX_DATA_INDEX_WRITE = BIT(31), +}; + +struct dp_aux_private { + struct device *dev; + struct dp_aux dp_aux; + struct dp_catalog_aux *catalog; + struct dp_aux_cfg *cfg; + struct device_node *aux_switch_node; + struct mutex mutex; + struct completion comp; + struct drm_dp_aux drm_aux; + + bool cmd_busy; + bool native; + bool read; + bool no_send_addr; + bool no_send_stop; + bool enabled; + + u32 offset; + u32 segment; + u32 aux_error_num; + u32 retry_cnt; + + atomic_t aborted; + + u8 *dpcd; + u8 *edid; +}; + +#ifdef CONFIG_DYNAMIC_DEBUG +static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + char prefix[64]; + int i, linelen, remaining = msg->size; + const int rowsize = 16; + u8 linebuf[64]; + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + + snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ", + aux->native ? "NAT" : "I2C", + aux->read ? "RD" : "WR", + msg->address, msg->size); + + for (i = 0; i < msg->size; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + + hex_dump_to_buffer(msg->buffer + i, linelen, rowsize, 1, + linebuf, sizeof(linebuf), false); + + DP_DEBUG("%s%s\n", prefix, linebuf); + } +} +#else +static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ +} +#endif + +static char *dp_aux_get_error(u32 aux_error) +{ + switch (aux_error) { + case DP_AUX_ERR_NONE: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE); + case DP_AUX_ERR_ADDR: + return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR); + case DP_AUX_ERR_TOUT: + return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT); + case DP_AUX_ERR_NACK: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK); + case DP_AUX_ERR_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER); + case DP_AUX_ERR_NACK_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER); + default: + return "unknown"; + } +} + +static u32 dp_aux_write(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data[4], reg, len; + u8 *msgdata = msg->buffer; + int const aux_cmd_fifo_len = 128; + int i = 0; + + if (aux->read) + len = 4; + else + len = msg->size + 4; + + /* + * cmd fifo only has depth of 144 bytes + * limit buf length to 128 bytes here + */ + if (len > aux_cmd_fifo_len) { + DP_ERR("buf len error\n"); + return 0; + } + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (aux->read) + data[0] |= BIT(4); /* R/W */ + + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ + data[2] = msg->address & 0xff; /* addr[7:0] */ + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ + + for (i = 0; i < len; i++) { + reg = (i < 4) ? 
data[i] : msgdata[i - 4]; + reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */ + if (i == 0) + reg |= DP_AUX_DATA_INDEX_WRITE; + aux->catalog->data = reg; + aux->catalog->write_data(aux->catalog); + } + + aux->catalog->clear_trans(aux->catalog, false); + aux->catalog->clear_hw_interrupts(aux->catalog); + + reg = 0; /* Transaction number == 1 */ + if (!aux->native) { /* i2c */ + reg |= BIT(8); + + if (aux->no_send_addr) + reg |= BIT(10); + + if (aux->no_send_stop) + reg |= BIT(11); + } + + reg |= BIT(9); + aux->catalog->data = reg; + aux->catalog->write_trans(aux->catalog); + + return len; +} + +static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 ret = 0, len = 0, timeout; + int const aux_timeout_ms = HZ/4; + + reinit_completion(&aux->comp); + + len = dp_aux_write(aux, msg); + if (len == 0) { + DP_ERR("DP AUX write failed\n"); + return -EINVAL; + } + + timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms); + if (!timeout) { + DP_ERR("aux %s timeout\n", (aux->read ? "read" : "write")); + return -ETIMEDOUT; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + ret = len; + } else { + pr_err_ratelimited("aux err: %s\n", + dp_aux_get_error(aux->aux_error_num)); + + ret = -EINVAL; + } + + return ret; +} + +static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + u32 i, actual_i; + u32 len = msg->size; + + aux->catalog->clear_trans(aux->catalog, true); + + data = 0; + data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ + data |= BIT(0); /* read */ + + aux->catalog->data = data; + aux->catalog->write_data(aux->catalog); + + dp = msg->buffer; + + /* discard first byte */ + data = aux->catalog->read_data(aux->catalog); + + for (i = 0; i < len; i++) { + data = aux->catalog->read_data(aux->catalog); + *dp++ = (u8)((data >> 8) & 0xff); + + actual_i = (data >> 16) & 0xFF; + if (i != actual_i) + DP_WARN("Index mismatch: expected %d, found %d\n", + i, actual_i); + } +} + +static void dp_aux_native_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->catalog->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) + aux->aux_error_num = DP_AUX_ERR_NONE; + else if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + aux->catalog->clear_hw_interrupts(aux->catalog); + } + + complete(&aux->comp); +} + +static void dp_aux_i2c_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->catalog->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) { + if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER)) + aux->aux_error_num = DP_AUX_ERR_NACK; + else + aux->aux_error_num = DP_AUX_ERR_NONE; + } else { + if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; + if (isr & DP_INTR_I2C_NACK) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_I2C_DEFER) + aux->aux_error_num = DP_AUX_ERR_DEFER; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + aux->catalog->clear_hw_interrupts(aux->catalog); + } + } + + complete(&aux->comp); +} + +static void dp_aux_isr(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct 
dp_aux_private, dp_aux); + + aux->catalog->get_irq(aux->catalog, aux->cmd_busy); + + if (!aux->cmd_busy) + return; + + if (aux->native) + dp_aux_native_handler(aux); + else + dp_aux_i2c_handler(aux); +} + +static void dp_aux_reconfig(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->catalog->update_aux_cfg(aux->catalog, + aux->cfg, PHY_AUX_CFG1); + aux->catalog->reset(aux->catalog); +} + +static void dp_aux_abort_transaction(struct dp_aux *dp_aux, bool abort) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + atomic_set(&aux->aborted, abort); +} + +static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg) +{ + u32 const edid_address = 0x50; + u32 const segment_address = 0x30; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *data = NULL; + + if (aux->native || i2c_read || ((input_msg->address != edid_address) && + (input_msg->address != segment_address))) + return; + + + data = input_msg->buffer; + if (input_msg->address == segment_address) + aux->segment = *data; + else + aux->offset = *data; +} + +/** + * dp_aux_transfer_helper() - helper function for EDID read transactions + * + * @aux: DP AUX private structure + * @input_msg: input message from DRM upstream APIs + * @send_seg: send the seg to sink + * + * return: void + * + * This helper function is used to fix EDID reads for non-compliant + * sinks that do not handle the i2c middle-of-transaction flag correctly. + */ +static void dp_aux_transfer_helper(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg, bool send_seg) +{ + struct drm_dp_aux_msg helper_msg; + u32 const message_size = 0x10; + u32 const segment_address = 0x30; + u32 const edid_block_length = 0x80; + bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) + return; + + /* + * Sending the segment value and EDID offset will be performed + * from the DRM upstream EDID driver for each block. Avoid + * duplicate AUX transactions related to this while reading the + * first 16 bytes of each block. + */ + if (!(aux->offset % edid_block_length) || !send_seg) + goto end; + + aux->read = false; + aux->cmd_busy = true; + aux->no_send_addr = true; + aux->no_send_stop = true; + + /* + * Send the segment address for i2c reads for segment > 0 and for which + * the middle-of-transaction flag is set. This is required to support + * EDID reads of more than 2 blocks as the segment address is reset to 0 + * since we are overriding the middle-of-transaction flag for read + * transactions. + */ + if (aux->segment) { + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = segment_address; + helper_msg.buffer = &aux->segment; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + } + + /* + * Send the offset address for every i2c read in which the + * middle-of-transaction flag is set. This will ensure that the sink + * will update its read pointer and return the correct portion of the + * EDID buffer in the subsequent i2c read trasntion triggered in the + * native AUX transfer function. 
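+ * Note that aux->offset is advanced by message_size (16 bytes) after each
+ * EDID read chunk handled here, and aux->segment is cleared once the
+ * offset reaches the end of a 128-byte block (0x80 or 0x100) below.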
+ */ + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = input_msg->address; + helper_msg.buffer = &aux->offset; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); +end: + aux->offset += message_size; + if (aux->offset == 0x80 || aux->offset == 0x100) + aux->segment = 0x0; /* reset segment at end of block */ +} + +static int dp_aux_transfer_ready(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg, bool send_seg) +{ + int ret = 0; + int const aux_cmd_native_max = 16; + int const aux_cmd_i2c_max = 128; + + if (atomic_read(&aux->aborted)) { + ret = -ETIMEDOUT; + goto error; + } + + aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + + /* Ignore address only message */ + if ((msg->size == 0) || (msg->buffer == NULL)) { + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + goto error; + } + + /* msg sanity check */ + if ((aux->native && (msg->size > aux_cmd_native_max)) || + (msg->size > aux_cmd_i2c_max)) { + DP_ERR("%s: invalid msg: size(%zu), request(%x)\n", + __func__, msg->size, msg->request); + ret = -EINVAL; + goto error; + } + + dp_aux_update_offset_and_segment(aux, msg); + + dp_aux_transfer_helper(aux, msg, send_seg); + + aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (aux->read) { + aux->no_send_addr = true; + aux->no_send_stop = false; + } else { + aux->no_send_addr = true; + aux->no_send_stop = true; + } + + aux->cmd_busy = true; +error: + return ret; +} + +static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + u32 timeout; + ssize_t ret; + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + + mutex_lock(&aux->mutex); + + ret = dp_aux_transfer_ready(aux, msg, false); + if (ret) + goto end; + + aux->aux_error_num = DP_AUX_ERR_NONE; + + if (!aux->dpcd || !aux->edid) { + DP_ERR("invalid aux/dpcd structure\n"); + goto end; + } + + if ((msg->address + msg->size) > SZ_4K) { + DP_DEBUG("invalid dpcd access: addr=0x%x, size=0x%lx\n", + msg->address, msg->size); + goto address_error; + } + + if (aux->native) { + mutex_lock(aux->dp_aux.access_lock); + aux->dp_aux.reg = msg->address; + aux->dp_aux.read = aux->read; + aux->dp_aux.size = msg->size; + + if (!aux->read) + memcpy(aux->dpcd + msg->address, + msg->buffer, msg->size); + + reinit_completion(&aux->comp); + mutex_unlock(aux->dp_aux.access_lock); + + timeout = wait_for_completion_timeout(&aux->comp, HZ * 2); + if (!timeout) { + DP_ERR("%s timeout: 0x%x\n", + aux->read ? "read" : "write", + msg->address); + atomic_set(&aux->aborted, 1); + ret = -ETIMEDOUT; + goto end; + } + + mutex_lock(aux->dp_aux.access_lock); + if (aux->read) + memcpy(msg->buffer, aux->dpcd + msg->address, + msg->size); + mutex_unlock(aux->dp_aux.access_lock); + + aux->aux_error_num = DP_AUX_ERR_NONE; + } else { + if (aux->read && msg->address == 0x50) { + memcpy(msg->buffer, + aux->edid + aux->offset - 16, + msg->size); + } + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + dp_aux_hex_dump(drm_aux, msg); + + if (!aux->read) + memset(msg->buffer, 0, msg->size); + + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = aux->native ? 
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + } + + ret = msg->size; + goto end; + +address_error: + memset(msg->buffer, 0, msg->size); + ret = msg->size; +end: + if (ret == -ETIMEDOUT) + aux->dp_aux.state |= DP_STATE_AUX_TIMEOUT; + aux->dp_aux.reg = 0xFFFF; + aux->dp_aux.read = true; + aux->dp_aux.size = 0; + + mutex_unlock(&aux->mutex); + return ret; +} + +/* + * This function does the real job to process an AUX transaction. + * It will call aux_reset() function to reset the AUX channel, + * if the waiting is timeout. + */ +static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + ssize_t ret; + int const retry_count = 5; + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + + mutex_lock(&aux->mutex); + + ret = dp_aux_transfer_ready(aux, msg, true); + if (ret) + goto unlock_exit; + + if (!aux->cmd_busy) { + ret = msg->size; + goto unlock_exit; + } + + ret = dp_aux_cmd_fifo_tx(aux, msg); + if ((ret < 0) && !atomic_read(&aux->aborted)) { + aux->retry_cnt++; + if (!(aux->retry_cnt % retry_count)) + aux->catalog->update_aux_cfg(aux->catalog, + aux->cfg, PHY_AUX_CFG1); + aux->catalog->reset(aux->catalog); + goto unlock_exit; + } else if (ret < 0) { + goto unlock_exit; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + if (aux->read) + dp_aux_cmd_fifo_rx(aux, msg); + + dp_aux_hex_dump(drm_aux, msg); + + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + } + + /* Return requested size for success or retry */ + ret = msg->size; + aux->retry_cnt = 0; + +unlock_exit: + aux->cmd_busy = false; + mutex_unlock(&aux->mutex); + return ret; +} + +static void dp_aux_reset_phy_config_indices(struct dp_aux_cfg *aux_cfg) +{ + int i = 0; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + aux_cfg[i].current_index = 0; +} + +static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg) +{ + struct dp_aux_private *aux; + + if (!dp_aux || !aux_cfg) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (aux->enabled) + return; + + dp_aux_reset_phy_config_indices(aux_cfg); + aux->catalog->setup(aux->catalog, aux_cfg); + aux->catalog->reset(aux->catalog); + aux->catalog->enable(aux->catalog, true); + atomic_set(&aux->aborted, 0); + aux->retry_cnt = 0; + aux->enabled = true; +} + +static void dp_aux_deinit(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (!aux->enabled) + return; + + atomic_set(&aux->aborted, 1); + aux->catalog->enable(aux->catalog, false); + aux->enabled = false; +} + +static int dp_aux_register(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + int ret = 0; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + ret = -EINVAL; + goto exit; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->drm_aux.name = "sde_dp_aux"; + aux->drm_aux.dev = aux->dev; + aux->drm_aux.transfer = dp_aux_transfer; + ret = drm_dp_aux_register(&aux->drm_aux); + if (ret) { + DP_ERR("%s: failed to register drm aux: %d\n", __func__, ret); + goto exit; + } + dp_aux->drm_aux = &aux->drm_aux; +exit: + return ret; +} + +static void dp_aux_deregister(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + 
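+ /*
+  * dp_aux is embedded in dp_aux_private, so recover the wrapper with
+  * container_of() before handing the drm_dp_aux back to the DRM core.
+  */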
aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + drm_dp_aux_unregister(&aux->drm_aux); +} + +static void dp_aux_dpcd_updated(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + /* make sure wait has started */ + usleep_range(20, 30); + complete(&aux->comp); +} + +static void dp_aux_set_sim_mode(struct dp_aux *dp_aux, bool en, + u8 *edid, u8 *dpcd) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_lock(&aux->mutex); + + aux->edid = edid; + aux->dpcd = dpcd; + + if (en) { + atomic_set(&aux->aborted, 0); + aux->drm_aux.transfer = dp_aux_transfer_debug; + } else { + aux->drm_aux.transfer = dp_aux_transfer; + } + + mutex_unlock(&aux->mutex); +} + +static int dp_aux_configure_aux_switch(struct dp_aux *dp_aux, + bool enable, int orientation) +{ + struct dp_aux_private *aux; + int rc = 0; + enum fsa_function event = FSA_USBC_DISPLAYPORT_DISCONNECTED; + + if (!dp_aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (!aux->aux_switch_node) { + DP_DEBUG("undefined aux switch handle\n"); + rc = -EINVAL; + goto end; + } + + if (strcmp(aux->aux_switch_node->name, "fsa4480")) { + DP_DEBUG("Not an fsa4480 aux switch\n"); + goto end; + } + + if (enable) { + switch (orientation) { + case ORIENTATION_CC1: + event = FSA_USBC_ORIENTATION_CC1; + break; + case ORIENTATION_CC2: + event = FSA_USBC_ORIENTATION_CC2; + break; + default: + DP_ERR("invalid orientation\n"); + rc = -EINVAL; + goto end; + } + } + + DP_DEBUG("enable=%d, orientation=%d, event=%d\n", + enable, orientation, event); + + rc = fsa4480_switch_event(aux->aux_switch_node, event); + if (rc) + DP_ERR("failed to configure fsa4480 i2c device (%d)\n", rc); +end: + return rc; +} + +struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog, + struct dp_parser *parser, struct device_node *aux_switch) +{ + int rc = 0; + struct dp_aux_private *aux; + struct dp_aux *dp_aux; + + if (!catalog || !parser || + (!parser->no_aux_switch && + !aux_switch && + !parser->gpio_aux_switch)) { + DP_ERR("invalid input\n"); + rc = -ENODEV; + goto error; + } + + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) { + rc = -ENOMEM; + goto error; + } + + init_completion(&aux->comp); + aux->cmd_busy = false; + mutex_init(&aux->mutex); + + aux->dev = dev; + aux->catalog = catalog; + aux->cfg = parser->aux_cfg; + aux->aux_switch_node = aux_switch; + dp_aux = &aux->dp_aux; + aux->retry_cnt = 0; + aux->dp_aux.reg = 0xFFFF; + + dp_aux->isr = dp_aux_isr; + dp_aux->init = dp_aux_init; + dp_aux->deinit = dp_aux_deinit; + dp_aux->drm_aux_register = dp_aux_register; + dp_aux->drm_aux_deregister = dp_aux_deregister; + dp_aux->reconfig = dp_aux_reconfig; + dp_aux->abort = dp_aux_abort_transaction; + dp_aux->dpcd_updated = dp_aux_dpcd_updated; + dp_aux->set_sim_mode = dp_aux_set_sim_mode; + dp_aux->aux_switch = dp_aux_configure_aux_switch; + + return dp_aux; +error: + return ERR_PTR(rc); +} + +void dp_aux_put(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) + return; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_destroy(&aux->mutex); + + devm_kfree(aux->dev, aux); +} diff --git a/techpack/display/msm/dp/dp_aux.h b/techpack/display/msm/dp/dp_aux.h new file mode 100644 
index 0000000000000000000000000000000000000000..cd0d9714b922af45a88ccabdd546d430cb4a2148 --- /dev/null +++ b/techpack/display/msm/dp/dp_aux.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUX_H_ +#define _DP_AUX_H_ + +#include "dp_catalog.h" +#include "drm_dp_helper.h" + +#define DP_STATE_NOTIFICATION_SENT BIT(0) +#define DP_STATE_TRAIN_1_STARTED BIT(1) +#define DP_STATE_TRAIN_1_SUCCEEDED BIT(2) +#define DP_STATE_TRAIN_1_FAILED BIT(3) +#define DP_STATE_TRAIN_2_STARTED BIT(4) +#define DP_STATE_TRAIN_2_SUCCEEDED BIT(5) +#define DP_STATE_TRAIN_2_FAILED BIT(6) +#define DP_STATE_CTRL_POWERED_ON BIT(7) +#define DP_STATE_CTRL_POWERED_OFF BIT(8) +#define DP_STATE_LINK_MAINTENANCE_STARTED BIT(9) +#define DP_STATE_LINK_MAINTENANCE_COMPLETED BIT(10) +#define DP_STATE_LINK_MAINTENANCE_FAILED BIT(11) +#define DP_STATE_AUX_TIMEOUT BIT(12) + +enum dp_aux_error { + DP_AUX_ERR_NONE = 0, + DP_AUX_ERR_ADDR = -1, + DP_AUX_ERR_TOUT = -2, + DP_AUX_ERR_NACK = -3, + DP_AUX_ERR_DEFER = -4, + DP_AUX_ERR_NACK_DEFER = -5, + DP_AUX_ERR_PHY = -6, +}; + +struct dp_aux { + u32 reg; + u32 size; + u32 state; + + bool read; + + struct mutex *access_lock; + + struct drm_dp_aux *drm_aux; + int (*drm_aux_register)(struct dp_aux *aux); + void (*drm_aux_deregister)(struct dp_aux *aux); + void (*isr)(struct dp_aux *aux); + void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg); + void (*deinit)(struct dp_aux *aux); + void (*reconfig)(struct dp_aux *aux); + void (*abort)(struct dp_aux *aux, bool abort); + void (*dpcd_updated)(struct dp_aux *aux); + void (*set_sim_mode)(struct dp_aux *aux, bool en, u8 *edid, u8 *dpcd); + int (*aux_switch)(struct dp_aux *aux, bool enable, int orientation); +}; + +struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog, + struct dp_parser *parser, struct device_node *aux_switch); +void dp_aux_put(struct dp_aux *aux); + +#endif /*__DP_AUX_H_*/ diff --git a/techpack/display/msm/dp/dp_catalog.c b/techpack/display/msm/dp/dp_catalog.c new file mode 100644 index 0000000000000000000000000000000000000000..de64dc2c14e9afd7bfb074ef02b1d075214f445b --- /dev/null +++ b/techpack/display/msm/dp/dp_catalog.c @@ -0,0 +1,2805 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#include +#include + +#include "dp_catalog.h" +#include "dp_reg.h" +#include "dp_debug.h" + +#define DP_GET_MSB(x) (x >> 8) +#define DP_GET_LSB(x) (x & 0xff) + +#define DP_PHY_READY BIT(1) + +#define dp_catalog_get_priv(x) ({ \ + struct dp_catalog *dp_catalog; \ + dp_catalog = container_of(x, struct dp_catalog, x); \ + container_of(dp_catalog, struct dp_catalog_private, \ + dp_catalog); \ +}) + +#define DP_INTERRUPT_STATUS1 \ + (DP_INTR_AUX_I2C_DONE| \ + DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ + DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \ + DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \ + DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR) + +#define DP_INTR_MASK1 (DP_INTERRUPT_STATUS1 << 2) + +#define DP_INTERRUPT_STATUS2 \ + (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \ + DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED) + +#define DP_INTR_MASK2 (DP_INTERRUPT_STATUS2 << 2) + +#define DP_INTERRUPT_STATUS5 \ + (DP_INTR_MST_DP0_VCPF_SENT | DP_INTR_MST_DP1_VCPF_SENT) + +#define DP_INTR_MASK5 (DP_INTERRUPT_STATUS5 << 2) + +#define dp_catalog_fill_io(x) { \ + catalog->io.x = parser->get_io(parser, #x); \ +} + +#define dp_catalog_fill_io_buf(x) { \ + parser->get_io_buf(parser, #x); \ +} + +#define dp_read(x) ({ \ + catalog->read(catalog, io_data, x); \ +}) + +#define dp_write(x, y) ({ \ + catalog->write(catalog, io_data, x, y); \ +}) + +static u8 const vm_pre_emphasis[4][4] = { + {0x00, 0x0B, 0x12, 0xFF}, /* pe0, 0 db */ + {0x00, 0x0A, 0x12, 0xFF}, /* pe1, 3.5 db */ + {0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0xFF, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +/* voltage swing, 0.2v and 1.0v are not support */ +static u8 const vm_voltage_swing[4][4] = { + {0x07, 0x0F, 0x14, 0xFF}, /* sw0, 0.4v */ + {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */ + {0x18, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8 v */ + {0xFF, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */ +}; + +static u8 const vm_pre_emphasis_hbr3_hbr2[4][4] = { + {0x00, 0x0C, 0x15, 0x1A}, + {0x02, 0x0E, 0x16, 0xFF}, + {0x02, 0x11, 0xFF, 0xFF}, + {0x04, 0xFF, 0xFF, 0xFF} +}; + +static u8 const vm_voltage_swing_hbr3_hbr2[4][4] = { + {0x02, 0x12, 0x16, 0x1A}, + {0x09, 0x19, 0x1F, 0xFF}, + {0x10, 0x1F, 0xFF, 0xFF}, + {0x1F, 0xFF, 0xFF, 0xFF} +}; + +static u8 const vm_pre_emphasis_hbr_rbr[4][4] = { + {0x00, 0x0C, 0x14, 0x19}, + {0x00, 0x0B, 0x12, 0xFF}, + {0x00, 0x0B, 0xFF, 0xFF}, + {0x04, 0xFF, 0xFF, 0xFF} +}; + +static u8 const vm_voltage_swing_hbr_rbr[4][4] = { + {0x08, 0x0F, 0x16, 0x1F}, + {0x11, 0x1E, 0x1F, 0xFF}, + {0x19, 0x1F, 0xFF, 0xFF}, + {0x1F, 0xFF, 0xFF, 0xFF} +}; + +enum dp_flush_bit { + DP_PPS_FLUSH, + DP_DHDR_FLUSH, +}; + +/* audio related catalog functions */ +struct dp_catalog_private { + struct device *dev; + struct dp_catalog_io io; + struct dp_parser *parser; + + u32 (*read)(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset); + void (*write)(struct dp_catalog_private *catlog, + struct dp_io_data *io_data, u32 offset, u32 data); + + u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_catalog dp_catalog; + + char exe_mode[SZ_4]; +}; + +static u32 dp_read_sw(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset) +{ + u32 data = 0; + + if (io_data->buf) + memcpy(&data, io_data->buf + offset, sizeof(offset)); + + return data; +} + +static void dp_write_sw(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + if (io_data->buf) + memcpy(io_data->buf + offset, &data, sizeof(data)); +} + +static u32 dp_read_hw(struct dp_catalog_private *catalog, + struct 
dp_io_data *io_data, u32 offset) +{ + u32 data = 0; + + data = readl_relaxed(io_data->io.base + offset); + + return data; +} + +static void dp_write_hw(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + writel_relaxed(data, io_data->io.base + offset); +} + +static u32 dp_read_sub_sw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_sw(catalog, io_data, offset); +} + +static void dp_write_sub_sw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_sw(catalog, io_data, offset, data); +} + +static u32 dp_read_sub_hw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_hw(catalog, io_data, offset); +} + +static void dp_write_sub_hw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_hw(catalog, io_data, offset, data); +} + +/* aux related catalog functions */ +static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + return dp_read(DP_AUX_DATA); +end: + return 0; +} + +static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux) +{ + int rc = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + dp_write(DP_AUX_DATA, aux->data); +end: + return rc; +} + +static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux) +{ + int rc = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + dp_write(DP_AUX_TRANS_CTRL, aux->data); +end: + return rc; +} + +static int dp_catalog_aux_clear_trans(struct dp_catalog_aux *aux, bool read) +{ + int rc = 0; + u32 data = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + if (read) { + data = dp_read(DP_AUX_TRANS_CTRL); + data &= ~BIT(9); + dp_write(DP_AUX_TRANS_CTRL, data); + } else { + dp_write(DP_AUX_TRANS_CTRL, 0); + } +end: + return rc; +} + +static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 data = 0; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_phy; + + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS); + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + 
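+ /* final step of the 0x1f/0x9f/0x00 clear sequence */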
dp_write(DP_PHY_AUX_INTERRUPT_CLEAR, 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_aux_reset(struct dp_catalog_aux *aux) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + aux_ctrl = dp_read(DP_AUX_CTRL); + + aux_ctrl |= BIT(1); + dp_write(DP_AUX_CTRL, aux_ctrl); + usleep_range(1000, 1010); /* h/w recommended delay */ + + aux_ctrl &= ~BIT(1); + + dp_write(DP_AUX_CTRL, aux_ctrl); + wmb(); /* make sure AUX reset is done here */ +} + +static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + aux_ctrl = dp_read(DP_AUX_CTRL); + + if (enable) { + aux_ctrl |= BIT(0); + dp_write(DP_AUX_CTRL, aux_ctrl); + wmb(); /* make sure AUX module is enabled */ + + dp_write(DP_TIMEOUT_COUNT, 0xffff); + dp_write(DP_AUX_LIMITS, 0xffff); + } else { + aux_ctrl &= ~BIT(0); + dp_write(DP_AUX_CTRL, aux_ctrl); + } +} + +static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type) +{ + struct dp_catalog_private *catalog; + u32 new_index = 0, current_index = 0; + struct dp_io_data *io_data; + + if (!aux || !cfg || (type >= PHY_AUX_CFG_MAX)) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + + io_data = catalog->io.dp_phy; + + current_index = cfg[type].current_index; + new_index = (current_index + 1) % cfg[type].cfg_cnt; + DP_DEBUG("Updating %s from 0x%08x to 0x%08x\n", + dp_phy_aux_config_type_to_string(type), + cfg[type].lut[current_index], cfg[type].lut[new_index]); + + dp_write(cfg[type].offset, cfg[type].lut[new_index]); + cfg[type].current_index = new_index; +} + +static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + int i = 0; + + if (!aux || !cfg) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + + io_data = catalog->io.dp_phy; + dp_write(DP_PHY_PD_CTL, 0x65); + wmb(); /* make sure PD programming happened */ + + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io.dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1b); + + io_data = catalog->io.dp_phy; + dp_write(DP_PHY_PD_CTL, 0x02); + wmb(); /* make sure PD programming happened */ + dp_write(DP_PHY_PD_CTL, 0x7d); + + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io.dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f); + + /* DP AUX CFG register programming */ + io_data = catalog->io.dp_phy; + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp_write(cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + + dp_write(DP_PHY_AUX_INTERRUPT_MASK, 0x1F); + wmb(); /* make sure AUX configuration is done before enabling it */ +} + +static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy) +{ + u32 ack; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_ahb; + + aux->isr = dp_read(DP_INTR_STATUS); + aux->isr &= ~DP_INTR_MASK1; + ack = aux->isr & DP_INTERRUPT_STATUS1; + ack <<= 1; + ack |= DP_INTR_MASK1; + 
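+ /*
+ * Ack only the asserted AUX interrupts (status bits shifted into the
+ * ack positions) while keeping DP_INTR_MASK1 set so they stay enabled.
+ */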
dp_write(DP_INTR_STATUS, ack); +} + +/* controller related catalog functions */ +static int dp_catalog_ctrl_late_phy_init(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt, bool flipped) +{ + return 0; +} + +static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + return dp_read(DP_HDCP_STATUS); +} + +static void dp_catalog_panel_sdp_update(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 sdp_cfg3_off = 0; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + sdp_cfg3_off = MMSS_DP1_SDP_CFG3 - MMSS_DP_SDP_CFG3; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + dp_write(MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x01); + dp_write(MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x00); +} + +static void dp_catalog_panel_setup_vsif_infoframe_sdp( + struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct drm_msm_ext_hdr_metadata *hdr; + struct dp_io_data *io_data; + u32 header, parity, data, mst_offset = 0; + u8 buf[SZ_64], off = 0; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + mst_offset = MMSS_DP1_VSCEXT_0 - MMSS_DP_VSCEXT_0; + + catalog = dp_catalog_get_priv(panel); + hdr = &panel->hdr_meta; + io_data = catalog->io.dp_link; + + /* HEADER BYTE 1 */ + header = panel->dhdr_vsif_sdp.HB1; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_1_BIT) + | (parity << PARITY_BYTE_1_BIT)); + dp_write(MMSS_DP_VSCEXT_0 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + /* HEADER BYTE 2 */ + header = panel->dhdr_vsif_sdp.HB2; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_2_BIT) + | (parity << PARITY_BYTE_2_BIT)); + dp_write(MMSS_DP_VSCEXT_1 + mst_offset, data); + + /* HEADER BYTE 3 */ + header = panel->dhdr_vsif_sdp.HB3; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_3_BIT) + | (parity << PARITY_BYTE_3_BIT)); + data |= dp_read(MMSS_DP_VSCEXT_1 + mst_offset); + dp_write(MMSS_DP_VSCEXT_1 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + print_hex_dump(KERN_DEBUG, "[drm-dp] VSCEXT: ", + DUMP_PREFIX_NONE, 16, 4, buf, off, false); +} + +static void dp_catalog_panel_setup_hdr_infoframe_sdp( + struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct drm_msm_ext_hdr_metadata *hdr; + struct dp_io_data *io_data; + u32 header, parity, data, mst_offset = 0; + u8 buf[SZ_64], off = 0; + u32 const version = 0x01; + u32 const length = 0x1a; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + mst_offset = MMSS_DP1_GENERIC2_0 - MMSS_DP_GENERIC2_0; + + catalog = dp_catalog_get_priv(panel); + hdr = &panel->hdr_meta; + io_data = catalog->io.dp_link; + + /* HEADER BYTE 1 */ + header = panel->shdr_if_sdp.HB1; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_1_BIT) + | (parity << PARITY_BYTE_1_BIT)); + dp_write(MMSS_DP_GENERIC2_0 + mst_offset, + data); + memcpy(buf + off, &data, sizeof(data)); + off += 
sizeof(data); + + /* HEADER BYTE 2 */ + header = panel->shdr_if_sdp.HB2; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_2_BIT) + | (parity << PARITY_BYTE_2_BIT)); + dp_write(MMSS_DP_GENERIC2_1 + mst_offset, data); + + /* HEADER BYTE 3 */ + header = panel->shdr_if_sdp.HB3; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_3_BIT) + | (parity << PARITY_BYTE_3_BIT)); + data |= dp_read(MMSS_DP_VSCEXT_1 + mst_offset); + dp_write(MMSS_DP_GENERIC2_1 + mst_offset, + data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = version; + data |= length << 8; + data |= hdr->eotf << 16; + dp_write(MMSS_DP_GENERIC2_2 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->display_primaries_x[0]) | + (DP_GET_MSB(hdr->display_primaries_x[0]) << 8) | + (DP_GET_LSB(hdr->display_primaries_y[0]) << 16) | + (DP_GET_MSB(hdr->display_primaries_y[0]) << 24)); + dp_write(MMSS_DP_GENERIC2_3 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->display_primaries_x[1]) | + (DP_GET_MSB(hdr->display_primaries_x[1]) << 8) | + (DP_GET_LSB(hdr->display_primaries_y[1]) << 16) | + (DP_GET_MSB(hdr->display_primaries_y[1]) << 24)); + dp_write(MMSS_DP_GENERIC2_4 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->display_primaries_x[2]) | + (DP_GET_MSB(hdr->display_primaries_x[2]) << 8) | + (DP_GET_LSB(hdr->display_primaries_y[2]) << 16) | + (DP_GET_MSB(hdr->display_primaries_y[2]) << 24)); + dp_write(MMSS_DP_GENERIC2_5 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->white_point_x) | + (DP_GET_MSB(hdr->white_point_x) << 8) | + (DP_GET_LSB(hdr->white_point_y) << 16) | + (DP_GET_MSB(hdr->white_point_y) << 24)); + dp_write(MMSS_DP_GENERIC2_6 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->max_luminance) | + (DP_GET_MSB(hdr->max_luminance) << 8) | + (DP_GET_LSB(hdr->min_luminance) << 16) | + (DP_GET_MSB(hdr->min_luminance) << 24)); + dp_write(MMSS_DP_GENERIC2_7 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->max_content_light_level) | + (DP_GET_MSB(hdr->max_content_light_level) << 8) | + (DP_GET_LSB(hdr->max_average_light_level) << 16) | + (DP_GET_MSB(hdr->max_average_light_level) << 24)); + dp_write(MMSS_DP_GENERIC2_8 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 0; + dp_write(MMSS_DP_GENERIC2_9 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + print_hex_dump(KERN_DEBUG, "[drm-dp] HDR: ", + DUMP_PREFIX_NONE, 16, 4, buf, off, false); +} + +static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 header, parity, data, mst_offset = 0; + u8 off = 0; + u8 buf[SZ_128]; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + mst_offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + /* HEADER BYTE 1 */ + header = panel->vsc_colorimetry.header.HB1; + parity = 
dp_header_get_parity(header); + data = ((header << HEADER_BYTE_1_BIT) + | (parity << PARITY_BYTE_1_BIT)); + dp_write(MMSS_DP_GENERIC0_0 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + /* HEADER BYTE 2 */ + header = panel->vsc_colorimetry.header.HB2; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_2_BIT) + | (parity << PARITY_BYTE_2_BIT)); + dp_write(MMSS_DP_GENERIC0_1 + mst_offset, data); + + /* HEADER BYTE 3 */ + header = panel->vsc_colorimetry.header.HB3; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_3_BIT) + | (parity << PARITY_BYTE_3_BIT)); + data |= dp_read(MMSS_DP_GENERIC0_1 + mst_offset); + dp_write(MMSS_DP_GENERIC0_1 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 0; + dp_write(MMSS_DP_GENERIC0_2 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_3 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_4 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_5 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (panel->vsc_colorimetry.data[16] & 0xFF) | + ((panel->vsc_colorimetry.data[17] & 0xFF) << 8) | + ((panel->vsc_colorimetry.data[18] & 0x7) << 16); + + dp_write(MMSS_DP_GENERIC0_6 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 0; + dp_write(MMSS_DP_GENERIC0_7 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_8 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_9 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + print_hex_dump(KERN_DEBUG, "[drm-dp] VSC: ", + DUMP_PREFIX_NONE, 16, 4, buf, off, false); +} + +static void dp_catalog_panel_config_sdp(struct dp_catalog_panel *panel, + bool en) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg, cfg2; + u32 sdp_cfg_off = 0; + u32 sdp_cfg2_off = 0; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + } + + cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + + if (en) { + /* GEN0_SDP_EN */ + cfg |= BIT(17); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC0_SDPSIZE */ + cfg2 |= BIT(16); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + + /* setup the GENERIC0 in case of en = true */ + dp_catalog_panel_setup_vsc_sdp(panel); + + } else { + /* GEN0_SDP_EN */ + cfg &= ~BIT(17); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC0_SDPSIZE */ + cfg2 &= ~BIT(16); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + } + + dp_catalog_panel_sdp_update(panel); +} + +static void dp_catalog_panel_config_misc(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 reg_offset = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", 
panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + reg_offset = DP1_MISC1_MISC0 - DP_MISC1_MISC0; + + DP_DEBUG("misc settings = 0x%x\n", panel->misc_val); + dp_write(DP_MISC1_MISC0 + reg_offset, panel->misc_val); +} + +static int dp_catalog_panel_set_colorspace(struct dp_catalog_panel *panel, +bool vsc_supported) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (vsc_supported) { + dp_catalog_panel_setup_vsc_sdp(panel); + dp_catalog_panel_sdp_update(panel); + } else + dp_catalog_panel_config_misc(panel); + + return 0; +} + +static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en, + u32 dhdr_max_pkts, bool flush) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg, cfg2, cfg4, misc; + u32 sdp_cfg_off = 0; + u32 sdp_cfg2_off = 0; + u32 sdp_cfg4_off = 0; + u32 misc1_misc0_off = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + sdp_cfg4_off = MMSS_DP1_SDP_CFG4 - MMSS_DP_SDP_CFG4; + misc1_misc0_off = DP1_MISC1_MISC0 - DP_MISC1_MISC0; + } + + cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + misc = dp_read(DP_MISC1_MISC0 + misc1_misc0_off); + + if (en) { + if (dhdr_max_pkts) { + /* VSCEXT_SDP_EN */ + cfg |= BIT(16); + /* DHDR_EN, DHDR_PACKET_LIMIT */ + cfg4 = (dhdr_max_pkts << 1) | BIT(0); + dp_write(MMSS_DP_SDP_CFG4 + sdp_cfg4_off, cfg4); + dp_catalog_panel_setup_vsif_infoframe_sdp(panel); + } + + /* GEN2_SDP_EN */ + cfg |= BIT(19); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC2_SDPSIZE */ + cfg2 |= BIT(20); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + + dp_catalog_panel_setup_hdr_infoframe_sdp(panel); + + if (panel->hdr_meta.eotf) + DP_DEBUG("Enabled\n"); + else + DP_DEBUG("Reset\n"); + } else { + /* VSCEXT_SDP_ENG */ + cfg &= ~BIT(16) & ~BIT(19); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC0_SDPSIZE GENERIC2_SDPSIZE */ + cfg2 &= ~BIT(20); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + + /* DHDR_EN, DHDR_PACKET_LIMIT */ + cfg4 = 0; + dp_write(MMSS_DP_SDP_CFG4 + sdp_cfg4_off, cfg4); + + DP_DEBUG("Disabled\n"); + } + + if (flush) { + DP_DEBUG("flushing HDR metadata\n"); + dp_catalog_panel_sdp_update(panel); + } +} + +static void dp_catalog_panel_update_transfer_unit( + struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!panel || panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + dp_write(DP_VALID_BOUNDARY, panel->valid_boundary); + dp_write(DP_TU, panel->dp_tu); + dp_write(DP_VALID_BOUNDARY_2, panel->valid_boundary2); +} + +static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state) +{ + struct dp_catalog_private *catalog; + 
struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + dp_write(DP_STATE_CTRL, state); + /* make sure to change the hw state */ + wmb(); +} + +static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u8 ln_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + cfg = dp_read(DP_CONFIGURATION_CTRL); + /* + * Reset ASSR (alternate scrambler seed reset) by resetting BIT(10). + * ASSR should be set to disable for TPS4 link training pattern. + * Forcing it to 0 as the power on reset value of register enables it. + */ + cfg &= ~(BIT(4) | BIT(5) | BIT(10)); + cfg |= (ln_cnt - 1) << 4; + dp_write(DP_CONFIGURATION_CTRL, cfg); + + cfg = dp_read(DP_MAINLINK_CTRL); + cfg |= 0x02000000; + dp_write(DP_MAINLINK_CTRL, cfg); + + DP_DEBUG("DP_MAINLINK_CTRL=0x%x\n", cfg); +} + +static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel, + u32 cfg) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0, mainlink_ctrl; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = DP1_CONFIGURATION_CTRL - DP_CONFIGURATION_CTRL; + + DP_DEBUG("DP_CONFIGURATION_CTRL=0x%x\n", cfg); + + dp_write(DP_CONFIGURATION_CTRL + strm_reg_off, cfg); + + mainlink_ctrl = dp_read(DP_MAINLINK_CTRL); + + if (panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else if (panel->stream_id == DP_STREAM_1) + io_data = catalog->io.dp_p1; + + if (mainlink_ctrl & BIT(8)) + dp_write(MMSS_DP_ASYNC_FIFO_CONFIG, 0x01); + else + dp_write(MMSS_DP_ASYNC_FIFO_CONFIG, 0x00); +} + +static void dp_catalog_panel_config_dto(struct dp_catalog_panel *panel, + bool ack) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 dsc_dto; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + switch (panel->stream_id) { + case DP_STREAM_0: + io_data = catalog->io.dp_p0; + break; + case DP_STREAM_1: + io_data = catalog->io.dp_p1; + break; + default: + DP_ERR("invalid stream id\n"); + return; + } + + dsc_dto = dp_read(MMSS_DP_DSC_DTO); + if (ack) + dsc_dto = BIT(1); + else + dsc_dto &= ~BIT(1); + dp_write(MMSS_DP_DSC_DTO, dsc_dto); +} + +static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl, + bool flipped, char *lane_map) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + dp_write(DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4); +} + +static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl, + u8 ln_pnswap) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg0, cfg1; + + catalog = dp_catalog_get_priv(ctrl); + + cfg0 = 0x0a; + cfg1 = 0x0a; + + cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0; + cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2; + cfg1 |= ((ln_pnswap >> 2) & 
0x1) << 0; + cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2; + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_POL_INV, cfg0); + + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_POL_INV, cfg1); +} + +static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + u32 mainlink_ctrl, reg; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + if (enable) { + reg = dp_read(DP_MAINLINK_CTRL); + mainlink_ctrl = reg & ~(0x03); + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink is turned off before reset */ + mainlink_ctrl = reg | 0x02; + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink entered reset */ + mainlink_ctrl = reg & ~(0x03); + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink reset done */ + mainlink_ctrl = reg | 0x01; + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink turned on */ + } else { + mainlink_ctrl = dp_read(DP_MAINLINK_CTRL); + mainlink_ctrl &= ~BIT(0); + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + } +} + +static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid; + u32 const nvid_fixed = 0x8000; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0; + u32 mvid_reg_off = 0, nvid_reg_off = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_mmss_cc; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M; + + pixel_m = dp_read(MMSS_DP_PIXEL_M + strm_reg_off); + pixel_n = dp_read(MMSS_DP_PIXEL_N + strm_reg_off); + DP_DEBUG("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + DP_DEBUG("rate = %d\n", rate); + + if (panel->widebus_en) + mvid <<= 1; + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) { + mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID; + nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID; + } + + DP_DEBUG("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write(DP_SOFTWARE_MVID + mvid_reg_off, mvid); + dp_write(DP_SOFTWARE_NVID + nvid_reg_off, nvid); +} + +static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl, + u32 pattern) +{ + int bit, cnt = 10; + u32 data; + const u32 link_training_offset = 3; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + switch (pattern) { + case DP_TRAINING_PATTERN_4: + bit = 3; + break; + case DP_TRAINING_PATTERN_3: + case DP_TRAINING_PATTERN_2: + case DP_TRAINING_PATTERN_1: + bit = pattern - 1; + break; + default: + DP_ERR("invalid pattern\n"); + return; + } + + DP_DEBUG("hw: bit=%d train=%d\n", bit, pattern); + dp_write(DP_STATE_CTRL, 
BIT(bit)); + + bit += link_training_offset; + + while (cnt--) { + data = dp_read(DP_MAINLINK_READY); + if (data & BIT(bit)) + break; + } + + if (cnt == 0) + DP_ERR("set link_train=%d failed\n", pattern); +} + +static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.usb3_dp_com; + + DP_DEBUG("Program PHYMODE to DP only\n"); + dp_write(USB3_DP_COM_RESET_OVRD_CTRL, 0x0a); + dp_write(USB3_DP_COM_PHY_MODE_CTRL, 0x02); + dp_write(USB3_DP_COM_SW_RESET, 0x01); + /* make sure usb3 com phy software reset is done */ + wmb(); + + if (!flip) /* CC1 */ + dp_write(USB3_DP_COM_TYPEC_CTRL, 0x02); + else /* CC2 */ + dp_write(USB3_DP_COM_TYPEC_CTRL, 0x03); + + dp_write(USB3_DP_COM_SWI_CTRL, 0x00); + dp_write(USB3_DP_COM_SW_RESET, 0x00); + /* make sure the software reset is done */ + wmb(); + + dp_write(USB3_DP_COM_POWER_DOWN_CTRL, 0x01); + dp_write(USB3_DP_COM_RESET_OVRD_CTRL, 0x00); + /* make sure phy is brought out of reset */ + wmb(); +} + +static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + + if (panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else if (panel->stream_id == DP_STREAM_1) + io_data = catalog->io.dp_p1; + + if (!enable) { + dp_write(MMSS_DP_TPG_MAIN_CONTROL, 0x0); + dp_write(MMSS_DP_BIST_ENABLE, 0x0); + dp_write(MMSS_DP_TIMING_ENGINE_EN, 0x0); + wmb(); /* ensure Timing generator is turned off */ + return; + } + + dp_write(MMSS_DP_INTF_CONFIG, 0x0); + dp_write(MMSS_DP_INTF_HSYNC_CTL, + panel->hsync_ctl); + dp_write(MMSS_DP_INTF_VSYNC_PERIOD_F0, + panel->vsync_period * panel->hsync_period); + dp_write(MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, + panel->v_sync_width * panel->hsync_period); + dp_write(MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + dp_write(MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + dp_write(MMSS_DP_INTF_DISPLAY_HCTL, panel->display_hctl); + dp_write(MMSS_DP_INTF_ACTIVE_HCTL, 0); + dp_write(MMSS_INTF_DISPLAY_V_START_F0, panel->display_v_start); + dp_write(MMSS_DP_INTF_DISPLAY_V_END_F0, panel->display_v_end); + dp_write(MMSS_INTF_DISPLAY_V_START_F1, 0); + dp_write(MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + dp_write(MMSS_DP_INTF_POLARITY_CTL, 0); + wmb(); /* ensure TPG registers are programmed */ + + dp_write(MMSS_DP_TPG_MAIN_CONTROL, 0x100); + dp_write(MMSS_DP_TPG_VIDEO_CONFIG, 0x5); + wmb(); /* ensure TPG config is programmed */ + dp_write(MMSS_DP_BIST_ENABLE, 0x1); + dp_write(MMSS_DP_TIMING_ENGINE_EN, 0x1); + wmb(); /* ensure Timing generator is turned on */ +} + +static void dp_catalog_panel_dsc_cfg(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 reg, offset; + int i; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + + if (panel->stream_id == 
DP_STREAM_0) + io_data = catalog->io.dp_p0; + else + io_data = catalog->io.dp_p1; + + dp_write(MMSS_DP_DSC_DTO_COUNT, panel->dsc.dto_count); + + reg = dp_read(MMSS_DP_DSC_DTO); + if (panel->dsc.dto_en) { + reg |= BIT(0); + reg |= (panel->dsc.dto_n << 8); + reg |= (panel->dsc.dto_d << 16); + } + dp_write(MMSS_DP_DSC_DTO, reg); + + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_0) + offset = 0; + else + offset = DP1_COMPRESSION_MODE_CTRL - DP_COMPRESSION_MODE_CTRL; + + dp_write(DP_PPS_HB_0_3 + offset, 0x7F1000); + dp_write(DP_PPS_PB_0_3 + offset, 0xA22300); + + for (i = 0; i < panel->dsc.parity_word_len; i++) + dp_write(DP_PPS_PB_4_7 + (i << 2) + offset, + panel->dsc.parity_word[i]); + + for (i = 0; i < panel->dsc.pps_word_len; i++) + dp_write(DP_PPS_PPS_0_3 + (i << 2) + offset, + panel->dsc.pps_word[i]); + + reg = 0; + if (panel->dsc.dsc_en) { + reg = BIT(0); + reg |= (panel->dsc.eol_byte_num << 3); + reg |= (panel->dsc.slice_per_pkt << 5); + reg |= (panel->dsc.bytes_per_pkt << 16); + reg |= (panel->dsc.be_in_lane << 10); + } + dp_write(DP_COMPRESSION_MODE_CTRL + offset, reg); + + DP_DEBUG("compression:0x%x for stream:%d\n", + reg, panel->stream_id); +} + +static void dp_catalog_panel_dp_flush(struct dp_catalog_panel *panel, + enum dp_flush_bit flush_bit) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 dp_flush, offset; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_0) + offset = 0; + else + offset = MMSS_DP1_FLUSH - MMSS_DP_FLUSH; + + dp_flush = dp_read(MMSS_DP_FLUSH + offset); + dp_flush |= BIT(flush_bit); + dp_write(MMSS_DP_FLUSH + offset, dp_flush); +} + +static void dp_catalog_panel_pps_flush(struct dp_catalog_panel *panel) +{ + dp_catalog_panel_dp_flush(panel, DP_PPS_FLUSH); + DP_DEBUG("pps flush for stream:%d\n", panel->stream_id); +} + +static void dp_catalog_panel_dhdr_flush(struct dp_catalog_panel *panel) +{ + dp_catalog_panel_dp_flush(panel, DP_DHDR_FLUSH); + DP_DEBUG("dhdr flush for stream:%d\n", panel->stream_id); +} + + +static bool dp_catalog_panel_dhdr_busy(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 dp_flush, offset; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return false; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_0) + offset = 0; + else + offset = MMSS_DP1_FLUSH - MMSS_DP_FLUSH; + + dp_flush = dp_read(MMSS_DP_FLUSH + offset); + + return dp_flush & BIT(DP_DHDR_FLUSH) ? 
true : false; +} + +static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl) +{ + u32 sw_reset; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + sw_reset = dp_read(DP_SW_RESET); + + sw_reset |= BIT(0); + dp_write(DP_SW_RESET, sw_reset); + usleep_range(1000, 1010); /* h/w recommended delay */ + + sw_reset &= ~BIT(0); + dp_write(DP_SW_RESET, sw_reset); +} + +static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl) +{ + u32 data; + int cnt = 10; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + goto end; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + while (--cnt) { + /* DP_MAINLINK_READY */ + data = dp_read(DP_MAINLINK_READY); + if (data & BIT(0)) + return true; + + usleep_range(1000, 1010); /* 1ms wait before next reg read */ + } + DP_ERR("mainlink not ready\n"); +end: + return false; +} + +static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + if (enable) { + dp_write(DP_INTR_STATUS, DP_INTR_MASK1); + dp_write(DP_INTR_STATUS2, DP_INTR_MASK2); + dp_write(DP_INTR_STATUS5, DP_INTR_MASK5); + } else { + dp_write(DP_INTR_STATUS, 0x00); + dp_write(DP_INTR_STATUS2, 0x00); + dp_write(DP_INTR_STATUS5, 0x00); + } +} + +static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl) +{ + u32 ack = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + ctrl->isr = dp_read(DP_INTR_STATUS2); + ctrl->isr &= ~DP_INTR_MASK2; + ack = ctrl->isr & DP_INTERRUPT_STATUS2; + ack <<= 1; + ack |= DP_INTR_MASK2; + dp_write(DP_INTR_STATUS2, ack); + + ctrl->isr5 = dp_read(DP_INTR_STATUS5); + ctrl->isr5 &= ~DP_INTR_MASK5; + ack = ctrl->isr5 & DP_INTERRUPT_STATUS5; + ack <<= 1; + ack |= DP_INTR_MASK5; + dp_write(DP_INTR_STATUS5, ack); +} + +static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + dp_write(DP_PHY_CTRL, 0x5); /* bit 0 & 2 */ + usleep_range(1000, 1010); /* h/w recommended delay */ + dp_write(DP_PHY_CTRL, 0x0); + wmb(); /* make sure PHY reset done */ +} + +static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl, + bool flipped, u8 ln_cnt) +{ + u32 info = 0x0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u8 orientation = BIT(!!flipped); + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_phy; + + info |= (ln_cnt & 0x0F); + info |= ((orientation & 0x0F) << 4); + DP_DEBUG("Shared Info = 0x%x\n", info); + + dp_write(DP_PHY_SPARE0, info); +} + +static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl, + u8 v_level, u8 p_level, bool high) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u8 value0, value1; + u32 version; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + 
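+ /*
+ * Swing/pre-emphasis values come from the LUTs at the top of this file:
+ * on DP_HW_VERSION 0x10020004 the HBR2/HBR3 or HBR/RBR tables are picked
+ * based on the 'high' link-rate flag, older controllers use the legacy
+ * tables.
+ */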
catalog = dp_catalog_get_priv(ctrl); + + DP_DEBUG("hw: v=%d p=%d\n", v_level, p_level); + + io_data = catalog->io.dp_ahb; + version = dp_read(DP_HW_VERSION); + + if (version == 0x10020004) { + if (high) { + value0 = vm_voltage_swing_hbr3_hbr2[v_level][p_level]; + value1 = vm_pre_emphasis_hbr3_hbr2[v_level][p_level]; + } else { + value0 = vm_voltage_swing_hbr_rbr[v_level][p_level]; + value1 = vm_pre_emphasis_hbr_rbr[v_level][p_level]; + } + } else { + value0 = vm_voltage_swing[v_level][p_level]; + value1 = vm_pre_emphasis[v_level][p_level]; + } + + /* program default setting first */ + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + /* Enable MUX to use Cursor values from these registers */ + value0 |= BIT(5); + value1 |= BIT(5); + + /* Configure host and panel only if both values are allowed */ + if (value0 != 0xFF && value1 != 0xFF) { + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + DP_DEBUG("hw: vx_value=0x%x px_value=0x%x\n", + value0, value1); + } else { + DP_ERR("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n", + v_level, value0, p_level, value1); + } +} + +static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl, + u32 pattern) +{ + struct dp_catalog_private *catalog; + u32 value = 0x0; + struct dp_io_data *io_data = NULL; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + dp_write(DP_STATE_CTRL, 0x0); + + switch (pattern) { + case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING: + dp_write(DP_STATE_CTRL, 0x1); + break; + case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT: + value &= ~(1 << 16); + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + value |= 0xFC; + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + dp_write(DP_MAINLINK_LEVELS, 0x2); + dp_write(DP_STATE_CTRL, 0x10); + break; + case DP_TEST_PHY_PATTERN_PRBS7: + dp_write(DP_STATE_CTRL, 0x20); + break; + case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN: + dp_write(DP_STATE_CTRL, 0x40); + /* 00111110000011111000001111100000 */ + dp_write(DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0); + /* 00001111100000111110000011111000 */ + dp_write(DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8); + /* 1111100000111110 */ + dp_write(DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E); + break; + case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1: + value = dp_read(DP_MAINLINK_CTRL); + value &= ~BIT(4); + dp_write(DP_MAINLINK_CTRL, value); + + value = BIT(16); + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + value |= 0xFC; + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + dp_write(DP_MAINLINK_LEVELS, 0x2); + dp_write(DP_STATE_CTRL, 0x10); + + value = dp_read(DP_MAINLINK_CTRL); + value |= BIT(0); + dp_write(DP_MAINLINK_CTRL, value); + break; + case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3: + dp_write(DP_MAINLINK_CTRL, 0x01); + dp_write(DP_STATE_CTRL, 0x8); + break; + default: + DP_DEBUG("No valid test pattern requested: 0x%x\n", pattern); + return; + } + + /* Make sure the test pattern is programmed in the hardware */ + wmb(); +} + +static u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + + if 
(!ctrl) { + DP_ERR("invalid input\n"); + return 0; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + return dp_read(DP_MAINLINK_READY); +} + +static void dp_catalog_ctrl_fec_config(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 reg; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + reg = dp_read(DP_MAINLINK_CTRL); + + /* + * fec_en = BIT(12) + * fec_seq_mode = BIT(22) + * sde_flush = BIT(23) | BIT(24) + * fb_boundary_sel = BIT(25) + */ + if (enable) + reg |= BIT(12) | BIT(22) | BIT(23) | BIT(24) | BIT(25); + else + reg &= ~BIT(12); + + dp_write(DP_MAINLINK_CTRL, reg); + /* make sure mainlink configuration is updated with fec sequence */ + wmb(); +} + +static int dp_catalog_reg_dump(struct dp_catalog *dp_catalog, + char *name, u8 **out_buf, u32 *out_buf_len) +{ + int ret = 0; + u8 *buf; + u32 len; + struct dp_io_data *io_data; + struct dp_catalog_private *catalog; + struct dp_parser *parser; + + if (!dp_catalog) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = container_of(dp_catalog, struct dp_catalog_private, + dp_catalog); + + parser = catalog->parser; + parser->get_io_buf(parser, name); + io_data = parser->get_io(parser, name); + if (!io_data) { + DP_ERR("IO %s not found\n", name); + ret = -EINVAL; + goto end; + } + + buf = io_data->buf; + len = io_data->io.len; + + if (!buf || !len) { + DP_ERR("no buffer available\n"); + ret = -ENOMEM; + goto end; + } + + if (!strcmp(catalog->exe_mode, "hw") || + !strcmp(catalog->exe_mode, "all")) { + u32 i, data; + u32 const rowsize = 4; + void __iomem *addr = io_data->io.base; + + memset(buf, 0, len); + + for (i = 0; i < len / rowsize; i++) { + data = readl_relaxed(addr); + memcpy(buf + (rowsize * i), &data, sizeof(u32)); + + addr += rowsize; + } + } + + *out_buf = buf; + *out_buf_len = len; +end: + if (ret) + parser->clear_io_buf(parser); + + return ret; +} + +static void dp_catalog_ctrl_mst_config(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 reg; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + reg = dp_read(DP_MAINLINK_CTRL); + if (enable) + reg |= (0x04000100); + else + reg &= ~(0x04000100); + + dp_write(DP_MAINLINK_CTRL, reg); + /* make sure mainlink MST configuration is updated */ + wmb(); +} + +static void dp_catalog_ctrl_trigger_act(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + dp_write(DP_MST_ACT, 0x1); + /* make sure ACT signal is performed */ + wmb(); +} + +static void dp_catalog_ctrl_read_act_complete_sts(struct dp_catalog_ctrl *ctrl, + bool *sts) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 reg; + + if (!ctrl || !sts) { + DP_ERR("invalid input\n"); + return; + } + + *sts = false; + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + reg = dp_read(DP_MST_ACT); + + if (!reg) + *sts = true; +} + +static void dp_catalog_ctrl_channel_alloc(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_slot, u32 tot_slot_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; 
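+ /*
+ * The 63 MST timeslots are split across two registers: bits in
+ * slot_reg_1 cover slots 1-32 and bits in slot_reg_2 cover slots 33-63,
+ * one bit per allocated slot. For example, ch_start_slot 5 with
+ * tot_slot_cnt 3 sets bits 4-6 of slot_reg_1 for this stream.
+ */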
+ u32 i, slot_reg_1, slot_reg_2, slot; + u32 reg_off = 0; + int const num_slots_per_reg = 32; + + if (!ctrl || ch >= DP_STREAM_MAX) { + DP_ERR("invalid input. ch %d\n", ch); + return; + } + + if (ch_start_slot > DP_MAX_TIME_SLOTS || + (ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) { + DP_ERR("invalid slots start %d, tot %d\n", + ch_start_slot, tot_slot_cnt); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + DP_DEBUG("ch %d, start_slot %d, tot_slot %d\n", + ch, ch_start_slot, tot_slot_cnt); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32; + + slot_reg_1 = 0; + slot_reg_2 = 0; + + if (ch_start_slot && tot_slot_cnt) { + ch_start_slot--; + for (i = 0; i < tot_slot_cnt; i++) { + if (ch_start_slot < num_slots_per_reg) { + slot_reg_1 |= BIT(ch_start_slot); + } else { + slot = ch_start_slot - num_slots_per_reg; + slot_reg_2 |= BIT(slot); + } + ch_start_slot++; + } + } + + DP_DEBUG("ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, + slot_reg_1, slot_reg_2); + + dp_write(DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); + dp_write(DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); +} + +static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_slot, u32 tot_slot_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 i, slot_reg_1, slot_reg_2, slot; + u32 reg_off = 0; + + if (!ctrl || ch >= DP_STREAM_MAX) { + DP_ERR("invalid input. ch %d\n", ch); + return; + } + + if (ch_start_slot > DP_MAX_TIME_SLOTS || + (ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) { + DP_ERR("invalid slots start %d, tot %d\n", + ch_start_slot, tot_slot_cnt); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + DP_DEBUG("dealloc ch %d, start_slot %d, tot_slot %d\n", + ch, ch_start_slot, tot_slot_cnt); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32; + + slot_reg_1 = dp_read(DP_DP0_TIMESLOT_1_32 + reg_off); + slot_reg_2 = dp_read(DP_DP0_TIMESLOT_33_63 + reg_off); + + ch_start_slot = ch_start_slot - 1; + for (i = 0; i < tot_slot_cnt; i++) { + if (ch_start_slot < 33) { + slot_reg_1 &= ~BIT(ch_start_slot); + } else { + slot = ch_start_slot - 33; + slot_reg_2 &= ~BIT(slot); + } + ch_start_slot++; + } + + DP_DEBUG("dealloc ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, + slot_reg_1, slot_reg_2); + + dp_write(DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); + dp_write(DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); +} + +static void dp_catalog_ctrl_update_rg(struct dp_catalog_ctrl *ctrl, u32 ch, + u32 x_int, u32 y_frac_enum) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 rg, reg_off = 0; + + if (!ctrl || ch >= DP_STREAM_MAX) { + DP_ERR("invalid input. 
ch %d\n", ch); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + rg = y_frac_enum; + rg |= (x_int << 16); + + DP_DEBUG("ch: %d x_int:%d y_frac_enum:%d rg:%d\n", ch, x_int, + y_frac_enum, rg); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_RG - DP_DP0_RG; + + dp_write(DP_DP0_RG + reg_off, rg); +} + +static void dp_catalog_ctrl_mainlink_levels(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 mainlink_levels, safe_to_exit_level = 14; + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + switch (lane_cnt) { + case 1: + safe_to_exit_level = 14; + break; + case 2: + safe_to_exit_level = 8; + break; + case 4: + safe_to_exit_level = 5; + break; + default: + DP_DEBUG("setting the default safe_to_exit_level = %u\n", + safe_to_exit_level); + break; + } + + mainlink_levels = dp_read(DP_MAINLINK_LEVELS); + mainlink_levels &= 0xFE0; + mainlink_levels |= safe_to_exit_level; + + DP_DEBUG("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", + mainlink_levels, safe_to_exit_level); + + dp_write(DP_MAINLINK_LEVELS, mainlink_levels); +} + + +/* panel related catalog functions */ +static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 offset = 0, reg; + + if (!panel) { + DP_ERR("invalid input\n"); + goto end; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + goto end; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + offset = DP1_TOTAL_HOR_VER - DP_TOTAL_HOR_VER; + + dp_write(DP_TOTAL_HOR_VER + offset, panel->total); + dp_write(DP_START_HOR_VER_FROM_SYNC + offset, panel->sync_start); + dp_write(DP_HSYNC_VSYNC_WIDTH_POLARITY + offset, panel->width_blanking); + dp_write(DP_ACTIVE_HOR_VER + offset, panel->dp_active); + + if (panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else + io_data = catalog->io.dp_p1; + + reg = dp_read(MMSS_DP_INTF_CONFIG); + + if (panel->widebus_en) + reg |= BIT(4); + else + reg &= ~BIT(4); + + dp_write(MMSS_DP_INTF_CONFIG, reg); +end: + return 0; +} + +static void dp_catalog_hpd_config_hpd(struct dp_catalog_hpd *hpd, bool en) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!hpd) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(hpd); + io_data = catalog->io.dp_aux; + + if (en) { + u32 reftimer = dp_read(DP_DP_HPD_REFTIMER); + + /* Arm only the UNPLUG and HPD_IRQ interrupts */ + dp_write(DP_DP_HPD_INT_ACK, 0xF); + dp_write(DP_DP_HPD_INT_MASK, 0xA); + + /* Enable REFTIMER to count 1ms */ + reftimer |= BIT(16); + dp_write(DP_DP_HPD_REFTIMER, reftimer); + + /* Connect_time is 250us & disconnect_time is 2ms */ + dp_write(DP_DP_HPD_EVENT_TIME_0, 0x3E800FA); + dp_write(DP_DP_HPD_EVENT_TIME_1, 0x1F407D0); + + /* Enable HPD */ + dp_write(DP_DP_HPD_CTRL, 0x1); + + } else { + /* Disable HPD */ + dp_write(DP_DP_HPD_CTRL, 0x0); + } +} + +static u32 dp_catalog_hpd_get_interrupt(struct dp_catalog_hpd *hpd) +{ + u32 isr = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!hpd) { + DP_ERR("invalid input\n"); + return isr; + } + + catalog = dp_catalog_get_priv(hpd); + + io_data = catalog->io.dp_aux; + isr = dp_read(DP_DP_HPD_INT_STATUS); + dp_write(DP_DP_HPD_INT_ACK, (isr & 0xf)); + + return isr; +} + +static void dp_catalog_audio_init(struct 
dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { + { + MMSS_DP_AUDIO_STREAM_0, + MMSS_DP_AUDIO_STREAM_1, + MMSS_DP_AUDIO_STREAM_1, + }, + { + MMSS_DP_AUDIO_TIMESTAMP_0, + MMSS_DP_AUDIO_TIMESTAMP_1, + MMSS_DP_AUDIO_TIMESTAMP_1, + }, + { + MMSS_DP_AUDIO_INFOFRAME_0, + MMSS_DP_AUDIO_INFOFRAME_1, + MMSS_DP_AUDIO_INFOFRAME_1, + }, + { + MMSS_DP_AUDIO_COPYMANAGEMENT_0, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + }, + { + MMSS_DP_AUDIO_ISRC_0, + MMSS_DP_AUDIO_ISRC_1, + MMSS_DP_AUDIO_ISRC_1, + }, + }; + + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + catalog->audio_map = sdp_map; +} + +static void dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 sdp_cfg = 0, sdp_cfg_off = 0; + u32 sdp_cfg2 = 0, sdp_cfg2_off = 0; + + if (!audio) + return; + + if (audio->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", audio->stream_id); + return; + } + + if (audio->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + } + + catalog = dp_catalog_get_priv(audio); + io_data = catalog->io.dp_link; + + sdp_cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + + /* AUDIO_TIMESTAMP_SDP_EN */ + sdp_cfg |= BIT(1); + /* AUDIO_STREAM_SDP_EN */ + sdp_cfg |= BIT(2); + /* AUDIO_COPY_MANAGEMENT_SDP_EN */ + sdp_cfg |= BIT(5); + /* AUDIO_ISRC_SDP_EN */ + sdp_cfg |= BIT(6); + /* AUDIO_INFOFRAME_SDP_EN */ + sdp_cfg |= BIT(20); + + DP_DEBUG("sdp_cfg = 0x%x\n", sdp_cfg); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, sdp_cfg); + + sdp_cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg_off); + /* IFRM_REGSRC -> Do not use reg values */ + sdp_cfg2 &= ~BIT(0); + /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ + sdp_cfg2 &= ~BIT(1); + + DP_DEBUG("sdp_cfg2 = 0x%x\n", sdp_cfg2); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg_off, sdp_cfg2); +} + +static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_io_data *io_data; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + io_data = catalog->io.dp_link; + sdp_map = catalog->audio_map; + sdp = audio->sdp_type; + header = audio->sdp_header; + + audio->data = dp_read(sdp_map[sdp][header]); +} + +static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_io_data *io_data; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + u32 data; + + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + io_data = catalog->io.dp_link; + sdp_map = catalog->audio_map; + sdp = audio->sdp_type; + header = audio->sdp_header; + data = audio->data; + + dp_write(sdp_map[sdp][header], data); +} + +static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 acr_ctrl, select; + + catalog = dp_catalog_get_priv(audio); + + select = audio->data; + io_data = catalog->io.dp_link; + + acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); + + DP_DEBUG("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); + + dp_write(MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); +} + +static void dp_catalog_audio_enable(struct 
dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + bool enable; + u32 audio_ctrl; + + catalog = dp_catalog_get_priv(audio); + + io_data = catalog->io.dp_link; + enable = !!audio->data; + + audio_ctrl = dp_read(MMSS_DP_AUDIO_CFG); + + if (enable) + audio_ctrl |= BIT(0); + else + audio_ctrl &= ~BIT(0); + + DP_DEBUG("dp_audio_cfg = 0x%x\n", audio_ctrl); + dp_write(MMSS_DP_AUDIO_CFG, audio_ctrl); + + /* make sure audio engine is disabled */ + wmb(); +} + +static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 value, new_value, offset = 0; + u8 parity_byte; + + if (!panel || panel->stream_id >= DP_STREAM_MAX) + return; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; + + /* Config header and parity byte 1 */ + value = dp_read(MMSS_DP_GENERIC1_0 + offset); + + new_value = 0x83; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_write(MMSS_DP_GENERIC1_0 + offset, value); + + /* Config header and parity byte 2 */ + value = dp_read(MMSS_DP_GENERIC1_1 + offset); + + new_value = 0x1b; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_write(MMSS_DP_GENERIC1_1 + offset, value); + + /* Config header and parity byte 3 */ + value = dp_read(MMSS_DP_GENERIC1_1 + offset); + + new_value = (0x0 | (0x12 << 2)); + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_write(MMSS_DP_GENERIC1_1 + offset, value); +} + +static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 spd_cfg = 0, spd_cfg2 = 0; + u8 *vendor = NULL, *product = NULL; + u32 offset = 0; + u32 sdp_cfg_off = 0; + u32 sdp_cfg2_off = 0; + + /* + * Source Device Information + * 00h unknown + * 01h Digital STB + * 02h DVD + * 03h D-VHS + * 04h HDD Video + * 05h DVC + * 06h DSC + * 07h Video CD + * 08h Game + * 09h PC general + * 0ah Bluray-Disc + * 0bh Super Audio CD + * 0ch HD DVD + * 0dh PMP + * 0eh-ffh reserved + */ + u32 device_type = 0; + + if (!panel || panel->stream_id >= DP_STREAM_MAX) + return; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; + + dp_catalog_config_spd_header(panel); + + vendor = panel->spd_vendor_name; + product = panel->spd_product_description; + + dp_write(MMSS_DP_GENERIC1_2 + offset, + ((vendor[0] & 0x7f) | + ((vendor[1] & 0x7f) << 8) | + ((vendor[2] & 0x7f) << 16) | + ((vendor[3] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_3 + offset, + ((vendor[4] & 0x7f) | + ((vendor[5] & 0x7f) << 8) | + ((vendor[6] & 0x7f) << 16) | + ((vendor[7] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_4 + offset, + ((product[0] & 0x7f) | + ((product[1] & 0x7f) << 8) | + ((product[2] & 0x7f) << 16) | + ((product[3] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_5 + 
offset, + ((product[4] & 0x7f) | + ((product[5] & 0x7f) << 8) | + ((product[6] & 0x7f) << 16) | + ((product[7] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_6 + offset, + ((product[8] & 0x7f) | + ((product[9] & 0x7f) << 8) | + ((product[10] & 0x7f) << 16) | + ((product[11] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_7 + offset, + ((product[12] & 0x7f) | + ((product[13] & 0x7f) << 8) | + ((product[14] & 0x7f) << 16) | + ((product[15] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_8 + offset, device_type); + dp_write(MMSS_DP_GENERIC1_9 + offset, 0x00); + + if (panel->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + } + + spd_cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + /* GENERIC1_SDP for SPD Infoframe */ + spd_cfg |= BIT(18); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, spd_cfg); + + spd_cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + /* 28 data bytes for SPD Infoframe with GENERIC1 set */ + spd_cfg2 |= BIT(17); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, spd_cfg2); + + dp_catalog_panel_sdp_update(panel); +} + +static void dp_catalog_get_io_buf(struct dp_catalog_private *catalog) +{ + struct dp_parser *parser = catalog->parser; + + dp_catalog_fill_io_buf(dp_ahb); + dp_catalog_fill_io_buf(dp_aux); + dp_catalog_fill_io_buf(dp_link); + dp_catalog_fill_io_buf(dp_p0); + dp_catalog_fill_io_buf(dp_phy); + dp_catalog_fill_io_buf(dp_ln_tx0); + dp_catalog_fill_io_buf(dp_ln_tx1); + dp_catalog_fill_io_buf(dp_pll); + dp_catalog_fill_io_buf(usb3_dp_com); + dp_catalog_fill_io_buf(dp_mmss_cc); + dp_catalog_fill_io_buf(hdcp_physical); + dp_catalog_fill_io_buf(dp_p1); + dp_catalog_fill_io_buf(dp_tcsr); +} + +static void dp_catalog_get_io(struct dp_catalog_private *catalog) +{ + struct dp_parser *parser = catalog->parser; + + dp_catalog_fill_io(dp_ahb); + dp_catalog_fill_io(dp_aux); + dp_catalog_fill_io(dp_link); + dp_catalog_fill_io(dp_p0); + dp_catalog_fill_io(dp_phy); + dp_catalog_fill_io(dp_ln_tx0); + dp_catalog_fill_io(dp_ln_tx1); + dp_catalog_fill_io(dp_pll); + dp_catalog_fill_io(usb3_dp_com); + dp_catalog_fill_io(dp_mmss_cc); + dp_catalog_fill_io(hdcp_physical); + dp_catalog_fill_io(dp_p1); + dp_catalog_fill_io(dp_tcsr); +} + +static void dp_catalog_set_exe_mode(struct dp_catalog *dp_catalog, char *mode) +{ + struct dp_catalog_private *catalog; + + if (!dp_catalog) { + DP_ERR("invalid input\n"); + return; + } + + catalog = container_of(dp_catalog, struct dp_catalog_private, + dp_catalog); + + strlcpy(catalog->exe_mode, mode, sizeof(catalog->exe_mode)); + + if (!strcmp(catalog->exe_mode, "hw")) + catalog->parser->clear_io_buf(catalog->parser); + else + dp_catalog_get_io_buf(catalog); + + if (!strcmp(catalog->exe_mode, "hw") || + !strcmp(catalog->exe_mode, "all")) { + catalog->read = dp_read_hw; + catalog->write = dp_write_hw; + + dp_catalog->sub->read = dp_read_sub_hw; + dp_catalog->sub->write = dp_write_sub_hw; + } else { + catalog->read = dp_read_sw; + catalog->write = dp_write_sw; + + dp_catalog->sub->read = dp_read_sub_sw; + dp_catalog->sub->write = dp_write_sub_sw; + } +} + +static int dp_catalog_init(struct device *dev, struct dp_catalog *dp_catalog, + struct dp_parser *parser) +{ + int rc = 0; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + switch (parser->hw_cfg.phy_version) { + case DP_PHY_VERSION_4_2_0: + dp_catalog->sub = dp_catalog_get_v420(dev, dp_catalog, + &catalog->io); + break; + case DP_PHY_VERSION_2_0_0: + dp_catalog->sub = 
dp_catalog_get_v200(dev, dp_catalog, + &catalog->io); + break; + default: + goto end; + } + + if (IS_ERR(dp_catalog->sub)) { + rc = PTR_ERR(dp_catalog->sub); + dp_catalog->sub = NULL; + } else { + dp_catalog->sub->read = dp_read_sub_hw; + dp_catalog->sub->write = dp_write_sub_hw; + } +end: + return rc; +} + +void dp_catalog_put(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, struct dp_catalog_private, + dp_catalog); + + if (dp_catalog->sub && dp_catalog->sub->put) + dp_catalog->sub->put(dp_catalog); + + catalog->parser->clear_io_buf(catalog->parser); + devm_kfree(catalog->dev, catalog); +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser) +{ + int rc = 0; + struct dp_catalog *dp_catalog; + struct dp_catalog_private *catalog; + struct dp_catalog_aux aux = { + .read_data = dp_catalog_aux_read_data, + .write_data = dp_catalog_aux_write_data, + .write_trans = dp_catalog_aux_write_trans, + .clear_trans = dp_catalog_aux_clear_trans, + .reset = dp_catalog_aux_reset, + .update_aux_cfg = dp_catalog_aux_update_cfg, + .enable = dp_catalog_aux_enable, + .setup = dp_catalog_aux_setup, + .get_irq = dp_catalog_aux_get_irq, + .clear_hw_interrupts = dp_catalog_aux_clear_hw_interrupts, + }; + struct dp_catalog_ctrl ctrl = { + .state_ctrl = dp_catalog_ctrl_state_ctrl, + .config_ctrl = dp_catalog_ctrl_config_ctrl, + .lane_mapping = dp_catalog_ctrl_lane_mapping, + .lane_pnswap = dp_catalog_ctrl_lane_pnswap, + .mainlink_ctrl = dp_catalog_ctrl_mainlink_ctrl, + .set_pattern = dp_catalog_ctrl_set_pattern, + .reset = dp_catalog_ctrl_reset, + .usb_reset = dp_catalog_ctrl_usb_reset, + .mainlink_ready = dp_catalog_ctrl_mainlink_ready, + .enable_irq = dp_catalog_ctrl_enable_irq, + .phy_reset = dp_catalog_ctrl_phy_reset, + .phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg, + .update_vx_px = dp_catalog_ctrl_update_vx_px, + .get_interrupt = dp_catalog_ctrl_get_interrupt, + .read_hdcp_status = dp_catalog_ctrl_read_hdcp_status, + .send_phy_pattern = dp_catalog_ctrl_send_phy_pattern, + .read_phy_pattern = dp_catalog_ctrl_read_phy_pattern, + .mst_config = dp_catalog_ctrl_mst_config, + .trigger_act = dp_catalog_ctrl_trigger_act, + .read_act_complete_sts = dp_catalog_ctrl_read_act_complete_sts, + .channel_alloc = dp_catalog_ctrl_channel_alloc, + .update_rg = dp_catalog_ctrl_update_rg, + .channel_dealloc = dp_catalog_ctrl_channel_dealloc, + .fec_config = dp_catalog_ctrl_fec_config, + .mainlink_levels = dp_catalog_ctrl_mainlink_levels, + .late_phy_init = dp_catalog_ctrl_late_phy_init, + }; + struct dp_catalog_hpd hpd = { + .config_hpd = dp_catalog_hpd_config_hpd, + .get_interrupt = dp_catalog_hpd_get_interrupt, + }; + struct dp_catalog_audio audio = { + .init = dp_catalog_audio_init, + .config_acr = dp_catalog_audio_config_acr, + .enable = dp_catalog_audio_enable, + .config_sdp = dp_catalog_audio_config_sdp, + .set_header = dp_catalog_audio_set_header, + .get_header = dp_catalog_audio_get_header, + }; + struct dp_catalog_panel panel = { + .timing_cfg = dp_catalog_panel_timing_cfg, + .config_hdr = dp_catalog_panel_config_hdr, + .config_sdp = dp_catalog_panel_config_sdp, + .tpg_config = dp_catalog_panel_tpg_cfg, + .config_spd = dp_catalog_panel_config_spd, + .config_misc = dp_catalog_panel_config_misc, + .set_colorspace = dp_catalog_panel_set_colorspace, + .config_msa = dp_catalog_panel_config_msa, + .update_transfer_unit = dp_catalog_panel_update_transfer_unit, + .config_ctrl = dp_catalog_panel_config_ctrl, + 
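/* remaining panel ops: DTO ack, DSC/PPS programming and dynamic HDR flush/busy (descriptive note derived from the handler names) */ +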
.config_dto = dp_catalog_panel_config_dto, + .dsc_cfg = dp_catalog_panel_dsc_cfg, + .pps_flush = dp_catalog_panel_pps_flush, + .dhdr_flush = dp_catalog_panel_dhdr_flush, + .dhdr_busy = dp_catalog_panel_dhdr_busy, + }; + + if (!dev || !parser) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); + if (!catalog) { + rc = -ENOMEM; + goto error; + } + + catalog->dev = dev; + catalog->parser = parser; + + catalog->read = dp_read_hw; + catalog->write = dp_write_hw; + + dp_catalog_get_io(catalog); + + strlcpy(catalog->exe_mode, "hw", sizeof(catalog->exe_mode)); + + dp_catalog = &catalog->dp_catalog; + + dp_catalog->aux = aux; + dp_catalog->ctrl = ctrl; + dp_catalog->hpd = hpd; + dp_catalog->audio = audio; + dp_catalog->panel = panel; + + rc = dp_catalog_init(dev, dp_catalog, parser); + if (rc) { + dp_catalog_put(dp_catalog); + goto error; + } + + dp_catalog->set_exe_mode = dp_catalog_set_exe_mode; + dp_catalog->get_reg_dump = dp_catalog_reg_dump; + + return dp_catalog; +error: + return ERR_PTR(rc); +} diff --git a/techpack/display/msm/dp/dp_catalog.h b/techpack/display/msm/dp/dp_catalog.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1f00408e43508a7444b0c7e856081aacc4b455 --- /dev/null +++ b/techpack/display/msm/dp/dp_catalog.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_CATALOG_H_ +#define _DP_CATALOG_H_ + +#include +#include + +#include "dp_parser.h" + +/* interrupts */ +#define DP_INTR_HPD BIT(0) +#define DP_INTR_AUX_I2C_DONE BIT(3) +#define DP_INTR_WRONG_ADDR BIT(6) +#define DP_INTR_TIMEOUT BIT(9) +#define DP_INTR_NACK_DEFER BIT(12) +#define DP_INTR_WRONG_DATA_CNT BIT(15) +#define DP_INTR_I2C_NACK BIT(18) +#define DP_INTR_I2C_DEFER BIT(21) +#define DP_INTR_PLL_UNLOCKED BIT(24) +#define DP_INTR_AUX_ERROR BIT(27) + +#define DP_INTR_READY_FOR_VIDEO BIT(0) +#define DP_INTR_IDLE_PATTERN_SENT BIT(3) +#define DP_INTR_FRAME_END BIT(6) +#define DP_INTR_CRC_UPDATED BIT(9) + +#define DP_INTR_MST_DP0_VCPF_SENT BIT(0) +#define DP_INTR_MST_DP1_VCPF_SENT BIT(3) + +#define DP_MAX_TIME_SLOTS 64 + +/* stream id */ +enum dp_stream_id { + DP_STREAM_0, + DP_STREAM_1, + DP_STREAM_MAX, +}; + +struct dp_catalog_vsc_sdp_colorimetry { + struct dp_sdp_header header; + u8 data[32]; +}; + +struct dp_catalog_aux { + u32 data; + u32 isr; + + u32 (*read_data)(struct dp_catalog_aux *aux); + int (*write_data)(struct dp_catalog_aux *aux); + int (*write_trans)(struct dp_catalog_aux *aux); + int (*clear_trans)(struct dp_catalog_aux *aux, bool read); + void (*reset)(struct dp_catalog_aux *aux); + void (*enable)(struct dp_catalog_aux *aux, bool enable); + void (*update_aux_cfg)(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type); + void (*setup)(struct dp_catalog_aux *aux, + struct dp_aux_cfg *aux_cfg); + void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy); + void (*clear_hw_interrupts)(struct dp_catalog_aux *aux); +}; + +struct dp_catalog_ctrl { + u32 isr; + u32 isr5; + + void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state); + void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt); + void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped, + char *lane_map); + void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap); + void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern); 
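+	/* ops below cover controller/PHY reset and status, vx/px levels, PHY test patterns, MST channel management and FEC */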
+ void (*reset)(struct dp_catalog_ctrl *ctrl); + void (*usb_reset)(struct dp_catalog_ctrl *ctrl, bool flip); + bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl); + void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*phy_reset)(struct dp_catalog_ctrl *ctrl); + void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped, + u8 lane_cnt); + void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level, + u8 p_level, bool high); + void (*get_interrupt)(struct dp_catalog_ctrl *ctrl); + u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl); + void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl, + u32 pattern); + u32 (*read_phy_pattern)(struct dp_catalog_ctrl *ctrl); + void (*mst_config)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*trigger_act)(struct dp_catalog_ctrl *ctrl); + void (*read_act_complete_sts)(struct dp_catalog_ctrl *ctrl, bool *sts); + void (*channel_alloc)(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt); + void (*update_rg)(struct dp_catalog_ctrl *ctrl, u32 ch, u32 x_int, + u32 y_frac_enum); + void (*channel_dealloc)(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt); + void (*fec_config)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*mainlink_levels)(struct dp_catalog_ctrl *ctrl, u8 lane_cnt); + + int (*late_phy_init)(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt, bool flipped); +}; + +struct dp_catalog_hpd { + void (*config_hpd)(struct dp_catalog_hpd *hpd, bool en); + u32 (*get_interrupt)(struct dp_catalog_hpd *hpd); +}; + +#define HEADER_BYTE_2_BIT 0 +#define PARITY_BYTE_2_BIT 8 +#define HEADER_BYTE_1_BIT 16 +#define PARITY_BYTE_1_BIT 24 +#define HEADER_BYTE_3_BIT 16 +#define PARITY_BYTE_3_BIT 24 + +enum dp_catalog_audio_sdp_type { + DP_AUDIO_SDP_STREAM, + DP_AUDIO_SDP_TIMESTAMP, + DP_AUDIO_SDP_INFOFRAME, + DP_AUDIO_SDP_COPYMANAGEMENT, + DP_AUDIO_SDP_ISRC, + DP_AUDIO_SDP_MAX, +}; + +enum dp_catalog_audio_header_type { + DP_AUDIO_SDP_HEADER_1, + DP_AUDIO_SDP_HEADER_2, + DP_AUDIO_SDP_HEADER_3, + DP_AUDIO_SDP_HEADER_MAX, +}; + +struct dp_catalog_audio { + enum dp_catalog_audio_sdp_type sdp_type; + enum dp_catalog_audio_header_type sdp_header; + u32 data; + + enum dp_stream_id stream_id; + + void (*init)(struct dp_catalog_audio *audio); + void (*enable)(struct dp_catalog_audio *audio); + void (*config_acr)(struct dp_catalog_audio *audio); + void (*config_sdp)(struct dp_catalog_audio *audio); + void (*set_header)(struct dp_catalog_audio *audio); + void (*get_header)(struct dp_catalog_audio *audio); +}; + +struct dp_dsc_cfg_data { + bool dsc_en; + char pps[128]; + u32 pps_len; + u32 pps_word[32]; + u32 pps_word_len; + u8 parity[32]; + u8 parity_len; + u32 parity_word[8]; + u32 parity_word_len; + u32 slice_per_pkt; + u32 bytes_per_pkt; + u32 eol_byte_num; + u32 be_in_lane; + u32 dto_en; + u32 dto_n; + u32 dto_d; + u32 dto_count; +}; + +struct dp_catalog_panel { + u32 total; + u32 sync_start; + u32 width_blanking; + u32 dp_active; + u8 *spd_vendor_name; + u8 *spd_product_description; + + struct dp_catalog_vsc_sdp_colorimetry vsc_colorimetry; + struct dp_sdp_header dhdr_vsif_sdp; + struct dp_sdp_header shdr_if_sdp; + struct drm_msm_ext_hdr_metadata hdr_meta; + + /* TPG */ + u32 hsync_period; + u32 vsync_period; + u32 display_v_start; + u32 display_v_end; + u32 v_sync_width; + u32 hsync_ctl; + u32 display_hctl; + + /* TU */ + u32 dp_tu; + u32 valid_boundary; + u32 valid_boundary2; + + u32 misc_val; + + enum dp_stream_id stream_id; + + bool widebus_en; + struct dp_dsc_cfg_data dsc; + + int 
(*timing_cfg)(struct dp_catalog_panel *panel); + void (*config_hdr)(struct dp_catalog_panel *panel, bool en, + u32 dhdr_max_pkts, bool flush); + void (*config_sdp)(struct dp_catalog_panel *panel, bool en); + int (*set_colorspace)(struct dp_catalog_panel *panel, + bool vsc_supported); + void (*tpg_config)(struct dp_catalog_panel *panel, bool enable); + void (*config_spd)(struct dp_catalog_panel *panel); + void (*config_misc)(struct dp_catalog_panel *panel); + void (*config_msa)(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz); + void (*update_transfer_unit)(struct dp_catalog_panel *panel); + void (*config_ctrl)(struct dp_catalog_panel *panel, u32 cfg); + void (*config_dto)(struct dp_catalog_panel *panel, bool ack); + void (*dsc_cfg)(struct dp_catalog_panel *panel); + void (*pps_flush)(struct dp_catalog_panel *panel); + void (*dhdr_flush)(struct dp_catalog_panel *panel); + bool (*dhdr_busy)(struct dp_catalog_panel *panel); +}; + +struct dp_catalog; +struct dp_catalog_sub { + u32 (*read)(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset); + void (*write)(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset, u32 data); + + void (*put)(struct dp_catalog *catalog); +}; + +struct dp_catalog_io { + struct dp_io_data *dp_ahb; + struct dp_io_data *dp_aux; + struct dp_io_data *dp_link; + struct dp_io_data *dp_p0; + struct dp_io_data *dp_phy; + struct dp_io_data *dp_ln_tx0; + struct dp_io_data *dp_ln_tx1; + struct dp_io_data *dp_mmss_cc; + struct dp_io_data *dp_pll; + struct dp_io_data *usb3_dp_com; + struct dp_io_data *hdcp_physical; + struct dp_io_data *dp_p1; + struct dp_io_data *dp_tcsr; +}; + +struct dp_catalog { + struct dp_catalog_aux aux; + struct dp_catalog_ctrl ctrl; + struct dp_catalog_audio audio; + struct dp_catalog_panel panel; + struct dp_catalog_hpd hpd; + + struct dp_catalog_sub *sub; + + void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode); + int (*get_reg_dump)(struct dp_catalog *dp_catalog, + char *mode, u8 **out_buf, u32 *out_buf_len); +}; + +static inline u8 dp_ecc_get_g0_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[3]; + g[1] = c[0] ^ c[3]; + g[2] = c[1]; + g[3] = c[2]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static inline u8 dp_ecc_get_g1_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[0] ^ c[3]; + g[1] = c[0] ^ c[1] ^ c[3]; + g[2] = c[1] ^ c[2]; + g[3] = c[2] ^ c[3]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static inline u8 dp_header_get_parity(u32 data) +{ + u8 x0 = 0; + u8 x1 = 0; + u8 ci = 0; + u8 iData = 0; + u8 i = 0; + u8 parity_byte; + u8 num_byte = (data > 0xFF) ? 
8 : 2; + + for (i = 0; i < num_byte; i++) { + iData = (data >> i*4) & 0xF; + + ci = iData ^ x1; + x1 = x0 ^ dp_ecc_get_g1_value(ci); + x0 = dp_ecc_get_g0_value(ci); + } + + parity_byte = x1 | (x0 << 4); + + return parity_byte; +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser); +void dp_catalog_put(struct dp_catalog *catalog); + +struct dp_catalog_sub *dp_catalog_get_v420(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io); + +struct dp_catalog_sub *dp_catalog_get_v200(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io); + +#endif /* _DP_CATALOG_H_ */ diff --git a/techpack/display/msm/dp/dp_catalog_v200.c b/techpack/display/msm/dp/dp_catalog_v200.c new file mode 100644 index 0000000000000000000000000000000000000000..97d78a120bc42c884fcb071dabe024fc92d5619c --- /dev/null +++ b/techpack/display/msm/dp/dp_catalog_v200.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ + +#include + +#include "dp_catalog.h" +#include "dp_reg.h" +#include "dp_debug.h" + +#define dp_catalog_get_priv_v200(x) ({ \ + struct dp_catalog *catalog; \ + catalog = container_of(x, struct dp_catalog, x); \ + container_of(catalog->sub, \ + struct dp_catalog_private_v200, sub); \ +}) + +#define dp_read(x) ({ \ + catalog->sub.read(catalog->dpc, io_data, x); \ +}) + +#define dp_write(x, y) ({ \ + catalog->sub.write(catalog->dpc, io_data, x, y); \ +}) + +struct dp_catalog_private_v200 { + struct device *dev; + struct dp_catalog_io *io; + struct dp_catalog *dpc; + struct dp_catalog_sub sub; +}; + +static void dp_catalog_aux_clear_hw_int_v200(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u32 data = 0; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(aux); + io_data = catalog->io->dp_phy; + + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V200); + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V200, 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V200, 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V200, 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_aux_setup_v200(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + int i = 0, sw_reset = 0; + + if (!aux || !cfg) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(aux); + io_data = catalog->io->dp_ahb; + + sw_reset = dp_read(DP_SW_RESET); + + sw_reset |= BIT(0); + dp_write(DP_SW_RESET, sw_reset); + usleep_range(1000, 1010); /* h/w recommended delay */ + + sw_reset &= ~BIT(0); + dp_write(DP_SW_RESET, sw_reset); + + dp_write(DP_PHY_CTRL, 0x4); /* bit 2 */ + udelay(1000); + dp_write(DP_PHY_CTRL, 0x0); /* bit 2 */ + wmb(); /* make sure programming happened */ + + io_data = catalog->io->dp_tcsr; + dp_write(0x4c, 0x1); /* bit 0 & 2 */ + wmb(); /* make sure programming happened */ + + io_data = catalog->io->dp_phy; + dp_write(DP_PHY_PD_CTL, 0x3c); + wmb(); /* make sure PD programming happened */ + dp_write(DP_PHY_PD_CTL, 0x3d); + wmb(); /* make sure PD programming happened */ + + /* DP AUX CFG register programming */ + io_data = catalog->io->dp_phy; + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp_write(cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + + 
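/* program the AUX PHY interrupt mask; 0x1f covers the same bits handled in dp_catalog_aux_clear_hw_int_v200() above */ +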
dp_write(DP_PHY_AUX_INTERRUPT_MASK_V200, 0x1F); + wmb(); /* make sure AUX configuration is done before enabling it */ +} + +static void dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid; + u32 const nvid_fixed = 0x8000; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0; + u32 mvid_reg_off = 0, nvid_reg_off = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv_v200(panel); + io_data = catalog->io->dp_mmss_cc; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = MMSS_DP_PIXEL1_M_V200 - + MMSS_DP_PIXEL_M_V200; + + pixel_m = dp_read(MMSS_DP_PIXEL_M_V200 + strm_reg_off); + pixel_n = dp_read(MMSS_DP_PIXEL_N_V200 + strm_reg_off); + DP_DEBUG("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + DP_DEBUG("rate = %d\n", rate); + + if (panel->widebus_en) + mvid <<= 1; + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + io_data = catalog->io->dp_link; + + if (panel->stream_id == DP_STREAM_1) { + mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID; + nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID; + } + + DP_DEBUG("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write(DP_SOFTWARE_MVID + mvid_reg_off, mvid); + dp_write(DP_SOFTWARE_NVID + nvid_reg_off, nvid); +} + +static void dp_catalog_ctrl_lane_mapping_v200(struct dp_catalog_ctrl *ctrl, + bool flipped, char *lane_map) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u8 l_map[4] = { 0 }, i = 0, j = 0; + u32 lane_map_reg = 0; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(ctrl); + io_data = catalog->io->dp_link; + + /* For flip case, swap phy lanes with ML0 and ML3, ML1 and ML2 */ + if (flipped) { + for (i = 0; i < DP_MAX_PHY_LN; i++) { + if (lane_map[i] == DP_ML0) { + for (j = 0; j < DP_MAX_PHY_LN; j++) { + if (lane_map[j] == DP_ML3) { + l_map[i] = DP_ML3; + l_map[j] = DP_ML0; + break; + } + } + } else if (lane_map[i] == DP_ML1) { + for (j = 0; j < DP_MAX_PHY_LN; j++) { + if (lane_map[j] == DP_ML2) { + l_map[i] = DP_ML2; + l_map[j] = DP_ML1; + break; + } + } + } + } + } else { + /* Normal orientation */ + for (i = 0; i < DP_MAX_PHY_LN; i++) + l_map[i] = lane_map[i]; + } + + lane_map_reg = ((l_map[3]&3)<<6)|((l_map[2]&3)<<4)|((l_map[1]&3)<<2) + |(l_map[0]&3); + + dp_write(DP_LOGICAL2PHYSICAL_LANE_MAPPING, lane_map_reg); +} + +static void dp_catalog_ctrl_usb_reset_v200(struct dp_catalog_ctrl *ctrl, + bool flip) +{ +} + +static void dp_catalog_put_v200(struct dp_catalog *catalog) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!catalog) + return; + + catalog_priv = container_of(catalog->sub, + struct dp_catalog_private_v200, sub); + + devm_kfree(catalog_priv->dev, catalog_priv); +} + +struct dp_catalog_sub *dp_catalog_get_v200(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!dev || !catalog) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); 
+ } + + catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL); + if (!catalog_priv) + return ERR_PTR(-ENOMEM); + + catalog_priv->dev = dev; + catalog_priv->io = io; + catalog_priv->dpc = catalog; + + catalog_priv->sub.put = dp_catalog_put_v200; + + catalog->aux.clear_hw_interrupts = dp_catalog_aux_clear_hw_int_v200; + catalog->aux.setup = dp_catalog_aux_setup_v200; + + catalog->panel.config_msa = dp_catalog_panel_config_msa_v200; + + catalog->ctrl.lane_mapping = dp_catalog_ctrl_lane_mapping_v200; + catalog->ctrl.usb_reset = dp_catalog_ctrl_usb_reset_v200; + + return &catalog_priv->sub; +} diff --git a/techpack/display/msm/dp/dp_catalog_v420.c b/techpack/display/msm/dp/dp_catalog_v420.c new file mode 100644 index 0000000000000000000000000000000000000000..a4f3c456b4585d1e9ac66683e719e81df92313f2 --- /dev/null +++ b/techpack/display/msm/dp/dp_catalog_v420.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include + +#include "dp_catalog.h" +#include "dp_reg.h" +#include "dp_debug.h" + +#define dp_catalog_get_priv_v420(x) ({ \ + struct dp_catalog *catalog; \ + catalog = container_of(x, struct dp_catalog, x); \ + container_of(catalog->sub, \ + struct dp_catalog_private_v420, sub); \ +}) + +#define dp_read(x) ({ \ + catalog->sub.read(catalog->dpc, io_data, x); \ +}) + +#define dp_write(x, y) ({ \ + catalog->sub.write(catalog->dpc, io_data, x, y); \ +}) + +#define DP_PHY_READY BIT(1) +#define MAX_VOLTAGE_LEVELS 4 +#define MAX_PRE_EMP_LEVELS 4 + +static u8 const vm_pre_emphasis[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x00, 0x0E, 0x16, 0xFF}, /* pe0, 0 db */ + {0x00, 0x0E, 0x16, 0xFF}, /* pe1, 3.5 db */ + {0x00, 0x0E, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0xFF, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +/* voltage swing, 0.2v and 1.0v are not support */ +static u8 const vm_voltage_swing[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x07, 0x0F, 0x16, 0xFF}, /* sw0, 0.4v */ + {0x11, 0x1E, 0x1F, 0xFF}, /* sw1, 0.6 v */ + {0x1A, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8 v */ + {0xFF, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */ +}; + +static u8 const dp_pre_emp_hbr2_hbr3[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x00, 0x0C, 0x15, 0x1A}, /* pe0, 0 db */ + {0x02, 0x0E, 0x16, 0xFF}, /* pe1, 3.5 db */ + {0x02, 0x11, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0x04, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +static u8 const dp_swing_hbr2_hbr3[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x02, 0x12, 0x16, 0x1A}, /* sw0, 0.4v */ + {0x09, 0x19, 0x1F, 0xFF}, /* sw1, 0.6v */ + {0x10, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8v */ + {0x1F, 0xFF, 0xFF, 0xFF} /* sw1, 1.2v */ +}; + +static u8 const dp_pre_emp_hbr_rbr[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x00, 0x0E, 0x15, 0x1A}, /* pe0, 0 db */ + {0x00, 0x0E, 0x15, 0xFF}, /* pe1, 3.5 db */ + {0x00, 0x0E, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0x04, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +static u8 const dp_swing_hbr_rbr[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x08, 0x0F, 0x16, 0x1F}, /* sw0, 0.4v */ + {0x11, 0x1E, 0x1F, 0xFF}, /* sw1, 0.6v */ + {0x16, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8v */ + {0x1F, 0xFF, 0xFF, 0xFF} /* sw1, 1.2v */ +}; + +struct dp_catalog_private_v420 { + struct device *dev; + struct dp_catalog_sub sub; + struct dp_catalog_io *io; + struct dp_catalog *dpc; +}; + +static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + int i = 0; + 
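+	/* setup sequence: power up the AUX PHY pads, turn on the PHY/PLL bias current, program the AUX CFG LUT entries and then the AUX interrupt mask */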
+ if (!aux || !cfg) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v420(aux); + + io_data = catalog->io->dp_phy; + dp_write(DP_PHY_PD_CTL, 0x67); + wmb(); /* make sure PD programming happened */ + + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io->dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17); + wmb(); /* make sure BIAS programming happened */ + + io_data = catalog->io->dp_phy; + /* DP AUX CFG register programming */ + for (i = 0; i < PHY_AUX_CFG_MAX; i++) { + DP_DEBUG("%s: offset=0x%08x, value=0x%08x\n", + dp_phy_aux_config_type_to_string(i), + cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + dp_write(cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + } + wmb(); /* make sure DP AUX CFG programming happened */ + + dp_write(DP_PHY_AUX_INTERRUPT_MASK_V420, 0x1F); +} + +static void dp_catalog_aux_clear_hw_int_v420(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u32 data = 0; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v420(aux); + io_data = catalog->io->dp_phy; + + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V420); + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid, reg_off = 0, mvid_off = 0, nvid_off = 0; + u32 const nvid_fixed = 0x8000; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + + if (!panel || !rate) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv_v420(panel); + io_data = catalog->io->dp_mmss_cc; + + if (panel->stream_id == DP_STREAM_1) + reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420; + + pixel_m = dp_read(MMSS_DP_PIXEL_M_V420 + reg_off); + pixel_n = dp_read(MMSS_DP_PIXEL_N_V420 + reg_off); + DP_DEBUG("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + DP_DEBUG("rate = %d\n", rate); + + if (panel->widebus_en) + mvid <<= 1; + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + io_data = catalog->io->dp_link; + + if (panel->stream_id == DP_STREAM_1) { + mvid_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID; + nvid_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID; + } + + DP_DEBUG("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write(DP_SOFTWARE_MVID + mvid_off, mvid); + dp_write(DP_SOFTWARE_NVID + nvid_off, nvid); +} + +static void dp_catalog_ctrl_phy_lane_cfg_v420(struct dp_catalog_ctrl *ctrl, + bool flipped, u8 ln_cnt) +{ + u32 info = 0x0; + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u8 orientation = BIT(!!flipped); + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v420(ctrl); + io_data = catalog->io->dp_phy; + + info |= (ln_cnt & 
0x0F); + info |= ((orientation & 0x0F) << 4); + DP_DEBUG("Shared Info = 0x%x\n", info); + + dp_write(DP_PHY_SPARE0_V420, info); +} + +static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl, + u8 v_level, u8 p_level, bool high) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u8 value0, value1; + u32 version; + + if (!ctrl || !((v_level < MAX_VOLTAGE_LEVELS) + && (p_level < MAX_PRE_EMP_LEVELS))) { + DP_ERR("invalid input\n"); + return; + } + + DP_DEBUG("hw: v=%d p=%d, high=%d\n", v_level, p_level, high); + + catalog = dp_catalog_get_priv_v420(ctrl); + + io_data = catalog->io->dp_ahb; + version = dp_read(DP_HW_VERSION); + + /* + * For DP controller versions 1.2.3 and 1.2.4 + */ + if ((version == 0x10020003) || (version == 0x10020004)) { + if (high) { + value0 = dp_swing_hbr2_hbr3[v_level][p_level]; + value1 = dp_pre_emp_hbr2_hbr3[v_level][p_level]; + } else { + value0 = dp_swing_hbr_rbr[v_level][p_level]; + value1 = dp_pre_emp_hbr_rbr[v_level][p_level]; + } + } else { + value0 = vm_voltage_swing[v_level][p_level]; + value1 = vm_pre_emphasis[v_level][p_level]; + } + + /* program default setting first */ + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL_V420, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL_V420, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + /* Enable MUX to use Cursor values from these registers */ + value0 |= BIT(5); + value1 |= BIT(5); + + /* Configure host and panel only if both values are allowed */ + if (value0 != 0xFF && value1 != 0xFF) { + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL_V420, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL_V420, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + DP_DEBUG("hw: vx_value=0x%x px_value=0x%x\n", + value0, value1); + } else { + DP_ERR("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n", + v_level, value0, p_level, value1); + } +} + +static bool dp_catalog_ctrl_wait_for_phy_ready_v420( + struct dp_catalog_private_v420 *catalog) +{ + u32 reg = DP_PHY_STATUS_V420, state; + void __iomem *base = catalog->io->dp_phy->io.base; + bool success = true; + u32 const poll_sleep_us = 500; + u32 const pll_timeout_us = 10000; + + if (readl_poll_timeout_atomic((base + reg), state, + ((state & DP_PHY_READY) > 0), + poll_sleep_us, pll_timeout_us)) { + DP_ERR("PHY status failed, status=%x\n", state); + + success = false; + } + + return success; +} + +static int dp_catalog_ctrl_late_phy_init_v420(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt, bool flipped) +{ + int rc = 0; + u32 bias0_en, drvr0_en, bias1_en, drvr1_en; + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = dp_catalog_get_priv_v420(ctrl); + + switch (lane_cnt) { + case 1: + drvr0_en = flipped ? 0x13 : 0x10; + bias0_en = flipped ? 0x3E : 0x15; + drvr1_en = flipped ? 0x10 : 0x13; + bias1_en = flipped ? 0x15 : 0x3E; + break; + case 2: + drvr0_en = flipped ? 0x10 : 0x10; + bias0_en = flipped ? 0x3F : 0x15; + drvr1_en = flipped ? 0x10 : 0x10; + bias1_en = flipped ? 
0x15 : 0x3F; + break; + case 4: + default: + drvr0_en = 0x10; + bias0_en = 0x3F; + drvr1_en = 0x10; + bias1_en = 0x3F; + break; + } + + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_HIGHZ_DRVR_EN_V420, drvr0_en); + dp_write(TXn_TRANSCEIVER_BIAS_EN_V420, bias0_en); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_HIGHZ_DRVR_EN_V420, drvr1_en); + dp_write(TXn_TRANSCEIVER_BIAS_EN_V420, bias1_en); + + io_data = catalog->io->dp_phy; + dp_write(DP_PHY_CFG, 0x18); + /* add hardware recommended delay */ + udelay(2000); + dp_write(DP_PHY_CFG, 0x19); + + /* + * Make sure all the register writes are completed before + * doing any other operation + */ + wmb(); + + if (!dp_catalog_ctrl_wait_for_phy_ready_v420(catalog)) { + rc = -EINVAL; + goto lock_err; + } + + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_POL_INV_V420, 0x0a); + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_POL_INV_V420, 0x0a); + + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL_V420, 0x27); + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL_V420, 0x27); + + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + /* Make sure the PHY register writes are done */ + wmb(); +lock_err: + return rc; +} + +static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl, + u8 ln_pnswap) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u32 cfg0, cfg1; + + catalog = dp_catalog_get_priv_v420(ctrl); + + cfg0 = 0x0a; + cfg1 = 0x0a; + + cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0; + cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2; + cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0; + cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2; + + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_POL_INV_V420, cfg0); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_POL_INV_V420, cfg1); +} + +static void dp_catalog_put_v420(struct dp_catalog *catalog) +{ + struct dp_catalog_private_v420 *catalog_priv; + + if (!catalog) + return; + + catalog_priv = container_of(catalog->sub, + struct dp_catalog_private_v420, sub); + devm_kfree(catalog_priv->dev, catalog_priv); +} + +struct dp_catalog_sub *dp_catalog_get_v420(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io) +{ + struct dp_catalog_private_v420 *catalog_priv; + + if (!dev || !catalog) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL); + if (!catalog_priv) + return ERR_PTR(-ENOMEM); + + catalog_priv->dev = dev; + catalog_priv->io = io; + catalog_priv->dpc = catalog; + + catalog_priv->sub.put = dp_catalog_put_v420; + + catalog->aux.setup = dp_catalog_aux_setup_v420; + catalog->aux.clear_hw_interrupts = dp_catalog_aux_clear_hw_int_v420; + catalog->panel.config_msa = dp_catalog_panel_config_msa_v420; + catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420; + catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420; + catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420; + catalog->ctrl.late_phy_init = dp_catalog_ctrl_late_phy_init_v420; + + return &catalog_priv->sub; +} diff --git a/techpack/display/msm/dp/dp_ctrl.c b/techpack/display/msm/dp/dp_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..88c419292c5a23d361dc83507b6d82ad52dd3c45 --- /dev/null +++ b/techpack/display/msm/dp/dp_ctrl.c @@ -0,0 +1,1465 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. 
All rights reserved. + */ + +#include +#include +#include +#include + +#include "dp_ctrl.h" +#include "dp_debug.h" + +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) + +#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) +#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3) + +#define DP_CTRL_INTR_MST_DP0_VCPF_SENT BIT(0) +#define DP_CTRL_INTR_MST_DP1_VCPF_SENT BIT(3) + +/* dp state ctrl */ +#define ST_TRAIN_PATTERN_1 BIT(0) +#define ST_TRAIN_PATTERN_2 BIT(1) +#define ST_TRAIN_PATTERN_3 BIT(2) +#define ST_TRAIN_PATTERN_4 BIT(3) +#define ST_SYMBOL_ERR_RATE_MEASUREMENT BIT(4) +#define ST_PRBS7 BIT(5) +#define ST_CUSTOM_80_BIT_PATTERN BIT(6) +#define ST_SEND_VIDEO BIT(7) +#define ST_PUSH_IDLE BIT(8) +#define MST_DP0_PUSH_VCPF BIT(12) +#define MST_DP0_FORCE_VCPF BIT(13) +#define MST_DP1_PUSH_VCPF BIT(14) +#define MST_DP1_FORCE_VCPF BIT(15) + +#define MR_LINK_TRAINING1 0x8 +#define MR_LINK_SYMBOL_ERM 0x80 +#define MR_LINK_PRBS7 0x100 +#define MR_LINK_CUSTOM80 0x200 +#define MR_LINK_TRAINING4 0x40 + +#define DP_MAX_LANES 4 + +struct dp_mst_ch_slot_info { + u32 start_slot; + u32 tot_slots; +}; + +struct dp_mst_channel_info { + struct dp_mst_ch_slot_info slot_info[DP_STREAM_MAX]; +}; + +struct dp_ctrl_private { + struct dp_ctrl dp_ctrl; + + struct device *dev; + struct dp_aux *aux; + struct dp_panel *panel; + struct dp_link *link; + struct dp_power *power; + struct dp_parser *parser; + struct dp_catalog_ctrl *catalog; + + struct completion idle_comp; + struct completion video_comp; + + bool orientation; + bool power_on; + bool mst_mode; + bool fec_mode; + bool dsc_mode; + + atomic_t aborted; + + u8 initial_lane_count; + u8 initial_bw_code; + + u32 vic; + u32 stream_count; + u32 training_2_pattern; + struct dp_mst_channel_info mst_ch_info; +}; + +enum notification_status { + NOTIFY_UNKNOWN, + NOTIFY_CONNECT, + NOTIFY_DISCONNECT, + NOTIFY_CONNECT_IRQ_HPD, + NOTIFY_DISCONNECT_IRQ_HPD, +}; + +static void dp_ctrl_idle_patterns_sent(struct dp_ctrl_private *ctrl) +{ + DP_DEBUG("idle_patterns_sent\n"); + complete(&ctrl->idle_comp); +} + +static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl) +{ + DP_DEBUG("dp_video_ready\n"); + complete(&ctrl->video_comp); +} + +static void dp_ctrl_abort(struct dp_ctrl *dp_ctrl, bool abort) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + atomic_set(&ctrl->aborted, abort); +} + +static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state) +{ + ctrl->catalog->state_ctrl(ctrl->catalog, state); +} + +static void dp_ctrl_push_idle(struct dp_ctrl_private *ctrl, + enum dp_stream_id strm) +{ + int const idle_pattern_completion_timeout_ms = HZ / 10; + u32 state = 0x0; + + if (!ctrl->power_on) + return; + + if (!ctrl->mst_mode) { + state = ST_PUSH_IDLE; + goto trigger_idle; + } + + if (strm >= DP_STREAM_MAX) { + DP_ERR("mst push idle, invalid stream:%d\n", strm); + return; + } + + state |= (strm == DP_STREAM_0) ? 
MST_DP0_PUSH_VCPF : MST_DP1_PUSH_VCPF; + +trigger_idle: + reinit_completion(&ctrl->idle_comp); + dp_ctrl_state_ctrl(ctrl, state); + + if (!wait_for_completion_timeout(&ctrl->idle_comp, + idle_pattern_completion_timeout_ms)) + DP_WARN("time out\n"); + else + DP_DEBUG("mainlink off done\n"); +} + +/** + * dp_ctrl_configure_source_link_params() - configures DP TX source params + * @ctrl: Display Port Driver data + * @enable: enable or disable DP transmitter + * + * Configures the DP transmitter source params including details such as lane + * configuration, output format and sink/panel timing information. + */ +static void dp_ctrl_configure_source_link_params(struct dp_ctrl_private *ctrl, + bool enable) +{ + if (enable) { + ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation, + ctrl->parser->l_map); + ctrl->catalog->lane_pnswap(ctrl->catalog, + ctrl->parser->l_pnswap); + ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode); + ctrl->catalog->config_ctrl(ctrl->catalog, + ctrl->link->link_params.lane_count); + ctrl->catalog->mainlink_levels(ctrl->catalog, + ctrl->link->link_params.lane_count); + ctrl->catalog->mainlink_ctrl(ctrl->catalog, true); + } else { + ctrl->catalog->mainlink_ctrl(ctrl->catalog, false); + } +} + +static void dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +{ + if (!wait_for_completion_timeout(&ctrl->video_comp, HZ / 2)) + DP_WARN("SEND_VIDEO time out\n"); +} + +static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl) +{ + int i, ret; + u8 buf[DP_MAX_LANES]; + u8 v_level = ctrl->link->phy_params.v_level; + u8 p_level = ctrl->link->phy_params.p_level; + u8 size = min_t(u8, sizeof(buf), ctrl->link->link_params.lane_count); + u32 max_level_reached = 0; + + if (v_level == DP_LINK_VOLTAGE_MAX) { + DP_DEBUG("max voltage swing level reached %d\n", v_level); + max_level_reached |= DP_TRAIN_MAX_SWING_REACHED; + } + + if (p_level == DP_LINK_PRE_EMPHASIS_MAX) { + DP_DEBUG("max pre-emphasis level reached %d\n", p_level); + max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } + + p_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + for (i = 0; i < size; i++) + buf[i] = v_level | p_level | max_level_reached; + + DP_DEBUG("lanes: %d, swing: 0x%x, pre-emp: 0x%x\n", + size, v_level, p_level); + + ret = drm_dp_dpcd_write(ctrl->aux->drm_aux, + DP_TRAINING_LANE0_SET, buf, size); + + return ret <= 0 ? -EINVAL : 0; +} + +static void dp_ctrl_update_hw_vx_px(struct dp_ctrl_private *ctrl) +{ + struct dp_link *link = ctrl->link; + bool high = false; + + if (ctrl->link->link_params.bw_code == DP_LINK_BW_5_4 || + ctrl->link->link_params.bw_code == DP_LINK_BW_8_1) + high = true; + + ctrl->catalog->update_vx_px(ctrl->catalog, + link->phy_params.v_level, link->phy_params.p_level, high); +} + +static int dp_ctrl_update_sink_pattern(struct dp_ctrl_private *ctrl, u8 pattern) +{ + u8 buf = pattern; + int ret; + + DP_DEBUG("sink: pattern=%x\n", pattern); + + if (pattern && pattern != DP_TRAINING_PATTERN_4) + buf |= DP_LINK_SCRAMBLING_DISABLE; + + ret = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, + DP_TRAINING_PATTERN_SET, buf); + + return ret <= 0 ? 
-EINVAL : 0; +} + +static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + int ret = 0, len; + u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS; + u32 link_status_read_max_retries = 100; + + while (--link_status_read_max_retries) { + len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux, + link_status); + if (len != DP_LINK_STATUS_SIZE) { + DP_ERR("DP link status read failed, err: %d\n", len); + ret = len; + break; + } + + if (!(link_status[offset] & DP_LINK_STATUS_UPDATED)) + break; + } + + return ret; +} + +static int dp_ctrl_lane_count_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = -EAGAIN; + u8 lanes = ctrl->link->link_params.lane_count; + + if (ctrl->panel->link_info.revision != 0x14) + return -EINVAL; + + switch (lanes) { + case 4: + ctrl->link->link_params.lane_count = 2; + break; + case 2: + ctrl->link->link_params.lane_count = 1; + break; + default: + if (lanes != ctrl->initial_lane_count) + ret = -EINVAL; + break; + } + + DP_DEBUG("new lane count=%d\n", ctrl->link->link_params.lane_count); + + return ret; +} + +static bool dp_ctrl_is_link_rate_rbr(struct dp_ctrl_private *ctrl) +{ + return ctrl->link->link_params.bw_code == DP_LINK_BW_1_62; +} + +static u8 dp_ctrl_get_active_lanes(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + u8 lane, count = 0; + + for (lane = 0; lane < ctrl->link->link_params.lane_count; lane++) { + if (link_status[lane / 2] & (1 << (lane * 4))) + count++; + else + break; + } + + return count; +} + +static int dp_ctrl_link_training_1(struct dp_ctrl_private *ctrl) +{ + int tries, old_v_level, ret = -EINVAL; + u8 link_status[DP_LINK_STATUS_SIZE]; + u8 pattern = 0; + int const maximum_retries = 5; + + ctrl->aux->state &= ~DP_STATE_TRAIN_1_FAILED; + ctrl->aux->state &= ~DP_STATE_TRAIN_1_SUCCEEDED; + ctrl->aux->state |= DP_STATE_TRAIN_1_STARTED; + + dp_ctrl_state_ctrl(ctrl, 0); + /* Make sure to clear the current pattern before starting a new one */ + wmb(); + + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + while (!atomic_read(&ctrl->aborted)) { + /* update hardware with current swing/pre-emp values */ + dp_ctrl_update_hw_vx_px(ctrl); + + if (!pattern) { + pattern = DP_TRAINING_PATTERN_1; + + ctrl->catalog->set_pattern(ctrl->catalog, pattern); + + /* update sink with current settings */ + ret = dp_ctrl_update_sink_pattern(ctrl, pattern); + if (ret) + break; + } + + ret = dp_ctrl_update_sink_vx_px(ctrl); + if (ret) + break; + + drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + break; + + if (!drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.lane_count)) + ret = -EINVAL; + else + break; + + if (ctrl->link->phy_params.v_level == DP_LINK_VOLTAGE_MAX) { + pr_err_ratelimited("max v_level reached\n"); + break; + } + + if (old_v_level == ctrl->link->phy_params.v_level) { + if (++tries >= maximum_retries) { + DP_ERR("max tries reached\n"); + ret = -ETIMEDOUT; + break; + } + } else { + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + } + + DP_DEBUG("clock recovery not done, adjusting vx px\n"); + + ctrl->link->adjust_levels(ctrl->link, link_status); + } + + if (ret && dp_ctrl_is_link_rate_rbr(ctrl)) { + u8 active_lanes = dp_ctrl_get_active_lanes(ctrl, link_status); + + if (active_lanes) { + ctrl->link->link_params.lane_count = active_lanes; + ctrl->link->link_params.bw_code = ctrl->initial_bw_code; + + /* retry with new settings */ + ret = -EAGAIN; + } + } + + ctrl->aux->state &= 
~DP_STATE_TRAIN_1_STARTED; + + if (ret) + ctrl->aux->state |= DP_STATE_TRAIN_1_FAILED; + else + ctrl->aux->state |= DP_STATE_TRAIN_1_SUCCEEDED; + + return ret; +} + +static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (!ctrl) + return -EINVAL; + + switch (ctrl->link->link_params.bw_code) { + case DP_LINK_BW_8_1: + ctrl->link->link_params.bw_code = DP_LINK_BW_5_4; + break; + case DP_LINK_BW_5_4: + ctrl->link->link_params.bw_code = DP_LINK_BW_2_7; + break; + case DP_LINK_BW_2_7: + case DP_LINK_BW_1_62: + default: + ctrl->link->link_params.bw_code = DP_LINK_BW_1_62; + break; + } + + DP_DEBUG("new bw code=0x%x\n", ctrl->link->link_params.bw_code); + + return ret; +} + +static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +{ + dp_ctrl_update_sink_pattern(ctrl, 0); + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); +} + +static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl) +{ + int tries = 0, ret = -EINVAL; + u8 dpcd_pattern, pattern = 0; + int const maximum_retries = 5; + u8 link_status[DP_LINK_STATUS_SIZE]; + + ctrl->aux->state &= ~DP_STATE_TRAIN_2_FAILED; + ctrl->aux->state &= ~DP_STATE_TRAIN_2_SUCCEEDED; + ctrl->aux->state |= DP_STATE_TRAIN_2_STARTED; + + dp_ctrl_state_ctrl(ctrl, 0); + /* Make sure to clear the current pattern before starting a new one */ + wmb(); + + dpcd_pattern = ctrl->training_2_pattern; + + while (!atomic_read(&ctrl->aborted)) { + /* update hardware with current swing/pre-emp values */ + dp_ctrl_update_hw_vx_px(ctrl); + + if (!pattern) { + pattern = dpcd_pattern; + + /* program hw to send pattern */ + ctrl->catalog->set_pattern(ctrl->catalog, pattern); + + /* update sink with current pattern */ + ret = dp_ctrl_update_sink_pattern(ctrl, pattern); + if (ret) + break; + } + + ret = dp_ctrl_update_sink_vx_px(ctrl); + if (ret) + break; + + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + break; + + /* check if CR bits still remain set */ + if (!drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.lane_count)) { + ret = -EINVAL; + break; + } + + if (!drm_dp_channel_eq_ok(link_status, + ctrl->link->link_params.lane_count)) + ret = -EINVAL; + else + break; + + if (tries >= maximum_retries) { + ret = dp_ctrl_lane_count_down_shift(ctrl); + break; + } + tries++; + + ctrl->link->adjust_levels(ctrl->link, link_status); + } + + ctrl->aux->state &= ~DP_STATE_TRAIN_2_STARTED; + + if (ret) + ctrl->aux->state |= DP_STATE_TRAIN_2_FAILED; + else + ctrl->aux->state |= DP_STATE_TRAIN_2_SUCCEEDED; + return ret; +} + +static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + u8 const encoding = 0x1, downspread = 0x00; + struct drm_dp_link link_info = {0}; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + link_info.num_lanes = ctrl->link->link_params.lane_count; + link_info.rate = drm_dp_bw_code_to_link_rate( + ctrl->link->link_params.bw_code); + link_info.capabilities = ctrl->panel->link_info.capabilities; + + ret = drm_dp_link_configure(ctrl->aux->drm_aux, &link_info); + if (ret) + goto end; + + ret = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, + DP_DOWNSPREAD_CTRL, downspread); + if (ret <= 0) { + ret = -EINVAL; + goto end; + } + + ret = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, + DP_MAIN_LINK_CHANNEL_CODING_SET, encoding); + if (ret <= 0) { + ret = -EINVAL; + goto end; + } + + ret = dp_ctrl_link_training_1(ctrl); + if (ret) { + DP_ERR("link training #1 failed\n"); + goto end; + } + 
+ /* print success info as this is a result of user initiated action */ + DP_INFO("link training #1 successful\n"); + + ret = dp_ctrl_link_training_2(ctrl); + if (ret) { + DP_ERR("link training #2 failed\n"); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DP_INFO("link training #2 successful\n"); + +end: + dp_ctrl_state_ctrl(ctrl, 0); + /* Make sure to clear the current pattern before starting a new one */ + wmb(); + + dp_ctrl_clear_training_pattern(ctrl); + return ret; +} + +static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + goto end; + + /* + * As part of previous calls, DP controller state might have + * transitioned to PUSH_IDLE. In order to start transmitting a link + * training pattern, we have to first to a DP software reset. + */ + ctrl->catalog->reset(ctrl->catalog); + + if (ctrl->fec_mode) + drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_FEC_CONFIGURATION, + 0x01); + + ret = dp_ctrl_link_train(ctrl); + +end: + return ret; +} + +static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl, + char *name, enum dp_pm_type clk_type, u32 rate) +{ + u32 num = ctrl->parser->mp[clk_type].num_clk; + struct dss_clk *cfg = ctrl->parser->mp[clk_type].clk_config; + + while (num && strcmp(cfg->clk_name, name)) { + num--; + cfg++; + } + + DP_DEBUG("setting rate=%d on clk=%s\n", rate, name); + + if (num) + cfg->rate = rate; + else + DP_ERR("%s clock could not be set with rate %d\n", name, rate); +} + +static int dp_ctrl_enable_link_clock(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + u32 rate = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code); + enum dp_pm_type type = DP_LINK_PM; + + DP_DEBUG("rate=%d\n", rate); + + dp_ctrl_set_clock_rate(ctrl, "link_clk", type, rate); + + ret = ctrl->power->clk_enable(ctrl->power, type, true); + if (ret) { + DP_ERR("Unabled to start link clocks\n"); + ret = -EINVAL; + } + + return ret; +} + +static void dp_ctrl_disable_link_clock(struct dp_ctrl_private *ctrl) +{ + ctrl->power->clk_enable(ctrl->power, DP_LINK_PM, false); +} + +static void dp_ctrl_select_training_pattern(struct dp_ctrl_private *ctrl, + bool downgrade) +{ + u32 pattern; + + if (drm_dp_tps4_supported(ctrl->panel->dpcd)) + pattern = DP_TRAINING_PATTERN_4; + else if (drm_dp_tps3_supported(ctrl->panel->dpcd)) + pattern = DP_TRAINING_PATTERN_3; + else + pattern = DP_TRAINING_PATTERN_2; + + if (!downgrade) + goto end; + + switch (pattern) { + case DP_TRAINING_PATTERN_4: + pattern = DP_TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_3: + pattern = DP_TRAINING_PATTERN_2; + break; + default: + break; + } +end: + ctrl->training_2_pattern = pattern; +} + +static int dp_ctrl_link_setup(struct dp_ctrl_private *ctrl, bool shallow) +{ + int rc = -EINVAL; + bool downgrade = false; + u32 link_train_max_retries = 100; + struct dp_catalog_ctrl *catalog; + struct dp_link_params *link_params; + + catalog = ctrl->catalog; + link_params = &ctrl->link->link_params; + + catalog->phy_lane_cfg(catalog, ctrl->orientation, + link_params->lane_count); + + while (1) { + DP_DEBUG("bw_code=%d, lane_count=%d\n", + link_params->bw_code, link_params->lane_count); + + rc = dp_ctrl_enable_link_clock(ctrl); + if (rc) + break; + + ctrl->catalog->late_phy_init(ctrl->catalog, + ctrl->link->link_params.lane_count, + ctrl->orientation); + + dp_ctrl_configure_source_link_params(ctrl, true); + + if (!(--link_train_max_retries % 10)) { + struct dp_link_params *link = 
&ctrl->link->link_params; + + link->lane_count = ctrl->initial_lane_count; + link->bw_code = ctrl->initial_bw_code; + downgrade = true; + } + + dp_ctrl_select_training_pattern(ctrl, downgrade); + + rc = dp_ctrl_setup_main_link(ctrl); + if (!rc) + break; + + /* + * Shallow means link training failure is not important. + * If it fails, we still keep the link clocks on. + * In this mode, the system expects DP to be up + * even though the cable is removed. Disconnect interrupt + * will eventually trigger and shutdown DP. + */ + if (shallow) { + rc = 0; + break; + } + + if (!link_train_max_retries || atomic_read(&ctrl->aborted)) { + dp_ctrl_disable_link_clock(ctrl); + break; + } + + if (rc != -EAGAIN) + dp_ctrl_link_rate_down_shift(ctrl); + + dp_ctrl_configure_source_link_params(ctrl, false); + dp_ctrl_disable_link_clock(ctrl); + + /* hw recommended delays before retrying link training */ + msleep(20); + } + + return rc; +} + +static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl, + struct dp_panel *dp_panel) +{ + int ret = 0; + u32 pclk; + enum dp_pm_type clk_type; + char clk_name[32] = ""; + + ret = ctrl->power->set_pixel_clk_parent(ctrl->power, + dp_panel->stream_id); + + if (ret) + return ret; + + if (dp_panel->stream_id == DP_STREAM_0) { + clk_type = DP_STREAM0_PM; + strlcpy(clk_name, "strm0_pixel_clk", 32); + } else if (dp_panel->stream_id == DP_STREAM_1) { + clk_type = DP_STREAM1_PM; + strlcpy(clk_name, "strm1_pixel_clk", 32); + } else { + DP_ERR("Invalid stream:%d for clk enable\n", + dp_panel->stream_id); + return -EINVAL; + } + + pclk = dp_panel->pinfo.widebus_en ? + (dp_panel->pinfo.pixel_clk_khz >> 1) : + (dp_panel->pinfo.pixel_clk_khz); + + dp_ctrl_set_clock_rate(ctrl, clk_name, clk_type, pclk); + + ret = ctrl->power->clk_enable(ctrl->power, clk_type, true); + if (ret) { + DP_ERR("Unabled to start stream:%d clocks\n", + dp_panel->stream_id); + ret = -EINVAL; + } + + return ret; +} + +static int dp_ctrl_disable_stream_clocks(struct dp_ctrl_private *ctrl, + struct dp_panel *dp_panel) +{ + int ret = 0; + + if (dp_panel->stream_id == DP_STREAM_0) { + return ctrl->power->clk_enable(ctrl->power, + DP_STREAM0_PM, false); + } else if (dp_panel->stream_id == DP_STREAM_1) { + return ctrl->power->clk_enable(ctrl->power, + DP_STREAM1_PM, false); + } else { + DP_ERR("Invalid stream:%d for clk disable\n", + dp_panel->stream_id); + ret = -EINVAL; + } + return ret; +} +static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset) +{ + struct dp_ctrl_private *ctrl; + struct dp_catalog_ctrl *catalog; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return -EINVAL; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->orientation = flip; + catalog = ctrl->catalog; + + if (reset) { + catalog->usb_reset(ctrl->catalog, flip); + catalog->phy_reset(ctrl->catalog); + } + catalog->enable_irq(ctrl->catalog, true); + atomic_set(&ctrl->aborted, 0); + + return 0; +} + +/** + * dp_ctrl_host_deinit() - Uninitialize DP controller + * @ctrl: Display Port Driver data + * + * Perform required steps to uninitialize DP controller + * and its resources. 
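+ *
+ * Only the controller interrupts are disabled here; the main link and
+ * its clocks are torn down separately via dp_ctrl_off().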
+ */ +static void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->catalog->enable_irq(ctrl->catalog, false); + + DP_DEBUG("Host deinitialized successfully\n"); +} + +static void dp_ctrl_send_video(struct dp_ctrl_private *ctrl) +{ + ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO); +} + +static int dp_ctrl_link_maintenance(struct dp_ctrl *dp_ctrl) +{ + int ret = 0; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return -EINVAL; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_COMPLETED; + ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_FAILED; + + if (!ctrl->power_on) { + DP_ERR("ctrl off\n"); + ret = -EINVAL; + goto end; + } + + if (atomic_read(&ctrl->aborted)) + goto end; + + ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_STARTED; + ret = dp_ctrl_setup_main_link(ctrl); + ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_STARTED; + + if (ret) { + ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_FAILED; + goto end; + } + + ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_COMPLETED; + + if (ctrl->stream_count) { + dp_ctrl_send_video(ctrl); + dp_ctrl_wait4video_ready(ctrl); + } +end: + return ret; +} + +static void dp_ctrl_process_phy_test_request(struct dp_ctrl *dp_ctrl) +{ + int ret = 0; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->link->phy_params.phy_test_pattern_sel) { + DP_DEBUG("no test pattern selected by sink\n"); + return; + } + + DP_DEBUG("start\n"); + + /* + * The global reset will need DP link ralated clocks to be + * running. Add the global reset just before disabling the + * link clocks and core clocks. + */ + ctrl->catalog->reset(ctrl->catalog); + ctrl->dp_ctrl.stream_pre_off(&ctrl->dp_ctrl, ctrl->panel); + ctrl->dp_ctrl.stream_off(&ctrl->dp_ctrl, ctrl->panel); + ctrl->dp_ctrl.off(&ctrl->dp_ctrl); + + ctrl->aux->init(ctrl->aux, ctrl->parser->aux_cfg); + + ret = ctrl->dp_ctrl.on(&ctrl->dp_ctrl, ctrl->mst_mode, + ctrl->fec_mode, ctrl->dsc_mode, false); + if (ret) + DP_ERR("failed to enable DP controller\n"); + + ctrl->dp_ctrl.stream_on(&ctrl->dp_ctrl, ctrl->panel); + DP_DEBUG("end\n"); +} + +static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +{ + bool success = false; + u32 pattern_sent = 0x0; + u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel; + + dp_ctrl_update_hw_vx_px(ctrl); + ctrl->catalog->send_phy_pattern(ctrl->catalog, pattern_requested); + dp_ctrl_update_sink_vx_px(ctrl); + ctrl->link->send_test_response(ctrl->link); + + pattern_sent = ctrl->catalog->read_phy_pattern(ctrl->catalog); + DP_DEBUG("pattern_request: %s. 
pattern_sent: 0x%x\n", + dp_link_get_phy_test_pattern(pattern_requested), + pattern_sent); + + switch (pattern_sent) { + case MR_LINK_TRAINING1: + if (pattern_requested == + DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING) + success = true; + break; + case MR_LINK_SYMBOL_ERM: + if ((pattern_requested == + DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT) + || (pattern_requested == + DP_TEST_PHY_PATTERN_CP2520_PATTERN_1)) + success = true; + break; + case MR_LINK_PRBS7: + if (pattern_requested == DP_TEST_PHY_PATTERN_PRBS7) + success = true; + break; + case MR_LINK_CUSTOM80: + if (pattern_requested == + DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN) + success = true; + break; + case MR_LINK_TRAINING4: + if (pattern_requested == + DP_TEST_PHY_PATTERN_CP2520_PATTERN_3) + success = true; + break; + default: + success = false; + break; + } + + DP_DEBUG("%s: %s\n", success ? "success" : "failed", + dp_link_get_phy_test_pattern(pattern_requested)); +} + +static void dp_ctrl_mst_calculate_rg(struct dp_ctrl_private *ctrl, + struct dp_panel *panel, u32 *p_x_int, u32 *p_y_frac_enum) +{ + u64 min_slot_cnt, max_slot_cnt; + u64 raw_target_sc, target_sc_fixp; + u64 ts_denom, ts_enum, ts_int; + u64 pclk = panel->pinfo.pixel_clk_khz; + u64 lclk = 0; + u64 lanes = ctrl->link->link_params.lane_count; + u64 bpp = panel->pinfo.bpp; + u64 pbn = panel->pbn; + u64 numerator, denominator, temp, temp1, temp2; + u32 x_int = 0, y_frac_enum = 0; + u64 target_strm_sym, ts_int_fixp, ts_frac_fixp, y_frac_enum_fixp; + + lclk = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code); + if (panel->pinfo.comp_info.comp_ratio) + bpp = panel->pinfo.comp_info.dsc_info.bpp; + + /* min_slot_cnt */ + numerator = pclk * bpp * 64 * 1000; + denominator = lclk * lanes * 8 * 1000; + min_slot_cnt = drm_fixp_from_fraction(numerator, denominator); + + /* max_slot_cnt */ + numerator = pbn * 54 * 1000; + denominator = lclk * lanes; + max_slot_cnt = drm_fixp_from_fraction(numerator, denominator); + + /* raw_target_sc */ + numerator = max_slot_cnt + min_slot_cnt; + denominator = drm_fixp_from_fraction(2, 1); + raw_target_sc = drm_fixp_div(numerator, denominator); + + DP_DEBUG("raw_target_sc before overhead:0x%llx\n", raw_target_sc); + DP_DEBUG("dsc_overhead_fp:0x%llx\n", panel->pinfo.dsc_overhead_fp); + + /* apply fec and dsc overhead factor */ + if (panel->pinfo.dsc_overhead_fp) + raw_target_sc = drm_fixp_mul(raw_target_sc, + panel->pinfo.dsc_overhead_fp); + + if (panel->fec_overhead_fp) + raw_target_sc = drm_fixp_mul(raw_target_sc, + panel->fec_overhead_fp); + + DP_DEBUG("raw_target_sc after overhead:0x%llx\n", raw_target_sc); + + /* target_sc */ + temp = drm_fixp_from_fraction(256 * lanes, 1); + numerator = drm_fixp_mul(raw_target_sc, temp); + denominator = drm_fixp_from_fraction(256 * lanes, 1); + target_sc_fixp = drm_fixp_div(numerator, denominator); + + ts_enum = 256 * lanes; + ts_denom = drm_fixp_from_fraction(256 * lanes, 1); + ts_int = drm_fixp2int(target_sc_fixp); + + temp = drm_fixp2int_ceil(raw_target_sc); + if (temp != ts_int) { + temp = drm_fixp_from_fraction(ts_int, 1); + temp1 = raw_target_sc - temp; + temp2 = drm_fixp_mul(temp1, ts_denom); + ts_enum = drm_fixp2int(temp2); + } + + /* target_strm_sym */ + ts_int_fixp = drm_fixp_from_fraction(ts_int, 1); + ts_frac_fixp = drm_fixp_from_fraction(ts_enum, drm_fixp2int(ts_denom)); + temp = ts_int_fixp + ts_frac_fixp; + temp1 = drm_fixp_from_fraction(lanes, 1); + target_strm_sym = drm_fixp_mul(temp, temp1); + + /* x_int */ + x_int = drm_fixp2int(target_strm_sym); + + /* y_enum_frac */ + temp = 
drm_fixp_from_fraction(x_int, 1); + temp1 = target_strm_sym - temp; + temp2 = drm_fixp_from_fraction(256, 1); + y_frac_enum_fixp = drm_fixp_mul(temp1, temp2); + + temp1 = drm_fixp2int(y_frac_enum_fixp); + temp2 = drm_fixp2int_ceil(y_frac_enum_fixp); + + y_frac_enum = (u32)((temp1 == temp2) ? temp1 : temp1 + 1); + + panel->mst_target_sc = raw_target_sc; + *p_x_int = x_int; + *p_y_frac_enum = y_frac_enum; + + DP_DEBUG("x_int: %d, y_frac_enum: %d\n", x_int, y_frac_enum); +} + +static int dp_ctrl_mst_send_act(struct dp_ctrl_private *ctrl) +{ + bool act_complete; + + if (!ctrl->mst_mode) + return 0; + + ctrl->catalog->trigger_act(ctrl->catalog); + msleep(20); /* needs 1 frame time */ + + ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete); + + if (!act_complete) + DP_ERR("mst act trigger complete failed\n"); + else + DP_MST_DEBUG("mst ACT trigger complete SUCCESS\n"); + + return 0; +} + +static void dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl, + struct dp_panel *panel) +{ + u32 x_int, y_frac_enum, lanes, bw_code; + int i; + + if (!ctrl->mst_mode) + return; + + DP_MST_DEBUG("mst stream channel allocation\n"); + + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + ctrl->catalog->channel_alloc(ctrl->catalog, + i, + ctrl->mst_ch_info.slot_info[i].start_slot, + ctrl->mst_ch_info.slot_info[i].tot_slots); + } + + lanes = ctrl->link->link_params.lane_count; + bw_code = ctrl->link->link_params.bw_code; + + dp_ctrl_mst_calculate_rg(ctrl, panel, &x_int, &y_frac_enum); + + ctrl->catalog->update_rg(ctrl->catalog, panel->stream_id, + x_int, y_frac_enum); + + DP_MST_DEBUG("mst stream:%d, start_slot:%d, tot_slots:%d\n", + panel->stream_id, + panel->channel_start_slot, panel->channel_total_slots); + + DP_MST_DEBUG("mst lane_cnt:%d, bw:%d, x_int:%d, y_frac:%d\n", + lanes, bw_code, x_int, y_frac_enum); +} + +static void dp_ctrl_fec_dsc_setup(struct dp_ctrl_private *ctrl) +{ + u8 fec_sts = 0; + int rlen; + u32 dsc_enable; + + if (!ctrl->fec_mode) + return; + + ctrl->catalog->fec_config(ctrl->catalog, ctrl->fec_mode); + + /* wait for controller to start fec sequence */ + usleep_range(900, 1000); + drm_dp_dpcd_readb(ctrl->aux->drm_aux, DP_FEC_STATUS, &fec_sts); + DP_DEBUG("sink fec status:%d\n", fec_sts); + + dsc_enable = ctrl->dsc_mode ? 1 : 0; + rlen = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_DSC_ENABLE, + dsc_enable); + if (rlen < 1) + DP_DEBUG("failed to enable sink dsc\n"); +} + +static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) +{ + int rc = 0; + bool link_ready = false; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || !panel) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->power_on) { + DP_ERR("ctrl off\n"); + return -EINVAL; + } + + rc = dp_ctrl_enable_stream_clocks(ctrl, panel); + if (rc) { + DP_ERR("failure on stream clock enable\n"); + return rc; + } + + rc = panel->hw_cfg(panel, true); + if (rc) + return rc; + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + dp_ctrl_send_phy_test_pattern(ctrl); + return 0; + } + + dp_ctrl_mst_stream_setup(ctrl, panel); + + dp_ctrl_send_video(ctrl); + + dp_ctrl_mst_send_act(ctrl); + + dp_ctrl_wait4video_ready(ctrl); + + ctrl->stream_count++; + + link_ready = ctrl->catalog->mainlink_ready(ctrl->catalog); + DP_DEBUG("mainlink %s\n", link_ready ? 
"READY" : "NOT READY"); + + /* wait for link training completion before fec config as per spec */ + dp_ctrl_fec_dsc_setup(ctrl); + + return rc; +} + +static void dp_ctrl_mst_stream_pre_off(struct dp_ctrl *dp_ctrl, + struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + bool act_complete; + int i; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->mst_mode) + return; + + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + ctrl->catalog->channel_alloc(ctrl->catalog, + i, + ctrl->mst_ch_info.slot_info[i].start_slot, + ctrl->mst_ch_info.slot_info[i].tot_slots); + } + + ctrl->catalog->trigger_act(ctrl->catalog); + msleep(20); /* needs 1 frame time */ + ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete); + + if (!act_complete) + DP_ERR("mst stream_off act trigger complete failed\n"); + else + DP_MST_DEBUG("mst stream_off ACT trigger complete SUCCESS\n"); +} + +static void dp_ctrl_stream_pre_off(struct dp_ctrl *dp_ctrl, + struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || !panel) { + DP_ERR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + dp_ctrl_push_idle(ctrl, panel->stream_id); + + dp_ctrl_mst_stream_pre_off(dp_ctrl, panel); +} + +static void dp_ctrl_stream_off(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || !panel) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->power_on) + return; + + panel->hw_cfg(panel, false); + + dp_ctrl_disable_stream_clocks(ctrl, panel); + ctrl->stream_count--; +} + +static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode, + bool fec_mode, bool dsc_mode, bool shallow) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + u32 rate = 0; + + if (!dp_ctrl) { + rc = -EINVAL; + goto end; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (ctrl->power_on) + goto end; + + if (atomic_read(&ctrl->aborted)) { + rc = -EPERM; + goto end; + } + + ctrl->mst_mode = mst_mode; + if (fec_mode) { + ctrl->fec_mode = fec_mode; + ctrl->dsc_mode = dsc_mode; + } + + rate = ctrl->panel->link_info.rate; + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + DP_DEBUG("using phy test link parameters\n"); + } else { + ctrl->link->link_params.bw_code = + drm_dp_link_rate_to_bw_code(rate); + ctrl->link->link_params.lane_count = + ctrl->panel->link_info.num_lanes; + } + + DP_DEBUG("bw_code=%d, lane_count=%d\n", + ctrl->link->link_params.bw_code, + ctrl->link->link_params.lane_count); + + /* backup initial lane count and bw code */ + ctrl->initial_lane_count = ctrl->link->link_params.lane_count; + ctrl->initial_bw_code = ctrl->link->link_params.bw_code; + + rc = dp_ctrl_link_setup(ctrl, shallow); + if (!rc) + ctrl->power_on = true; +end: + return rc; +} + +static void dp_ctrl_off(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->power_on) + return; + + ctrl->catalog->fec_config(ctrl->catalog, false); + dp_ctrl_configure_source_link_params(ctrl, false); + ctrl->catalog->reset(ctrl->catalog); + + /* Make sure DP is disabled before clk disable */ + wmb(); + + dp_ctrl_disable_link_clock(ctrl); + + ctrl->mst_mode = false; + ctrl->fec_mode = false; + ctrl->dsc_mode = false; + ctrl->power_on = false; + memset(&ctrl->mst_ch_info, 0, sizeof(ctrl->mst_ch_info)); + DP_DEBUG("DP off done\n"); +} + +static void 
dp_ctrl_set_mst_channel_info(struct dp_ctrl *dp_ctrl, + enum dp_stream_id strm, + u32 start_slot, u32 tot_slots) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || strm >= DP_STREAM_MAX) { + DP_ERR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->mst_ch_info.slot_info[strm].start_slot = start_slot; + ctrl->mst_ch_info.slot_info[strm].tot_slots = tot_slots; +} + +static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->catalog->get_interrupt(ctrl->catalog); + + if (ctrl->catalog->isr & DP_CTRL_INTR_READY_FOR_VIDEO) + dp_ctrl_video_ready(ctrl); + + if (ctrl->catalog->isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) + dp_ctrl_idle_patterns_sent(ctrl); + + if (ctrl->catalog->isr5 & DP_CTRL_INTR_MST_DP0_VCPF_SENT) + dp_ctrl_idle_patterns_sent(ctrl); + + if (ctrl->catalog->isr5 & DP_CTRL_INTR_MST_DP1_VCPF_SENT) + dp_ctrl_idle_patterns_sent(ctrl); +} + +struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + struct dp_ctrl *dp_ctrl; + + if (!in->dev || !in->panel || !in->aux || + !in->link || !in->catalog) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + ctrl = devm_kzalloc(in->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + rc = -ENOMEM; + goto error; + } + + init_completion(&ctrl->idle_comp); + init_completion(&ctrl->video_comp); + + /* in parameters */ + ctrl->parser = in->parser; + ctrl->panel = in->panel; + ctrl->power = in->power; + ctrl->aux = in->aux; + ctrl->link = in->link; + ctrl->catalog = in->catalog; + ctrl->dev = in->dev; + ctrl->mst_mode = false; + ctrl->fec_mode = false; + + dp_ctrl = &ctrl->dp_ctrl; + + /* out parameters */ + dp_ctrl->init = dp_ctrl_host_init; + dp_ctrl->deinit = dp_ctrl_host_deinit; + dp_ctrl->on = dp_ctrl_on; + dp_ctrl->off = dp_ctrl_off; + dp_ctrl->abort = dp_ctrl_abort; + dp_ctrl->isr = dp_ctrl_isr; + dp_ctrl->link_maintenance = dp_ctrl_link_maintenance; + dp_ctrl->process_phy_test_request = dp_ctrl_process_phy_test_request; + dp_ctrl->stream_on = dp_ctrl_stream_on; + dp_ctrl->stream_off = dp_ctrl_stream_off; + dp_ctrl->stream_pre_off = dp_ctrl_stream_pre_off; + dp_ctrl->set_mst_channel_info = dp_ctrl_set_mst_channel_info; + + return dp_ctrl; +error: + return ERR_PTR(rc); +} + +void dp_ctrl_put(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + devm_kfree(ctrl->dev, ctrl); +} diff --git a/techpack/display/msm/dp/dp_ctrl.h b/techpack/display/msm/dp/dp_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..a2e20e26fe3041e37cc7eec868ae1efbb1595baf --- /dev/null +++ b/techpack/display/msm/dp/dp_ctrl.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_CTRL_H_ +#define _DP_CTRL_H_ + +#include "dp_aux.h" +#include "dp_panel.h" +#include "dp_link.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" + +struct dp_ctrl { + int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset); + void (*deinit)(struct dp_ctrl *dp_ctrl); + int (*on)(struct dp_ctrl *dp_ctrl, bool mst_mode, bool fec_en, + bool dsc_en, bool shallow); + void (*off)(struct dp_ctrl *dp_ctrl); + void (*abort)(struct dp_ctrl *dp_ctrl, bool abort); + void (*isr)(struct dp_ctrl *dp_ctrl); + bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl); + void (*process_phy_test_request)(struct dp_ctrl *dp_ctrl); + int (*link_maintenance)(struct dp_ctrl *dp_ctrl); + int (*stream_on)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*stream_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*stream_pre_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*set_mst_channel_info)(struct dp_ctrl *dp_ctrl, + enum dp_stream_id strm, + u32 ch_start_slot, u32 ch_tot_slots); +}; + +struct dp_ctrl_in { + struct device *dev; + struct dp_panel *panel; + struct dp_aux *aux; + struct dp_link *link; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog_ctrl *catalog; +}; + +struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in); +void dp_ctrl_put(struct dp_ctrl *dp_ctrl); + +#endif /* _DP_CTRL_H_ */ diff --git a/techpack/display/msm/dp/dp_debug.c b/techpack/display/msm/dp/dp_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..0bccc7bf017a5c3021a80af0f6afe459a88a52c1 --- /dev/null +++ b/techpack/display/msm/dp/dp_debug.c @@ -0,0 +1,2331 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include + +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_debug.h" +#include "drm_connector.h" +#include "sde_connector.h" +#include "dp_display.h" + +#define DEBUG_NAME "drm_dp" + +struct dp_debug_private { + struct dentry *root; + u8 *edid; + u32 edid_size; + + u8 *dpcd; + u32 dpcd_size; + + u32 mst_con_id; + bool hotplug; + + char exe_mode[SZ_32]; + char reg_dump[SZ_32]; + + struct dp_hpd *hpd; + struct dp_link *link; + struct dp_panel *panel; + struct dp_aux *aux; + struct dp_catalog *catalog; + struct drm_connector **connector; + struct device *dev; + struct dp_debug dp_debug; + struct dp_parser *parser; + struct dp_ctrl *ctrl; + struct mutex lock; +}; + +static int dp_debug_get_edid_buf(struct dp_debug_private *debug) +{ + int rc = 0; + + if (!debug->edid) { + debug->edid = devm_kzalloc(debug->dev, SZ_256, GFP_KERNEL); + if (!debug->edid) { + rc = -ENOMEM; + goto end; + } + + debug->edid_size = SZ_256; + } +end: + return rc; +} + +static int dp_debug_get_dpcd_buf(struct dp_debug_private *debug) +{ + int rc = 0; + + if (!debug->dpcd) { + debug->dpcd = devm_kzalloc(debug->dev, SZ_4K, GFP_KERNEL); + if (!debug->dpcd) { + rc = -ENOMEM; + goto end; + } + + debug->dpcd_size = SZ_4K; + } +end: + return rc; +} + +static ssize_t dp_debug_write_edid(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + u8 *buf = NULL, *buf_t = NULL, *edid = NULL; + const int char_to_nib = 2; + size_t edid_size = 0; + size_t size = 0, edid_buf_index = 0; + ssize_t rc = count; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto bail; + + size = min_t(size_t, count, SZ_1K); + + buf = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto bail; + } + + if (copy_from_user(buf, user_buff, size)) + goto bail; + + edid_size = size / char_to_nib; + buf_t = buf; + + if (dp_debug_get_edid_buf(debug)) + goto bail; + + if (edid_size != debug->edid_size) { + DP_DEBUG("realloc debug edid\n"); + + if (debug->edid) { + devm_kfree(debug->dev, debug->edid); + + debug->edid = devm_kzalloc(debug->dev, + edid_size, GFP_KERNEL); + if (!debug->edid) { + rc = -ENOMEM; + goto bail; + } + + debug->edid_size = edid_size; + + debug->aux->set_sim_mode(debug->aux, + debug->dp_debug.sim_mode, + debug->edid, debug->dpcd); + } + } + + while (edid_size--) { + char t[3]; + int d; + + memcpy(t, buf_t, sizeof(char) * char_to_nib); + t[char_to_nib] = '\0'; + + if (kstrtoint(t, 16, &d)) { + DP_ERR("kstrtoint error\n"); + goto bail; + } + + if (debug->edid && (edid_buf_index < debug->edid_size)) + debug->edid[edid_buf_index++] = d; + + buf_t += char_to_nib; + } + + edid = debug->edid; +bail: + kfree(buf); + debug->panel->set_edid(debug->panel, edid, debug->edid_size); + + /* + * print edid status as this code is executed + * only while running in debug mode which is manually + * triggered by a tester or a script. + */ + DP_INFO("[%s]\n", edid ? 
"SET" : "CLEAR"); + + mutex_unlock(&debug->lock); + return rc; +} + +static ssize_t dp_debug_write_dpcd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + u8 *buf = NULL, *buf_t = NULL, *dpcd = NULL; + const int char_to_nib = 2; + size_t dpcd_size = 0; + size_t size = 0, dpcd_buf_index = 0; + ssize_t rc = count; + char offset_ch[5]; + u32 offset, data_len; + const u32 dp_receiver_cap_size = 16; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto bail; + + size = min_t(size_t, count, SZ_2K); + + if (size <= 4) + goto bail; + + buf = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto bail; + } + + if (copy_from_user(buf, user_buff, size)) + goto bail; + + memcpy(offset_ch, buf, 4); + offset_ch[4] = '\0'; + + if (kstrtoint(offset_ch, 16, &offset)) { + DP_ERR("offset kstrtoint error\n"); + goto bail; + } + + if (dp_debug_get_dpcd_buf(debug)) + goto bail; + + if (offset == 0xFFFF) { + DP_ERR("clearing dpcd\n"); + memset(debug->dpcd, 0, debug->dpcd_size); + goto bail; + } + + size -= 4; + if (size == 0) + goto bail; + + dpcd_size = size / char_to_nib; + data_len = dpcd_size; + buf_t = buf + 4; + + dpcd_buf_index = offset; + + while (dpcd_size--) { + char t[3]; + int d; + + memcpy(t, buf_t, sizeof(char) * char_to_nib); + t[char_to_nib] = '\0'; + + if (kstrtoint(t, 16, &d)) { + DP_ERR("kstrtoint error\n"); + goto bail; + } + + if (dpcd_buf_index < debug->dpcd_size) + debug->dpcd[dpcd_buf_index++] = d; + + buf_t += char_to_nib; + } + + dpcd = debug->dpcd; +bail: + kfree(buf); + + if (!dpcd || (size / char_to_nib) >= dp_receiver_cap_size || + offset == 0xffff) { + debug->panel->set_dpcd(debug->panel, dpcd); + /* + * print dpcd status as this code is executed + * only while running in debug mode which is manually + * triggered by a tester or a script. 
+ */ + if (!dpcd || (offset == 0xffff)) + DP_INFO("[%s]\n", "CLEAR"); + else + DP_INFO("[%s]\n", "SET"); + } + + mutex_unlock(&debug->lock); + + debug->aux->dpcd_updated(debug->aux); + return rc; +} + +static ssize_t dp_debug_read_dpcd(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + int const buf_size = SZ_4K; + u32 offset = 0; + u32 len = 0; + bool notify = false; + + if (!debug || !debug->aux || !debug->dpcd) + return -ENODEV; + + mutex_lock(&debug->lock); + if (*ppos) + goto end; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + goto end; + + len += snprintf(buf, buf_size, "0x%x", debug->aux->reg); + + if (!debug->aux->read) { + while (1) { + if (debug->aux->reg + offset >= buf_size || + offset >= debug->aux->size) + break; + + len += snprintf(buf + len, buf_size - len, "0x%x", + debug->dpcd[debug->aux->reg + offset++]); + } + + notify = true; + } + + len = min_t(size_t, count, len); + if (!copy_to_user(user_buff, buf, len)) + *ppos += len; + + kfree(buf); +end: + mutex_unlock(&debug->lock); + + if (notify) + debug->aux->dpcd_updated(debug->aux); + + return len; +} + +static ssize_t dp_debug_write_hpd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int const hpd_data_mask = 0x7; + int hpd = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &hpd) != 0) + goto end; + + hpd &= hpd_data_mask; + debug->hotplug = !!(hpd & BIT(0)); + + debug->dp_debug.psm_enabled = !!(hpd & BIT(1)); + + /* + * print hotplug value as this code is executed + * only while running in debug mode which is manually + * triggered by a tester or a script. + */ + DP_INFO("%s\n", debug->hotplug ? 
"[CONNECT]" : "[DISCONNECT]"); + + debug->hpd->simulate_connect(debug->hpd, debug->hotplug); +end: + return len; +} + +static ssize_t dp_debug_write_edid_modes(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio; + + if (!debug) + return -ENODEV; + + if (*ppos) + goto end; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto clear; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %d %d %d", &hdisplay, &vdisplay, &vrefresh, + &aspect_ratio) != 4) + goto clear; + + if (!hdisplay || !vdisplay || !vrefresh) + goto clear; + + debug->dp_debug.debug_en = true; + debug->dp_debug.hdisplay = hdisplay; + debug->dp_debug.vdisplay = vdisplay; + debug->dp_debug.vrefresh = vrefresh; + debug->dp_debug.aspect_ratio = aspect_ratio; + goto end; +clear: + DP_DEBUG("clearing debug modes\n"); + debug->dp_debug.debug_en = false; +end: + return len; +} + +static ssize_t dp_debug_write_edid_modes_mst(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char buf[SZ_512]; + char *read_buf; + size_t len = 0; + + int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio = 0; + int con_id = 0, offset = 0, debug_en = 0; + bool in_list = false; + + if (!debug) + return -ENODEV; + + if (*ppos) + goto end; + + len = min_t(size_t, count, SZ_512 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + read_buf = buf; + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + while (sscanf(read_buf, "%d %d %d %d %d %d%n", &debug_en, &con_id, + &hdisplay, &vdisplay, &vrefresh, &aspect_ratio, + &offset) == 6) { + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, + list) { + if (mst_connector->con_id == con_id) { + in_list = true; + mst_connector->debug_en = (bool) debug_en; + mst_connector->hdisplay = hdisplay; + mst_connector->vdisplay = vdisplay; + mst_connector->vrefresh = vrefresh; + mst_connector->aspect_ratio = aspect_ratio; + } + } + + if (!in_list) + DP_DEBUG("dp connector id %d is invalid\n", con_id); + + in_list = false; + read_buf += offset; + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); +end: + return len; +} + +static ssize_t dp_debug_write_mst_con_id(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char buf[SZ_32]; + size_t len = 0; + int con_id = 0, status; + bool in_list = false; + const int dp_en = BIT(3), hpd_high = BIT(7), hpd_irq = BIT(8); + int vdo = dp_en | hpd_high | hpd_irq; + + if (!debug) + return -ENODEV; + + if (*ppos) + goto end; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto clear; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %d", &con_id, &status) != 2) { + len = 0; + goto end; + } + + if (!con_id) + goto clear; + + /* Verify that the connector id is for a valid mst connector. 
*/ + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + if (mst_connector->con_id == con_id) { + in_list = true; + debug->mst_con_id = con_id; + mst_connector->state = status; + break; + } + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (!in_list) + DP_ERR("invalid connector id %u\n", con_id); + else if (status != connector_status_unknown) { + debug->dp_debug.mst_hpd_sim = true; + debug->hpd->simulate_attention(debug->hpd, vdo); + } + + goto end; +clear: + DP_DEBUG("clearing mst_con_id\n"); + debug->mst_con_id = 0; +end: + return len; +} + +static ssize_t dp_debug_write_mst_con_add(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + const int dp_en = BIT(3), hpd_high = BIT(7), hpd_irq = BIT(8); + int vdo = dp_en | hpd_high | hpd_irq; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + debug->dp_debug.mst_hpd_sim = true; + debug->dp_debug.mst_sim_add_con = true; + debug->hpd->simulate_attention(debug->hpd, vdo); +end: + return len; +} + +static ssize_t dp_debug_write_mst_con_remove(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char buf[SZ_32]; + size_t len = 0; + int con_id = 0; + bool in_list = false; + const int dp_en = BIT(3), hpd_high = BIT(7), hpd_irq = BIT(8); + int vdo = dp_en | hpd_high | hpd_irq; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%d", &con_id) != 1) { + len = 0; + goto end; + } + + if (!con_id) + goto end; + + /* Verify that the connector id is for a valid mst connector. 
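+	 * The expected input is a single decimal connector id.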
*/ + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + if (mst_connector->con_id == con_id) { + in_list = true; + break; + } + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (!in_list) { + DRM_ERROR("invalid connector id %u\n", con_id); + goto end; + } + + debug->dp_debug.mst_hpd_sim = true; + debug->dp_debug.mst_sim_remove_con = true; + debug->dp_debug.mst_sim_remove_con_id = con_id; + debug->hpd->simulate_attention(debug->hpd, vdo); +end: + return len; +} + +static ssize_t dp_debug_bw_code_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 max_bw_code = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &max_bw_code) != 0) + return 0; + + if (!is_link_rate_valid(max_bw_code)) { + DP_ERR("Unsupported bw code %d\n", max_bw_code); + return len; + } + debug->panel->max_bw_code = max_bw_code; + DP_DEBUG("max_bw_code: %d\n", max_bw_code); + + return len; +} + +static ssize_t dp_debug_mst_mode_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[64]; + ssize_t len; + + len = scnprintf(buf, sizeof(buf), + "mst_mode = %d, mst_state = %d\n", + debug->parser->has_mst, + debug->panel->mst_state); + + return simple_read_from_buffer(user_buff, count, ppos, buf, len); +} + +static ssize_t dp_debug_mst_mode_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 mst_mode = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &mst_mode) != 0) + return 0; + + debug->parser->has_mst = mst_mode ? 
true : false; + DP_DEBUG("mst_enable: %d\n", mst_mode); + + return len; +} + +static ssize_t dp_debug_max_pclk_khz_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 max_pclk = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &max_pclk) != 0) + return 0; + + if (max_pclk > debug->parser->max_pclk_khz) + DP_ERR("requested: %d, max_pclk_khz:%d\n", max_pclk, + debug->parser->max_pclk_khz); + else + debug->dp_debug.max_pclk_khz = max_pclk; + + DP_DEBUG("max_pclk_khz: %d\n", max_pclk); + + return len; +} + +static ssize_t dp_debug_max_pclk_khz_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len += snprintf(buf + len, (SZ_4K - len), + "max_pclk_khz = %d, org: %d\n", + debug->dp_debug.max_pclk_khz, + debug->parser->max_pclk_khz); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static ssize_t dp_debug_mst_sideband_mode_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int mst_sideband_mode = 0; + u32 mst_port_cnt = 0; + + if (!debug) + return -ENODEV; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %u", &mst_sideband_mode, &mst_port_cnt) != 2) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (mst_port_cnt > DP_MST_SIM_MAX_PORTS) { + DP_ERR("port cnt:%d exceeding max:%d\n", mst_port_cnt, + DP_MST_SIM_MAX_PORTS); + return -EINVAL; + } + + debug->parser->has_mst_sideband = mst_sideband_mode ? true : false; + debug->dp_debug.mst_port_cnt = mst_port_cnt; + DP_DEBUG("mst_sideband_mode: %d port_cnt:%d\n", + mst_sideband_mode, mst_port_cnt); + return count; +} + +static ssize_t dp_debug_widebus_mode_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 widebus_mode = 0; + + if (!debug || !debug->parser) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &widebus_mode) != 0) + return -EINVAL; + + debug->parser->has_widebus = widebus_mode ? 
true : false; + DP_DEBUG("widebus_enable: %d\n", widebus_mode); + + return len; +} + +static ssize_t dp_debug_tpg_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 tpg_state = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto bail; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &tpg_state) != 0) + goto bail; + + tpg_state &= 0x1; + DP_DEBUG("tpg_state: %d\n", tpg_state); + + if (tpg_state == debug->dp_debug.tpg_state) + goto bail; + + if (debug->panel) + debug->panel->tpg_config(debug->panel, tpg_state); + + debug->dp_debug.tpg_state = tpg_state; +bail: + return len; +} + +static ssize_t dp_debug_write_exe_mode(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%3s", debug->exe_mode) != 1) + goto end; + + if (strcmp(debug->exe_mode, "hw") && + strcmp(debug->exe_mode, "sw") && + strcmp(debug->exe_mode, "all")) + goto end; + + debug->catalog->set_exe_mode(debug->catalog, debug->exe_mode); +end: + return len; +} + +static ssize_t dp_debug_read_connected(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len += snprintf(buf, SZ_8, "%d\n", debug->hpd->hpd_high); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static ssize_t dp_debug_write_hdcp(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int hdcp = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &hdcp) != 0) + goto end; + + debug->dp_debug.hdcp_disabled = !hdcp; +end: + return len; +} + +static ssize_t dp_debug_read_hdcp(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = sizeof(debug->dp_debug.hdcp_status); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, debug->dp_debug.hdcp_status, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len) +{ + if (rc >= *max_size) { + DP_ERR("buffer overflow\n"); + return -EINVAL; + } + *len += rc; + *max_size = SZ_4K - *len; + + return 0; +} + +static ssize_t dp_debug_read_edid_modes(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + struct drm_connector *connector; + struct drm_display_mode *mode; + + if (!debug) { + 
DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + connector = *debug->connector; + + if (!connector) { + DP_ERR("connector is NULL\n"); + rc = -EINVAL; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto error; + } + + mutex_lock(&connector->dev->mode_config.mutex); + list_for_each_entry(mode, &connector->modes, head) { + ret = snprintf(buf + len, max_size, + "%s %d %d %d %d %d 0x%x\n", + mode->name, mode->vrefresh, mode->picture_aspect_ratio, + mode->htotal, mode->vtotal, mode->clock, mode->flags); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&connector->dev->mode_config.mutex); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_edid_modes_mst(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + struct drm_connector *connector; + struct drm_display_mode *mode; + bool in_list = false; + + if (!debug) { + DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + if (mst_connector->con_id == debug->mst_con_id) { + connector = mst_connector->conn; + in_list = true; + } + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (!in_list) { + DP_ERR("connector %u not in mst list\n", debug->mst_con_id); + rc = -EINVAL; + goto error; + } + + if (!connector) { + DP_ERR("connector is NULL\n"); + rc = -EINVAL; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + mutex_lock(&connector->dev->mode_config.mutex); + list_for_each_entry(mode, &connector->modes, head) { + ret = snprintf(buf + len, max_size, + "%s %d %d %d %d %d 0x%x\n", + mode->name, mode->vrefresh, + mode->picture_aspect_ratio, mode->htotal, + mode->vtotal, mode->clock, mode->flags); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&connector->dev->mode_config.mutex); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_mst_con_id(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + + if (!debug) { + DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + ret = snprintf(buf, max_size, "%u\n", debug->mst_con_id); + len += ret; + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_mst_conn_info(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + 
struct dp_mst_connector *mst_connector; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + struct drm_connector *connector; + + if (!debug) { + DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + /* Do not print info for head node */ + if (mst_connector->con_id == -1) + continue; + + connector = mst_connector->conn; + + if (!connector) { + DP_ERR("connector for id %d is NULL\n", + mst_connector->con_id); + continue; + } + + ret = scnprintf(buf + len, max_size, + "conn name:%s, conn id:%d state:%d\n", + connector->name, connector->base.id, + connector->status); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, + size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, rc = 0; + u32 max_size = SZ_4K; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + rc = snprintf(buf + len, max_size, "\tstate=0x%x\n", debug->aux->state); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tlink_rate=%u\n", + debug->panel->link_info.rate); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tnum_lanes=%u\n", + debug->panel->link_info.num_lanes); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tresolution=%dx%d@%dHz\n", + debug->panel->pinfo.h_active, + debug->panel->pinfo.v_active, + debug->panel->pinfo.refresh_rate); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tpclock=%dKHz\n", + debug->panel->pinfo.pixel_clk_khz); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tbpp=%d\n", + debug->panel->pinfo.bpp); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + /* Link Information */ + rc = snprintf(buf + len, max_size, "\ttest_req=%s\n", + dp_link_get_test_name(debug->link->sink_request)); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tlane_count=%d\n", debug->link->link_params.lane_count); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tbw_code=%d\n", debug->link->link_params.bw_code); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tv_level=%d\n", debug->link->phy_params.v_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tp_level=%d\n", debug->link->phy_params.p_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + len = min_t(size_t, count, len); + if 
(copy_to_user(user_buff, buf, len)) + goto error; + + *ppos += len; + + kfree(buf); + return len; +error: + kfree(buf); + return -EINVAL; +} + +static ssize_t dp_debug_bw_code_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len += snprintf(buf + len, (SZ_4K - len), + "max_bw_code = %d\n", debug->panel->max_bw_code); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static ssize_t dp_debug_tpg_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len += snprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_state); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static int dp_debug_print_hdr_params_to_buf(struct drm_connector *connector, + char *buf, u32 size) +{ + int rc; + u32 i, len = 0, max_size = size; + struct sde_connector *c_conn; + struct sde_connector_state *c_state; + struct drm_msm_ext_hdr_metadata *hdr; + + c_conn = to_sde_connector(connector); + c_state = to_sde_connector_state(connector->state); + + hdr = &c_state->hdr_meta; + + rc = snprintf(buf + len, max_size, + "============SINK HDR PARAMETERS===========\n"); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "eotf = %d\n", + connector->hdr_eotf); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "type_one = %d\n", + connector->hdr_metadata_type_one); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "hdr_plus_app_ver = %d\n", + connector->hdr_plus_app_ver); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "max_luminance = %d\n", + connector->hdr_max_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "avg_luminance = %d\n", + connector->hdr_avg_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "min_luminance = %d\n", + connector->hdr_min_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "============VIDEO HDR PARAMETERS===========\n"); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "hdr_state = %d\n", hdr->hdr_state); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "hdr_supported = %d\n", + hdr->hdr_supported); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "eotf = %d\n", hdr->eotf); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "white_point_x = %d\n", + hdr->white_point_x); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, 
max_size, "white_point_y = %d\n", + hdr->white_point_y); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "max_luminance = %d\n", + hdr->max_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "min_luminance = %d\n", + hdr->min_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "max_content_light_level = %d\n", + hdr->max_content_light_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "min_content_light_level = %d\n", + hdr->max_average_light_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + for (i = 0; i < HDR_PRIMARIES_COUNT; i++) { + rc = snprintf(buf + len, max_size, "primaries_x[%d] = %d\n", + i, hdr->display_primaries_x[i]); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "primaries_y[%d] = %d\n", + i, hdr->display_primaries_y[i]); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + } + + if (hdr->hdr_plus_payload && hdr->hdr_plus_payload_size) { + u32 rowsize = 16, rem; + struct sde_connector_dyn_hdr_metadata *dhdr = + &c_state->dyn_hdr_meta; + + /** + * Do not use user pointer from hdr->hdr_plus_payload directly, + * instead use kernel's cached copy of payload data. + */ + for (i = 0; i < dhdr->dynamic_hdr_payload_size; i += rowsize) { + rc = snprintf(buf + len, max_size, "DHDR: "); + if (dp_debug_check_buffer_overflow(rc, &max_size, + &len)) + goto error; + + rem = dhdr->dynamic_hdr_payload_size - i; + rc = hex_dump_to_buffer(&dhdr->dynamic_hdr_payload[i], + min(rowsize, rem), rowsize, 1, buf + len, + max_size, false); + if (dp_debug_check_buffer_overflow(rc, &max_size, + &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\n"); + if (dp_debug_check_buffer_overflow(rc, &max_size, + &len)) + goto error; + } + } + + return len; +error: + return -EOVERFLOW; +} + +static ssize_t dp_debug_read_hdr(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf = NULL; + u32 len = 0; + u32 max_size = SZ_4K; + struct drm_connector *connector; + + if (!debug) { + DP_ERR("invalid data\n"); + return -ENODEV; + } + + connector = *debug->connector; + + if (!connector) { + DP_ERR("connector is NULL\n"); + return -EINVAL; + } + + if (*ppos) + return 0; + + buf = kzalloc(max_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len = dp_debug_print_hdr_params_to_buf(connector, buf, max_size); + if (len == -EOVERFLOW) { + kfree(buf); + return len; + } + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static ssize_t dp_debug_read_hdr_mst(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf = NULL; + u32 len = 0, max_size = SZ_4K; + struct dp_mst_connector *mst_connector; + struct drm_connector *connector; + bool in_list = false; + + if (!debug) { + DP_ERR("invalid data\n"); + return -ENODEV; + } + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + if (mst_connector->con_id == debug->mst_con_id) { + 
connector = mst_connector->conn; + in_list = true; + } + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (!in_list) { + DP_ERR("connector %u not in mst list\n", debug->mst_con_id); + return -EINVAL; + } + + if (!connector) { + DP_ERR("connector is NULL\n"); + return -EINVAL; + } + + if (*ppos) + return 0; + + + buf = kzalloc(max_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len = dp_debug_print_hdr_params_to_buf(connector, buf, max_size); + if (len == -EOVERFLOW) { + kfree(buf); + return len; + } + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static void dp_debug_set_sim_mode(struct dp_debug_private *debug, bool sim) +{ + if (sim) { + if (dp_debug_get_edid_buf(debug)) + return; + + if (dp_debug_get_dpcd_buf(debug)) { + devm_kfree(debug->dev, debug->edid); + debug->edid = NULL; + return; + } + + debug->dp_debug.sim_mode = true; + debug->aux->set_sim_mode(debug->aux, true, + debug->edid, debug->dpcd); + } else { + if (debug->hotplug) { + DP_WARN("sim mode off before hotplug disconnect\n"); + debug->hpd->simulate_connect(debug->hpd, false); + debug->hotplug = false; + } + debug->aux->abort(debug->aux, true); + debug->ctrl->abort(debug->ctrl, true); + + debug->aux->set_sim_mode(debug->aux, false, NULL, NULL); + debug->dp_debug.sim_mode = false; + + debug->panel->set_edid(debug->panel, 0, 0); + if (debug->edid) { + devm_kfree(debug->dev, debug->edid); + debug->edid = NULL; + } + + debug->panel->set_dpcd(debug->panel, 0); + if (debug->dpcd) { + devm_kfree(debug->dev, debug->dpcd); + debug->dpcd = NULL; + } + } + + /* + * print simulation status as this code is executed + * only while running in debug mode which is manually + * triggered by a tester or a script. + */ + DP_INFO("%s\n", sim ? 
"[ON]" : "[OFF]"); +} + +static ssize_t dp_debug_write_sim(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int sim; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + mutex_lock(&debug->lock); + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &sim) != 0) + goto end; + + dp_debug_set_sim_mode(debug, sim); +end: + mutex_unlock(&debug->lock); + return len; +} + +static ssize_t dp_debug_write_attention(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int vdo; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &vdo) != 0) + goto end; + + debug->hpd->simulate_attention(debug->hpd, vdo); +end: + return len; +} + +static ssize_t dp_debug_write_dump(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%31s", debug->reg_dump) != 1) + goto end; + + /* qfprom register dump not supported */ + if (!strcmp(debug->reg_dump, "qfprom_physical")) + strlcpy(debug->reg_dump, "clear", sizeof(debug->reg_dump)); +end: + return len; +} + +static ssize_t dp_debug_read_dump(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + int rc = 0; + struct dp_debug_private *debug = file->private_data; + u8 *buf = NULL; + u32 len = 0; + char prefix[SZ_32]; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + if (!debug->hpd->hpd_high || !strlen(debug->reg_dump)) + goto end; + + rc = debug->catalog->get_reg_dump(debug->catalog, + debug->reg_dump, &buf, &len); + if (rc) + goto end; + + snprintf(prefix, sizeof(prefix), "%s: ", debug->reg_dump); + print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE, + 16, 4, buf, len, false); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + return -EFAULT; + + *ppos += len; +end: + return len; +} + +static const struct file_operations dp_debug_fops = { + .open = simple_open, + .read = dp_debug_read_info, +}; + +static const struct file_operations edid_modes_fops = { + .open = simple_open, + .read = dp_debug_read_edid_modes, + .write = dp_debug_write_edid_modes, +}; + +static const struct file_operations edid_modes_mst_fops = { + .open = simple_open, + .read = dp_debug_read_edid_modes_mst, + .write = dp_debug_write_edid_modes_mst, +}; + +static const struct file_operations mst_conn_info_fops = { + .open = simple_open, + .read = dp_debug_read_mst_conn_info, +}; + +static const struct file_operations mst_con_id_fops = { + .open = simple_open, + .read = dp_debug_read_mst_con_id, + .write = dp_debug_write_mst_con_id, +}; + +static const struct file_operations mst_con_add_fops = { + .open = simple_open, + .write = dp_debug_write_mst_con_add, +}; + +static const struct file_operations 
mst_con_remove_fops = { + .open = simple_open, + .write = dp_debug_write_mst_con_remove, +}; + +static const struct file_operations hpd_fops = { + .open = simple_open, + .write = dp_debug_write_hpd, +}; + +static const struct file_operations edid_fops = { + .open = simple_open, + .write = dp_debug_write_edid, +}; + +static const struct file_operations dpcd_fops = { + .open = simple_open, + .write = dp_debug_write_dpcd, + .read = dp_debug_read_dpcd, +}; + +static const struct file_operations connected_fops = { + .open = simple_open, + .read = dp_debug_read_connected, +}; + +static const struct file_operations bw_code_fops = { + .open = simple_open, + .read = dp_debug_bw_code_read, + .write = dp_debug_bw_code_write, +}; +static const struct file_operations exe_mode_fops = { + .open = simple_open, + .write = dp_debug_write_exe_mode, +}; + +static const struct file_operations tpg_fops = { + .open = simple_open, + .read = dp_debug_tpg_read, + .write = dp_debug_tpg_write, +}; + +static const struct file_operations hdr_fops = { + .open = simple_open, + .read = dp_debug_read_hdr, +}; + +static const struct file_operations hdr_mst_fops = { + .open = simple_open, + .read = dp_debug_read_hdr_mst, +}; + +static const struct file_operations sim_fops = { + .open = simple_open, + .write = dp_debug_write_sim, +}; + +static const struct file_operations attention_fops = { + .open = simple_open, + .write = dp_debug_write_attention, +}; + +static const struct file_operations dump_fops = { + .open = simple_open, + .write = dp_debug_write_dump, + .read = dp_debug_read_dump, +}; + +static const struct file_operations mst_mode_fops = { + .open = simple_open, + .write = dp_debug_mst_mode_write, + .read = dp_debug_mst_mode_read, +}; + +static const struct file_operations mst_sideband_mode_fops = { + .open = simple_open, + .write = dp_debug_mst_sideband_mode_write, +}; + +static const struct file_operations max_pclk_khz_fops = { + .open = simple_open, + .write = dp_debug_max_pclk_khz_write, + .read = dp_debug_max_pclk_khz_read, +}; + +static const struct file_operations hdcp_fops = { + .open = simple_open, + .write = dp_debug_write_hdcp, + .read = dp_debug_read_hdcp, +}; + +static const struct file_operations widebus_mode_fops = { + .open = simple_open, + .write = dp_debug_widebus_mode_write, +}; + +static int dp_debug_init(struct dp_debug *dp_debug) +{ + int rc = 0; + struct dp_debug_private *debug = container_of(dp_debug, + struct dp_debug_private, dp_debug); + struct dentry *dir, *file; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) { + DP_WARN("Not creating debug root dir."); + debug->root = NULL; + return 0; + } + + dir = debugfs_create_dir(DEBUG_NAME, NULL); + if (IS_ERR_OR_NULL(dir)) { + if (!dir) + rc = -EINVAL; + else + rc = PTR_ERR(dir); + DP_ERR("[%s] debugfs create dir failed, rc = %d\n", + DEBUG_NAME, rc); + goto error; + } + + debug->root = dir; + + file = debugfs_create_file("dp_debug", 0444, dir, + debug, &dp_debug_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create file failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("edid_modes", 0644, dir, + debug, &edid_modes_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create edid_modes failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("edid_modes_mst", 0644, dir, + debug, &edid_modes_mst_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create edid_modes_mst failed, 
rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_con_id", 0644, dir, + debug, &mst_con_id_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create mst_con_id failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_con_info", 0644, dir, + debug, &mst_conn_info_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create mst_conn_info failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_con_add", 0644, dir, + debug, &mst_con_add_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DRM_ERROR("[%s] debugfs create mst_con_add failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_con_remove", 0644, dir, + debug, &mst_con_remove_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DRM_ERROR("[%s] debugfs create mst_con_remove failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("hpd", 0644, dir, + debug, &hpd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hpd failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("connected", 0444, dir, + debug, &connected_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs connected failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("max_bw_code", 0644, dir, + debug, &bw_code_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs max_bw_code failed, rc=%d\n", + DEBUG_NAME, rc); + } + + file = debugfs_create_file("exe_mode", 0644, dir, + debug, &exe_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs register failed, rc=%d\n", + DEBUG_NAME, rc); + } + + file = debugfs_create_file("edid", 0644, dir, + debug, &edid_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs edid failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("dpcd", 0644, dir, + debug, &dpcd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs dpcd failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("tpg_ctrl", 0644, dir, + debug, &tpg_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs tpg failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("hdr", 0400, dir, + debug, &hdr_fops); + + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdr failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("hdr_mst", 0400, dir, + debug, &hdr_mst_fops); + + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdr_mst failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("sim", 0644, dir, + debug, &sim_fops); + + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs sim failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("attention", 0644, dir, + debug, &attention_fops); + + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs attention failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("dump", 0644, dir, + debug, &dump_fops); + + if 
(IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs dump failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_mode", 0644, dir, + debug, &mst_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs mst_mode failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_sideband_mode", 0644, dir, + debug, &mst_sideband_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs mst_sideband_mode failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("max_pclk_khz", 0644, dir, + debug, &max_pclk_khz_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs max_pclk_khz failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_bool("force_encryption", 0644, dir, + &debug->dp_debug.force_encryption); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs force_encryption failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("hdcp", 0644, dir, + debug, &hdcp_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdcp failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_bool("hdcp_wait_sink_sync", 0644, dir, + &debug->dp_debug.hdcp_wait_sink_sync); + + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdcp_wait_sink_sync failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_bool("dsc_feature_enable", 0644, dir, + &debug->parser->dsc_feature_enable); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs dsc_feature failed, rc=%d\n", + DEBUG_NAME, rc); + } + + file = debugfs_create_bool("fec_feature_enable", 0644, dir, + &debug->parser->fec_feature_enable); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs fec_feature_enable failed, rc=%d\n", + DEBUG_NAME, rc); + } + + file = debugfs_create_file("widebus_mode", 0644, dir, + debug, &widebus_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs widebus failed, rc=%d\n", + DEBUG_NAME, rc); + } + + file = debugfs_create_u32("max_lclk_khz", 0644, dir, + &debug->parser->max_lclk_khz); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs max_lclk_khz failed, rc=%d\n", + DEBUG_NAME, rc); + } + + return 0; + +error_remove_dir: + if (!file) + rc = -EINVAL; + debugfs_remove_recursive(dir); +error: + return rc; +} + +u8 *dp_debug_get_edid(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return NULL; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + return debug->edid; +} + +static void dp_debug_abort(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + mutex_lock(&debug->lock); + dp_debug_set_sim_mode(debug, false); + mutex_unlock(&debug->lock); +} + +struct dp_debug *dp_debug_get(struct dp_debug_in *in) +{ + int rc = 0; + struct dp_debug_private *debug; + struct dp_debug *dp_debug; + + if (!in->dev || !in->panel || !in->hpd || !in->link || + !in->catalog || !in->ctrl) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + debug = devm_kzalloc(in->dev, sizeof(*debug), GFP_KERNEL); + if (!debug) { + rc = -ENOMEM; + goto error; + } +
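+ /* Cache the caller's module handles; the debugfs handlers rely on them. */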
+ debug->dp_debug.debug_en = false; + debug->hpd = in->hpd; + debug->link = in->link; + debug->panel = in->panel; + debug->aux = in->aux; + debug->dev = in->dev; + debug->connector = in->connector; + debug->catalog = in->catalog; + debug->parser = in->parser; + debug->ctrl = in->ctrl; + + dp_debug = &debug->dp_debug; + dp_debug->vdisplay = 0; + dp_debug->hdisplay = 0; + dp_debug->vrefresh = 0; + + mutex_init(&debug->lock); + + rc = dp_debug_init(dp_debug); + if (rc) { + devm_kfree(in->dev, debug); + goto error; + } + + debug->aux->access_lock = &debug->lock; + dp_debug->get_edid = dp_debug_get_edid; + dp_debug->abort = dp_debug_abort; + + INIT_LIST_HEAD(&dp_debug->dp_mst_connector_list.list); + + /* + * Do not associate the head of the list with any connector in order to + * maintain backwards compatibility with the SST use case. + */ + dp_debug->dp_mst_connector_list.con_id = -1; + dp_debug->dp_mst_connector_list.conn = NULL; + dp_debug->dp_mst_connector_list.debug_en = false; + mutex_init(&dp_debug->dp_mst_connector_list.lock); + + dp_debug->max_pclk_khz = debug->parser->max_pclk_khz; + + return dp_debug; +error: + return ERR_PTR(rc); +} + +static int dp_debug_deinit(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return -EINVAL; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + debugfs_remove_recursive(debug->root); + + return 0; +} + +void dp_debug_put(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + dp_debug_deinit(dp_debug); + + mutex_destroy(&debug->lock); + + if (debug->edid) + devm_kfree(debug->dev, debug->edid); + + if (debug->dpcd) + devm_kfree(debug->dev, debug->dpcd); + + devm_kfree(debug->dev, debug); +} diff --git a/techpack/display/msm/dp/dp_debug.h b/techpack/display/msm/dp/dp_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..aa239be382b809714446b70f3ff6fb9c5eba5d2a --- /dev/null +++ b/techpack/display/msm/dp/dp_debug.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_DEBUG_H_ +#define _DP_DEBUG_H_ + +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_link.h" +#include "dp_usbpd.h" +#include "dp_aux.h" +#include "dp_display.h" + +#define DP_DEBUG(fmt, ...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_KMS)) \ + DRM_DEBUG("[msm-dp-debug][%-4d]"fmt, current->pid, \ + ##__VA_ARGS__); \ + else \ + pr_debug("[drm:%s][msm-dp-debug][%-4d]"fmt, __func__,\ + current->pid, ##__VA_ARGS__); \ + } while (0) + +#define DP_INFO(fmt, ...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_KMS)) \ + DRM_INFO("[msm-dp-info][%-4d]"fmt, current->pid, \ + ##__VA_ARGS__); \ + else \ + pr_info("[drm:%s][msm-dp-info][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__); \ + } while (0) + +#define DP_WARN(fmt, ...) \ + pr_warn("[drm:%s][msm-dp-warn][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__) + +#define DP_ERR(fmt, ...) 
\ + pr_err("[drm:%s][msm-dp-err][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__) + +/** + * struct dp_debug + * @debug_en: specifies whether debug mode enabled + * @sim_mode: specifies whether sim mode enabled + * @psm_enabled: specifies whether psm enabled + * @hdcp_disabled: specifies if hdcp is disabled + * @hdcp_wait_sink_sync: used to wait for sink synchronization before HDCP auth + * @aspect_ratio: used to filter out aspect_ratio value + * @vdisplay: used to filter out vdisplay value + * @hdisplay: used to filter out hdisplay value + * @vrefresh: used to filter out vrefresh value + * @tpg_state: specifies whether tpg feature is enabled + * @max_pclk_khz: max pclk supported + * @force_encryption: enable/disable forced encryption for HDCP 2.2 + * @hdcp_status: string holding hdcp status information + * @dp_mst_connector_list: list containing all dp mst connectors + * @mst_hpd_sim: specifies whether simulated hpd enabled + * @mst_sim_add_con: specifies whether new sim connector is to be added + * @mst_sim_remove_con: specifies whether sim connector is to be removed + * @mst_sim_remove_con_id: specifies id of sim connector to be removed + * @mst_port_cnt: number of mst ports to be added during hpd + */ +struct dp_debug { + bool debug_en; + bool sim_mode; + bool psm_enabled; + bool hdcp_disabled; + bool hdcp_wait_sink_sync; + int aspect_ratio; + int vdisplay; + int hdisplay; + int vrefresh; + bool tpg_state; + u32 max_pclk_khz; + bool force_encryption; + char hdcp_status[SZ_128]; + struct dp_mst_connector dp_mst_connector_list; + bool mst_hpd_sim; + bool mst_sim_add_con; + bool mst_sim_remove_con; + int mst_sim_remove_con_id; + u32 mst_port_cnt; + + u8 *(*get_edid)(struct dp_debug *dp_debug); + void (*abort)(struct dp_debug *dp_debug); +}; + +/** + * struct dp_debug_in + * @dev: device instance of the caller + * @panel: instance of panel module + * @hpd: instance of hpd module + * @link: instance of link module + * @aux: instance of aux module + * @connector: double pointer to display connector + * @catalog: instance of catalog module + * @parser: instance of parser module + */ +struct dp_debug_in { + struct device *dev; + struct dp_panel *panel; + struct dp_hpd *hpd; + struct dp_link *link; + struct dp_aux *aux; + struct drm_connector **connector; + struct dp_catalog *catalog; + struct dp_parser *parser; + struct dp_ctrl *ctrl; +}; + +/** + * dp_debug_get() - configure and get the DisplayPort debug module data + * + * @in: input structure containing data to initialize the debug module + * return: pointer to allocated debug module data + * + * This function sets up the debug module and provides a way + * for debugfs input to be communicated to existing modules + */ +struct dp_debug *dp_debug_get(struct dp_debug_in *in); + +/** + * dp_debug_put() + * + * Cleans up dp_debug instance + * + * @dp_debug: instance of dp_debug + */ +void dp_debug_put(struct dp_debug *dp_debug); +#endif /* _DP_DEBUG_H_ */ diff --git a/techpack/display/msm/dp/dp_display.c b/techpack/display/msm/dp/dp_display.c new file mode 100644 index 0000000000000000000000000000000000000000..26554a453a65eaa7276c8dfcd135610fb314504d --- /dev/null +++ b/techpack/display/msm/dp/dp_display.c @@ -0,0 +1,3383 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sde_connector.h" + +#include "msm_drv.h" +#include "dp_hpd.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_audio.h" +#include "dp_display.h" +#include "sde_hdcp.h" +#include "dp_debug.h" +#include "sde_dbg.h" + +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) + +#define dp_display_state_show(x) { \ + DP_ERR("%s: state (0x%x): %s\n", x, dp->state, \ + dp_display_state_name(dp->state)); \ + SDE_EVT32_EXTERNAL(dp->state); } + +#define dp_display_state_log(x) { \ + DP_DEBUG("%s: state (0x%x): %s\n", x, dp->state, \ + dp_display_state_name(dp->state)); \ + SDE_EVT32_EXTERNAL(dp->state); } + +#define dp_display_state_is(x) (dp->state & (x)) +#define dp_display_state_add(x) { \ + (dp->state |= (x)); \ + dp_display_state_log("add "#x); } +#define dp_display_state_remove(x) { \ + (dp->state &= ~(x)); \ + dp_display_state_log("remove "#x); } + +enum dp_display_states { + DP_STATE_DISCONNECTED = 0, + DP_STATE_CONFIGURED = BIT(0), + DP_STATE_INITIALIZED = BIT(1), + DP_STATE_READY = BIT(2), + DP_STATE_CONNECTED = BIT(3), + DP_STATE_CONNECT_NOTIFIED = BIT(4), + DP_STATE_DISCONNECT_NOTIFIED = BIT(5), + DP_STATE_ENABLED = BIT(6), + DP_STATE_SUSPENDED = BIT(7), + DP_STATE_ABORTED = BIT(8), + DP_STATE_HDCP_ABORTED = BIT(9), + DP_STATE_SRC_PWRDN = BIT(10), +}; + +static char *dp_display_state_name(enum dp_display_states state) +{ + static char buf[SZ_1K]; + u32 len = 0; + + memset(buf, 0, SZ_1K); + + if (state & DP_STATE_CONFIGURED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "CONFIGURED"); + + if (state & DP_STATE_INITIALIZED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "INITIALIZED"); + + if (state & DP_STATE_READY) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "READY"); + + if (state & DP_STATE_CONNECTED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "CONNECTED"); + + if (state & DP_STATE_CONNECT_NOTIFIED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "CONNECT_NOTIFIED"); + + if (state & DP_STATE_DISCONNECT_NOTIFIED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "DISCONNECT_NOTIFIED"); + + if (state & DP_STATE_ENABLED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "ENABLED"); + + if (state & DP_STATE_SUSPENDED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "SUSPENDED"); + + if (state & DP_STATE_ABORTED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "ABORTED"); + + if (state & DP_STATE_HDCP_ABORTED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "HDCP_ABORTED"); + + if (state & DP_STATE_SRC_PWRDN) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "SRC_PWRDN"); + + if (!strlen(buf)) + return "DISCONNECTED"; + + return buf; +} + +static struct dp_display *g_dp_display; +#define HPD_STRING_SIZE 30 + +struct dp_hdcp_dev { + void *fd; + struct sde_hdcp_ops *ops; + enum sde_hdcp_version ver; +}; + +struct dp_hdcp { + void *data; + struct sde_hdcp_ops *ops; + + u32 source_cap; + + struct dp_hdcp_dev dev[HDCP_VERSION_MAX]; +}; + +struct dp_mst { + bool mst_active; + + bool drm_registered; + struct dp_mst_drm_cbs cbs; +}; + +struct dp_display_private { + char *name; + int irq; + + enum drm_connector_status cached_connector_status; + enum dp_display_states state; + + struct platform_device *pdev; + struct device_node 
*aux_switch_node; + struct dentry *root; + struct completion notification_comp; + + struct dp_hpd *hpd; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog *catalog; + struct dp_aux *aux; + struct dp_link *link; + struct dp_panel *panel; + struct dp_ctrl *ctrl; + struct dp_debug *debug; + + struct dp_panel *active_panels[DP_STREAM_MAX]; + struct dp_hdcp hdcp; + + struct dp_hpd_cb hpd_cb; + struct dp_display_mode mode; + struct dp_display dp_display; + struct msm_drm_private *priv; + + struct workqueue_struct *wq; + struct delayed_work hdcp_cb_work; + struct work_struct connect_work; + struct work_struct attention_work; + struct mutex session_lock; + bool hdcp_delayed_off; + + u32 active_stream_cnt; + struct dp_mst mst; + + u32 tot_dsc_blks_in_use; + + bool process_hpd_connect; + + struct notifier_block usb_nb; +}; + +static const struct of_device_id dp_dt_match[] = { + {.compatible = "qcom,dp-display"}, + {} +}; + +static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp) +{ + return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops; +} + +static irqreturn_t dp_display_irq(int irq, void *dev_id) +{ + struct dp_display_private *dp = dev_id; + + if (!dp) { + DP_ERR("invalid data\n"); + return IRQ_NONE; + } + + /* DP HPD isr */ + if (dp->hpd->type == DP_HPD_LPHW) + dp->hpd->isr(dp->hpd); + + /* DP controller isr */ + dp->ctrl->isr(dp->ctrl); + + /* DP aux isr */ + dp->aux->isr(dp->aux); + + /* HDCP isr */ + if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->isr) { + if (dp->hdcp.ops->isr(dp->hdcp.data)) + DP_ERR("dp_hdcp_isr failed\n"); + } + + return IRQ_HANDLED; +} +static bool dp_display_is_ds_bridge(struct dp_panel *panel) +{ + return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT); +} + +static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) +{ + return dp_display_is_ds_bridge(dp->panel) && + (dp->link->sink_count.count == 0); +} + +static bool dp_display_is_ready(struct dp_display_private *dp) +{ + return dp->hpd->hpd_high && dp_display_state_is(DP_STATE_CONNECTED) && + !dp_display_is_sink_count_zero(dp) && + dp->hpd->alt_mode_cfg_done; +} + +static void dp_audio_enable(struct dp_display_private *dp, bool enable) +{ + struct dp_panel *dp_panel; + int idx; + + for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) { + if (!dp->active_panels[idx]) + continue; + dp_panel = dp->active_panels[idx]; + + if (dp_panel->audio_supported) { + if (enable) { + dp_panel->audio->bw_code = + dp->link->link_params.bw_code; + dp_panel->audio->lane_count = + dp->link->link_params.lane_count; + dp_panel->audio->on(dp_panel->audio); + } else { + dp_panel->audio->off(dp_panel->audio); + } + } + } +} + +static void dp_display_update_hdcp_status(struct dp_display_private *dp, + bool reset) +{ + if (reset) { + dp->link->hdcp_status.hdcp_state = HDCP_STATE_INACTIVE; + dp->link->hdcp_status.hdcp_version = HDCP_VERSION_NONE; + } + + memset(dp->debug->hdcp_status, 0, sizeof(dp->debug->hdcp_status)); + + snprintf(dp->debug->hdcp_status, sizeof(dp->debug->hdcp_status), + "%s: %s\ncaps: %d\n", + sde_hdcp_version(dp->link->hdcp_status.hdcp_version), + sde_hdcp_state_name(dp->link->hdcp_status.hdcp_state), + dp->hdcp.source_cap); +} + +static void dp_display_update_hdcp_info(struct dp_display_private *dp) +{ + void *fd = NULL; + struct dp_hdcp_dev *dev = NULL; + struct sde_hdcp_ops *ops = NULL; + int i = HDCP_VERSION_2P2; + + dp_display_update_hdcp_status(dp, true); + + dp->hdcp.data = NULL; + dp->hdcp.ops = NULL; + + if 
(dp->debug->hdcp_disabled || dp->debug->sim_mode) + return; + + while (i) { + dev = &dp->hdcp.dev[i]; + ops = dev->ops; + fd = dev->fd; + + i >>= 1; + + if (!(dp->hdcp.source_cap & dev->ver)) + continue; + + if (ops->sink_support(fd)) { + dp->hdcp.data = fd; + dp->hdcp.ops = ops; + dp->link->hdcp_status.hdcp_version = dev->ver; + break; + } + } + + DP_DEBUG("HDCP version supported: %s\n", + sde_hdcp_version(dp->link->hdcp_status.hdcp_version)); +} + +static void dp_display_check_source_hdcp_caps(struct dp_display_private *dp) +{ + int i; + struct dp_hdcp_dev *hdcp_dev = dp->hdcp.dev; + + if (dp->debug->hdcp_disabled) { + DP_DEBUG("hdcp disabled\n"); + return; + } + + for (i = 0; i < HDCP_VERSION_MAX; i++) { + struct dp_hdcp_dev *dev = &hdcp_dev[i]; + struct sde_hdcp_ops *ops = dev->ops; + void *fd = dev->fd; + + if (!fd || !ops) + continue; + + if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active)) + continue; + + if (!(dp->hdcp.source_cap & dev->ver) && + ops->feature_supported && + ops->feature_supported(fd)) + dp->hdcp.source_cap |= dev->ver; + } + + dp_display_update_hdcp_status(dp, false); +} + +static void dp_display_hdcp_register_streams(struct dp_display_private *dp) +{ + int rc; + size_t i; + struct sde_hdcp_ops *ops = dp->hdcp.ops; + void *data = dp->hdcp.data; + + if (dp_display_is_ready(dp) && dp->mst.mst_active && ops && + ops->register_streams){ + struct stream_info streams[DP_STREAM_MAX]; + int index = 0; + + DP_DEBUG("Registering all active panel streams with HDCP\n"); + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + if (!dp->active_panels[i]) + continue; + streams[index].stream_id = i; + streams[index].virtual_channel = + dp->active_panels[i]->vcpi; + index++; + } + + if (index > 0) { + rc = ops->register_streams(data, index, streams); + if (rc) + DP_ERR("failed to register streams. rc = %d\n", + rc); + } + } +} + +static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp, + enum dp_stream_id stream_id) +{ + if (dp->hdcp.ops->deregister_streams) { + struct stream_info stream = {stream_id, + dp->active_panels[stream_id]->vcpi}; + + DP_DEBUG("Deregistering stream within HDCP library\n"); + dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream); + } +} + +static void dp_display_abort_hdcp(struct dp_display_private *dp, + bool abort) +{ + u32 i = HDCP_VERSION_2P2; + struct dp_hdcp_dev *dev = NULL; + + while (i) { + dev = &dp->hdcp.dev[i]; + i >>= 1; + if (!(dp->hdcp.source_cap & dev->ver)) + continue; + + dev->ops->abort(dev->fd, abort); + } +} + +static void dp_display_hdcp_cb_work(struct work_struct *work) +{ + struct dp_display_private *dp; + struct delayed_work *dw = to_delayed_work(work); + struct sde_hdcp_ops *ops; + struct dp_link_hdcp_status *status; + void *data; + int rc = 0; + u32 hdcp_auth_state; + u8 sink_status = 0; + + dp = container_of(dw, struct dp_display_private, hdcp_cb_work); + + if (!dp_display_state_is(DP_STATE_ENABLED | DP_STATE_CONNECTED) || + dp_display_state_is(DP_STATE_ABORTED | DP_STATE_HDCP_ABORTED)) + return; + + if (dp_display_state_is(DP_STATE_SUSPENDED)) { + DP_DEBUG("System suspending. 
Delay HDCP operations\n"); + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ); + return; + } + + if (dp->hdcp_delayed_off) { + if (dp->hdcp.ops && dp->hdcp.ops->off) + dp->hdcp.ops->off(dp->hdcp.data); + dp_display_update_hdcp_status(dp, true); + dp->hdcp_delayed_off = false; + } + + if (dp->debug->hdcp_wait_sink_sync) { + drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, + &sink_status); + sink_status &= (DP_RECEIVE_PORT_0_STATUS | + DP_RECEIVE_PORT_1_STATUS); + if (sink_status < 1) { + DP_DEBUG("Sink not synchronized. Queuing again then exiting\n"); + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ); + return; + } + } + + status = &dp->link->hdcp_status; + + if (status->hdcp_state == HDCP_STATE_INACTIVE) { + dp_display_check_source_hdcp_caps(dp); + dp_display_update_hdcp_info(dp); + + if (dp_display_is_hdcp_enabled(dp)) { + if (dp->hdcp.ops && dp->hdcp.ops->on && + dp->hdcp.ops->on(dp->hdcp.data)) { + dp_display_update_hdcp_status(dp, true); + return; + } + } else { + dp_display_update_hdcp_status(dp, true); + return; + } + } + + rc = dp->catalog->ctrl.read_hdcp_status(&dp->catalog->ctrl); + if (rc >= 0) { + hdcp_auth_state = (rc >> 20) & 0x3; + DP_DEBUG("hdcp auth state %d\n", hdcp_auth_state); + } + + ops = dp->hdcp.ops; + data = dp->hdcp.data; + + DP_DEBUG("%s: %s\n", sde_hdcp_version(status->hdcp_version), + sde_hdcp_state_name(status->hdcp_state)); + + dp_display_update_hdcp_status(dp, false); + + if (status->hdcp_state != HDCP_STATE_AUTHENTICATED && + dp->debug->force_encryption && ops && ops->force_encryption) + ops->force_encryption(data, dp->debug->force_encryption); + + switch (status->hdcp_state) { + case HDCP_STATE_INACTIVE: + dp_display_hdcp_register_streams(dp); + if (dp->hdcp.ops && dp->hdcp.ops->authenticate) + rc = dp->hdcp.ops->authenticate(data); + if (!rc) + status->hdcp_state = HDCP_STATE_AUTHENTICATING; + break; + case HDCP_STATE_AUTH_FAIL: + if (dp_display_is_ready(dp) && + dp_display_state_is(DP_STATE_ENABLED)) { + if (ops && ops->on && ops->on(data)) { + dp_display_update_hdcp_status(dp, true); + return; + } + dp_display_hdcp_register_streams(dp); + if (ops && ops->reauthenticate) { + rc = ops->reauthenticate(data); + if (rc) + DP_ERR("failed rc=%d\n", rc); + } + status->hdcp_state = HDCP_STATE_AUTHENTICATING; + } else { + DP_DEBUG("not reauthenticating, cable disconnected\n"); + } + break; + default: + dp_display_hdcp_register_streams(dp); + break; + } +} + +static void dp_display_notify_hdcp_status_cb(void *ptr, + enum sde_hdcp_state state) +{ + struct dp_display_private *dp = ptr; + + if (!dp) { + DP_ERR("invalid input\n"); + return; + } + + dp->link->hdcp_status.hdcp_state = state; + + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4); +} + +static void dp_display_deinitialize_hdcp(struct dp_display_private *dp) +{ + if (!dp) { + DP_ERR("invalid input\n"); + return; + } + + sde_dp_hdcp2p2_deinit(dp->hdcp.data); +} + +static int dp_display_initialize_hdcp(struct dp_display_private *dp) +{ + struct sde_hdcp_init_data hdcp_init_data; + struct dp_parser *parser; + void *fd; + int rc = 0; + + if (!dp) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + parser = dp->parser; + + hdcp_init_data.client_id = HDCP_CLIENT_DP; + hdcp_init_data.drm_aux = dp->aux->drm_aux; + hdcp_init_data.cb_data = (void *)dp; + hdcp_init_data.workq = dp->wq; + hdcp_init_data.sec_access = true; + hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb; + hdcp_init_data.dp_ahb = &parser->get_io(parser, "dp_ahb")->io; + hdcp_init_data.dp_aux = &parser->get_io(parser, 
"dp_aux")->io; + hdcp_init_data.dp_link = &parser->get_io(parser, "dp_link")->io; + hdcp_init_data.dp_p0 = &parser->get_io(parser, "dp_p0")->io; + hdcp_init_data.hdcp_io = &parser->get_io(parser, + "hdcp_physical")->io; + hdcp_init_data.revision = &dp->panel->link_info.revision; + hdcp_init_data.msm_hdcp_dev = dp->parser->msm_hdcp_dev; + + fd = sde_hdcp_1x_init(&hdcp_init_data); + if (IS_ERR_OR_NULL(fd)) { + DP_ERR("Error initializing HDCP 1.x\n"); + rc = -EINVAL; + goto error; + } + + dp->hdcp.dev[HDCP_VERSION_1X].fd = fd; + dp->hdcp.dev[HDCP_VERSION_1X].ops = sde_hdcp_1x_get(fd); + dp->hdcp.dev[HDCP_VERSION_1X].ver = HDCP_VERSION_1X; + DP_DEBUG("HDCP 1.3 initialized\n"); + + fd = sde_dp_hdcp2p2_init(&hdcp_init_data); + if (IS_ERR_OR_NULL(fd)) { + DP_ERR("Error initializing HDCP 2.x\n"); + rc = -EINVAL; + goto error; + } + + dp->hdcp.dev[HDCP_VERSION_2P2].fd = fd; + dp->hdcp.dev[HDCP_VERSION_2P2].ops = sde_dp_hdcp2p2_get(fd); + dp->hdcp.dev[HDCP_VERSION_2P2].ver = HDCP_VERSION_2P2; + DP_DEBUG("HDCP 2.2 initialized\n"); + + return 0; +error: + dp_display_deinitialize_hdcp(dp); + + return rc; +} + +static int dp_display_bind(struct device *dev, struct device *master, + void *data) +{ + int rc = 0; + struct dp_display_private *dp; + struct drm_device *drm; + struct platform_device *pdev = to_platform_device(dev); + + if (!dev || !pdev || !master) { + DP_ERR("invalid param(s), dev %pK, pdev %pK, master %pK\n", + dev, pdev, master); + rc = -EINVAL; + goto end; + } + + drm = dev_get_drvdata(master); + dp = platform_get_drvdata(pdev); + if (!drm || !dp) { + DP_ERR("invalid param(s), drm %pK, dp %pK\n", + drm, dp); + rc = -EINVAL; + goto end; + } + + dp->dp_display.drm_dev = drm; + dp->priv = drm->dev_private; +end: + return rc; +} + +static void dp_display_unbind(struct device *dev, struct device *master, + void *data) +{ + struct dp_display_private *dp; + struct platform_device *pdev = to_platform_device(dev); + + if (!dev || !pdev) { + DP_ERR("invalid param(s)\n"); + return; + } + + dp = platform_get_drvdata(pdev); + if (!dp) { + DP_ERR("Invalid params\n"); + return; + } + + if (dp->power) + (void)dp->power->power_client_deinit(dp->power); + if (dp->aux) + (void)dp->aux->drm_aux_deregister(dp->aux); + dp_display_deinitialize_hdcp(dp); +} + +static const struct component_ops dp_display_comp_ops = { + .bind = dp_display_bind, + .unbind = dp_display_unbind, +}; + +static void dp_display_send_hpd_event(struct dp_display_private *dp) +{ + struct drm_device *dev = NULL; + struct drm_connector *connector; + char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE], + bpp[HPD_STRING_SIZE], pattern[HPD_STRING_SIZE]; + char *envp[5]; + + if (dp->mst.mst_active) { + DP_DEBUG("skip notification for mst mode\n"); + dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED); + return; + } + + connector = dp->dp_display.base_connector; + + if (!connector) { + DP_ERR("connector not set\n"); + return; + } + + connector->status = connector->funcs->detect(connector, false); + if (dp->cached_connector_status == connector->status) { + DP_DEBUG("connector status (%d) unchanged, skipping uevent\n", + dp->cached_connector_status); + return; + } + + dp->cached_connector_status = connector->status; + + dev = connector->dev; + + snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name); + snprintf(status, HPD_STRING_SIZE, "status=%s", + drm_get_connector_status_name(connector->status)); + snprintf(bpp, HPD_STRING_SIZE, "bpp=%d", + dp_link_bit_depth_to_bpp( + dp->link->test_video.test_bit_depth)); + snprintf(pattern, 
HPD_STRING_SIZE, "pattern=%d", + dp->link->test_video.test_video_pattern); + + DP_INFO("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern); + envp[0] = name; + envp[1] = status; + envp[2] = bpp; + envp[3] = pattern; + envp[4] = NULL; + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, + envp); + + if (dev->mode_config.funcs->output_poll_changed) + dev->mode_config.funcs->output_poll_changed(dev); + + drm_client_dev_hotplug(dev); + + if (connector->status == connector_status_connected) { + dp_display_state_add(DP_STATE_CONNECT_NOTIFIED); + dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED); + } else { + dp_display_state_add(DP_STATE_DISCONNECT_NOTIFIED); + dp_display_state_remove(DP_STATE_CONNECT_NOTIFIED); + } +} + +static int dp_display_send_hpd_notification(struct dp_display_private *dp) +{ + int ret = 0; + bool hpd = !!dp_display_state_is(DP_STATE_CONNECTED); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, hpd); + + /* + * Send the notification only if there is any change. This check is + * necessary since it is possible that the connect_work may or may not + * skip sending the notification in order to respond to a pending + * attention message. Attention work thread will always attempt to + * send the notification after successfully handling the attention + * message. This check here will avoid any unintended duplicate + * notifications. + */ + if (dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) && hpd) { + DP_DEBUG("connection notified already, skip notification\n"); + goto skip_wait; + } else if (dp_display_state_is(DP_STATE_DISCONNECT_NOTIFIED) && !hpd) { + DP_DEBUG("disconnect notified already, skip notification\n"); + goto skip_wait; + } + + dp->aux->state |= DP_STATE_NOTIFICATION_SENT; + + if (!dp->mst.mst_active) + dp->dp_display.is_sst_connected = hpd; + else + dp->dp_display.is_sst_connected = false; + + reinit_completion(&dp->notification_comp); + dp_display_send_hpd_event(dp); + + if (hpd && dp->mst.mst_active) + goto skip_wait; + + if (!dp->mst.mst_active && + (!!dp_display_state_is(DP_STATE_ENABLED) == hpd)) + goto skip_wait; + + if (!wait_for_completion_timeout(&dp->notification_comp, + HZ * 5)) { + DP_WARN("%s timeout\n", hpd ? "connect" : "disconnect"); + ret = -EINVAL; + } + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, hpd, ret); + return ret; +skip_wait: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, hpd, ret); + return 0; +} + +static void dp_display_update_mst_state(struct dp_display_private *dp, + bool state) +{ + dp->mst.mst_active = state; + dp->panel->mst_state = state; +} + +static void dp_display_process_mst_hpd_high(struct dp_display_private *dp, + bool mst_probe) +{ + bool is_mst_receiver; + struct dp_mst_hpd_info info; + const unsigned long clear_mstm_ctrl_timeout_us = 100000; + u8 old_mstm_ctrl; + int ret; + + if (!dp->parser->has_mst || !dp->mst.drm_registered) { + DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n", + dp->parser->has_mst, dp->mst.drm_registered); + return; + } + + DP_MST_DEBUG("mst_hpd_high work.
mst_probe:%d\n", mst_probe); + + if (!dp->mst.mst_active) { + is_mst_receiver = dp->panel->read_mst_cap(dp->panel); + + if (!is_mst_receiver) { + DP_MST_DEBUG("sink doesn't support mst\n"); + return; + } + + /* clear sink mst state */ + drm_dp_dpcd_readb(dp->aux->drm_aux, DP_MSTM_CTRL, + &old_mstm_ctrl); + drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0); + + /* add extra delay if MST state is not cleared */ + if (old_mstm_ctrl) { + DP_MST_DEBUG("MSTM_CTRL is not cleared, wait %luus\n", + clear_mstm_ctrl_timeout_us); + usleep_range(clear_mstm_ctrl_timeout_us, + clear_mstm_ctrl_timeout_us + 1000); + } + + ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DP_ERR("sink mst enablement failed\n"); + return; + } + + dp_display_update_mst_state(dp, true); + } else if (dp->mst.mst_active && mst_probe) { + info.mst_protocol = dp->parser->has_mst_sideband; + info.mst_port_cnt = dp->debug->mst_port_cnt; + info.edid = dp->debug->get_edid(dp->debug); + + if (dp->mst.cbs.hpd) + dp->mst.cbs.hpd(&dp->dp_display, true, &info); + } + + DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active); +} + +static void dp_display_host_init(struct dp_display_private *dp) +{ + bool flip = false; + bool reset; + + if (dp_display_state_is(DP_STATE_INITIALIZED)) { + dp_display_state_log("[already initialized]"); + return; + } + + if (dp->hpd->orientation == ORIENTATION_CC2) + flip = true; + + reset = dp->debug->sim_mode ? false : + (!dp->hpd->multi_func || !dp->hpd->peer_usb_comm); + + dp->power->init(dp->power, flip); + dp->hpd->host_init(dp->hpd, &dp->catalog->hpd); + dp->ctrl->init(dp->ctrl, flip, reset); + enable_irq(dp->irq); + dp_display_abort_hdcp(dp, false); + + dp_display_state_add(DP_STATE_INITIALIZED); + + /* log this as it results from user action of cable connection */ + DP_INFO("[OK]\n"); +} + +static void dp_display_host_ready(struct dp_display_private *dp) +{ + if (!dp_display_state_is(DP_STATE_INITIALIZED)) { + dp_display_state_show("[not initialized]"); + return; + } + + if (dp_display_state_is(DP_STATE_READY)) { + dp_display_state_log("[already ready]"); + return; + } + + /* + * Reset the aborted state for AUX and CTRL modules. This will + * allow these modules to execute normally in response to the + * cable connection event. + * + * One corner case still exists. While the execution flow ensures + * that cable disconnection flushes all pending work items on the DP + * workqueue, and waits for the user module to clean up the DP + * connection session, it is possible that system delays can + * lead to timeouts in the connect path. As a result, the actual + * connection callback from user modules can come in late and can + * race against a subsequent connection event here which would have + * reset the aborted flags. There is no clear solution for this since + * the connect/disconnect notifications do not currently have any + * session IDs.
+ */ + dp->aux->abort(dp->aux, false); + dp->ctrl->abort(dp->ctrl, false); + + dp->aux->init(dp->aux, dp->parser->aux_cfg); + dp->panel->init(dp->panel); + + dp_display_state_add(DP_STATE_READY); + /* log this as it results from user action of cable connection */ + DP_INFO("[OK]\n"); +} + +static void dp_display_host_unready(struct dp_display_private *dp) +{ + if (!dp_display_state_is(DP_STATE_INITIALIZED)) { + dp_display_state_show("[not initialized]"); + return; + } + + if (!dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[not ready]"); + return; + } + + dp_display_state_remove(DP_STATE_READY); + dp->aux->deinit(dp->aux); + /* log this as it results from user action of cable connection */ + DP_INFO("[OK]\n"); +} + +static void dp_display_host_deinit(struct dp_display_private *dp) +{ + if (dp->active_stream_cnt) { + SDE_EVT32_EXTERNAL(dp->state, dp->active_stream_cnt); + DP_DEBUG("active stream present\n"); + return; + } + + if (!dp_display_state_is(DP_STATE_INITIALIZED)) { + dp_display_state_show("[not initialized]"); + return; + } + + dp_display_abort_hdcp(dp, true); + dp->ctrl->deinit(dp->ctrl); + dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd); + dp->power->deinit(dp->power); + disable_irq(dp->irq); + dp->aux->state = 0; + + dp_display_state_remove(DP_STATE_INITIALIZED); + + /* log this as it results from user action of cable dis-connection */ + DP_INFO("[OK]\n"); +} + +static int dp_display_process_hpd_high(struct dp_display_private *dp) +{ + int rc = -EINVAL; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + if (dp_display_state_is(DP_STATE_CONNECTED)) { + DP_DEBUG("dp already connected, skipping hpd high\n"); + mutex_unlock(&dp->session_lock); + return -EISCONN; + } + + dp_display_state_add(DP_STATE_CONNECTED); + + dp->dp_display.max_pclk_khz = min(dp->parser->max_pclk_khz, + dp->debug->max_pclk_khz); + dp->dp_display.max_hdisplay = dp->parser->max_hdisplay; + dp->dp_display.max_vdisplay = dp->parser->max_vdisplay; + + /* + * If dp video session is not restored from a previous session teardown + * by userspace, ensure the host_init is executed, in such a scenario, + * so that all the required DP resources are enabled. + * + * Below is one of the sequences of events which describe the above + * scenario: + * a. Source initiated power down resulting in host_deinit. + * b. Sink issues hpd low attention without physical cable disconnect. + * c. Source initiated power up sequence returns early because hpd is + * not high. + * d. Sink issues a hpd high attention event. 
+ */ + if (dp_display_state_is(DP_STATE_SRC_PWRDN) && + dp_display_state_is(DP_STATE_CONFIGURED)) { + dp_display_host_init(dp); + dp_display_state_remove(DP_STATE_SRC_PWRDN); + } + + dp_display_host_ready(dp); + + dp->link->psm_config(dp->link, &dp->panel->link_info, false); + dp->debug->psm_enabled = false; + + if (!dp->dp_display.base_connector) + goto end; + + rc = dp->panel->read_sink_caps(dp->panel, + dp->dp_display.base_connector, dp->hpd->multi_func); + /* + * ETIMEDOUT --> cable may have been removed + * ENOTCONN --> no downstream device connected + */ + if (rc == -ETIMEDOUT || rc == -ENOTCONN) { + dp_display_state_remove(DP_STATE_CONNECTED); + goto end; + } + + dp->link->process_request(dp->link); + dp->panel->handle_sink_request(dp->panel); + + dp_display_process_mst_hpd_high(dp, false); + + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, + dp->panel->fec_en, dp->panel->dsc_en, false); + if (rc) { + dp_display_state_remove(DP_STATE_CONNECTED); + goto end; + } + + dp->process_hpd_connect = false; + + dp_display_process_mst_hpd_high(dp, true); +end: + mutex_unlock(&dp->session_lock); + + /* + * Delay the HPD connect notification to see if sink generates any + * IRQ HPDs immediately after the HPD high. + */ + usleep_range(10000, 10100); + + /* + * If an IRQ HPD is pending, then do not send a connect notification. + * Once this work returns, the IRQ HPD would be processed and any + * required actions (such as link maintenance) would be done which + * will subsequently send the HPD notification. To keep things simple, + * do this only for SST use cases. MST use cases require additional + * care in order to handle the side-band communications as well. + * + * One of the main motivations for this is DP LL 1.4 CTS use case + * where it is possible that we could get a test request right after + * a connection, and the strict timing requirements of the test can + * only be met if we do not wait for the e2e connection to be set up. + */ + if (!dp->mst.mst_active && + (work_busy(&dp->attention_work) == WORK_BUSY_PENDING)) { + SDE_EVT32_EXTERNAL(dp->state, 99); + DP_DEBUG("Attention pending, skip HPD notification\n"); + goto skip_notify; + } + + if (!rc && !dp_display_state_is(DP_STATE_ABORTED)) + dp_display_send_hpd_notification(dp); + +skip_notify: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc); + return rc; +} + +static void dp_display_process_mst_hpd_low(struct dp_display_private *dp) +{ + struct dp_mst_hpd_info info = {0}; + + if (dp->mst.mst_active) { + DP_MST_DEBUG("mst_hpd_low work\n"); + + if (dp->mst.cbs.hpd) { + info.mst_protocol = dp->parser->has_mst_sideband; + dp->mst.cbs.hpd(&dp->dp_display, false, &info); + } + dp_display_update_mst_state(dp, false); + } + + DP_MST_DEBUG("mst_hpd_low.
mst_active:%d\n", dp->mst.mst_active); +} + +static int dp_display_process_hpd_low(struct dp_display_private *dp) +{ + int rc = 0; + + dp_display_state_remove(DP_STATE_CONNECTED); + dp->process_hpd_connect = false; + dp_audio_enable(dp, false); + dp_display_process_mst_hpd_low(dp); + + if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) || + dp_display_state_is(DP_STATE_ENABLED)) && + !dp->mst.mst_active) + rc = dp_display_send_hpd_notification(dp); + + mutex_lock(&dp->session_lock); + if (!dp->active_stream_cnt) + dp->ctrl->off(dp->ctrl); + mutex_unlock(&dp->session_lock); + + dp->panel->video_test = false; + + return rc; +} + +static int dp_display_usbpd_configure_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DP_ERR("invalid dev\n"); + rc = -EINVAL; + goto end; + } + + dp = dev_get_drvdata(dev); + if (!dp) { + DP_ERR("no driver data found\n"); + rc = -ENODEV; + goto end; + } + + if (!dp->debug->sim_mode && !dp->parser->no_aux_switch + && !dp->parser->gpio_aux_switch) { + rc = dp->aux->aux_switch(dp->aux, true, dp->hpd->orientation); + if (rc) + goto end; + } + + mutex_lock(&dp->session_lock); + + dp_display_state_remove(DP_STATE_ABORTED); + dp_display_state_add(DP_STATE_CONFIGURED); + + dp_display_host_init(dp); + + /* check for hpd high */ + if (dp->hpd->hpd_high) + queue_work(dp->wq, &dp->connect_work); + else + dp->process_hpd_connect = true; + mutex_unlock(&dp->session_lock); +end: + return rc; +} + +static int dp_display_stream_pre_disable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + dp->ctrl->stream_pre_off(dp->ctrl, dp_panel); + + return 0; +} + +static void dp_display_stream_disable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + if (!dp->active_stream_cnt) { + DP_ERR("invalid active_stream_cnt (%d)\n", + dp->active_stream_cnt); + return; + } + + if (dp_panel->stream_id == DP_STREAM_MAX || + !dp->active_panels[dp_panel->stream_id]) { + DP_ERR("panel is already disabled\n"); + return; + } + + DP_DEBUG("stream_id=%d, active_stream_cnt=%d\n", + dp_panel->stream_id, dp->active_stream_cnt); + + dp->ctrl->stream_off(dp->ctrl, dp_panel); + dp->active_panels[dp_panel->stream_id] = NULL; + dp->active_stream_cnt--; +} + +static void dp_display_clean(struct dp_display_private *dp) +{ + int idx; + struct dp_panel *dp_panel; + struct dp_link_hdcp_status *status = &dp->link->hdcp_status; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + + if (dp_display_is_hdcp_enabled(dp) && + status->hdcp_state != HDCP_STATE_INACTIVE) { + cancel_delayed_work_sync(&dp->hdcp_cb_work); + if (dp->hdcp.ops->off) + dp->hdcp.ops->off(dp->hdcp.data); + + dp_display_update_hdcp_status(dp, true); + } + + for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) { + if (!dp->active_panels[idx]) + continue; + + dp_panel = dp->active_panels[idx]; + if (dp_panel->audio_supported) + dp_panel->audio->off(dp_panel->audio); + + dp_display_stream_pre_disable(dp, dp_panel); + dp_display_stream_disable(dp, dp_panel); + dp_panel->deinit(dp_panel, 0); + } + + dp_display_state_remove(DP_STATE_ENABLED | DP_STATE_CONNECTED); + + dp->ctrl->off(dp->ctrl); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); +} + +static int dp_display_handle_disconnect(struct dp_display_private *dp) +{ + int rc; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + rc = dp_display_process_hpd_low(dp); + if (rc) { + /* cancel any pending request */ + dp->ctrl->abort(dp->ctrl, true); + dp->aux->abort(dp->aux, true); + } + + mutex_lock(&dp->session_lock); + if 
(rc && dp_display_state_is(DP_STATE_ENABLED)) + dp_display_clean(dp); + + dp_display_host_unready(dp); + + mutex_unlock(&dp->session_lock); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return rc; +} + +static void dp_display_disconnect_sync(struct dp_display_private *dp) +{ + /* cancel any pending request */ + dp_display_state_add(DP_STATE_ABORTED); + + dp->ctrl->abort(dp->ctrl, true); + dp->aux->abort(dp->aux, true); + + /* wait for idle state */ + cancel_work_sync(&dp->connect_work); + cancel_work_sync(&dp->attention_work); + flush_workqueue(dp->wq); + + dp_display_handle_disconnect(dp); +} + +static int dp_display_usbpd_disconnect_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DP_ERR("invalid dev\n"); + rc = -EINVAL; + goto end; + } + + dp = dev_get_drvdata(dev); + if (!dp) { + DP_ERR("no driver data found\n"); + rc = -ENODEV; + goto end; + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, + dp->debug->psm_enabled); + + if (dp->debug->psm_enabled && dp_display_state_is(DP_STATE_READY)) + dp->link->psm_config(dp->link, &dp->panel->link_info, true); + + dp_display_disconnect_sync(dp); + + mutex_lock(&dp->session_lock); + dp_display_host_deinit(dp); + dp_display_state_remove(DP_STATE_CONFIGURED); + mutex_unlock(&dp->session_lock); + + if (!dp->debug->sim_mode && !dp->parser->no_aux_switch + && !dp->parser->gpio_aux_switch) + dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); +end: + return rc; +} + +static int dp_display_stream_enable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + int rc = 0; + + rc = dp->ctrl->stream_on(dp->ctrl, dp_panel); + + if (dp->debug->tpg_state) + dp_panel->tpg_config(dp_panel, true); + + if (!rc) { + dp->active_panels[dp_panel->stream_id] = dp_panel; + dp->active_stream_cnt++; + } + + DP_DEBUG("dp active_stream_cnt:%d\n", dp->active_stream_cnt); + + return rc; +} + +static void dp_display_mst_attention(struct dp_display_private *dp) +{ + struct dp_mst_hpd_info hpd_irq = {0}; + + if (dp->mst.mst_active && dp->mst.cbs.hpd_irq) { + hpd_irq.mst_hpd_sim = dp->debug->mst_hpd_sim; + hpd_irq.mst_sim_add_con = dp->debug->mst_sim_add_con; + hpd_irq.mst_sim_remove_con = dp->debug->mst_sim_remove_con; + hpd_irq.mst_sim_remove_con_id = dp->debug->mst_sim_remove_con_id; + hpd_irq.edid = dp->debug->get_edid(dp->debug); + dp->mst.cbs.hpd_irq(&dp->dp_display, &hpd_irq); + dp->debug->mst_hpd_sim = false; + dp->debug->mst_sim_add_con = false; + dp->debug->mst_sim_remove_con = false; + } + + DP_MST_DEBUG("mst_attention_work. 
mst_active:%d\n", dp->mst.mst_active); +} + +static void dp_display_attention_work(struct work_struct *work) +{ + struct dp_display_private *dp = container_of(work, + struct dp_display_private, attention_work); + int rc = 0; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + SDE_EVT32_EXTERNAL(dp->state); + + if (dp->debug->mst_hpd_sim || !dp_display_state_is(DP_STATE_READY)) { + mutex_unlock(&dp->session_lock); + goto mst_attention; + } + + if (dp->link->process_request(dp->link)) { + mutex_unlock(&dp->session_lock); + goto cp_irq; + } + + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(dp->state, dp->link->sink_request); + + if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) { + SDE_EVT32_EXTERNAL(dp->state, DS_PORT_STATUS_CHANGED); + if (dp_display_is_sink_count_zero(dp)) { + dp_display_handle_disconnect(dp); + } else { + /* + * connect work should take care of sending + * the HPD notification. + */ + if (!dp->mst.mst_active) + queue_work(dp->wq, &dp->connect_work); + } + + goto mst_attention; + } + + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { + SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_VIDEO_PATTERN); + dp_display_handle_disconnect(dp); + + dp->panel->video_test = true; + /* + * connect work should take care of sending + * the HPD notification. + */ + queue_work(dp->wq, &dp->connect_work); + + goto mst_attention; + } + + if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN | + DP_TEST_LINK_TRAINING | DP_LINK_STATUS_UPDATED)) { + + mutex_lock(&dp->session_lock); + dp_audio_enable(dp, false); + + if (dp->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + SDE_EVT32_EXTERNAL(dp->state, + DP_TEST_LINK_PHY_TEST_PATTERN); + dp->ctrl->process_phy_test_request(dp->ctrl); + } + + if (dp->link->sink_request & DP_TEST_LINK_TRAINING) { + SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_TRAINING); + dp->link->send_test_response(dp->link); + rc = dp->ctrl->link_maintenance(dp->ctrl); + } + + if (dp->link->sink_request & DP_LINK_STATUS_UPDATED) { + SDE_EVT32_EXTERNAL(dp->state, DP_LINK_STATUS_UPDATED); + rc = dp->ctrl->link_maintenance(dp->ctrl); + } + + if (!rc) + dp_audio_enable(dp, true); + + mutex_unlock(&dp->session_lock); + if (rc) + goto end; + + if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN | + DP_TEST_LINK_TRAINING)) + goto mst_attention; + } + +cp_irq: + if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) + dp->hdcp.ops->cp_irq(dp->hdcp.data); + + if (!dp->mst.mst_active) { + /* + * It is possible that the connect_work skipped sending + * the HPD notification if the attention message was + * already pending. Send the notification here to + * account for that. 
This is not needed if this + * attention work was handling a test request + */ + dp_display_send_hpd_notification(dp); + } + +mst_attention: + dp_display_mst_attention(dp); + +end: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); +} + +static int dp_display_usbpd_attention_cb(struct device *dev) +{ + struct dp_display_private *dp; + + if (!dev) { + DP_ERR("invalid dev\n"); + return -EINVAL; + } + + dp = dev_get_drvdata(dev); + if (!dp) { + DP_ERR("no driver data found\n"); + return -ENODEV; + } + + DP_DEBUG("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n", + dp->hpd->hpd_irq, dp->hpd->hpd_high, + !!dp_display_state_is(DP_STATE_ENABLED), + !!dp_display_state_is(DP_STATE_CONNECTED)); + SDE_EVT32_EXTERNAL(dp->state, dp->hpd->hpd_irq, dp->hpd->hpd_high, + !!dp_display_state_is(DP_STATE_ENABLED), + !!dp_display_state_is(DP_STATE_CONNECTED)); + + if (!dp->hpd->hpd_high) { + dp_display_disconnect_sync(dp); + } else if ((dp->hpd->hpd_irq && dp_display_state_is(DP_STATE_READY)) || + dp->debug->mst_hpd_sim) { + queue_work(dp->wq, &dp->attention_work); + } else if (dp->process_hpd_connect || + !dp_display_state_is(DP_STATE_CONNECTED)) { + dp_display_state_remove(DP_STATE_ABORTED); + queue_work(dp->wq, &dp->connect_work); + } else { + DP_DEBUG("ignored\n"); + } + + return 0; +} + +static void dp_display_connect_work(struct work_struct *work) +{ + int rc = 0; + struct dp_display_private *dp = container_of(work, + struct dp_display_private, connect_work); + + if (dp_display_state_is(DP_STATE_ABORTED)) { + DP_WARN("HPD off requested\n"); + return; + } + + if (!dp->hpd->hpd_high) { + DP_WARN("Sink disconnected\n"); + return; + } + + rc = dp_display_process_hpd_high(dp); + + if (!rc && dp->panel->video_test) + dp->link->send_test_response(dp->link); +} + +static int dp_display_usb_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct extcon_dev *edev = ptr; + struct dp_display_private *dp = container_of(nb, + struct dp_display_private, usb_nb); + if (!edev) + goto end; + + if (!event && dp->debug->sim_mode) { + dp_display_disconnect_sync(dp); + dp->debug->abort(dp->debug); + } +end: + return NOTIFY_DONE; +} + +static int dp_display_get_usb_extcon(struct dp_display_private *dp) +{ + struct extcon_dev *edev; + int rc; + + edev = extcon_get_edev_by_phandle(&dp->pdev->dev, 0); + if (IS_ERR(edev)) + return PTR_ERR(edev); + + dp->usb_nb.notifier_call = dp_display_usb_notifier; + dp->usb_nb.priority = 2; + rc = extcon_register_notifier(edev, EXTCON_USB, &dp->usb_nb); + if (rc) + DP_ERR("failed to register for usb event: %d\n", rc); + + return rc; +} + +static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +{ + dp_audio_put(dp->panel->audio); + dp_ctrl_put(dp->ctrl); + dp_link_put(dp->link); + dp_panel_put(dp->panel); + dp_aux_put(dp->aux); + dp_power_put(dp->power); + dp_catalog_put(dp->catalog); + dp_parser_put(dp->parser); + dp_hpd_put(dp->hpd); + mutex_destroy(&dp->session_lock); + dp_debug_put(dp->debug); +} + +static int dp_init_sub_modules(struct dp_display_private *dp) +{ + int rc = 0; + bool hdcp_disabled; + struct device *dev = &dp->pdev->dev; + struct dp_hpd_cb *cb = &dp->hpd_cb; + struct dp_ctrl_in ctrl_in = { + .dev = dev, + }; + struct dp_panel_in panel_in = { + .dev = dev, + }; + struct dp_debug_in debug_in = { + .dev = dev, + }; + + mutex_init(&dp->session_lock); + + dp->parser = dp_parser_get(dp->pdev); + if (IS_ERR(dp->parser)) { + rc = PTR_ERR(dp->parser); + DP_ERR("failed to initialize parser, rc = %d\n", rc); + dp->parser = NULL; + 
goto error; + } + + rc = dp->parser->parse(dp->parser); + if (rc) { + DP_ERR("device tree parsing failed\n"); + goto error_catalog; + } + + g_dp_display->is_mst_supported = dp->parser->has_mst; + g_dp_display->no_mst_encoder = dp->parser->no_mst_encoder; + + dp->catalog = dp_catalog_get(dev, dp->parser); + if (IS_ERR(dp->catalog)) { + rc = PTR_ERR(dp->catalog); + DP_ERR("failed to initialize catalog, rc = %d\n", rc); + dp->catalog = NULL; + goto error_catalog; + } + + dp->power = dp_power_get(dp->parser); + if (IS_ERR(dp->power)) { + rc = PTR_ERR(dp->power); + DP_ERR("failed to initialize power, rc = %d\n", rc); + dp->power = NULL; + goto error_power; + } + + rc = dp->power->power_client_init(dp->power, &dp->priv->phandle, + dp->dp_display.drm_dev); + if (rc) { + DP_ERR("Power client create failed\n"); + goto error_aux; + } + + dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser, + dp->aux_switch_node); + if (IS_ERR(dp->aux)) { + rc = PTR_ERR(dp->aux); + DP_ERR("failed to initialize aux, rc = %d\n", rc); + dp->aux = NULL; + goto error_aux; + } + + rc = dp->aux->drm_aux_register(dp->aux); + if (rc) { + DP_ERR("DRM DP AUX register failed\n"); + goto error_link; + } + + dp->link = dp_link_get(dev, dp->aux); + if (IS_ERR(dp->link)) { + rc = PTR_ERR(dp->link); + DP_ERR("failed to initialize link, rc = %d\n", rc); + dp->link = NULL; + goto error_link; + } + + panel_in.aux = dp->aux; + panel_in.catalog = &dp->catalog->panel; + panel_in.link = dp->link; + panel_in.connector = dp->dp_display.base_connector; + panel_in.base_panel = NULL; + panel_in.parser = dp->parser; + + dp->panel = dp_panel_get(&panel_in); + if (IS_ERR(dp->panel)) { + rc = PTR_ERR(dp->panel); + DP_ERR("failed to initialize panel, rc = %d\n", rc); + dp->panel = NULL; + goto error_panel; + } + + ctrl_in.link = dp->link; + ctrl_in.panel = dp->panel; + ctrl_in.aux = dp->aux; + ctrl_in.power = dp->power; + ctrl_in.catalog = &dp->catalog->ctrl; + ctrl_in.parser = dp->parser; + + dp->ctrl = dp_ctrl_get(&ctrl_in); + if (IS_ERR(dp->ctrl)) { + rc = PTR_ERR(dp->ctrl); + DP_ERR("failed to initialize ctrl, rc = %d\n", rc); + dp->ctrl = NULL; + goto error_ctrl; + } + + dp->panel->audio = dp_audio_get(dp->pdev, dp->panel, + &dp->catalog->audio); + if (IS_ERR(dp->panel->audio)) { + rc = PTR_ERR(dp->panel->audio); + DP_ERR("failed to initialize audio, rc = %d\n", rc); + dp->panel->audio = NULL; + goto error_audio; + } + + memset(&dp->mst, 0, sizeof(dp->mst)); + dp->active_stream_cnt = 0; + + cb->configure = dp_display_usbpd_configure_cb; + cb->disconnect = dp_display_usbpd_disconnect_cb; + cb->attention = dp_display_usbpd_attention_cb; + + dp->hpd = dp_hpd_get(dev, dp->parser, &dp->catalog->hpd, cb); + if (IS_ERR(dp->hpd)) { + rc = PTR_ERR(dp->hpd); + DP_ERR("failed to initialize hpd, rc = %d\n", rc); + dp->hpd = NULL; + goto error_hpd; + } + + hdcp_disabled = !!dp_display_initialize_hdcp(dp); + + debug_in.panel = dp->panel; + debug_in.hpd = dp->hpd; + debug_in.link = dp->link; + debug_in.aux = dp->aux; + debug_in.connector = &dp->dp_display.base_connector; + debug_in.catalog = dp->catalog; + debug_in.parser = dp->parser; + debug_in.ctrl = dp->ctrl; + + dp->debug = dp_debug_get(&debug_in); + if (IS_ERR(dp->debug)) { + rc = PTR_ERR(dp->debug); + DP_ERR("failed to initialize debug, rc = %d\n", rc); + dp->debug = NULL; + goto error_debug; + } + + dp->cached_connector_status = connector_status_disconnected; + dp->tot_dsc_blks_in_use = 0; + + dp->debug->hdcp_disabled = hdcp_disabled; + dp_display_update_hdcp_status(dp, true); + + 
dp_display_get_usb_extcon(dp); + + rc = dp->hpd->register_hpd(dp->hpd); + if (rc) { + DP_ERR("failed register hpd\n"); + goto error_hpd_reg; + } + + return rc; +error_hpd_reg: + dp_debug_put(dp->debug); +error_debug: + dp_hpd_put(dp->hpd); +error_hpd: + dp_audio_put(dp->panel->audio); +error_audio: + dp_ctrl_put(dp->ctrl); +error_ctrl: + dp_panel_put(dp->panel); +error_panel: + dp_link_put(dp->link); +error_link: + dp_aux_put(dp->aux); +error_aux: + dp_power_put(dp->power); +error_power: + dp_catalog_put(dp->catalog); +error_catalog: + dp_parser_put(dp->parser); +error: + mutex_destroy(&dp->session_lock); + return rc; +} + +static int dp_display_post_init(struct dp_display *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + if (IS_ERR_OR_NULL(dp)) { + DP_ERR("invalid params\n"); + rc = -EINVAL; + goto end; + } + + rc = dp_init_sub_modules(dp); + if (rc) + goto end; + + dp_display->post_init = NULL; +end: + DP_DEBUG("%s\n", rc ? "failed" : "success"); + return rc; +} + +static int dp_display_set_mode(struct dp_display *dp_display, void *panel, + struct dp_display_mode *mode) +{ + const u32 num_components = 3, default_bpp = 24; + struct dp_display_private *dp; + struct dp_panel *dp_panel; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + if (dp_panel->connector->display_info.max_tmds_clock > 0) + dp->panel->connector->display_info.max_tmds_clock = + dp_panel->connector->display_info.max_tmds_clock; + + mode->timing.bpp = + dp_panel->connector->display_info.bpc * num_components; + if (!mode->timing.bpp) + mode->timing.bpp = default_bpp; + + mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel, + mode->timing.bpp, mode->timing.pixel_clk_khz); + + dp_panel->pinfo = mode->timing; + mutex_unlock(&dp->session_lock); + + return 0; +} + +static int dp_display_prepare(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel; + int rc = 0; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * If DP video session is restored by the userspace after display + * disconnect notification from dongle i.e. typeC cable connected to + * source but disconnected at the display side, the DP controller is + * not restored to the desired configured state. So, ensure host_init + * is executed in such a scenario so that all the DP controller + * resources are enabled for the next connection event. + */ + if (dp_display_state_is(DP_STATE_SRC_PWRDN) && + dp_display_state_is(DP_STATE_CONFIGURED)) { + dp_display_host_init(dp); + dp_display_state_remove(DP_STATE_SRC_PWRDN); + } + + /* + * If the physical connection to the sink is already lost by the time + * we try to set up the connection, we can just skip all the steps + * here safely. 
+ */ + if (dp_display_state_is(DP_STATE_ABORTED)) { + dp_display_state_log("[aborted]"); + goto end; + } + + /* + * If DP_STATE_ENABLED, there is nothing left to do. + * However, this should not happen ideally. So, log this. + */ + if (dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[already enabled]"); + goto end; + } + + if (!dp_display_is_ready(dp)) { + dp_display_state_show("[not ready]"); + goto end; + } + + /* For supporting DP_PANEL_SRC_INITIATED_POWER_DOWN case */ + dp_display_host_ready(dp); + + if (dp->debug->psm_enabled) { + dp->link->psm_config(dp->link, &dp->panel->link_info, false); + dp->debug->psm_enabled = false; + } + + /* + * Execute the dp controller power on in shallow mode here. + * In normal cases, controller should have been powered on + * by now. In some cases like suspend/resume or framework + * reboot, we end up here without a powered on controller. + * Cable may have been removed in suspended state. In that + * case, link training is bound to fail on system resume. + * So, we execute in shallow mode here to do only minimal + * and required things. + */ + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, dp_panel->fec_en, + dp_panel->dsc_en, true); + if (rc) + goto end; + +end: + mutex_unlock(&dp->session_lock); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return rc; +} + +static int dp_display_set_stream_info(struct dp_display *dp_display, + void *panel, u32 strm_id, u32 start_slot, + u32 num_slots, u32 pbn, int vcpi) +{ + int rc = 0; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + const int max_slots = 64; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (strm_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", strm_id); + return -EINVAL; + } + + if (start_slot + num_slots > max_slots) { + DP_ERR("invalid channel info received. start:%d, slots:%d\n", + start_slot, num_slots); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + dp->ctrl->set_mst_channel_info(dp->ctrl, strm_id, + start_slot, num_slots); + + if (panel) { + dp_panel = panel; + dp_panel->set_stream_info(dp_panel, strm_id, start_slot, + num_slots, pbn, vcpi); + } + + mutex_unlock(&dp->session_lock); + + return rc; +} + +static void dp_display_update_dsc_resources(struct dp_display_private *dp, + struct dp_panel *panel, bool enable) +{ + u32 dsc_blk_cnt = 0; + + if (panel->pinfo.comp_info.comp_type == MSM_DISPLAY_COMPRESSION_DSC && + panel->pinfo.comp_info.comp_ratio) { + dsc_blk_cnt = panel->pinfo.h_active / + dp->parser->max_dp_dsc_input_width_pixs; + if (panel->pinfo.h_active % + dp->parser->max_dp_dsc_input_width_pixs) + dsc_blk_cnt++; + } + + if (enable) { + dp->tot_dsc_blks_in_use += dsc_blk_cnt; + panel->tot_dsc_blks_in_use += dsc_blk_cnt; + } else { + dp->tot_dsc_blks_in_use -= dsc_blk_cnt; + panel->tot_dsc_blks_in_use -= dsc_blk_cnt; + } +} + +static int dp_display_enable(struct dp_display *dp_display, void *panel) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * If DP_STATE_READY is not set, we should not do any HW + * programming. 
+ */ + if (!dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[host not ready]"); + goto end; + } + + /* + * It is possible that by the time we get call back to establish + * the DP pipeline e2e, the physical DP connection to the sink is + * already lost. In such cases, the DP_STATE_ABORTED would be set. + * However, it is necessary to NOT abort the display setup here so as + * to ensure that the rest of the system is in a stable state prior to + * handling the disconnect notification. + */ + if (dp_display_state_is(DP_STATE_ABORTED)) + dp_display_state_log("[aborted, but continue on]"); + + rc = dp_display_stream_enable(dp, panel); + if (rc) + goto end; + + dp_display_update_dsc_resources(dp, panel, true); + dp_display_state_add(DP_STATE_ENABLED); +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return rc; +} + +static void dp_display_stream_post_enable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + dp_panel->spd_config(dp_panel); + dp_panel->setup_hdr(dp_panel, NULL, false, 0, true); +} + +static int dp_display_post_enable(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * If DP_STATE_READY is not set, we should not do any HW + * programming. + */ + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + goto end; + } + + /* + * If the physical connection to the sink is already lost by the time + * we try to set up the connection, we can just skip all the steps + * here safely. 
+ */ + if (dp_display_state_is(DP_STATE_ABORTED)) { + dp_display_state_log("[aborted]"); + goto end; + } + + if (!dp_display_is_ready(dp) || !dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[not ready]"); + goto end; + } + + dp_display_stream_post_enable(dp, dp_panel); + + cancel_delayed_work_sync(&dp->hdcp_cb_work); + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ); + + if (dp_panel->audio_supported) { + dp_panel->audio->bw_code = dp->link->link_params.bw_code; + dp_panel->audio->lane_count = dp->link->link_params.lane_count; + dp_panel->audio->on(dp_panel->audio); + } +end: + dp->aux->state |= DP_STATE_CTRL_POWERED_ON; + + complete_all(&dp->notification_comp); + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return 0; +} + +static void dp_display_clear_colorspaces(struct dp_display *dp_display) +{ + struct drm_connector *connector; + + connector = dp_display->base_connector; + connector->color_enc_fmt = 0; +} + +static int dp_display_pre_disable(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel = panel; + struct dp_link_hdcp_status *status; + int rc = 0; + size_t i; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + status = &dp->link->hdcp_status; + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + goto end; + } + + dp_display_state_add(DP_STATE_HDCP_ABORTED); + cancel_delayed_work_sync(&dp->hdcp_cb_work); + if (dp_display_is_hdcp_enabled(dp) && + status->hdcp_state != HDCP_STATE_INACTIVE) { + bool off = true; + + if (dp_display_state_is(DP_STATE_SUSPENDED)) { + DP_DEBUG("Can't perform HDCP cleanup while suspended. Defer\n"); + dp->hdcp_delayed_off = true; + goto clean; + } + + flush_delayed_work(&dp->hdcp_cb_work); + if (dp->mst.mst_active) { + dp_display_hdcp_deregister_stream(dp, + dp_panel->stream_id); + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + if (i != dp_panel->stream_id && + dp->active_panels[i]) { + DP_DEBUG("Streams are still active. 
Skip disabling HDCP\n"); + off = false; + } + } + } + + if (off) { + if (dp->hdcp.ops->off) + dp->hdcp.ops->off(dp->hdcp.data); + dp_display_update_hdcp_status(dp, true); + } + } + + dp_display_clear_colorspaces(dp_display); + +clean: + if (dp_panel->audio_supported) + dp_panel->audio->off(dp_panel->audio); + + rc = dp_display_stream_pre_disable(dp, dp_panel); + +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return 0; +} + +static int dp_display_disable(struct dp_display *dp_display, void *panel) +{ + int i; + struct dp_display_private *dp = NULL; + struct dp_panel *dp_panel = NULL; + struct dp_link_hdcp_status *status; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + status = &dp->link->hdcp_status; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + goto end; + } + + if (!dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[not ready]"); + goto end; + } + + dp_display_stream_disable(dp, dp_panel); + dp_display_update_dsc_resources(dp, dp_panel, false); + + dp_display_state_remove(DP_STATE_HDCP_ABORTED); + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + if (dp->active_panels[i]) { + if (status->hdcp_state != HDCP_STATE_AUTHENTICATED) + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, + HZ/4); + break; + } + } +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return 0; +} + +static int dp_request_irq(struct dp_display *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0); + if (dp->irq < 0) { + rc = dp->irq; + DP_ERR("failed to get irq: %d\n", rc); + return rc; + } + + rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq, + IRQF_TRIGGER_HIGH, "dp_display_isr", dp); + if (rc < 0) { + DP_ERR("failed to request IRQ%u: %d\n", + dp->irq, rc); + return rc; + } + disable_irq(dp->irq); + + return 0; +} + +static struct dp_debug *dp_get_debug(struct dp_display *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + return dp->debug; +} + +static int dp_display_unprepare(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel = panel; + u32 flags = 0; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * Check if the power off sequence was triggered + * by a source initialated action like framework + * reboot or suspend-resume but not from normal + * hot plug. If connector is in MST mode, skip + * powering down host as aux needs to be kept + * alive to handle hot-plug sideband message. 
+ */ + if (dp_display_is_ready(dp) && + (dp_display_state_is(DP_STATE_SUSPENDED) || + !dp->mst.mst_active)) + flags |= DP_PANEL_SRC_INITIATED_POWER_DOWN; + + if (dp->active_stream_cnt) + goto end; + + if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) { + dp->link->psm_config(dp->link, &dp->panel->link_info, true); + dp->debug->psm_enabled = true; + + dp->ctrl->off(dp->ctrl); + dp_display_host_unready(dp); + dp_display_host_deinit(dp); + dp_display_state_add(DP_STATE_SRC_PWRDN); + } + + dp_display_state_remove(DP_STATE_ENABLED); + dp->aux->state = DP_STATE_CTRL_POWERED_OFF; + + complete_all(&dp->notification_comp); + + /* log this as it results from user action of cable dis-connection */ + DP_INFO("[OK]\n"); +end: + dp_panel->deinit(dp_panel, flags); + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return 0; +} + +static int dp_display_validate_resources( + struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode, + const struct msm_resource_caps_info *avail_res) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel; + struct dp_debug *debug; + struct dp_display_mode dp_mode; + u32 mode_rate_khz, supported_rate_khz, mode_bpp, num_lm; + int rc, tmds_max_clock, rate; + bool dsc_en; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + debug = dp->debug; + + dp_display->convert_to_dp_mode(dp_display, panel, mode, &dp_mode); + + dsc_en = dp_mode.timing.comp_info.comp_ratio ? true : false; + mode_bpp = dsc_en ? dp_mode.timing.comp_info.dsc_info.bpp : + dp_mode.timing.bpp; + + mode_rate_khz = mode->clock * mode_bpp; + rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code); + supported_rate_khz = dp->link->link_params.lane_count * rate * 8; + tmds_max_clock = dp_panel->connector->display_info.max_tmds_clock; + + if (mode_rate_khz > supported_rate_khz) { + DP_DEBUG("pclk:%d, supported_rate:%d\n", + mode->clock, supported_rate_khz); + return -EINVAL; + } + + if (mode->clock > dp_display->max_pclk_khz) { + DP_DEBUG("clk:%d, max:%d\n", mode->clock, + dp_display->max_pclk_khz); + return -EINVAL; + } + + if ((dp_display->max_hdisplay > 0) && (dp_display->max_vdisplay > 0) && + ((mode->hdisplay > dp_display->max_hdisplay) || + (mode->vdisplay > dp_display->max_vdisplay))) { + DP_DEBUG("hdisplay:%d, max-hdisplay:%d", + mode->hdisplay, dp_display->max_hdisplay); + DP_DEBUG("vdisplay:%d, max-vdisplay:%d\n", + mode->vdisplay, dp_display->max_vdisplay); + return -EINVAL; + } + + if (tmds_max_clock > 0 && mode->clock > tmds_max_clock) { + DP_DEBUG("clk:%d, max tmds:%d\n", mode->clock, + tmds_max_clock); + return -EINVAL; + } + + rc = msm_get_mixer_count(dp->priv, mode, avail_res, &num_lm); + if (rc) { + DP_ERR("error getting mixer count. 
rc:%d\n", rc); + return -EINVAL; + } + + if (num_lm > avail_res->num_lm || + (num_lm == 2 && !avail_res->num_3dmux)) { + DP_DEBUG("num_lm:%d, req lm:%d 3dmux:%d\n", num_lm, + avail_res->num_lm, avail_res->num_3dmux); + return -EINVAL; + } + + return 0; +} + +static int dp_display_check_overrides( + struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode, + const struct msm_resource_caps_info *avail_res) +{ + struct dp_mst_connector *mst_connector; + struct dp_display_private *dp; + struct dp_panel *dp_panel; + struct dp_debug *debug; + bool in_list = false; + int hdis, vdis, vref, ar, _hdis, _vdis, _vref, _ar; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + debug = dp->debug; + + /* + * If the connector exists in the mst connector list and if debug is + * enabled for that connector, use the mst connector settings from the + * list for validation. Otherwise, use non-mst default settings. + */ + mutex_lock(&debug->dp_mst_connector_list.lock); + + if (list_empty(&debug->dp_mst_connector_list.list)) { + DP_MST_DEBUG("MST connect list is empty\n"); + mutex_unlock(&debug->dp_mst_connector_list.lock); + goto verify_default; + } + + list_for_each_entry(mst_connector, &debug->dp_mst_connector_list.list, + list) { + if (mst_connector->con_id == dp_panel->connector->base.id) { + in_list = true; + + if (!mst_connector->debug_en) { + mutex_unlock( + &debug->dp_mst_connector_list.lock); + return 0; + } + + hdis = mst_connector->hdisplay; + vdis = mst_connector->vdisplay; + vref = mst_connector->vrefresh; + ar = mst_connector->aspect_ratio; + + _hdis = mode->hdisplay; + _vdis = mode->vdisplay; + _vref = mode->vrefresh; + _ar = mode->picture_aspect_ratio; + + if (hdis == _hdis && vdis == _vdis && vref == _vref && + ar == _ar) { + mutex_unlock( + &debug->dp_mst_connector_list.lock); + return 0; + } + break; + } + } + + mutex_unlock(&debug->dp_mst_connector_list.lock); + if (in_list) + return -EINVAL; + +verify_default: + if (debug->debug_en && (mode->hdisplay != debug->hdisplay || + mode->vdisplay != debug->vdisplay || + mode->vrefresh != debug->vrefresh || + mode->picture_aspect_ratio != debug->aspect_ratio)) + return -EINVAL; + + return 0; +} + +static enum drm_mode_status dp_display_validate_mode( + struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode, + const struct msm_resource_caps_info *avail_res) +{ + struct dp_display_private *dp; + + struct dp_panel *dp_panel; + struct dp_debug *debug; + enum drm_mode_status mode_status = MODE_BAD; + + if (!dp_display || !mode || !panel || + !avail_res || !avail_res->max_mixer_width) { + DP_ERR("invalid params\n"); + return mode_status; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector\n"); + goto end; + } + + debug = dp->debug; + if (!debug) { + DP_ERR("invalid debug node\n"); + goto end; + } + + if (dp_display_validate_resources(dp_display, panel, mode, avail_res)) { + DP_DEBUG("DP bad mode %dx%d@%d\n", + mode->hdisplay, mode->vdisplay, mode->clock); + goto end; + } + + if (dp_display_check_overrides(dp_display, panel, + mode, avail_res)) { + DP_MST_DEBUG("DP overrides ignore mode %dx%d@%d\n", + mode->hdisplay, mode->vdisplay, mode->clock); + goto end; + } + + DP_DEBUG("DP ok mode %dx%d@%d\n", + mode->hdisplay, mode->vdisplay, mode->clock); + mode_status = MODE_OK; +end: + mutex_unlock(&dp->session_lock); + return mode_status; +} + 
+static int dp_display_get_modes(struct dp_display *dp, void *panel, + struct dp_display_mode *dp_mode) +{ + struct dp_display_private *dp_display; + struct dp_panel *dp_panel; + int ret = 0; + + if (!dp || !panel) { + DP_ERR("invalid params\n"); + return 0; + } + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + ret = dp_panel->get_modes(dp_panel, dp_panel->connector, dp_mode); + if (dp_mode->timing.pixel_clk_khz) + dp->max_pclk_khz = dp_mode->timing.pixel_clk_khz; + return ret; +} + +static void dp_display_convert_to_dp_mode(struct dp_display *dp_display, + void *panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel; + u32 free_dsc_blks = 0, required_dsc_blks = 0; + + if (!dp_display || !drm_mode || !dp_mode || !panel) { + DP_ERR("invalid input\n"); + return; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + + memset(dp_mode, 0, sizeof(*dp_mode)); + + free_dsc_blks = dp->parser->max_dp_dsc_blks - + dp->tot_dsc_blks_in_use + + dp_panel->tot_dsc_blks_in_use; + required_dsc_blks = drm_mode->hdisplay / + dp->parser->max_dp_dsc_input_width_pixs; + if (drm_mode->hdisplay % dp->parser->max_dp_dsc_input_width_pixs) + required_dsc_blks++; + + if (free_dsc_blks >= required_dsc_blks) + dp_mode->capabilities |= DP_PANEL_CAPS_DSC; + + if (dp_mode->capabilities & DP_PANEL_CAPS_DSC) + DP_DEBUG("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x, width:%d\n", + dp->tot_dsc_blks_in_use, dp->parser->max_dp_dsc_blks, + free_dsc_blks, required_dsc_blks, dp_mode->capabilities, + dp->parser->max_dp_dsc_input_width_pixs); + + dp_panel->convert_to_dp_mode(dp_panel, drm_mode, dp_mode); +} + +static int dp_display_config_hdr(struct dp_display *dp_display, void *panel, + struct drm_msm_ext_hdr_metadata *hdr, bool dhdr_update) +{ + struct dp_panel *dp_panel; + struct sde_connector *sde_conn; + struct dp_display_private *dp; + u64 core_clk_rate; + bool flush_hdr; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + dp = container_of(dp_display, struct dp_display_private, dp_display); + sde_conn = to_sde_connector(dp_panel->connector); + + core_clk_rate = dp->power->clk_get_rate(dp->power, "core_clk"); + if (!core_clk_rate) { + DP_ERR("invalid rate for core_clk\n"); + return -EINVAL; + } + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + return 0; + } + + /* + * In rare cases where HDR metadata is updated independently + * flush the HDR metadata immediately instead of relying on + * the colorspace + */ + flush_hdr = !sde_conn->colorspace_updated; + + if (flush_hdr) + DP_DEBUG("flushing the HDR metadata\n"); + else + DP_DEBUG("piggy-backing with colorspace\n"); + + return dp_panel->setup_hdr(dp_panel, hdr, dhdr_update, + core_clk_rate, flush_hdr); +} + +static int dp_display_get_display_type(struct dp_display *dp_display, + const char **display_type) +{ + struct dp_display_private *dp; + + if (!dp_display || !display_type) { + pr_err("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + *display_type = dp->parser->display_type; + + if (!strcmp(*display_type, "primary")) + dp_display->is_primary = true; + + return 0; +} + + +static int dp_display_setup_colospace(struct dp_display *dp_display, + 
void *panel, + u32 colorspace) +{ + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !panel) { + pr_err("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + return 0; + } + + dp_panel = panel; + + return dp_panel->set_colorspace(dp_panel, colorspace); +} + +static int dp_display_create_workqueue(struct dp_display_private *dp) +{ + dp->wq = create_singlethread_workqueue("drm_dp"); + if (IS_ERR_OR_NULL(dp->wq)) { + DP_ERR("Error creating wq\n"); + return -EPERM; + } + + INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work); + INIT_WORK(&dp->connect_work, dp_display_connect_work); + INIT_WORK(&dp->attention_work, dp_display_attention_work); + + return 0; +} + +static int dp_display_fsa4480_callback(struct notifier_block *self, + unsigned long event, void *data) +{ + return 0; +} + +static int dp_display_init_aux_switch(struct dp_display_private *dp) +{ + int rc = 0; + const char *phandle = "qcom,dp-aux-switch"; + struct notifier_block nb; + + if (!dp->pdev->dev.of_node) { + DP_ERR("cannot find dev.of_node\n"); + rc = -ENODEV; + goto end; + } + + dp->aux_switch_node = of_parse_phandle(dp->pdev->dev.of_node, + phandle, 0); + if (!dp->aux_switch_node) { + DP_WARN("cannot parse %s handle\n", phandle); + rc = -ENODEV; + goto end; + } + + if (strcmp(dp->aux_switch_node->name, "fsa4480")) { + DP_DEBUG("Not an fsa4480 aux switch\n"); + goto end; + } + + nb.notifier_call = dp_display_fsa4480_callback; + nb.priority = 0; + + rc = fsa4480_reg_notifier(&nb, dp->aux_switch_node); + if (rc) { + DP_ERR("failed to register notifier (%d)\n", rc); + goto end; + } + + fsa4480_unreg_notifier(&nb, dp->aux_switch_node); +end: + return rc; +} + +static int dp_display_mst_install(struct dp_display *dp_display, + struct dp_mst_drm_install_info *mst_install_info) +{ + struct dp_display_private *dp; + + if (!dp_display || !mst_install_info) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!mst_install_info->cbs->hpd || !mst_install_info->cbs->hpd_irq) { + DP_ERR("invalid mst cbs\n"); + return -EINVAL; + } + + dp_display->dp_mst_prv_info = mst_install_info->dp_mst_prv_info; + + if (!dp->parser->has_mst) { + DP_DEBUG("mst not enabled\n"); + return -EPERM; + } + + memcpy(&dp->mst.cbs, mst_install_info->cbs, sizeof(dp->mst.cbs)); + dp->mst.drm_registered = true; + + DP_MST_DEBUG("dp mst drm installed\n"); + + return 0; +} + +static int dp_display_mst_uninstall(struct dp_display *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return -EPERM; + } + + dp = container_of(dp_display, struct dp_display_private, + dp_display); + memset(&dp->mst.cbs, 0, sizeof(dp->mst.cbs)); + dp->mst.drm_registered = false; + + DP_MST_DEBUG("dp mst drm uninstalled\n"); + + return 0; +} + +static int dp_display_mst_connector_install(struct dp_display *dp_display, + struct drm_connector *connector) +{ + int rc = 0; + struct dp_panel_in panel_in; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + struct dp_mst_connector *mst_connector; + + if (!dp_display || !connector) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = 
container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + mutex_unlock(&dp->session_lock); + return -EPERM; + } + + panel_in.dev = &dp->pdev->dev; + panel_in.aux = dp->aux; + panel_in.catalog = &dp->catalog->panel; + panel_in.link = dp->link; + panel_in.connector = connector; + panel_in.base_panel = dp->panel; + panel_in.parser = dp->parser; + + dp_panel = dp_panel_get(&panel_in); + if (IS_ERR(dp_panel)) { + rc = PTR_ERR(dp_panel); + DP_ERR("failed to initialize panel, rc = %d\n", rc); + mutex_unlock(&dp->session_lock); + return rc; + } + + dp_panel->audio = dp_audio_get(dp->pdev, dp_panel, &dp->catalog->audio); + if (IS_ERR(dp_panel->audio)) { + rc = PTR_ERR(dp_panel->audio); + DP_ERR("[mst] failed to initialize audio, rc = %d\n", rc); + dp_panel->audio = NULL; + mutex_unlock(&dp->session_lock); + return rc; + } + + DP_MST_DEBUG("dp mst connector installed. conn:%d\n", + connector->base.id); + + mutex_lock(&dp->debug->dp_mst_connector_list.lock); + + mst_connector = kmalloc(sizeof(struct dp_mst_connector), + GFP_KERNEL); + if (!mst_connector) { + mutex_unlock(&dp->debug->dp_mst_connector_list.lock); + mutex_unlock(&dp->session_lock); + return -ENOMEM; + } + + mst_connector->debug_en = false; + mst_connector->conn = connector; + mst_connector->con_id = connector->base.id; + mst_connector->state = connector_status_unknown; + INIT_LIST_HEAD(&mst_connector->list); + + list_add(&mst_connector->list, + &dp->debug->dp_mst_connector_list.list); + + mutex_unlock(&dp->debug->dp_mst_connector_list.lock); + mutex_unlock(&dp->session_lock); + + return 0; +} + +static int dp_display_mst_connector_uninstall(struct dp_display *dp_display, + struct drm_connector *connector) +{ + int rc = 0; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + struct dp_mst_connector *con_to_remove, *temp_con; + + if (!dp_display || !connector) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + mutex_unlock(&dp->session_lock); + return -EPERM; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + mutex_unlock(&dp->session_lock); + return -EINVAL; + } + + dp_panel = sde_conn->drv_panel; + dp_audio_put(dp_panel->audio); + dp_panel_put(dp_panel); + + DP_MST_DEBUG("dp mst connector uninstalled. 
conn:%d\n", + connector->base.id); + + mutex_lock(&dp->debug->dp_mst_connector_list.lock); + + list_for_each_entry_safe(con_to_remove, temp_con, + &dp->debug->dp_mst_connector_list.list, list) { + if (con_to_remove->conn == connector) { + list_del(&con_to_remove->list); + kfree(con_to_remove); + } + } + + mutex_unlock(&dp->debug->dp_mst_connector_list.lock); + mutex_unlock(&dp->session_lock); + + return rc; +} + +static int dp_display_mst_get_connector_info(struct dp_display *dp_display, + struct drm_connector *connector, + struct dp_mst_connector *mst_conn) +{ + struct dp_display_private *dp; + struct dp_mst_connector *conn, *temp_conn; + + if (!connector || !mst_conn) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + mutex_unlock(&dp->session_lock); + return -EPERM; + } + + mutex_lock(&dp->debug->dp_mst_connector_list.lock); + list_for_each_entry_safe(conn, temp_conn, + &dp->debug->dp_mst_connector_list.list, list) { + if (conn->con_id == connector->base.id) + memcpy(mst_conn, conn, sizeof(*mst_conn)); + } + mutex_unlock(&dp->debug->dp_mst_connector_list.lock); + mutex_unlock(&dp->session_lock); + return 0; +} + +static int dp_display_mst_connector_update_edid(struct dp_display *dp_display, + struct drm_connector *connector, + struct edid *edid) +{ + int rc = 0; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !connector || !edid) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return -EPERM; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + return -EINVAL; + } + + dp_panel = sde_conn->drv_panel; + rc = dp_panel->update_edid(dp_panel, edid); + + DP_MST_DEBUG("dp mst connector:%d edid updated. 
mode_cnt:%d\n", + connector->base.id, rc); + + return rc; +} + +static int dp_display_update_pps(struct dp_display *dp_display, + struct drm_connector *connector, char *pps_cmd) +{ + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + return -EINVAL; + } + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + return 0; + } + + dp_panel = sde_conn->drv_panel; + dp_panel->update_pps(dp_panel, pps_cmd); + return 0; +} + +static int dp_display_mst_connector_update_link_info( + struct dp_display *dp_display, + struct drm_connector *connector) +{ + int rc = 0; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !connector) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return -EPERM; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + return -EINVAL; + } + + dp_panel = sde_conn->drv_panel; + + memcpy(dp_panel->dpcd, dp->panel->dpcd, + DP_RECEIVER_CAP_SIZE + 1); + memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd, + DP_RECEIVER_DSC_CAP_SIZE + 1); + memcpy(&dp_panel->link_info, &dp->panel->link_info, + sizeof(dp_panel->link_info)); + + DP_MST_DEBUG("dp mst connector:%d link info updated\n", + connector->base.id); + + return rc; +} + +static int dp_display_mst_get_fixed_topology_port( + struct dp_display *dp_display, + u32 strm_id, u32 *port_num) +{ + struct dp_display_private *dp; + u32 port; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (strm_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", strm_id); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + port = dp->parser->mst_fixed_port[strm_id]; + + if (!port || port > 255) + return -ENOENT; + + if (port_num) + *port_num = port; + + return 0; +} + +static int dp_display_get_mst_caps(struct dp_display *dp_display, + struct dp_mst_caps *mst_caps) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display || !mst_caps) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mst_caps->has_mst = dp->parser->has_mst; + mst_caps->max_streams_supported = (mst_caps->has_mst) ? 2 : 0; + mst_caps->max_dpcd_transaction_bytes = (mst_caps->has_mst) ? 
16 : 0; + mst_caps->drm_aux = dp->aux->drm_aux; + + return rc; +} + +static void dp_display_wakeup_phy_layer(struct dp_display *dp_display, + bool wakeup) +{ + struct dp_display_private *dp; + struct dp_hpd *hpd; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return; + } + + hpd = dp->hpd; + if (hpd && hpd->wakeup_phy) + hpd->wakeup_phy(hpd, wakeup); +} + +static int dp_display_probe(struct platform_device *pdev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!pdev || !pdev->dev.of_node) { + DP_ERR("pdev not found\n"); + rc = -ENODEV; + goto bail; + } + + dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (!dp) { + rc = -ENOMEM; + goto bail; + } + + init_completion(&dp->notification_comp); + + dp->pdev = pdev; + dp->name = "drm_dp"; + + memset(&dp->mst, 0, sizeof(dp->mst)); + + rc = dp_display_init_aux_switch(dp); + if (rc) { + rc = -EPROBE_DEFER; + goto error; + } + + rc = dp_display_create_workqueue(dp); + if (rc) { + DP_ERR("Failed to create workqueue\n"); + goto error; + } + + platform_set_drvdata(pdev, dp); + + g_dp_display = &dp->dp_display; + + g_dp_display->enable = dp_display_enable; + g_dp_display->post_enable = dp_display_post_enable; + g_dp_display->pre_disable = dp_display_pre_disable; + g_dp_display->disable = dp_display_disable; + g_dp_display->set_mode = dp_display_set_mode; + g_dp_display->validate_mode = dp_display_validate_mode; + g_dp_display->get_modes = dp_display_get_modes; + g_dp_display->prepare = dp_display_prepare; + g_dp_display->unprepare = dp_display_unprepare; + g_dp_display->request_irq = dp_request_irq; + g_dp_display->get_debug = dp_get_debug; + g_dp_display->post_open = NULL; + g_dp_display->post_init = dp_display_post_init; + g_dp_display->config_hdr = dp_display_config_hdr; + g_dp_display->get_display_type = dp_display_get_display_type; + g_dp_display->mst_install = dp_display_mst_install; + g_dp_display->mst_uninstall = dp_display_mst_uninstall; + g_dp_display->mst_connector_install = dp_display_mst_connector_install; + g_dp_display->mst_connector_uninstall = + dp_display_mst_connector_uninstall; + g_dp_display->mst_connector_update_edid = + dp_display_mst_connector_update_edid; + g_dp_display->mst_connector_update_link_info = + dp_display_mst_connector_update_link_info; + g_dp_display->get_mst_caps = dp_display_get_mst_caps; + g_dp_display->set_stream_info = dp_display_set_stream_info; + g_dp_display->update_pps = dp_display_update_pps; + g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode; + g_dp_display->mst_get_connector_info = + dp_display_mst_get_connector_info; + g_dp_display->mst_get_fixed_topology_port = + dp_display_mst_get_fixed_topology_port; + g_dp_display->wakeup_phy_layer = + dp_display_wakeup_phy_layer; + g_dp_display->set_colorspace = dp_display_setup_colospace; + + rc = component_add(&pdev->dev, &dp_display_comp_ops); + if (rc) { + DP_ERR("component add failed, rc=%d\n", rc); + goto error; + } + + return 0; +error: + devm_kfree(&pdev->dev, dp); +bail: + return rc; +} + +int dp_display_get_displays(void **displays, int count) +{ + if (!displays) { + DP_ERR("invalid data\n"); + return -EINVAL; + } + + if (count != 1) { + DP_ERR("invalid number of displays\n"); + return -EINVAL; + } + + displays[0] = g_dp_display; + return count; +} + +int dp_display_get_num_of_displays(void) +{ + if (!g_dp_display) + return 0; + + return 1; +} + +int 
dp_display_get_num_of_streams(void) +{ + if (g_dp_display->no_mst_encoder) + return 0; + + return DP_STREAM_MAX; +} + +static void dp_display_set_mst_state(void *dp_display, + enum dp_drv_state mst_state) +{ + struct dp_display_private *dp; + + if (!g_dp_display) { + DP_DEBUG("dp display not initialized\n"); + return; + } + + dp = container_of(g_dp_display, struct dp_display_private, dp_display); + if (dp->mst.mst_active && dp->mst.cbs.set_drv_state) + dp->mst.cbs.set_drv_state(g_dp_display, mst_state); +} + +static int dp_display_remove(struct platform_device *pdev) +{ + struct dp_display_private *dp; + + if (!pdev) + return -EINVAL; + + dp = platform_get_drvdata(pdev); + + dp_display_deinit_sub_modules(dp); + + if (dp->wq) + destroy_workqueue(dp->wq); + + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, dp); + + return 0; +} + +static int dp_pm_prepare(struct device *dev) +{ + struct dp_display_private *dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + dp_display_set_mst_state(g_dp_display, PM_SUSPEND); + + /* + * There are a few instances where the DP is hotplugged when the device + * is in PM suspend state. After hotplug, it is observed the device + * enters and exits the PM suspend multiple times while aux transactions + * are taking place. This may sometimes cause an unclocked register + * access error. So, abort aux transactions when such a situation + * arises i.e. when DP is connected but display not enabled yet. + */ + if (dp_display_state_is(DP_STATE_CONNECTED) && + !dp_display_state_is(DP_STATE_ENABLED)) { + dp->aux->abort(dp->aux, true); + dp->ctrl->abort(dp->ctrl, true); + } + + dp_display_state_add(DP_STATE_SUSPENDED); + mutex_unlock(&dp->session_lock); + + return 0; +} + +static void dp_pm_complete(struct device *dev) +{ + struct dp_display_private *dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + dp_display_set_mst_state(g_dp_display, PM_DEFAULT); + + /* + * There are multiple PM suspend entry and exits observed before + * the connect uevent is issued to userspace. The aux transactions are + * aborted during PM suspend entry in dp_pm_prepare to prevent unclocked + * register access. On PM suspend exit, there will be no host_init call + * to reset the abort flags for ctrl and aux incase DP is connected + * but display not enabled. So, resetting abort flags for aux and ctrl. 
+ */ + if (dp_display_state_is(DP_STATE_CONNECTED) && + !dp_display_state_is(DP_STATE_ENABLED)) { + dp->aux->abort(dp->aux, false); + dp->ctrl->abort(dp->ctrl, false); + } + + dp_display_state_remove(DP_STATE_SUSPENDED); + mutex_unlock(&dp->session_lock); +} + +static const struct dev_pm_ops dp_pm_ops = { + .prepare = dp_pm_prepare, + .complete = dp_pm_complete, +}; + +static struct platform_driver dp_display_driver = { + .probe = dp_display_probe, + .remove = dp_display_remove, + .driver = { + .name = "msm-dp-display", + .of_match_table = dp_dt_match, + .suppress_bind_attrs = true, + .pm = &dp_pm_ops, + }, +}; + +static int __init dp_display_init(void) +{ + int ret; + + ret = platform_driver_register(&dp_display_driver); + if (ret) { + DP_ERR("driver register failed\n"); + return ret; + } + + return ret; +} +late_initcall(dp_display_init); + +static void __exit dp_display_cleanup(void) +{ + platform_driver_unregister(&dp_display_driver); +} +module_exit(dp_display_cleanup); diff --git a/techpack/display/msm/dp/dp_display.h b/techpack/display/msm/dp/dp_display.h new file mode 100644 index 0000000000000000000000000000000000000000..0172889c29f38dbf5b291f2fee587f6d5091abe6 --- /dev/null +++ b/techpack/display/msm/dp/dp_display.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_DISPLAY_H_ +#define _DP_DISPLAY_H_ + +#include +#include +#include + +#include "dp_panel.h" + +#define DP_MST_SIM_MAX_PORTS 8 + +enum dp_drv_state { + PM_DEFAULT, + PM_SUSPEND, +}; + +struct dp_mst_hpd_info { + bool mst_protocol; + bool mst_hpd_sim; + u32 mst_port_cnt; + u8 *edid; + bool mst_sim_add_con; + bool mst_sim_remove_con; + int mst_sim_remove_con_id; +}; + +struct dp_mst_drm_cbs { + void (*hpd)(void *display, bool hpd_status, + struct dp_mst_hpd_info *info); + void (*hpd_irq)(void *display, struct dp_mst_hpd_info *info); + void (*set_drv_state)(void *dp_display, + enum dp_drv_state mst_state); +}; + +struct dp_mst_drm_install_info { + void *dp_mst_prv_info; + const struct dp_mst_drm_cbs *cbs; +}; + +struct dp_mst_caps { + bool has_mst; + u32 max_streams_supported; + u32 max_dpcd_transaction_bytes; + struct drm_dp_aux *drm_aux; +}; + +struct dp_mst_connector { + bool debug_en; + int con_id; + int hdisplay; + int vdisplay; + int vrefresh; + int aspect_ratio; + struct drm_connector *conn; + struct mutex lock; + struct list_head list; + enum drm_connector_status state; +}; + +struct dp_display { + struct drm_device *drm_dev; + struct dp_bridge *bridge; + struct drm_connector *base_connector; + void *base_dp_panel; + bool is_sst_connected; + bool is_mst_supported; + u32 max_pclk_khz; + u32 max_hdisplay; + u32 max_vdisplay; + u32 no_mst_encoder; + void *dp_mst_prv_info; + bool is_primary; + + int (*enable)(struct dp_display *dp_display, void *panel); + int (*post_enable)(struct dp_display *dp_display, void *panel); + + int (*pre_disable)(struct dp_display *dp_display, void *panel); + int (*disable)(struct dp_display *dp_display, void *panel); + + int (*set_mode)(struct dp_display *dp_display, void *panel, + struct dp_display_mode *mode); + enum drm_mode_status (*validate_mode)(struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode, + const struct msm_resource_caps_info *avail_res); + int (*get_modes)(struct dp_display *dp_display, void *panel, + struct dp_display_mode *dp_mode); + int (*prepare)(struct dp_display 
*dp_display, void *panel); + int (*unprepare)(struct dp_display *dp_display, void *panel); + int (*request_irq)(struct dp_display *dp_display); + struct dp_debug *(*get_debug)(struct dp_display *dp_display); + void (*post_open)(struct dp_display *dp_display); + int (*config_hdr)(struct dp_display *dp_display, void *panel, + struct drm_msm_ext_hdr_metadata *hdr_meta, + bool dhdr_update); + int (*set_colorspace)(struct dp_display *dp_display, void *panel, + u32 colorspace); + int (*post_init)(struct dp_display *dp_display); + int (*mst_install)(struct dp_display *dp_display, + struct dp_mst_drm_install_info *mst_install_info); + int (*mst_uninstall)(struct dp_display *dp_display); + int (*mst_connector_install)(struct dp_display *dp_display, + struct drm_connector *connector); + int (*mst_connector_uninstall)(struct dp_display *dp_display, + struct drm_connector *connector); + int (*mst_connector_update_edid)(struct dp_display *dp_display, + struct drm_connector *connector, + struct edid *edid); + int (*mst_connector_update_link_info)(struct dp_display *dp_display, + struct drm_connector *connector); + int (*mst_get_connector_info)(struct dp_display *dp_display, + struct drm_connector *connector, + struct dp_mst_connector *mst_conn); + int (*mst_get_fixed_topology_port)(struct dp_display *dp_display, + u32 strm_id, u32 *port_num); + int (*get_mst_caps)(struct dp_display *dp_display, + struct dp_mst_caps *mst_caps); + int (*set_stream_info)(struct dp_display *dp_display, void *panel, + u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn, + int vcpi); + void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode); + int (*update_pps)(struct dp_display *dp_display, + struct drm_connector *connector, char *pps_cmd); + void (*wakeup_phy_layer)(struct dp_display *dp_display, + bool wakeup); + int (*get_display_type)(struct dp_display *dp_display, + const char **display_type); +}; + +#ifdef CONFIG_DRM_MSM_DP +int dp_display_get_num_of_displays(void); +int dp_display_get_displays(void **displays, int count); +int dp_display_get_num_of_streams(void); +#else +static inline int dp_display_get_num_of_displays(void) +{ + return 0; +} +static inline int dp_display_get_displays(void **displays, int count) +{ + return 0; +} +static inline int dp_display_get_num_of_streams(void) +{ + return 0; +} +static inline int dp_connector_update_pps(struct drm_connector *connector, + char *pps_cmd, void *display) +{ + return 0; +} +#endif +#endif /* _DP_DISPLAY_H_ */ diff --git a/techpack/display/msm/dp/dp_drm.c b/techpack/display/msm/dp/dp_drm.c new file mode 100644 index 0000000000000000000000000000000000000000..30446724cea7c28d7e25240028f5c46e522e040b --- /dev/null +++ b/techpack/display/msm/dp/dp_drm.c @@ -0,0 +1,695 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "sde_connector.h" +#include "dp_drm.h" +#include "dp_debug.h" + +#define DP_MST_DEBUG(fmt, ...) 
DP_DEBUG(fmt, ##__VA_ARGS__) + +#define to_dp_bridge(x) container_of((x), struct dp_bridge, base) + +void convert_to_drm_mode(const struct dp_display_mode *dp_mode, + struct drm_display_mode *drm_mode) +{ + u32 flags = 0; + + memset(drm_mode, 0, sizeof(*drm_mode)); + + drm_mode->hdisplay = dp_mode->timing.h_active; + drm_mode->hsync_start = drm_mode->hdisplay + + dp_mode->timing.h_front_porch; + drm_mode->hsync_end = drm_mode->hsync_start + + dp_mode->timing.h_sync_width; + drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch; + drm_mode->hskew = dp_mode->timing.h_skew; + + drm_mode->vdisplay = dp_mode->timing.v_active; + drm_mode->vsync_start = drm_mode->vdisplay + + dp_mode->timing.v_front_porch; + drm_mode->vsync_end = drm_mode->vsync_start + + dp_mode->timing.v_sync_width; + drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch; + + drm_mode->vrefresh = dp_mode->timing.refresh_rate; + drm_mode->clock = dp_mode->timing.pixel_clk_khz; + + if (dp_mode->timing.h_active_low) + flags |= DRM_MODE_FLAG_NHSYNC; + else + flags |= DRM_MODE_FLAG_PHSYNC; + + if (dp_mode->timing.v_active_low) + flags |= DRM_MODE_FLAG_NVSYNC; + else + flags |= DRM_MODE_FLAG_PVSYNC; + + drm_mode->flags = flags; + + drm_mode->type = 0x48; + drm_mode_set_name(drm_mode); +} + +static int dp_bridge_attach(struct drm_bridge *dp_bridge) +{ + struct dp_bridge *bridge = to_dp_bridge(dp_bridge); + + if (!dp_bridge) { + DP_ERR("Invalid params\n"); + return -EINVAL; + } + + DP_DEBUG("[%d] attached\n", bridge->id); + + return 0; +} + +static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + dp = bridge->display; + + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + /* By this point mode should have been validated through mode_fixup */ + rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode); + if (rc) { + DP_ERR("[%d] failed to perform a mode set, rc=%d\n", + bridge->id, rc); + return; + } + + rc = dp->prepare(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display prepare failed, rc=%d\n", + bridge->id, rc); + return; + } + + /* for SST force stream id, start slot and total slots to 0 */ + dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0); + + rc = dp->enable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display enable failed, rc=%d\n", + bridge->id, rc); + dp->unprepare(dp, bridge->dp_panel); + } +} + +static void dp_bridge_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + rc = dp->post_enable(dp, bridge->dp_panel); + if (rc) + DP_ERR("[%d] DP display post enable failed, rc=%d\n", + bridge->id, rc); +} + +static void dp_bridge_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + 
DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + if (!dp) { + DP_ERR("dp is null\n"); + return; + } + + if (dp) + sde_connector_helper_bridge_disable(bridge->connector); + + rc = dp->pre_disable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display pre disable failed, rc=%d\n", + bridge->id, rc); + } +} + +static void dp_bridge_post_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + rc = dp->disable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display disable failed, rc=%d\n", + bridge->id, rc); + return; + } + + rc = dp->unprepare(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display unprepare failed, rc=%d\n", + bridge->id, rc); + return; + } +} + +static void dp_bridge_mode_set(struct drm_bridge *drm_bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode, + &bridge->dp_mode); +} + +static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ret = true; + struct dp_display_mode dp_mode; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + ret = false; + goto end; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + ret = false; + goto end; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + ret = false; + goto end; + } + + dp = bridge->display; + + dp->convert_to_dp_mode(dp, bridge->dp_panel, mode, &dp_mode); + convert_to_drm_mode(&dp_mode, adjusted_mode); +end: + return ret; +} + +static const struct drm_bridge_funcs dp_bridge_ops = { + .attach = dp_bridge_attach, + .mode_fixup = dp_bridge_mode_fixup, + .pre_enable = dp_bridge_pre_enable, + .enable = dp_bridge_enable, + .disable = dp_bridge_disable, + .post_disable = dp_bridge_post_disable, + .mode_set = dp_bridge_mode_set, +}; + +int dp_connector_config_hdr(struct drm_connector *connector, void *display, + struct sde_connector_state *c_state) +{ + struct dp_display *dp = display; + struct sde_connector *sde_conn; + + if (!display || !c_state || !connector) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return -EINVAL; + } + + return dp->config_hdr(dp, sde_conn->drv_panel, &c_state->hdr_meta, + c_state->dyn_hdr_meta.dynamic_hdr_update); +} + +int dp_connector_set_colorspace(struct drm_connector *connector, + void *display) +{ + struct dp_display *dp_display = display; + struct sde_connector *sde_conn; + + if (!dp_display || !connector) + return -EINVAL; + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + pr_err("invalid dp 
panel\n"); + return -EINVAL; + } + + return dp_display->set_colorspace(dp_display, + sde_conn->drv_panel, connector->state->colorspace); +} + +int dp_connector_post_init(struct drm_connector *connector, void *display) +{ + int rc; + struct dp_display *dp_display = display; + struct sde_connector *sde_conn; + + if (!dp_display || !connector) + return -EINVAL; + + dp_display->base_connector = connector; + dp_display->bridge->connector = connector; + + if (dp_display->post_init) { + rc = dp_display->post_init(dp_display); + if (rc) + goto end; + } + + sde_conn = to_sde_connector(connector); + dp_display->bridge->dp_panel = sde_conn->drv_panel; + + rc = dp_mst_init(dp_display); +end: + return rc; +} + +int dp_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_mode_info *mode_info, + void *display, const struct msm_resource_caps_info *avail_res) +{ + const u32 single_intf = 1; + const u32 no_enc = 0; + struct msm_display_topology *topology; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_mode dp_mode; + struct dp_display *dp_disp = display; + struct msm_drm_private *priv; + int rc = 0; + + if (!drm_mode || !mode_info || !avail_res || + !avail_res->max_mixer_width || !connector || !display || + !connector->dev || !connector->dev->dev_private) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + memset(mode_info, 0, sizeof(*mode_info)); + + sde_conn = to_sde_connector(connector); + dp_panel = sde_conn->drv_panel; + priv = connector->dev->dev_private; + + topology = &mode_info->topology; + + rc = msm_get_mixer_count(priv, drm_mode, avail_res, + &topology->num_lm); + if (rc) { + DP_ERR("error getting mixer count. rc:%d\n", rc); + return rc; + } + + topology->num_enc = no_enc; + topology->num_intf = single_intf; + + mode_info->frame_rate = drm_mode->vrefresh; + mode_info->vtotal = drm_mode->vtotal; + + mode_info->wide_bus_en = dp_panel->widebus_en; + + dp_disp->convert_to_dp_mode(dp_disp, dp_panel, drm_mode, &dp_mode); + + if (dp_mode.timing.comp_info.comp_ratio) { + memcpy(&mode_info->comp_info, + &dp_mode.timing.comp_info, + sizeof(mode_info->comp_info)); + + topology->num_enc = topology->num_lm; + } + + return 0; +} + +int dp_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *data) +{ + struct dp_display *display = data; + + if (!info || !display || !display->drm_dev) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + info->intf_type = DRM_MODE_CONNECTOR_DisplayPort; + + info->num_of_h_tiles = 1; + info->h_tile_instance[0] = 0; + info->is_connected = display->is_sst_connected; + info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID | + MSM_DISPLAY_CAP_HOT_PLUG; + + return 0; +} + +enum drm_connector_status dp_connector_detect(struct drm_connector *conn, + bool force, + void *display) +{ + enum drm_connector_status status = connector_status_unknown; + struct msm_display_info info; + int rc; + + if (!conn || !display) + return status; + + /* get display dp_info */ + memset(&info, 0x0, sizeof(info)); + rc = dp_connector_get_info(conn, &info, display); + if (rc) { + DP_ERR("failed to get display info, rc=%d\n", rc); + return connector_status_disconnected; + } + + if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG) + status = (info.is_connected ? 
connector_status_connected : + connector_status_disconnected); + else + status = connector_status_connected; + + conn->display_info.width_mm = info.width_mm; + conn->display_info.height_mm = info.height_mm; + + return status; +} + +void dp_connector_post_open(struct drm_connector *connector, void *display) +{ + struct dp_display *dp; + + if (!display) { + DP_ERR("invalid input\n"); + return; + } + + dp = display; + + if (dp->post_open) + dp->post_open(dp); +} + +int dp_connector_atomic_check(struct drm_connector *connector, + void *display, + struct drm_connector_state *c_state) +{ + struct sde_connector *sde_conn; + struct drm_connector_state *old_state; + + if (!connector || !display) + return -EINVAL; + + old_state = + drm_atomic_get_old_connector_state(c_state->state, connector); + + if (!old_state) + return -EINVAL; + + sde_conn = to_sde_connector(connector); + + /* + * Marking the colorspace has been changed + * the flag shall be checked in the pre_kickoff + * to configure the new colorspace in HW + */ + if (c_state->colorspace != old_state->colorspace) { + DP_DEBUG("colorspace has been updated\n"); + sde_conn->colorspace_updated = true; + } + + return 0; +} + +int dp_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res) +{ + int rc = 0; + struct dp_display *dp; + struct dp_display_mode *dp_mode = NULL; + struct drm_display_mode *m, drm_mode; + struct sde_connector *sde_conn; + + if (!connector || !display) + return 0; + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return 0; + } + + dp = display; + + dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL); + if (!dp_mode) + return 0; + + /* pluggable case assumes EDID is read when HPD */ + if (dp->is_sst_connected) { + rc = dp->get_modes(dp, sde_conn->drv_panel, dp_mode); + if (!rc) + DP_ERR("failed to get DP sink modes, rc=%d\n", rc); + + if (dp_mode->timing.pixel_clk_khz) { /* valid DP mode */ + memset(&drm_mode, 0x0, sizeof(drm_mode)); + convert_to_drm_mode(dp_mode, &drm_mode); + m = drm_mode_duplicate(connector->dev, &drm_mode); + if (!m) { + DP_ERR("failed to add mode %ux%u\n", + drm_mode.hdisplay, + drm_mode.vdisplay); + kfree(dp_mode); + return 0; + } + m->width_mm = connector->display_info.width_mm; + m->height_mm = connector->display_info.height_mm; + drm_mode_probed_add(connector, m); + } + } else { + DP_ERR("No sink connected\n"); + } + kfree(dp_mode); + + return rc; +} + +int dp_connnector_set_info_blob(struct drm_connector *connector, + void *info, void *display, struct msm_mode_info *mode_info) +{ + struct dp_display *dp_display = display; + const char *display_type = NULL; + + dp_display->get_display_type(dp_display, &display_type); + sde_kms_info_add_keystr(info, + "display type", display_type); + + return 0; +} + +int dp_drm_bridge_init(void *data, struct drm_encoder *encoder) +{ + int rc = 0; + struct dp_bridge *bridge; + struct drm_device *dev; + struct dp_display *display = data; + struct msm_drm_private *priv = NULL; + + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) { + rc = -ENOMEM; + goto error; + } + + dev = display->drm_dev; + bridge->display = display; + bridge->base.funcs = &dp_bridge_ops; + bridge->base.encoder = encoder; + + priv = dev->dev_private; + + rc = drm_bridge_attach(encoder, &bridge->base, NULL); + if (rc) { + DP_ERR("failed to attach bridge, rc=%d\n", rc); + goto error_free_bridge; + } + + rc = display->request_irq(display); + if (rc) { + DP_ERR("request_irq 
failed, rc=%d\n", rc); + goto error_free_bridge; + } + + encoder->bridge = &bridge->base; + priv->bridges[priv->num_bridges++] = &bridge->base; + display->bridge = bridge; + + return 0; +error_free_bridge: + kfree(bridge); +error: + return rc; +} + +void dp_drm_bridge_deinit(void *data) +{ + struct dp_display *display = data; + struct dp_bridge *bridge = display->bridge; + + if (bridge && bridge->base.encoder) + bridge->base.encoder->bridge = NULL; + + kfree(bridge); +} + +enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode, void *display, + const struct msm_resource_caps_info *avail_res) +{ + struct dp_display *dp_disp; + struct sde_connector *sde_conn; + + if (!mode || !display || !connector) { + DP_ERR("invalid params\n"); + return MODE_ERROR; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return MODE_ERROR; + } + + dp_disp = display; + mode->vrefresh = drm_mode_vrefresh(mode); + + return dp_disp->validate_mode(dp_disp, sde_conn->drv_panel, + mode, avail_res); +} + +int dp_connector_update_pps(struct drm_connector *connector, + char *pps_cmd, void *display) +{ + struct dp_display *dp_disp; + struct sde_connector *sde_conn; + + if (!display || !connector) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return MODE_ERROR; + } + + dp_disp = display; + return dp_disp->update_pps(dp_disp, connector, pps_cmd); +} diff --git a/techpack/display/msm/dp/dp_drm.h b/techpack/display/msm/dp/dp_drm.h new file mode 100644 index 0000000000000000000000000000000000000000..930b3aa911cf001204d5ff5b29ff36b9b74b5865 --- /dev/null +++ b/techpack/display/msm/dp/dp_drm.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_DRM_H_ +#define _DP_DRM_H_ + +#include +#include +#include +#include + +#include "msm_drv.h" +#include "dp_display.h" + +struct dp_bridge { + struct drm_bridge base; + u32 id; + + struct drm_connector *connector; + struct dp_display *display; + struct dp_display_mode dp_mode; + void *dp_panel; +}; + + +#ifdef CONFIG_DRM_MSM_DP +/** + * dp_connector_config_hdr - callback to configure HDR + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * @c_state: connect state data + * Returns: Zero on success + */ +int dp_connector_config_hdr(struct drm_connector *connector, + void *display, + struct sde_connector_state *c_state); + +/** + * dp_connector_atomic_check - callback to perform atomic + * check for DP + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * @c_state: connect state data + * Returns: Zero on success + */ +int dp_connector_atomic_check(struct drm_connector *connector, + void *display, + struct drm_connector_state *c_state); + +/** + * dp_connector_set_colorspace - callback to set new colorspace + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * Returns: Zero on success + */ +int dp_connector_set_colorspace(struct drm_connector *connector, + void *display); + +/** + * dp_connector_post_init - callback to perform additional initialization steps + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * Returns: Zero on success + */ +int dp_connector_post_init(struct drm_connector *connector, void *display); + +/** + * dp_connector_detect - callback to determine if connector is connected + * @connector: Pointer to drm connector structure + * @force: Force detect setting from drm framework + * @display: Pointer to private display handle + * Returns: Connector 'is connected' status + */ +enum drm_connector_status dp_connector_detect(struct drm_connector *conn, + bool force, + void *display); + +/** + * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add() + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * @avail_res: Pointer with curr available resources + * Returns: Number of modes added + */ +int dp_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res); + +/** + * dp_connector_mode_valid - callback to determine if specified mode is valid + * @connector: Pointer to drm connector structure + * @mode: Pointer to drm mode structure + * @display: Pointer to private display handle + * @avail_res: Pointer with curr available resources + * Returns: Validity status for specified mode + */ +enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode, + void *display, const struct msm_resource_caps_info *avail_res); + +/** + * dp_connector_get_mode_info - retrieve information of the mode selected + * @connector: Pointer to drm connector structure + * @drm_mode: Display mode set for the display + * @mode_info: Out parameter. 
Information of the mode + * @display: Pointer to private display structure + * @avail_res: Pointer with curr available resources + * Returns: zero on success + */ +int dp_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_mode_info *mode_info, + void *display, const struct msm_resource_caps_info *avail_res); + +/** + * dp_connector_get_info - retrieve connector display info + * @connector: Pointer to drm connector structure + * @info: Out parameter. Information of the connected display + * @display: Pointer to private display structure + * Returns: zero on success + */ +int dp_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *display); + +/** + * dp_connector_post_open - handle the post open functionalites + * @connector: Pointer to drm connector structure + * @display: Pointer to private display structure + */ +void dp_connector_post_open(struct drm_connector *connector, void *display); + +int dp_drm_bridge_init(void *display, + struct drm_encoder *encoder); + +void dp_drm_bridge_deinit(void *display); + +/** + * convert_to_drm_mode - convert dp mode to drm mode + * @dp_mode: Point to dp mode + * @drm_mode: Pointer to drm mode + */ +void convert_to_drm_mode(const struct dp_display_mode *dp_mode, + struct drm_display_mode *drm_mode); + +/** + * dp_connector_update_pps - update pps for given connector + * @dp_mode: Point to dp mode + * @pps_cmd: PPS packet + * @display: Pointer to private display structure + */ +int dp_connector_update_pps(struct drm_connector *connector, + char *pps_cmd, void *display); + +/** + * dp_mst_drm_bridge_init - initialize mst bridge + * @display: Pointer to private display structure + * @encoder: Pointer to encoder for mst bridge mapping + */ +int dp_mst_drm_bridge_init(void *display, + struct drm_encoder *encoder); + +/** + * dp_mst_drm_bridge_deinit - de-initialize mst bridges + * @display: Pointer to private display structure + */ +void dp_mst_drm_bridge_deinit(void *display); + +/** + * dp_mst_init - initialize mst objects for the given display + * @display: Pointer to private display structure + */ +int dp_mst_init(struct dp_display *dp_display); + +/** + * dp_mst_deinit - de-initialize mst objects for the given display + * @display: Pointer to private display structure + */ +void dp_mst_deinit(struct dp_display *dp_display); + +/** + * dp_conn_set_info_blob - callback to perform info blob initialization + * @connector: Pointer to drm connector structure + * @info: Pointer to sde connector info structure + * @display: Pointer to private display handle + * @mode_info: Pointer to mode info structure + * Returns: Zero on success + */ +int dp_connnector_set_info_blob(struct drm_connector *connector, + void *info, void *display, struct msm_mode_info *mode_info); + +#else +static inline int dp_connector_config_hdr(struct drm_connector *connector, + void *display, struct sde_connector_state *c_state) +{ + return 0; +} + +int dp_connector_atomic_check(struct drm_connector *connector, + void *display, + struct drm_connector_state *c_state) +{ + return 0; +} + +int dp_connector_set_colorspace(struct drm_connector *connector, + void *display) +{ + return 0; +} + +static inline int dp_connector_post_init(struct drm_connector *connector, + void *display) +{ + return 0; +} + +static inline enum drm_connector_status dp_connector_detect( + struct drm_connector *conn, + bool force, + void *display) +{ + return 0; +} + + +static inline int dp_connector_get_modes(struct 
drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res) +{ + return 0; +} + +static inline enum drm_mode_status dp_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode, + void *display, const struct msm_resource_caps_info *avail_res) +{ + return MODE_OK; +} + +static inline int dp_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_mode_info *mode_info, + void *display, const struct msm_resource_caps_info *avail_res) +{ + return 0; +} + +static inline int dp_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *display) +{ + return 0; +} + +static inline void dp_connector_post_open(struct drm_connector *connector, + void *display) +{ +} + +static inline int dp_drm_bridge_init(void *display, struct drm_encoder *encoder) +{ + return 0; +} + +static inline void dp_drm_bridge_deinit(void *display) +{ +} + +static inline void convert_to_drm_mode(const struct dp_display_mode *dp_mode, + struct drm_display_mode *drm_mode) +{ +} + +static inline int dp_mst_drm_bridge_init(void *display, + struct drm_encoder *encoder) +{ + return 0; +} + +static inline void dp_mst_drm_bridge_deinit(void *display) +{ +} + +static inline int dp_mst_init(struct dp_display *dp_display) +{ + return 0; +} + +static inline int dp_mst_deinit(struct dp_display *dp_display) +{ + return 0; +} + +int dp_connnector_set_info_blob(struct drm_connector *connector, + void *info, void *display, struct msm_mode_info *mode_info) +{ + return 0; +} + +#endif + +#endif /* _DP_DRM_H_ */ diff --git a/techpack/display/msm/dp/dp_gpio_hpd.c b/techpack/display/msm/dp/dp_gpio_hpd.c new file mode 100644 index 0000000000000000000000000000000000000000..f11212b63eaeb647b43ed16009b41856c9afb87f --- /dev/null +++ b/techpack/display/msm/dp/dp_gpio_hpd.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_gpio_hpd.h" +#include "dp_debug.h" + +struct dp_gpio_hpd_private { + struct device *dev; + struct dp_hpd base; + struct dss_gpio gpio_cfg; + struct delayed_work work; + struct dp_hpd_cb *cb; + int irq; + bool hpd; +}; + +static int dp_gpio_hpd_connect(struct dp_gpio_hpd_private *gpio_hpd, bool hpd) +{ + int rc = 0; + + if (!gpio_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd->base.hpd_high = hpd; + gpio_hpd->base.alt_mode_cfg_done = hpd; + gpio_hpd->base.hpd_irq = false; + + if (!gpio_hpd->cb || + !gpio_hpd->cb->configure || + !gpio_hpd->cb->disconnect) { + DP_ERR("invalid cb\n"); + rc = -EINVAL; + goto error; + } + + if (hpd) + rc = gpio_hpd->cb->configure(gpio_hpd->dev); + else + rc = gpio_hpd->cb->disconnect(gpio_hpd->dev); + +error: + return rc; +} + +static int dp_gpio_hpd_attention(struct dp_gpio_hpd_private *gpio_hpd) +{ + int rc = 0; + + if (!gpio_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd->base.hpd_irq = true; + + if (gpio_hpd->cb && gpio_hpd->cb->attention) + rc = gpio_hpd->cb->attention(gpio_hpd->dev); + +error: + return rc; +} + +static irqreturn_t dp_gpio_isr(int unused, void *data) +{ + struct dp_gpio_hpd_private *gpio_hpd = data; + u32 const disconnect_timeout_retry = 50; + bool hpd; + int i; + + if (!gpio_hpd) + return IRQ_NONE; + + hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio); + + if (!gpio_hpd->hpd && hpd) { + gpio_hpd->hpd = true; + queue_delayed_work(system_wq, &gpio_hpd->work, 0); + return IRQ_HANDLED; + } + + if (!gpio_hpd->hpd) + return IRQ_HANDLED; + + /* In DP 1.2 spec, 100msec is recommended for the detection + * of HPD connect event. Here we'll poll HPD status for + * 50x2ms = 100ms and if HPD is always low, we know DP is + * disconnected. 
If HPD is high, HPD_IRQ will be handled + */ + for (i = 0; i < disconnect_timeout_retry; i++) { + if (hpd) { + dp_gpio_hpd_attention(gpio_hpd); + return IRQ_HANDLED; + } + usleep_range(2000, 2100); + hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio); + } + + gpio_hpd->hpd = false; + queue_delayed_work(system_wq, &gpio_hpd->work, 0); + return IRQ_HANDLED; +} + +static void dp_gpio_hpd_work(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct dp_gpio_hpd_private *gpio_hpd = container_of(dw, + struct dp_gpio_hpd_private, work); + int ret; + + if (gpio_hpd->hpd) { + devm_free_irq(gpio_hpd->dev, + gpio_hpd->irq, gpio_hpd); + ret = devm_request_threaded_irq(gpio_hpd->dev, + gpio_hpd->irq, NULL, + dp_gpio_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "dp-gpio-intp", gpio_hpd); + dp_gpio_hpd_connect(gpio_hpd, true); + } else { + devm_free_irq(gpio_hpd->dev, + gpio_hpd->irq, gpio_hpd); + ret = devm_request_threaded_irq(gpio_hpd->dev, + gpio_hpd->irq, NULL, + dp_gpio_isr, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "dp-gpio-intp", gpio_hpd); + dp_gpio_hpd_connect(gpio_hpd, false); + } + + if (ret < 0) + DP_ERR("Cannot claim IRQ dp-gpio-intp\n"); +} + +static int dp_gpio_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + int rc = 0; + struct dp_gpio_hpd_private *gpio_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + dp_gpio_hpd_connect(gpio_hpd, hpd); +error: + return rc; +} + +static int dp_gpio_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + int rc = 0; + struct dp_gpio_hpd_private *gpio_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + dp_gpio_hpd_attention(gpio_hpd); +error: + return rc; +} + +int dp_gpio_hpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_gpio_hpd_private *gpio_hpd; + int edge; + int rc = 0; + + if (!dp_hpd) + return -EINVAL; + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + gpio_hpd->hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio); + + edge = gpio_hpd->hpd ? 
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; + rc = devm_request_threaded_irq(gpio_hpd->dev, gpio_hpd->irq, NULL, + dp_gpio_isr, + edge | IRQF_ONESHOT, + "dp-gpio-intp", gpio_hpd); + if (rc) { + DP_ERR("Failed to request INTP threaded IRQ: %d\n", rc); + return rc; + } + + if (gpio_hpd->hpd) + queue_delayed_work(system_wq, &gpio_hpd->work, 0); + + return rc; +} + +struct dp_hpd *dp_gpio_hpd_get(struct device *dev, + struct dp_hpd_cb *cb) +{ + int rc = 0; + const char *hpd_gpio_name = "qcom,dp-hpd-gpio"; + struct dp_gpio_hpd_private *gpio_hpd; + struct dp_pinctrl pinctrl = {0}; + + if (!dev || !cb) { + DP_ERR("invalid device\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd = devm_kzalloc(dev, sizeof(*gpio_hpd), GFP_KERNEL); + if (!gpio_hpd) { + rc = -ENOMEM; + goto error; + } + + pinctrl.pin = devm_pinctrl_get(dev); + if (!IS_ERR_OR_NULL(pinctrl.pin)) { + pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin, + "mdss_dp_hpd_active"); + if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) { + rc = pinctrl_select_state(pinctrl.pin, + pinctrl.state_hpd_active); + if (rc) { + DP_ERR("failed to set hpd active state\n"); + goto gpio_error; + } + } + } + + gpio_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node, + hpd_gpio_name, 0); + if (!gpio_is_valid(gpio_hpd->gpio_cfg.gpio)) { + DP_ERR("%s gpio not specified\n", hpd_gpio_name); + rc = -EINVAL; + goto gpio_error; + } + + strlcpy(gpio_hpd->gpio_cfg.gpio_name, hpd_gpio_name, + sizeof(gpio_hpd->gpio_cfg.gpio_name)); + gpio_hpd->gpio_cfg.value = 0; + + rc = gpio_request(gpio_hpd->gpio_cfg.gpio, + gpio_hpd->gpio_cfg.gpio_name); + if (rc) { + DP_ERR("%s: failed to request gpio\n", hpd_gpio_name); + goto gpio_error; + } + gpio_direction_input(gpio_hpd->gpio_cfg.gpio); + + gpio_hpd->dev = dev; + gpio_hpd->cb = cb; + gpio_hpd->irq = gpio_to_irq(gpio_hpd->gpio_cfg.gpio); + INIT_DELAYED_WORK(&gpio_hpd->work, dp_gpio_hpd_work); + + gpio_hpd->base.simulate_connect = dp_gpio_hpd_simulate_connect; + gpio_hpd->base.simulate_attention = dp_gpio_hpd_simulate_attention; + gpio_hpd->base.register_hpd = dp_gpio_hpd_register; + + return &gpio_hpd->base; + +gpio_error: + devm_kfree(dev, gpio_hpd); +error: + return ERR_PTR(rc); +} + +void dp_gpio_hpd_put(struct dp_hpd *dp_hpd) +{ + struct dp_gpio_hpd_private *gpio_hpd; + + if (!dp_hpd) + return; + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + gpio_free(gpio_hpd->gpio_cfg.gpio); + devm_kfree(gpio_hpd->dev, gpio_hpd); +} diff --git a/techpack/display/msm/dp/dp_gpio_hpd.h b/techpack/display/msm/dp/dp_gpio_hpd.h new file mode 100644 index 0000000000000000000000000000000000000000..0ed305cb906e10c352933bf8c91f2395ff3f210e --- /dev/null +++ b/techpack/display/msm/dp/dp_gpio_hpd.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
+ */ + + +#ifndef _DP_GPIO_HPD_H_ +#define _DP_GPIO_HPD_H_ + +#include "dp_hpd.h" + +/** + * dp_gpio_hpd_get() - configure and get the DisplayPlot HPD module data + * + * @dev: device instance of the caller + * return: pointer to allocated gpio hpd module data + * + * This function sets up the gpio hpd module + */ +struct dp_hpd *dp_gpio_hpd_get(struct device *dev, + struct dp_hpd_cb *cb); + +/** + * dp_gpio_hpd_put() + * + * Cleans up dp_hpd instance + * + * @hpd: instance of gpio_hpd + */ +void dp_gpio_hpd_put(struct dp_hpd *hpd); + +#endif /* _DP_GPIO_HPD_H_ */ diff --git a/techpack/display/msm/dp/dp_hdcp2p2.c b/techpack/display/msm/dp/dp_hdcp2p2.c new file mode 100644 index 0000000000000000000000000000000000000000..fcbec753ccfc46b218da1cf4a6d7f317b232e10e --- /dev/null +++ b/techpack/display/msm/dp/dp_hdcp2p2.c @@ -0,0 +1,998 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sde_hdcp_2x.h" +#include "dp_debug.h" + +#define DP_INTR_STATUS2 (0x00000024) +#define DP_INTR_STATUS3 (0x00000028) +#define dp_read(offset) readl_relaxed((offset)) +#define dp_write(offset, data) writel_relaxed((data), (offset)) +#define DP_HDCP_RXCAPS_LENGTH 3 + +enum dp_hdcp2p2_sink_status { + SINK_DISCONNECTED, + SINK_CONNECTED +}; + +struct dp_hdcp2p2_ctrl { + DECLARE_KFIFO(cmd_q, enum hdcp_transport_wakeup_cmd, 8); + wait_queue_head_t wait_q; + atomic_t auth_state; + atomic_t abort; + enum dp_hdcp2p2_sink_status sink_status; /* Is sink connected */ + struct dp_hdcp2p2_interrupts *intr; + struct sde_hdcp_init_data init_data; + struct mutex mutex; /* mutex to protect access to ctrl */ + struct mutex msg_lock; /* mutex to protect access to msg buffer */ + struct sde_hdcp_ops *ops; + void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */ + struct sde_hdcp_2x_ops *lib; /* Ops for driver to call into TZ */ + + struct task_struct *thread; + struct hdcp2_buffer response; + struct hdcp2_buffer request; + uint32_t total_message_length; + uint32_t transaction_delay; + uint32_t transaction_timeout; + struct sde_hdcp_2x_msg_part msg_part[HDCP_MAX_MESSAGE_PARTS]; + u8 sink_rx_status; + u8 rx_status; + char abort_mask; + + bool polling; +}; + +struct dp_hdcp2p2_int_set { + u32 interrupt; + char *name; + void (*func)(struct dp_hdcp2p2_ctrl *ctrl); +}; + +struct dp_hdcp2p2_interrupts { + u32 reg; + struct dp_hdcp2p2_int_set *int_set; +}; + +static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (!ctrl->lib_ctx) { + DP_ERR("HDCP library needs to be acquired\n"); + return -EINVAL; + } + + if (!ctrl->lib) { + DP_ERR("invalid lib ops data\n"); + return -EINVAL; + } + return 0; +} + +static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl) +{ + enum hdcp_transport_wakeup_cmd cmd; + + if (kfifo_peek(&ctrl->cmd_q, &cmd) && + cmd == HDCP_TRANSPORT_CMD_AUTHENTICATE) + return true; + + if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE) + return true; + + return false; +} + +static int dp_hdcp2p2_copy_buf(struct dp_hdcp2p2_ctrl *ctrl, + struct hdcp_transport_wakeup_data *data) +{ + int i = 0; + uint32_t num_messages = 0; + + if (!data || !data->message_data) + return 0; + + mutex_lock(&ctrl->msg_lock); + + num_messages = data->message_data->num_messages; + ctrl->total_message_length = 0; /* Total length of all messages */ + + for (i = 0; i 
< num_messages; i++) + ctrl->total_message_length += + data->message_data->messages[i].length; + + memcpy(ctrl->msg_part, data->message_data->messages, + sizeof(data->message_data->messages)); + + ctrl->rx_status = data->message_data->rx_status; + ctrl->abort_mask = data->abort_mask; + + if (!ctrl->total_message_length) { + mutex_unlock(&ctrl->msg_lock); + return 0; + } + + ctrl->response.data = data->buf; + ctrl->response.length = ctrl->total_message_length; + ctrl->request.data = data->buf; + ctrl->request.length = ctrl->total_message_length; + + ctrl->transaction_delay = data->transaction_delay; + ctrl->transaction_timeout = data->transaction_timeout; + + mutex_unlock(&ctrl->msg_lock); + + return 0; +} + +static void dp_hdcp2p2_send_auth_status(struct dp_hdcp2p2_ctrl *ctrl) +{ + ctrl->init_data.notify_status(ctrl->init_data.cb_data, + atomic_read(&ctrl->auth_state)); +} + +static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable) +{ + void __iomem *base = ctrl->init_data.dp_ahb->base; + struct dp_hdcp2p2_interrupts *intr = ctrl->intr; + + if (atomic_read(&ctrl->abort)) + return; + + while (intr && intr->reg) { + struct dp_hdcp2p2_int_set *int_set = intr->int_set; + u32 interrupts = 0; + + while (int_set && int_set->interrupt) { + interrupts |= int_set->interrupt; + int_set++; + } + + if (enable) + dp_write(base + intr->reg, + dp_read(base + intr->reg) | interrupts); + else + dp_write(base + intr->reg, + dp_read(base + intr->reg) & ~interrupts); + intr++; + } +} + +static int dp_hdcp2p2_wakeup(struct hdcp_transport_wakeup_data *data) +{ + struct dp_hdcp2p2_ctrl *ctrl; + + if (!data) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + ctrl = data->context; + if (!ctrl) { + DP_ERR("invalid ctrl\n"); + return -EINVAL; + } + + if (dp_hdcp2p2_copy_buf(ctrl, data)) + goto exit; + + ctrl->polling = false; + switch (data->cmd) { + case HDCP_TRANSPORT_CMD_STATUS_SUCCESS: + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED); + kfifo_put(&ctrl->cmd_q, data->cmd); + wake_up(&ctrl->wait_q); + break; + case HDCP_TRANSPORT_CMD_STATUS_FAILED: + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); + kfifo_put(&ctrl->cmd_q, data->cmd); + kthread_park(ctrl->thread); + break; + default: + kfifo_put(&ctrl->cmd_q, data->cmd); + wake_up(&ctrl->wait_q); + break; + } + +exit: + return 0; +} + +static inline void dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl, + struct sde_hdcp_2x_wakeup_data *data) +{ + int rc = 0; + + if (ctrl && ctrl->lib && ctrl->lib->wakeup && + data && (data->cmd != HDCP_2X_CMD_INVALID)) { + rc = ctrl->lib->wakeup(data); + if (rc) + DP_ERR("error sending %s to lib\n", + sde_hdcp_2x_cmd_to_str(data->cmd)); + } +} + +static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + ctrl->sink_status = SINK_DISCONNECTED; + atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE); +} + +static int dp_hdcp2p2_register(void *input, bool mst_enabled) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_ENABLE}; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + if (mst_enabled) + cdata.device_type = HDCP_TXMTR_DP_MST; + else + cdata.device_type = HDCP_TXMTR_DP; + + cdata.context = ctrl->lib_ctx; + rc = ctrl->lib->wakeup(&cdata); + + return rc; +} + +static int dp_hdcp2p2_on(void *input) +{ + int rc = 0; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + rc = 
dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + cdata.cmd = HDCP_2X_CMD_START; + cdata.context = ctrl->lib_ctx; + rc = ctrl->lib->wakeup(&cdata); + if (rc) + DP_ERR("Unable to start the HDCP 2.2 library (%d)\n", rc); + + return rc; +} + +static void dp_hdcp2p2_off(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_DISABLE}; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return; + + dp_hdcp2p2_set_interrupts(ctrl, false); + + dp_hdcp2p2_reset(ctrl); + + kthread_park(ctrl->thread); + + cdata.context = ctrl->lib_ctx; + ctrl->lib->wakeup(&cdata); +} + +static int dp_hdcp2p2_authenticate(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct hdcp_transport_wakeup_data cdata = { + HDCP_TRANSPORT_CMD_AUTHENTICATE}; + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + dp_hdcp2p2_set_interrupts(ctrl, true); + + ctrl->sink_status = SINK_CONNECTED; + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING); + + kthread_park(ctrl->thread); + kfifo_reset(&ctrl->cmd_q); + kthread_unpark(ctrl->thread); + + cdata.context = input; + dp_hdcp2p2_wakeup(&cdata); + + return rc; +} + +static int dp_hdcp2p2_reauthenticate(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_hdcp2p2_reset((struct dp_hdcp2p2_ctrl *)input); + + return dp_hdcp2p2_authenticate(input); +} + +static void dp_hdcp2p2_min_level_change(void *client_ctx, + u8 min_enc_level) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)client_ctx; + struct sde_hdcp_2x_wakeup_data cdata = { + HDCP_2X_CMD_MIN_ENC_LEVEL}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + cdata.context = ctrl->lib_ctx; + cdata.min_enc_level = min_enc_level; + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0, max_size = 16, read_size = 0, bytes_read = 0; + int size = ctrl->request.length, offset = ctrl->msg_part->offset; + u8 *buf = ctrl->request.data; + s64 diff_ms; + ktime_t start_read, finish_read; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE || + atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL) { + DP_ERR("invalid hdcp state\n"); + rc = -EINVAL; + goto exit; + } + + if (!buf) { + DP_ERR("invalid request buffer\n"); + rc = -EINVAL; + goto exit; + } + + DP_DEBUG("offset(0x%x), size(%d)\n", offset, size); + + start_read = ktime_get(); + do { + read_size = min(size, max_size); + + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + offset, buf, read_size); + if (bytes_read != read_size) { + DP_ERR("fail: offset(0x%x), size(0x%x), rc(0x%x)\n", + offset, read_size, bytes_read); + rc = -EINVAL; + break; + } + + buf += read_size; + offset += read_size; + size -= read_size; + } while (size > 0); + finish_read = ktime_get(); + diff_ms = ktime_ms_delta(finish_read, start_read); + + if (ctrl->transaction_timeout && diff_ms > ctrl->transaction_timeout) { + DP_ERR("HDCP read timeout exceeded (%dms > %dms)\n", diff_ms, + ctrl->transaction_timeout); + rc = -ETIMEDOUT; + } +exit: + return rc; +} + +static int dp_hdcp2p2_aux_write_message(struct dp_hdcp2p2_ctrl *ctrl, + u8 *buf, int size, uint offset, uint timeout) +{ + int const max_size = 16; + int rc = 0, write_size = 0, bytes_written = 0; + + DP_DEBUG("offset(0x%x), size(%d)\n", offset, size); + + do { + write_size = min(size, max_size); + + bytes_written = 
drm_dp_dpcd_write(ctrl->init_data.drm_aux, + offset, buf, write_size); + if (bytes_written != write_size) { + DP_ERR("fail: offset(0x%x), size(0x%x), rc(0x%x)\n", + offset, write_size, bytes_written); + rc = -EINVAL; + break; + } + + buf += write_size; + offset += write_size; + size -= write_size; + } while (size > 0); + + return rc; +} + +static bool dp_hdcp2p2_feature_supported(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_ops *lib = NULL; + bool supported = false; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return supported; + + lib = ctrl->lib; + if (lib->feature_supported) + supported = lib->feature_supported( + ctrl->lib_ctx); + + return supported; +} + +static void dp_hdcp2p2_force_encryption(void *data, bool enable) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = data; + struct sde_hdcp_2x_ops *lib = NULL; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return; + + lib = ctrl->lib; + if (lib->force_encryption) + lib->force_encryption(ctrl->lib_ctx, enable); +} + +static void dp_hdcp2p2_send_msg(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto exit; + } + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("hdcp is off\n"); + goto exit; + } + + mutex_lock(&ctrl->msg_lock); + + rc = dp_hdcp2p2_aux_write_message(ctrl, ctrl->response.data, + ctrl->response.length, ctrl->msg_part->offset, + ctrl->transaction_delay); + if (rc) { + DP_ERR("Error sending msg to sink %d\n", rc); + mutex_unlock(&ctrl->msg_lock); + goto exit; + } + + cdata.cmd = HDCP_2X_CMD_MSG_SEND_SUCCESS; + cdata.timeout = ctrl->transaction_delay; + mutex_unlock(&ctrl->msg_lock); + +exit: + if (rc == -ETIMEDOUT) + cdata.cmd = HDCP_2X_CMD_MSG_SEND_TIMEOUT; + else if (rc) + cdata.cmd = HDCP_2X_CMD_MSG_SEND_FAILED; + + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0; + struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID }; + + cdata.context = ctrl->lib_ctx; + + rc = dp_hdcp2p2_aux_read_message(ctrl); + if (rc) { + DP_ERR("error reading message %d\n", rc); + goto exit; + } + + cdata.total_message_length = ctrl->total_message_length; + cdata.timeout = ctrl->transaction_delay; +exit: + if (rc == -ETIMEDOUT) + cdata.cmd = HDCP_2X_CMD_MSG_RECV_TIMEOUT; + else if (rc) + cdata.cmd = HDCP_2X_CMD_MSG_RECV_FAILED; + else + cdata.cmd = HDCP_2X_CMD_MSG_RECV_SUCCESS; + + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); + + return rc; +} + +static void dp_hdcp2p2_recv_msg(struct dp_hdcp2p2_ctrl *ctrl) +{ + struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID }; + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("hdcp is off\n"); + return; + } + + if (ctrl->transaction_delay) + msleep(ctrl->transaction_delay); + + dp_hdcp2p2_get_msg_from_sink(ctrl); +} + +static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0, retries = 10; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL || + atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("invalid hdcp state\n"); + return; + } + + cdata.context = ctrl->lib_ctx; + + if (ctrl->sink_rx_status & ctrl->abort_mask) { + if (ctrl->sink_rx_status & BIT(3)) + 
DP_ERR("reauth_req set by sink\n"); + + if (ctrl->sink_rx_status & BIT(4)) + DP_ERR("link failure reported by sink\n"); + + ctrl->sink_rx_status = 0; + ctrl->rx_status = 0; + + rc = -ENOLINK; + + cdata.cmd = HDCP_2X_CMD_LINK_FAILED; + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); + goto exit; + } + + /* wait for polling to start till spec allowed timeout */ + while (!ctrl->polling && retries--) + msleep(20); + + /* check if sink has made a message available */ + if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) { + ctrl->sink_rx_status = 0; + ctrl->rx_status = 0; + + dp_hdcp2p2_get_msg_from_sink(ctrl); + + ctrl->polling = false; + } +exit: + if (rc) + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl) +{ + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH}; + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING) + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl, + u8 *rx_status) +{ + u32 const cp_irq_dpcd_offset = 0x201; + u32 const rxstatus_dpcd_offset = 0x69493; + ssize_t const bytes_to_read = 1; + ssize_t bytes_read = 0; + u8 buf = 0; + int rc = 0; + bool cp_irq = false; + + *rx_status = 0; + + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + cp_irq_dpcd_offset, &buf, bytes_to_read); + if (bytes_read != bytes_to_read) { + DP_ERR("cp irq read failed\n"); + rc = bytes_read; + goto error; + } + + cp_irq = buf & BIT(2); + DP_DEBUG("cp_irq=0x%x\n", cp_irq); + buf = 0; + + if (cp_irq) { + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + rxstatus_dpcd_offset, &buf, bytes_to_read); + if (bytes_read != bytes_to_read) { + DP_ERR("rxstatus read failed\n"); + rc = bytes_read; + goto error; + } + *rx_status = buf; + DP_DEBUG("rx_status=0x%x\n", *rx_status); + } + +error: + return rc; +} + +static int dp_hdcp2p2_cp_irq(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL || + atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("invalid hdcp state\n"); + return -EINVAL; + } + + ctrl->sink_rx_status = 0; + rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status); + if (rc) { + DP_ERR("failed to read rx status\n"); + return rc; + } + + DP_DEBUG("sink_rx_status=0x%x\n", ctrl->sink_rx_status); + + if (!ctrl->sink_rx_status) { + DP_DEBUG("not a hdcp 2.2 irq\n"); + return -EINVAL; + } + + + kfifo_put(&ctrl->cmd_q, HDCP_TRANSPORT_CMD_LINK_CHECK); + wake_up(&ctrl->wait_q); + + return 0; +} + +static int dp_hdcp2p2_isr(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + int rc = 0; + struct dss_io_data *io; + struct dp_hdcp2p2_interrupts *intr; + u32 hdcp_int_val = 0; + + if (!ctrl || !ctrl->init_data.dp_ahb) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + io = ctrl->init_data.dp_ahb; + intr = ctrl->intr; + + while (intr && intr->reg) { + struct dp_hdcp2p2_int_set *int_set = intr->int_set; + + hdcp_int_val = dp_read(io->base + intr->reg); + + while (int_set && int_set->interrupt) { + if (hdcp_int_val & (int_set->interrupt >> 2)) { + DP_DEBUG("%s\n", int_set->name); + + if (int_set->func) + int_set->func(ctrl); + + dp_write(io->base + intr->reg, hdcp_int_val | + (int_set->interrupt >> 1)); + } + int_set++; + } + intr++; + } +end: + return rc; +} + +static bool dp_hdcp2p2_supported(void *input) +{ + 
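+	/*
+	 * Sink capability check: read the 3-byte RxCaps field at DPCD
+	 * offset 0x6921d and treat the sink as HDCP 2.2 capable only if
+	 * it reports version 2 with the HDCP_CAPABLE bit set.
+	 */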
struct dp_hdcp2p2_ctrl *ctrl = input; + u32 const rxcaps_dpcd_offset = 0x6921d; + ssize_t bytes_read = 0; + u8 buf[DP_HDCP_RXCAPS_LENGTH]; + + DP_DEBUG("Checking sink capability\n"); + + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + rxcaps_dpcd_offset, &buf, DP_HDCP_RXCAPS_LENGTH); + if (bytes_read != DP_HDCP_RXCAPS_LENGTH) { + DP_ERR("RxCaps read failed\n"); + goto error; + } + + DP_DEBUG("HDCP_CAPABLE=%lu\n", (buf[2] & BIT(1)) >> 1); + DP_DEBUG("VERSION=%d\n", buf[0]); + + if ((buf[2] & BIT(1)) && (buf[0] == 0x2)) + return true; +error: + return false; +} + +static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl, + struct sde_hdcp_2x_wakeup_data *cdata) +{ + if (!ctrl || cdata->num_streams == 0 || !cdata->streams) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (!ctrl->lib_ctx) { + DP_ERR("HDCP library needs to be acquired\n"); + return -EINVAL; + } + + if (!ctrl->lib) { + DP_ERR("invalid lib ops data\n"); + return -EINVAL; + } + + cdata->context = ctrl->lib_ctx; + return ctrl->lib->wakeup(cdata); +} + + +static int dp_hdcp2p2_register_streams(void *input, u8 num_streams, + struct stream_info *streams) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS}; + + cdata.streams = streams; + cdata.num_streams = num_streams; + return dp_hdcp2p2_change_streams(ctrl, &cdata); +} + +static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams, + struct stream_info *streams) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS}; + + cdata.streams = streams; + cdata.num_streams = num_streams; + return dp_hdcp2p2_change_streams(ctrl, &cdata); +} + +void sde_dp_hdcp2p2_deinit(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) { + cdata.cmd = HDCP_2X_CMD_STOP; + cdata.context = ctrl->lib_ctx; + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); + } + + sde_hdcp_2x_deregister(ctrl->lib_ctx); + + kthread_stop(ctrl->thread); + + mutex_destroy(&ctrl->mutex); + mutex_destroy(&ctrl->msg_lock); + kfree(ctrl); +} + +static int dp_hdcp2p2_main(void *data) +{ + struct dp_hdcp2p2_ctrl *ctrl = data; + enum hdcp_transport_wakeup_cmd cmd; + + while (1) { + wait_event(ctrl->wait_q, + !kfifo_is_empty(&ctrl->cmd_q) || + kthread_should_stop() || + kthread_should_park()); + + if (kthread_should_stop()) + break; + + if (kfifo_is_empty(&ctrl->cmd_q) && kthread_should_park()) { + kthread_parkme(); + continue; + } + + if (!kfifo_get(&ctrl->cmd_q, &cmd)) + continue; + + switch (cmd) { + case HDCP_TRANSPORT_CMD_SEND_MESSAGE: + dp_hdcp2p2_send_msg(ctrl); + break; + case HDCP_TRANSPORT_CMD_RECV_MESSAGE: + if (ctrl->rx_status) + ctrl->polling = true; + else + dp_hdcp2p2_recv_msg(ctrl); + break; + case HDCP_TRANSPORT_CMD_STATUS_SUCCESS: + dp_hdcp2p2_send_auth_status(ctrl); + break; + case HDCP_TRANSPORT_CMD_STATUS_FAILED: + dp_hdcp2p2_set_interrupts(ctrl, false); + dp_hdcp2p2_send_auth_status(ctrl); + break; + case HDCP_TRANSPORT_CMD_LINK_POLL: + ctrl->polling = true; + break; + case HDCP_TRANSPORT_CMD_LINK_CHECK: + dp_hdcp2p2_link_check(ctrl); + break; + case HDCP_TRANSPORT_CMD_AUTHENTICATE: + dp_hdcp2p2_start_auth(ctrl); + break; + default: + break; + } + } + + return 0; +} + +static void dp_hdcp2p2_abort(void *input, bool abort) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; 
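+	/*
+	 * Latch the abort request; dp_hdcp2p2_set_interrupts() checks this
+	 * flag and leaves the HDCP interrupt registers untouched while an
+	 * abort is pending.
+	 */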
+ + atomic_set(&ctrl->abort, abort); +} + +void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl; + static struct sde_hdcp_ops ops = { + .isr = dp_hdcp2p2_isr, + .reauthenticate = dp_hdcp2p2_reauthenticate, + .authenticate = dp_hdcp2p2_authenticate, + .feature_supported = dp_hdcp2p2_feature_supported, + .force_encryption = dp_hdcp2p2_force_encryption, + .sink_support = dp_hdcp2p2_supported, + .set_mode = dp_hdcp2p2_register, + .on = dp_hdcp2p2_on, + .off = dp_hdcp2p2_off, + .abort = dp_hdcp2p2_abort, + .cp_irq = dp_hdcp2p2_cp_irq, + .register_streams = dp_hdcp2p2_register_streams, + .deregister_streams = dp_hdcp2p2_deregister_streams, + }; + + static struct hdcp_transport_ops client_ops = { + .wakeup = dp_hdcp2p2_wakeup, + }; + static struct dp_hdcp2p2_int_set int_set1[] = { + {BIT(17), "authentication successful", NULL}, + {BIT(20), "authentication failed", NULL}, + {BIT(24), "encryption enabled", NULL}, + {BIT(27), "encryption disabled", NULL}, + {0}, + }; + static struct dp_hdcp2p2_int_set int_set2[] = { + {BIT(2), "key fifo underflow", NULL}, + {0}, + }; + static struct dp_hdcp2p2_interrupts intr[] = { + {DP_INTR_STATUS2, int_set1}, + {DP_INTR_STATUS3, int_set2}, + {0} + }; + static struct sde_hdcp_2x_ops hdcp2x_ops; + struct sde_hdcp_2x_register_data register_data = {0}; + + if (!init_data || !init_data->cb_data || + !init_data->notify_status || !init_data->drm_aux) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return ERR_PTR(-ENOMEM); + + ctrl->init_data = *init_data; + ctrl->lib = &hdcp2x_ops; + ctrl->response.data = NULL; + ctrl->request.data = NULL; + + ctrl->sink_status = SINK_DISCONNECTED; + ctrl->intr = intr; + + INIT_KFIFO(ctrl->cmd_q); + + init_waitqueue_head(&ctrl->wait_q); + atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE); + + ctrl->ops = &ops; + mutex_init(&ctrl->mutex); + mutex_init(&ctrl->msg_lock); + + register_data.hdcp_data = &ctrl->lib_ctx; + register_data.client_ops = &client_ops; + register_data.ops = &hdcp2x_ops; + register_data.client_data = ctrl; + + rc = sde_hdcp_2x_register(®ister_data); + if (rc) { + DP_ERR("Unable to register with HDCP 2.2 library\n"); + goto error; + } + + if (IS_ENABLED(CONFIG_HDCP_QSEECOM)) + msm_hdcp_register_cb(init_data->msm_hdcp_dev, ctrl, + dp_hdcp2p2_min_level_change); + + ctrl->thread = kthread_run(dp_hdcp2p2_main, ctrl, "dp_hdcp2p2"); + + if (IS_ERR(ctrl->thread)) { + DP_ERR("unable to start DP hdcp2p2 thread\n"); + rc = PTR_ERR(ctrl->thread); + ctrl->thread = NULL; + goto error; + } + + return ctrl; +error: + kfree(ctrl); + return ERR_PTR(rc); +} + +struct sde_hdcp_ops *sde_dp_hdcp2p2_get(void *input) +{ + return ((struct dp_hdcp2p2_ctrl *)input)->ops; +} diff --git a/techpack/display/msm/dp/dp_hpd.c b/techpack/display/msm/dp/dp_hpd.c new file mode 100644 index 0000000000000000000000000000000000000000..3c96350ccb3e105958e31d996cd4ed364a4afd24 --- /dev/null +++ b/techpack/display/msm/dp/dp_hpd.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "dp_hpd.h" +#include "dp_usbpd.h" +#include "dp_gpio_hpd.h" +#include "dp_lphw_hpd.h" +#include "dp_debug.h" + +static void dp_hpd_host_init(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + if (!catalog) { + DP_ERR("invalid input\n"); + return; + } + catalog->config_hpd(catalog, true); +} + +static void dp_hpd_host_deinit(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + if (!catalog) { + DP_ERR("invalid input\n"); + return; + } + catalog->config_hpd(catalog, false); +} + +static void dp_hpd_isr(struct dp_hpd *dp_hpd) +{ +} + +struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser, + struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb) +{ + struct dp_hpd *dp_hpd; + + if (parser->no_aux_switch && parser->lphw_hpd) { + dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb); + if (IS_ERR(dp_hpd)) { + DP_ERR("failed to get lphw hpd\n"); + return dp_hpd; + } + dp_hpd->type = DP_HPD_LPHW; + } else if (parser->no_aux_switch) { + dp_hpd = dp_gpio_hpd_get(dev, cb); + if (IS_ERR(dp_hpd)) { + DP_ERR("failed to get gpio hpd\n"); + return dp_hpd; + } + dp_hpd->type = DP_HPD_GPIO; + } else { + dp_hpd = dp_usbpd_get(dev, cb); + if (IS_ERR(dp_hpd)) { + DP_ERR("failed to get usbpd\n"); + return dp_hpd; + } + dp_hpd->type = DP_HPD_USBPD; + } + + if (!dp_hpd->host_init) + dp_hpd->host_init = dp_hpd_host_init; + if (!dp_hpd->host_deinit) + dp_hpd->host_deinit = dp_hpd_host_deinit; + if (!dp_hpd->isr) + dp_hpd->isr = dp_hpd_isr; + + return dp_hpd; +} + +void dp_hpd_put(struct dp_hpd *dp_hpd) +{ + if (!dp_hpd) + return; + + switch (dp_hpd->type) { + case DP_HPD_USBPD: + dp_usbpd_put(dp_hpd); + break; + case DP_HPD_GPIO: + dp_gpio_hpd_put(dp_hpd); + break; + case DP_HPD_LPHW: + dp_lphw_hpd_put(dp_hpd); + break; + default: + DP_ERR("unknown hpd type %d\n", dp_hpd->type); + break; + } +} diff --git a/techpack/display/msm/dp/dp_hpd.h b/techpack/display/msm/dp/dp_hpd.h new file mode 100644 index 0000000000000000000000000000000000000000..86806fbdf773413f6d3d29e251fc7d42aed6da20 --- /dev/null +++ b/techpack/display/msm/dp/dp_hpd.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_HPD_H_ +#define _DP_HPD_H_ + +#include +#include "dp_parser.h" +#include "dp_catalog.h" + +struct device; + +/** + * enum dp_hpd_type - dp hpd type + * @DP_HPD_USBPD: USB type-c based HPD + * @DP_HPD_GPIO: GPIO based HPD + * @DP_HPD_BUILTIN: Controller built-in HPD + */ + +enum dp_hpd_type { + DP_HPD_USBPD, + DP_HPD_GPIO, + DP_HPD_LPHW, + DP_HPD_BUILTIN, +}; + +/** + * struct dp_hpd_cb - callback functions provided by the client + * + * @configure: called when dp connection is ready. + * @disconnect: notify the cable disconnect event. + * @attention: notify any attention message event. + */ +struct dp_hpd_cb { + int (*configure)(struct device *dev); + int (*disconnect)(struct device *dev); + int (*attention)(struct device *dev); +}; + +/** + * struct dp_hpd - DisplayPort HPD status + * + * @type: type of HPD + * @orientation: plug orientation configuration, USBPD type only. + * @hpd_high: Hot Plug Detect signal is high. 
+ * @hpd_irq: Change in the status since last message + * @alt_mode_cfg_done: bool to specify alt mode status + * @multi_func: multi-function preferred, USBPD type only + * @isr: event interrupt, BUILTIN and LPHW type only + * @register_hpd: register hardware callback + * @host_init: source or host side setup for hpd + * @host_deinit: source or host side de-initializations + * @simulate_connect: simulate disconnect or connect for debug mode + * @simulate_attention: simulate attention messages for debug mode + * @wakeup_phy: wakeup USBPD phy layer + */ +struct dp_hpd { + enum dp_hpd_type type; + u32 orientation; + bool hpd_high; + bool hpd_irq; + bool alt_mode_cfg_done; + bool multi_func; + bool peer_usb_comm; + + void (*isr)(struct dp_hpd *dp_hpd); + int (*register_hpd)(struct dp_hpd *dp_hpd); + void (*host_init)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog); + void (*host_deinit)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog); + int (*simulate_connect)(struct dp_hpd *dp_hpd, bool hpd); + int (*simulate_attention)(struct dp_hpd *dp_hpd, int vdo); + void (*wakeup_phy)(struct dp_hpd *dp_hpd, bool wakeup); +}; + +/** + * dp_hpd_get() - configure and get the DisplayPlot HPD module data + * + * @dev: device instance of the caller + * @parser: DP parser + * @cb: callback function for HPD response + * return: pointer to allocated hpd module data + * + * This function sets up the hpd module + */ +struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser, + struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb); + +/** + * dp_hpd_put() + * + * Cleans up dp_hpd instance + * + * @dp_hpd: instance of dp_hpd + */ +void dp_hpd_put(struct dp_hpd *dp_hpd); + +#endif /* _DP_HPD_H_ */ diff --git a/techpack/display/msm/dp/dp_link.c b/techpack/display/msm/dp/dp_link.c new file mode 100644 index 0000000000000000000000000000000000000000..20a09cf9bf5fb7466e6d2240196966a5e9c83389 --- /dev/null +++ b/techpack/display/msm/dp/dp_link.c @@ -0,0 +1,1529 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_debug.h" + +enum dynamic_range { + DP_DYNAMIC_RANGE_RGB_VESA = 0x00, + DP_DYNAMIC_RANGE_RGB_CEA = 0x01, + DP_DYNAMIC_RANGE_UNKNOWN = 0xFFFFFFFF, +}; + +enum audio_sample_rate { + AUDIO_SAMPLE_RATE_32_KHZ = 0x00, + AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01, + AUDIO_SAMPLE_RATE_48_KHZ = 0x02, + AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03, + AUDIO_SAMPLE_RATE_96_KHZ = 0x04, + AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05, + AUDIO_SAMPLE_RATE_192_KHZ = 0x06, +}; + +enum audio_pattern_type { + AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00, + AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, +}; + +struct dp_link_request { + u32 test_requested; + u32 test_link_rate; + u32 test_lane_count; +}; + +struct dp_link_private { + u32 prev_sink_count; + struct device *dev; + struct dp_aux *aux; + struct dp_link dp_link; + + struct dp_link_request request; + u8 link_status[DP_LINK_STATUS_SIZE]; +}; + +static char *dp_link_get_audio_test_pattern(u32 pattern) +{ + switch (pattern) { + case AUDIO_TEST_PATTERN_OPERATOR_DEFINED: + return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_OPERATOR_DEFINED); + case AUDIO_TEST_PATTERN_SAWTOOTH: + return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_SAWTOOTH); + default: + return "unknown"; + } +} + +static char *dp_link_get_audio_sample_rate(u32 rate) +{ + switch (rate) { + case AUDIO_SAMPLE_RATE_32_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_32_KHZ); + case AUDIO_SAMPLE_RATE_44_1_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_44_1_KHZ); + case AUDIO_SAMPLE_RATE_48_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_48_KHZ); + case AUDIO_SAMPLE_RATE_88_2_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_88_2_KHZ); + case AUDIO_SAMPLE_RATE_96_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_96_KHZ); + case AUDIO_SAMPLE_RATE_176_4_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_176_4_KHZ); + case AUDIO_SAMPLE_RATE_192_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_192_KHZ); + default: + return "unknown"; + } +} + +static int dp_link_get_period(struct dp_link_private *link, int const addr) +{ + int ret = 0; + u8 bp; + u8 data; + u32 const param_len = 0x1; + u32 const max_audio_period = 0xA; + + /* TEST_AUDIO_PERIOD_CH_XX */ + if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, + param_len) < param_len) { + DP_ERR("failed to read test_audio_period (0x%x)\n", addr); + ret = -EINVAL; + goto exit; + } + + data = bp; + + /* Period - Bits 3:0 */ + data = data & 0xF; + if ((int)data > max_audio_period) { + DP_ERR("invalid test_audio_period_ch_1 = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + ret = data; +exit: + return ret; +} + +static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +{ + int ret = 0; + struct dp_link_test_audio *req = &link->dp_link.test_audio; + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_1 = ret; + DP_DEBUG("test_audio_period_ch_1 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_2 = ret; + DP_DEBUG("test_audio_period_ch_2 = 0x%x\n", ret); + + /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_3 = ret; + DP_DEBUG("test_audio_period_ch_3 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_4 = ret; + DP_DEBUG("test_audio_period_ch_4 = 0x%x\n", ret); 
+ + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_5 = ret; + DP_DEBUG("test_audio_period_ch_5 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_6 = ret; + DP_DEBUG("test_audio_period_ch_6 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_7 = ret; + DP_DEBUG("test_audio_period_ch_7 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_8 = ret; + DP_DEBUG("test_audio_period_ch_8 = 0x%x\n", ret); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +{ + int ret = 0; + u8 bp; + u8 data; + int rlen; + int const param_len = 0x1; + int const max_audio_pattern_type = 0x1; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, + DP_TEST_AUDIO_PATTERN_TYPE, &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link audio mode data\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + /* Audio Pattern Type - Bits 7:0 */ + if ((int)data > max_audio_pattern_type) { + DP_ERR("invalid audio pattern type = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_pattern_type = data; + DP_DEBUG("audio pattern type = %s\n", + dp_link_get_audio_test_pattern(data)); +exit: + return ret; +} + +static int dp_link_parse_audio_mode(struct dp_link_private *link) +{ + int ret = 0; + u8 bp; + u8 data; + int rlen; + int const param_len = 0x1; + int const max_audio_sampling_rate = 0x6; + int const max_audio_channel_count = 0x8; + int sampling_rate = 0x0; + int channel_count = 0x0; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_AUDIO_MODE, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link audio mode data\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + /* Sampling Rate - Bits 3:0 */ + sampling_rate = data & 0xF; + if (sampling_rate > max_audio_sampling_rate) { + DP_ERR("sampling rate (0x%x) greater than max (0x%x)\n", + sampling_rate, max_audio_sampling_rate); + ret = -EINVAL; + goto exit; + } + + /* Channel Count - Bits 7:4 */ + channel_count = ((data & 0xF0) >> 4) + 1; + if (channel_count > max_audio_channel_count) { + DP_ERR("channel_count (0x%x) greater than max (0x%x)\n", + channel_count, max_audio_channel_count); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->dp_link.test_audio.test_audio_channel_count = channel_count; + DP_DEBUG("sampling_rate = %s, channel_count = 0x%x\n", + dp_link_get_audio_sample_rate(sampling_rate), channel_count); +exit: + return ret; +} + +/** + * dp_parse_audio_pattern_params() - parses audio pattern parameters from DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the audio link pattern parameters. + */ +static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + + ret = dp_link_parse_audio_mode(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_pattern_type(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_channel_period(link); + +exit: + return ret; +} + +/** + * dp_link_is_video_pattern_valid() - validates the video pattern + * @pattern: video pattern requested by the sink + * + * Returns true if the requested video pattern is supported. 
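+ * Supported patterns are: no pattern, color ramp, black and white vertical
+ * lines, and color square, matching the values read from the TEST_PATTERN
+ * DPCD register.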
+ */ +static bool dp_link_is_video_pattern_valid(u32 pattern) +{ + switch (pattern) { + case DP_NO_TEST_PATTERN: + case DP_COLOR_RAMP: + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + case DP_COLOR_SQUARE: + return true; + default: + return false; + } +} + +static char *dp_link_video_pattern_to_string(u32 test_video_pattern) +{ + switch (test_video_pattern) { + case DP_NO_TEST_PATTERN: + return DP_LINK_ENUM_STR(DP_NO_TEST_PATTERN); + case DP_COLOR_RAMP: + return DP_LINK_ENUM_STR(DP_COLOR_RAMP); + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + return DP_LINK_ENUM_STR(DP_BLACK_AND_WHITE_VERTICAL_LINES); + case DP_COLOR_SQUARE: + return DP_LINK_ENUM_STR(DP_COLOR_SQUARE); + default: + return "unknown"; + } +} + +/** + * dp_link_is_dynamic_range_valid() - validates the dynamic range + * @bit_depth: the dynamic range value to be checked + * + * Returns true if the dynamic range value is supported. + */ +static bool dp_link_is_dynamic_range_valid(u32 dr) +{ + switch (dr) { + case DP_DYNAMIC_RANGE_RGB_VESA: + case DP_DYNAMIC_RANGE_RGB_CEA: + return true; + default: + return false; + } +} + +static char *dp_link_dynamic_range_to_string(u32 dr) +{ + switch (dr) { + case DP_DYNAMIC_RANGE_RGB_VESA: + return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_VESA); + case DP_DYNAMIC_RANGE_RGB_CEA: + return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_CEA); + case DP_DYNAMIC_RANGE_UNKNOWN: + default: + return "unknown"; + } +} + +/** + * dp_link_is_bit_depth_valid() - validates the bit depth requested + * @bit_depth: bit depth requested by the sink + * + * Returns true if the requested bit depth is supported. + */ +static bool dp_link_is_bit_depth_valid(u32 tbd) +{ + /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + case DP_TEST_BIT_DEPTH_8: + case DP_TEST_BIT_DEPTH_10: + return true; + default: + return false; + } +} + +static char *dp_link_bit_depth_to_string(u32 tbd) +{ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_6); + case DP_TEST_BIT_DEPTH_8: + return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_8); + case DP_TEST_BIT_DEPTH_10: + return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_10); + case DP_TEST_BIT_DEPTH_UNKNOWN: + default: + return "unknown"; + } +} + +static int dp_link_parse_timing_params1(struct dp_link_private *link, + int const addr, int const len, u32 *val) +{ + u8 bp[2]; + int rlen; + + if (len < 2) + return -EINVAL; + + /* Read the requested video link pattern (Byte 0x221). */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len); + if (rlen < len) { + DP_ERR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val = bp[1] | (bp[0] << 8); + + return 0; +} + +static int dp_link_parse_timing_params2(struct dp_link_private *link, + int const addr, int const len, u32 *val1, u32 *val2) +{ + u8 bp[2]; + int rlen; + + if (len < 2) + return -EINVAL; + + /* Read the requested video link pattern (Byte 0x221). 
*/ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len); + if (rlen < len) { + DP_ERR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val1 = (bp[0] & BIT(7)) >> 7; + *val2 = bp[1] | ((bp[0] & 0x7F) << 8); + + return 0; +} + +static int dp_link_parse_timing_params3(struct dp_link_private *link, + int const addr, u32 *val) +{ + u8 bp; + u32 len = 1; + int rlen; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len); + if (rlen < 1) { + DP_ERR("failed to read 0x%x\n", addr); + return -EINVAL; + } + *val = bp; + + return 0; +} + +/** + * dp_parse_video_pattern_params() - parses video pattern parameters from DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the video link pattern and the link + * bit depth requested by the sink and, and if the values parsed are valid. + */ +static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + int rlen; + u8 bp; + u8 data; + u32 dyn_range; + int const param_len = 0x1; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_PATTERN, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link video pattern\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + if (!dp_link_is_video_pattern_valid(data)) { + DP_ERR("invalid link video pattern = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_video.test_video_pattern = data; + DP_DEBUG("link video pattern = 0x%x (%s)\n", + link->dp_link.test_video.test_video_pattern, + dp_link_video_pattern_to_string( + link->dp_link.test_video.test_video_pattern)); + + /* Read the requested color bit depth and dynamic range (Byte 0x232) */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_MISC0, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link bit depth\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + /* Dynamic Range */ + dyn_range = (data & DP_TEST_DYNAMIC_RANGE_CEA) >> 3; + if (!dp_link_is_dynamic_range_valid(dyn_range)) { + DP_ERR("invalid link dynamic range = 0x%x\n", dyn_range); + ret = -EINVAL; + goto exit; + } + link->dp_link.test_video.test_dyn_range = dyn_range; + DP_DEBUG("link dynamic range = 0x%x (%s)\n", + link->dp_link.test_video.test_dyn_range, + dp_link_dynamic_range_to_string( + link->dp_link.test_video.test_dyn_range)); + + /* Color bit depth */ + data &= DP_TEST_BIT_DEPTH_MASK; + if (!dp_link_is_bit_depth_valid(data)) { + DP_ERR("invalid link bit depth = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_video.test_bit_depth = data; + DP_DEBUG("link bit depth = 0x%x (%s)\n", + link->dp_link.test_video.test_bit_depth, + dp_link_bit_depth_to_string( + link->dp_link.test_video.test_bit_depth)); + + /* resolution timing params */ + ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->dp_link.test_video.test_h_total); + if (ret) { + DP_ERR("failed to parse test_h_total (DP_TEST_H_TOTAL_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_H_TOTAL = %d\n", link->dp_link.test_video.test_h_total); + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->dp_link.test_video.test_v_total); + if (ret) { + DP_ERR("failed to parse test_v_total (DP_TEST_V_TOTAL_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_V_TOTAL = %d\n", link->dp_link.test_video.test_v_total); + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->dp_link.test_video.test_h_start); + if (ret) { + DP_ERR("failed to parse test_h_start (DP_TEST_H_START_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_H_START = %d\n", 
link->dp_link.test_video.test_h_start); + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->dp_link.test_video.test_v_start); + if (ret) { + DP_ERR("failed to parse test_v_start (DP_TEST_V_START_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_V_START = %d\n", link->dp_link.test_video.test_v_start); + + ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->dp_link.test_video.test_hsync_pol, + &link->dp_link.test_video.test_hsync_width); + if (ret) { + DP_ERR("failed to parse (DP_TEST_HSYNC_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_HSYNC_POL = %d\n", + link->dp_link.test_video.test_hsync_pol); + DP_DEBUG("TEST_HSYNC_WIDTH = %d\n", + link->dp_link.test_video.test_hsync_width); + + ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->dp_link.test_video.test_vsync_pol, + &link->dp_link.test_video.test_vsync_width); + if (ret) { + DP_ERR("failed to parse (DP_TEST_VSYNC_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_VSYNC_POL = %d\n", + link->dp_link.test_video.test_vsync_pol); + DP_DEBUG("TEST_VSYNC_WIDTH = %d\n", + link->dp_link.test_video.test_vsync_width); + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->dp_link.test_video.test_h_width); + if (ret) { + DP_ERR("failed to parse test_h_width (DP_TEST_H_WIDTH_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_H_WIDTH = %d\n", link->dp_link.test_video.test_h_width); + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->dp_link.test_video.test_v_height); + if (ret) { + DP_ERR("failed to parse test_v_height (DP_TEST_V_HEIGHT_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_V_HEIGHT = %d\n", + link->dp_link.test_video.test_v_height); + + ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->dp_link.test_video.test_rr_d); + link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + if (ret) { + DP_ERR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); + goto exit; + } + DP_DEBUG("TEST_REFRESH_DENOMINATOR = %d\n", + link->dp_link.test_video.test_rr_d); + + ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->dp_link.test_video.test_rr_n); + if (ret) { + DP_ERR("failed to parse test_rr_n (DP_TEST_REFRESH_RATE_NUMERATOR)\n"); + goto exit; + } + DP_DEBUG("TEST_REFRESH_NUMERATOR = %d\n", + link->dp_link.test_video.test_rr_n); +exit: + return ret; +} + +/** + * dp_link_parse_link_training_params() - parses link training parameters from + * DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane + * count (Byte 0x220), and if these values parse are valid. 
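+ * The lane count read from DPCD is masked to bits 4:0 before it is
+ * validated.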
+ */ +static int dp_link_parse_link_training_params(struct dp_link_private *link) +{ + u8 bp; + u8 data; + int ret = 0; + int rlen; + int const param_len = 0x1; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link rate\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + if (!is_link_rate_valid(data)) { + DP_ERR("invalid link rate = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->request.test_link_rate = data; + DP_DEBUG("link rate = 0x%x\n", link->request.test_link_rate); + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read lane count\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + data &= 0x1F; + + if (!is_lane_count_valid(data)) { + DP_ERR("invalid lane count = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->request.test_lane_count = data; + DP_DEBUG("lane count = 0x%x\n", link->request.test_lane_count); +exit: + return ret; +} + +static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel) +{ + switch (phy_test_pattern_sel) { + case DP_TEST_PHY_PATTERN_NONE: + case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING: + case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT: + case DP_TEST_PHY_PATTERN_PRBS7: + case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN: + case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1: + case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3: + return true; + default: + return false; + } +} + +/** + * dp_parse_phy_test_params() - parses the phy link parameters + * @link: Display Port Driver data + * + * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being + * requested. + */ +static int dp_link_parse_phy_test_params(struct dp_link_private *link) +{ + u8 bp; + u8 data; + int rlen; + int const param_len = 0x1; + int ret = 0; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_PHY_PATTERN, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read phy link pattern\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + link->dp_link.phy_params.phy_test_pattern_sel = data; + + DP_DEBUG("phy_test_pattern_sel = %s\n", + dp_link_get_phy_test_pattern(data)); + + if (!dp_link_is_phy_test_pattern_supported(data)) + ret = -EINVAL; +end: + return ret; +} + +/** + * dp_link_is_video_audio_test_requested() - checks for audio/video link request + * @link: link requested by the sink + * + * Returns true if the requested link is a permitted audio/video link. + */ +static bool dp_link_is_video_audio_test_requested(u32 link) +{ + return (link == DP_TEST_LINK_VIDEO_PATTERN) || + (link == (DP_TEST_LINK_AUDIO_PATTERN | + DP_TEST_LINK_VIDEO_PATTERN)) || + (link == DP_TEST_LINK_AUDIO_PATTERN) || + (link == (DP_TEST_LINK_AUDIO_PATTERN | + DP_TEST_LINK_AUDIO_DISABLED_VIDEO)); +} + +/** + * dp_link_supported() - checks if link requested by sink is supported + * @test_requested: link requested by the sink + * + * Returns true if the requested link is supported. 
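+ * Link training, EDID read, PHY test pattern and the audio/video pattern
+ * requests are the only ones accepted.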
+ */ +static bool dp_link_is_test_supported(u32 test_requested) +{ + return (test_requested == DP_TEST_LINK_TRAINING) || + (test_requested == DP_TEST_LINK_EDID_READ) || + (test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) || + dp_link_is_video_audio_test_requested(test_requested); +} + +static bool dp_link_is_test_edid_read(struct dp_link_private *link) +{ + return (link->request.test_requested == DP_TEST_LINK_EDID_READ); +} + +/** + * dp_sink_parse_test_request() - parses link request parameters from sink + * @link: Display Port Driver data + * + * Parses the DPCD to check if an automated link is requested (Byte 0x201), + * and what type of link automation is being requested (Byte 0x218). + */ +static int dp_link_parse_request(struct dp_link_private *link) +{ + int ret = 0; + u8 bp; + u8 data; + int rlen; + u32 const param_len = 0x1; + + /** + * Read the device service IRQ vector (Byte 0x201) to determine + * whether an automated link has been requested by the sink. + */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len); + if (rlen < param_len) { + DP_ERR("aux read failed\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + if (!(data & DP_AUTOMATED_TEST_REQUEST)) + return 0; + + /** + * Read the link request byte (Byte 0x218) to determine what type + * of automated link has been requested by the sink. + */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("aux read failed\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + if (!dp_link_is_test_supported(data)) { + DP_DEBUG("link 0x%x not supported\n", data); + goto end; + } + + link->request.test_requested = data; + + if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { + ret = dp_link_parse_phy_test_params(link); + if (ret) + goto end; + ret = dp_link_parse_link_training_params(link); + } + + if (link->request.test_requested == DP_TEST_LINK_TRAINING) + ret = dp_link_parse_link_training_params(link); + + if (dp_link_is_video_audio_test_requested( + link->request.test_requested)) { + ret = dp_link_parse_video_pattern_params(link); + if (ret) + goto end; + + ret = dp_link_parse_audio_pattern_params(link); + } +end: + /** + * Send a DP_TEST_ACK if all link parameters are valid, otherwise send + * a DP_TEST_NAK. + */ + if (ret) { + link->dp_link.test_response = DP_TEST_NAK; + } else { + if (!dp_link_is_test_edid_read(link)) + link->dp_link.test_response = DP_TEST_ACK; + else + link->dp_link.test_response = + DP_TEST_EDID_CHECKSUM_WRITE; + } + + return ret; +} + +/** + * dp_link_parse_sink_count() - parses the sink count + * + * Parses the DPCD to check if there is an update to the sink count + * (Byte 0x200), and whether all the sink devices connected have Content + * Protection enabled. 
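+ * The CP_READY flag is extracted from the raw value before the count field
+ * itself is masked out.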
+ */ +static int dp_link_parse_sink_count(struct dp_link *dp_link) +{ + int rlen; + int const param_len = 0x1; + struct dp_link_private *link = container_of(dp_link, + struct dp_link_private, dp_link); + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT, + &link->dp_link.sink_count.count, param_len); + if (rlen < param_len) { + DP_ERR("failed to read sink count\n"); + return -EINVAL; + } + + link->dp_link.sink_count.cp_ready = + link->dp_link.sink_count.count & DP_SINK_CP_READY; + /* BIT 7, BIT 5:0 */ + link->dp_link.sink_count.count = + DP_GET_SINK_COUNT(link->dp_link.sink_count.count); + + DP_DEBUG("sink_count = 0x%x, cp_ready = 0x%x\n", + link->dp_link.sink_count.count, + link->dp_link.sink_count.cp_ready); + return 0; +} + +static void dp_link_parse_sink_status_field(struct dp_link_private *link) +{ + int len = 0; + + link->prev_sink_count = link->dp_link.sink_count.count; + dp_link_parse_sink_count(&link->dp_link); + + len = drm_dp_dpcd_read_link_status(link->aux->drm_aux, + link->link_status); + if (len < DP_LINK_STATUS_SIZE) + DP_ERR("DP link status read failed\n"); + dp_link_parse_request(link); +} + +static bool dp_link_is_link_training_requested(struct dp_link_private *link) +{ + return (link->request.test_requested == DP_TEST_LINK_TRAINING); +} + +/** + * dp_link_process_link_training_request() - processes new training requests + * @link: Display Port link data + * + * This function will handle new link training requests that are initiated by + * the sink. In particular, it will update the requested lane count and link + * link rate, and then trigger the link retraining procedure. + * + * The function will return 0 if a link training request has been processed, + * otherwise it will return -EINVAL. + */ +static int dp_link_process_link_training_request(struct dp_link_private *link) +{ + if (!dp_link_is_link_training_requested(link)) + return -EINVAL; + + DP_DEBUG("%s link rate = 0x%x, lane count = 0x%x\n", + dp_link_get_test_name(DP_TEST_LINK_TRAINING), + link->request.test_link_rate, + link->request.test_lane_count); + + link->dp_link.link_params.lane_count = link->request.test_lane_count; + link->dp_link.link_params.bw_code = link->request.test_link_rate; + + return 0; +} + +static void dp_link_send_test_response(struct dp_link *dp_link) +{ + struct dp_link_private *link = NULL; + u32 const response_len = 0x1; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_RESPONSE, + &dp_link->test_response, response_len); +} + +static int dp_link_psm_config(struct dp_link *dp_link, + struct drm_dp_link *link_info, bool enable) +{ + struct dp_link_private *link = NULL; + int ret = 0; + + if (!dp_link) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + if (enable) + ret = drm_dp_link_power_down(link->aux->drm_aux, link_info); + else + ret = drm_dp_link_power_up(link->aux->drm_aux, link_info); + + if (ret) + DP_ERR("Failed to %s low power mode\n", + (enable ? 
"enter" : "exit")); + + return ret; +} + +static void dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +{ + struct dp_link_private *link = NULL; + u32 const response_len = 0x1; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_EDID_CHECKSUM, + &checksum, response_len); +} + +static int dp_link_parse_vx_px(struct dp_link_private *link) +{ + u8 bp; + u8 data; + int const param_len = 0x1; + int ret = 0; + u32 v0, p0, v1, p1, v2, p2, v3, p3; + int rlen; + + DP_DEBUG("\n"); + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_ADJUST_REQUEST_LANE0_1, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed reading lanes 0/1\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + DP_DEBUG("lanes 0/1 (Byte 0x206): 0x%x\n", data); + + v0 = data & 0x3; + data = data >> 2; + p0 = data & 0x3; + data = data >> 2; + + v1 = data & 0x3; + data = data >> 2; + p1 = data & 0x3; + data = data >> 2; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_ADJUST_REQUEST_LANE2_3, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed reading lanes 2/3\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + DP_DEBUG("lanes 2/3 (Byte 0x207): 0x%x\n", data); + + v2 = data & 0x3; + data = data >> 2; + p2 = data & 0x3; + data = data >> 2; + + v3 = data & 0x3; + data = data >> 2; + p3 = data & 0x3; + data = data >> 2; + + DP_DEBUG("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", v0, v1, v2, v3); + DP_DEBUG("px: 0=%d, 1=%d, 2=%d, 3=%d\n", p0, p1, p2, p3); + + /** + * Update the voltage and pre-emphasis levels as per DPCD request + * vector. + */ + DP_DEBUG("Current: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); + DP_DEBUG("Requested: v_level = 0x%x, p_level = 0x%x\n", v0, p0); + link->dp_link.phy_params.v_level = v0; + link->dp_link.phy_params.p_level = p0; + + DP_DEBUG("Success\n"); +end: + return ret; +} + +/** + * dp_link_process_phy_test_pattern_request() - process new phy link requests + * @link: Display Port Driver data + * + * This function will handle new phy link pattern requests that are initiated + * by the sink. The function will return 0 if a phy link pattern has been + * processed, otherwise it will return -EINVAL. 
+ */ +static int dp_link_process_phy_test_pattern_request( + struct dp_link_private *link) +{ + u32 test_link_rate = 0, test_lane_count = 0; + + if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { + DP_DEBUG("no phy test\n"); + return -EINVAL; + } + + test_link_rate = link->request.test_link_rate; + test_lane_count = link->request.test_lane_count; + + if (!is_link_rate_valid(test_link_rate) || + !is_lane_count_valid(test_lane_count)) { + DP_ERR("Invalid params: link rate = 0x%x, lane count = 0x%x\n", + test_link_rate, test_lane_count); + return -EINVAL; + } + + DP_DEBUG("start\n"); + + DP_INFO("Current: bw_code = 0x%x, lane count = 0x%x\n", + link->dp_link.link_params.bw_code, + link->dp_link.link_params.lane_count); + + DP_INFO("Requested: bw_code = 0x%x, lane count = 0x%x\n", + test_link_rate, test_lane_count); + + link->dp_link.link_params.lane_count = link->request.test_lane_count; + link->dp_link.link_params.bw_code = link->request.test_link_rate; + + dp_link_parse_vx_px(link); + + DP_DEBUG("end\n"); + + return 0; +} + +static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +/** + * dp_link_process_link_status_update() - processes link status updates + * @link: Display Port link module data + * + * This function will check for changes in the link status, e.g. clock + * recovery done on all lanes, and trigger link training if there is a + * failure/error on the link. + * + * The function will return 0 if the a link status update has been processed, + * otherwise it will return -EINVAL. + */ +static int dp_link_process_link_status_update(struct dp_link_private *link) +{ + if (!(get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & + DP_LINK_STATUS_UPDATED) || /* link status updated */ + (drm_dp_clock_recovery_ok(link->link_status, + link->dp_link.link_params.lane_count) && + drm_dp_channel_eq_ok(link->link_status, + link->dp_link.link_params.lane_count))) + return -EINVAL; + + DP_DEBUG("channel_eq_done = %d, clock_recovery_done = %d\n", + drm_dp_channel_eq_ok(link->link_status, + link->dp_link.link_params.lane_count), + drm_dp_clock_recovery_ok(link->link_status, + link->dp_link.link_params.lane_count)); + + return 0; +} + +static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link) +{ + if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & + DP_DOWNSTREAM_PORT_STATUS_CHANGED) /* port status changed */ + return true; + + if (link->prev_sink_count != link->dp_link.sink_count.count) + return true; + + return false; +} + +/** + * dp_link_process_downstream_port_status_change() - process port status changes + * @link: Display Port Driver data + * + * This function will handle downstream port updates that are initiated by + * the sink. If the downstream port status has changed, the EDID is read via + * AUX. + * + * The function will return 0 if a downstream port update has been + * processed, otherwise it will return -EINVAL. 
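+ * A change is detected either through the DOWNSTREAM_PORT_STATUS_CHANGED bit
+ * in the lane align status or through a change in the reported sink count.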
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+	if (!dp_link_is_ds_port_status_changed(link))
+		return -EINVAL;
+
+	/* reset prev_sink_count */
+	link->prev_sink_count = link->dp_link.sink_count.count;
+
+	return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
+		&& !(link->request.test_requested &
+		DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
+}
+
+/**
+ * dp_link_process_video_pattern_request() - process new video pattern request
+ * @link: Display Port link module's data
+ *
+ * This function will handle a new video pattern request that is initiated by
+ * the sink. This is achieved by first sending a disconnect notification to
+ * the sink followed by a subsequent connect notification to the user modules,
+ * where it is expected that the user modules would draw the required link
+ * pattern.
+ */
+static int dp_link_process_video_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_video_pattern_requested(link))
+		goto end;
+
+	DP_DEBUG("%s: bit depth=%d(%d bpp) pattern=%s\n",
+		dp_link_get_test_name(DP_TEST_LINK_VIDEO_PATTERN),
+		link->dp_link.test_video.test_bit_depth,
+		dp_link_bit_depth_to_bpp(
+			link->dp_link.test_video.test_bit_depth),
+		dp_link_video_pattern_to_string(
+			link->dp_link.test_video.test_video_pattern));
+
+	return 0;
+end:
+	return -EINVAL;
+}
+
+/**
+ * dp_link_process_audio_pattern_request() - process new audio pattern request
+ * @link: Display Port link module data
+ *
+ * This function will handle a new audio pattern request that is initiated by
+ * the sink. This is achieved by sending the necessary secondary data packets
+ * to the sink. It is expected that any simultaneous requests for video
+ * patterns will be handled before the audio pattern is sent to the sink.
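+ * Currently only the parsed audio test parameters are logged here.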
+ */ +static int dp_link_process_audio_pattern_request(struct dp_link_private *link) +{ + if (!dp_link_is_audio_pattern_requested(link)) + return -EINVAL; + + DP_DEBUG("sampling_rate=%s, channel_count=%d, pattern_type=%s\n", + dp_link_get_audio_sample_rate( + link->dp_link.test_audio.test_audio_sampling_rate), + link->dp_link.test_audio.test_audio_channel_count, + dp_link_get_audio_test_pattern( + link->dp_link.test_audio.test_audio_pattern_type)); + + DP_DEBUG("audio_period: ch1=0x%x, ch2=0x%x, ch3=0x%x, ch4=0x%x\n", + link->dp_link.test_audio.test_audio_period_ch_1, + link->dp_link.test_audio.test_audio_period_ch_2, + link->dp_link.test_audio.test_audio_period_ch_3, + link->dp_link.test_audio.test_audio_period_ch_4); + + DP_DEBUG("audio_period: ch5=0x%x, ch6=0x%x, ch7=0x%x, ch8=0x%x\n", + link->dp_link.test_audio.test_audio_period_ch_5, + link->dp_link.test_audio.test_audio_period_ch_6, + link->dp_link.test_audio.test_audio_period_ch_7, + link->dp_link.test_audio.test_audio_period_ch_8); + + return 0; +} + +static void dp_link_reset_data(struct dp_link_private *link) +{ + link->request = (const struct dp_link_request){ 0 }; + link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; + link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; + link->dp_link.phy_params.phy_test_pattern_sel = 0; + link->dp_link.sink_request = 0; + link->dp_link.test_response = 0; +} + +/** + * dp_link_process_request() - handle HPD IRQ transition to HIGH + * @link: pointer to link module data + * + * This function will handle the HPD IRQ state transitions from LOW to HIGH + * (including cases when there are back to back HPD IRQ HIGH) indicating + * the start of a new link training request or sink status update. + */ +static int dp_link_process_request(struct dp_link *dp_link) +{ + int ret = 0; + struct dp_link_private *link; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + dp_link_reset_data(link); + + dp_link_parse_sink_status_field(link); + + if (dp_link_is_test_edid_read(link)) { + dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + goto exit; + } + + ret = dp_link_process_ds_port_status_change(link); + if (!ret) { + dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + goto exit; + } + + ret = dp_link_process_link_training_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_TRAINING; + goto exit; + } + + ret = dp_link_process_phy_test_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + goto exit; + } + + ret = dp_link_process_link_status_update(link); + if (!ret) { + dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + goto exit; + } + + ret = dp_link_process_video_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + goto exit; + } + + ret = dp_link_process_audio_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + goto exit; + } + + DP_DEBUG("no test requested\n"); + return ret; +exit: + /* + * log this as it can be a use initiated action to run a DP CTS + * test or in normal cases, sink has encountered a problem and + * and want source to redo some part of initialization which can + * be helpful in debugging. 
+ */ + DP_INFO("test requested: %s\n", + dp_link_get_test_name(dp_link->sink_request)); + return 0; +} + +static int dp_link_get_colorimetry_config(struct dp_link *dp_link) +{ + u32 cc; + enum dynamic_range dr; + struct dp_link_private *link; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* unless a video pattern CTS test is ongoing, use CEA_VESA */ + if (dp_link_is_video_pattern_requested(link)) + dr = link->dp_link.test_video.test_dyn_range; + else + dr = DP_DYNAMIC_RANGE_RGB_VESA; + + /* Only RGB_VESA nd RGB_CEA supported for now */ + switch (dr) { + case DP_DYNAMIC_RANGE_RGB_CEA: + cc = BIT(2); + break; + case DP_DYNAMIC_RANGE_RGB_VESA: + default: + cc = 0; + } + + return cc; +} + +static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +{ + int i; + int max = 0; + u8 data; + struct dp_link_private *link; + u8 buf[8] = {0}, offset = 0; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* use the max level across lanes */ + for (i = 0; i < dp_link->link_params.lane_count; i++) { + data = drm_dp_get_adjust_request_voltage(link_status, i); + data >>= DP_TRAIN_VOLTAGE_SWING_SHIFT; + + offset = i * 2; + if (offset < sizeof(buf)) + buf[offset] = data; + + if (max < data) + max = data; + } + + dp_link->phy_params.v_level = max; + + /* use the max level across lanes */ + max = 0; + for (i = 0; i < dp_link->link_params.lane_count; i++) { + data = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + data >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + offset = (i * 2) + 1; + if (offset < sizeof(buf)) + buf[offset] = data; + + if (max < data) + max = data; + } + + dp_link->phy_params.p_level = max; + + print_hex_dump(KERN_DEBUG, "[drm-dp] Req (VxPx): ", + DUMP_PREFIX_NONE, 8, 2, buf, sizeof(buf), false); + + /** + * Adjust the voltage swing and pre-emphasis level combination to within + * the allowable range. + */ + if (dp_link->phy_params.v_level > DP_LINK_VOLTAGE_MAX) + dp_link->phy_params.v_level = DP_LINK_VOLTAGE_MAX; + + if (dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_MAX) + dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_MAX; + + if ((dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_LEVEL_1) + && (dp_link->phy_params.v_level == DP_LINK_VOLTAGE_LEVEL_2)) + dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1; + + if ((dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_LEVEL_2) + && (dp_link->phy_params.v_level == DP_LINK_VOLTAGE_LEVEL_1)) + dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_LEVEL_2; + + DP_DEBUG("Set (VxPx): %x%x\n", + dp_link->phy_params.v_level, dp_link->phy_params.p_level); + + return 0; +} + +static int dp_link_send_psm_request(struct dp_link *dp_link, bool req) +{ + struct dp_link_private *link; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + return 0; +} + +static u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +{ + u32 tbd; + + /* + * Few simplistic rules and assumptions made here: + * 1. Test bit depth is bit depth per color component + * 2. 
Assume 3 color components + */ + switch (bpp) { + case 18: + tbd = DP_TEST_BIT_DEPTH_6; + break; + case 24: + tbd = DP_TEST_BIT_DEPTH_8; + break; + case 30: + tbd = DP_TEST_BIT_DEPTH_10; + break; + default: + tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + break; + } + + if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + + return tbd; +} + +struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux) +{ + int rc = 0; + struct dp_link_private *link; + struct dp_link *dp_link; + + if (!dev || !aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL); + if (!link) { + rc = -EINVAL; + goto error; + } + + link->dev = dev; + link->aux = aux; + + dp_link = &link->dp_link; + + dp_link->process_request = dp_link_process_request; + dp_link->get_test_bits_depth = dp_link_get_test_bits_depth; + dp_link->get_colorimetry_config = dp_link_get_colorimetry_config; + dp_link->adjust_levels = dp_link_adjust_levels; + dp_link->send_psm_request = dp_link_send_psm_request; + dp_link->send_test_response = dp_link_send_test_response; + dp_link->psm_config = dp_link_psm_config; + dp_link->send_edid_checksum = dp_link_send_edid_checksum; + + return dp_link; +error: + return ERR_PTR(rc); +} + +void dp_link_put(struct dp_link *dp_link) +{ + struct dp_link_private *link; + + if (!dp_link) + return; + + link = container_of(dp_link, struct dp_link_private, dp_link); + + devm_kfree(link->dev, link); +} diff --git a/techpack/display/msm/dp/dp_link.h b/techpack/display/msm/dp/dp_link.h new file mode 100644 index 0000000000000000000000000000000000000000..db3456d973e34a5073837319555a67fd73983334 --- /dev/null +++ b/techpack/display/msm/dp/dp_link.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_LINK_H_ +#define _DP_LINK_H_ + +#include "dp_aux.h" + +#define DS_PORT_STATUS_CHANGED 0x200 +#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF +#define DP_LINK_ENUM_STR(x) #x + +enum dp_link_voltage_level { + DP_LINK_VOLTAGE_LEVEL_0, + DP_LINK_VOLTAGE_LEVEL_1, + DP_LINK_VOLTAGE_LEVEL_2, + DP_LINK_VOLTAGE_MAX = DP_LINK_VOLTAGE_LEVEL_2, +}; + +enum dp_link_preemaphasis_level { + DP_LINK_PRE_EMPHASIS_LEVEL_0, + DP_LINK_PRE_EMPHASIS_LEVEL_1, + DP_LINK_PRE_EMPHASIS_LEVEL_2, + DP_LINK_PRE_EMPHASIS_LEVEL_3, + DP_LINK_PRE_EMPHASIS_MAX = DP_LINK_PRE_EMPHASIS_LEVEL_3, +}; + +struct dp_link_sink_count { + u32 count; + bool cp_ready; +}; + +struct dp_link_test_video { + u32 test_video_pattern; + u32 test_bit_depth; + u32 test_dyn_range; + u32 test_h_total; + u32 test_v_total; + u32 test_h_start; + u32 test_v_start; + u32 test_hsync_pol; + u32 test_hsync_width; + u32 test_vsync_pol; + u32 test_vsync_width; + u32 test_h_width; + u32 test_v_height; + u32 test_rr_d; + u32 test_rr_n; +}; + +struct dp_link_test_audio { + u32 test_audio_sampling_rate; + u32 test_audio_channel_count; + u32 test_audio_pattern_type; + u32 test_audio_period_ch_1; + u32 test_audio_period_ch_2; + u32 test_audio_period_ch_3; + u32 test_audio_period_ch_4; + u32 test_audio_period_ch_5; + u32 test_audio_period_ch_6; + u32 test_audio_period_ch_7; + u32 test_audio_period_ch_8; +}; + +struct dp_link_hdcp_status { + int hdcp_state; + int hdcp_version; +}; + +struct dp_link_phy_params { + u32 phy_test_pattern_sel; + u8 v_level; + u8 p_level; +}; + +struct dp_link_params { + u32 lane_count; + u32 bw_code; +}; + +static inline char *dp_link_get_test_name(u32 test_requested) +{ + switch (test_requested) { + case DP_TEST_LINK_TRAINING: + return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING); + case DP_TEST_LINK_VIDEO_PATTERN: + return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN); + case DP_TEST_LINK_EDID_READ: + return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ); + case DP_TEST_LINK_PHY_TEST_PATTERN: + return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN); + case DP_TEST_LINK_AUDIO_PATTERN: + return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN); + case DS_PORT_STATUS_CHANGED: + return DP_LINK_ENUM_STR(DS_PORT_STATUS_CHANGED); + case DP_LINK_STATUS_UPDATED: + return DP_LINK_ENUM_STR(DP_LINK_STATUS_UPDATED); + default: + return "unknown"; + } +} + +struct dp_link { + u32 sink_request; + u32 test_response; + + struct dp_link_sink_count sink_count; + struct dp_link_test_video test_video; + struct dp_link_test_audio test_audio; + struct dp_link_phy_params phy_params; + struct dp_link_params link_params; + struct dp_link_hdcp_status hdcp_status; + + u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp); + int (*process_request)(struct dp_link *dp_link); + int (*get_colorimetry_config)(struct dp_link *dp_link); + int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status); + int (*send_psm_request)(struct dp_link *dp_link, bool req); + void (*send_test_response)(struct dp_link *dp_link); + int (*psm_config)(struct dp_link *dp_link, + struct drm_dp_link *link_info, bool enable); + void (*send_edid_checksum)(struct dp_link *dp_link, u8 checksum); +}; + +static inline char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel) +{ + switch (phy_test_pattern_sel) { + case DP_TEST_PHY_PATTERN_NONE: + return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_NONE); + case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING: + return DP_LINK_ENUM_STR( + DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING); + case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT: + return 
DP_LINK_ENUM_STR(
+			DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT);
+	case DP_TEST_PHY_PATTERN_PRBS7:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_PRBS7);
+	case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
+		return DP_LINK_ENUM_STR(
+			DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN);
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_1);
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_2:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_2);
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_3);
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+	u32 bpp;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		bpp = 18;
+		break;
+	case DP_TEST_BIT_DEPTH_8:
+		bpp = 24;
+		break;
+	case DP_TEST_BIT_DEPTH_10:
+		bpp = 30;
+		break;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		bpp = 0;
+	}
+
+	return bpp;
+}
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ *
+ * @dev: device instance of the caller
+ * @aux: handle to the dp aux module
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux);
+
+/**
+ * dp_link_put() - releases the dp test module's resources
+ *
+ * @dp_link: an instance of dp_link module
+ */
+void dp_link_put(struct dp_link *dp_link);
+
+#endif /* _DP_LINK_H_ */
diff --git a/techpack/display/msm/dp/dp_lphw_hpd.c b/techpack/display/msm/dp/dp_lphw_hpd.c
new file mode 100644
index 0000000000000000000000000000000000000000..7fbd01449c2f78ded175cde29bcd3ed45a6f6e88
--- /dev/null
+++ b/techpack/display/msm/dp/dp_lphw_hpd.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_lphw_hpd.h" +#include "dp_debug.h" + +struct dp_lphw_hpd_private { + struct device *dev; + struct dp_hpd base; + struct dp_parser *parser; + struct dp_catalog_hpd *catalog; + struct dss_gpio gpio_cfg; + struct workqueue_struct *connect_wq; + struct delayed_work work; + struct work_struct connect; + struct work_struct disconnect; + struct work_struct attention; + struct dp_hpd_cb *cb; + int irq; + bool hpd; +}; + +static void dp_lphw_hpd_attention(struct work_struct *work) +{ + struct dp_lphw_hpd_private *lphw_hpd = container_of(work, + struct dp_lphw_hpd_private, attention); + + if (!lphw_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd->base.hpd_irq = true; + + if (lphw_hpd->cb && lphw_hpd->cb->attention) + lphw_hpd->cb->attention(lphw_hpd->dev); +} + +static void dp_lphw_hpd_connect(struct work_struct *work) +{ + struct dp_lphw_hpd_private *lphw_hpd = container_of(work, + struct dp_lphw_hpd_private, connect); + + if (!lphw_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd->base.hpd_high = true; + lphw_hpd->base.alt_mode_cfg_done = true; + lphw_hpd->base.hpd_irq = false; + + if (lphw_hpd->cb && lphw_hpd->cb->configure) + lphw_hpd->cb->configure(lphw_hpd->dev); +} + +static void dp_lphw_hpd_disconnect(struct work_struct *work) +{ + struct dp_lphw_hpd_private *lphw_hpd = container_of(work, + struct dp_lphw_hpd_private, disconnect); + + if (!lphw_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd->base.hpd_high = false; + lphw_hpd->base.alt_mode_cfg_done = false; + lphw_hpd->base.hpd_irq = false; + + if (lphw_hpd->cb && lphw_hpd->cb->disconnect) + lphw_hpd->cb->disconnect(lphw_hpd->dev); +} + +static irqreturn_t dp_tlmm_isr(int unused, void *data) +{ + struct dp_lphw_hpd_private *lphw_hpd = data; + bool hpd; + + if (!lphw_hpd) + return IRQ_NONE; + + /* + * According to the DP spec, HPD high event can be confirmed only after + * the HPD line has een asserted continuously for more than 100ms + */ + usleep_range(99000, 100000); + + hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio); + + DP_DEBUG("lphw_hpd state = %d, new hpd state = %d\n", + lphw_hpd->hpd, hpd); + if (!lphw_hpd->hpd && hpd) { + lphw_hpd->hpd = true; + queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect); + } + + return IRQ_HANDLED; +} + +static void dp_lphw_hpd_host_init(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, true); + + /* + * Changing the gpio function to dp controller for the hpd line is not + * stopping the tlmm interrupts generation on function 0. 
+ * So, as an additional step, disable the gpio interrupt irq also + */ + disable_irq(lphw_hpd->irq); +} + +static void dp_lphw_hpd_host_deinit(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + /* Enable the tlmm interrupt irq which was disabled in host_init */ + enable_irq(lphw_hpd->irq); + + lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, false); +} + +static void dp_lphw_hpd_isr(struct dp_hpd *dp_hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + u32 isr = 0; + int rc = 0; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + isr = lphw_hpd->catalog->get_interrupt(lphw_hpd->catalog); + + if (isr & DP_HPD_UNPLUG_INT_STATUS) { /* disconnect interrupt */ + + DP_DEBUG("disconnect interrupt, hpd isr state: 0x%x\n", isr); + + if (lphw_hpd->base.hpd_high) { + lphw_hpd->hpd = false; + lphw_hpd->base.hpd_high = false; + lphw_hpd->base.alt_mode_cfg_done = false; + lphw_hpd->base.hpd_irq = false; + + rc = queue_work(lphw_hpd->connect_wq, + &lphw_hpd->disconnect); + if (!rc) + DP_DEBUG("disconnect not queued\n"); + } else { + DP_ERR("already disconnected\n"); + } + + } else if (isr & DP_IRQ_HPD_INT_STATUS) { /* attention interrupt */ + + DP_DEBUG("hpd_irq interrupt, hpd isr state: 0x%x\n", isr); + + rc = queue_work(lphw_hpd->connect_wq, &lphw_hpd->attention); + if (!rc) + DP_DEBUG("attention not queued\n"); + } +} + +static int dp_lphw_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->base.hpd_high = hpd; + lphw_hpd->base.alt_mode_cfg_done = hpd; + lphw_hpd->base.hpd_irq = false; + + if (!lphw_hpd->cb || !lphw_hpd->cb->configure || + !lphw_hpd->cb->disconnect) { + DP_ERR("invalid callback\n"); + return -EINVAL; + } + + if (hpd) + lphw_hpd->cb->configure(lphw_hpd->dev); + else + lphw_hpd->cb->disconnect(lphw_hpd->dev); + + return 0; +} + +static int dp_lphw_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->base.hpd_irq = true; + + if (lphw_hpd->cb && lphw_hpd->cb->attention) + lphw_hpd->cb->attention(lphw_hpd->dev); + + return 0; +} + +int dp_lphw_hpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + int rc = 0; + + if (!dp_hpd) + return -EINVAL; + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio); + + rc = devm_request_threaded_irq(lphw_hpd->dev, lphw_hpd->irq, NULL, + dp_tlmm_isr, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "dp-gpio-intp", lphw_hpd); + if (rc) { + DP_ERR("Failed to request INTP threaded IRQ: %d\n", rc); + return rc; + } + enable_irq_wake(lphw_hpd->irq); + + if (lphw_hpd->hpd) + queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect); + + return rc; +} + +static void dp_lphw_hpd_deinit(struct dp_lphw_hpd_private *lphw_hpd) +{ + struct dp_parser *parser = lphw_hpd->parser; + int i = 0; + + for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) { + + if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name, + 
"hpd-pwr")) { + /* disable the hpd-pwr voltage regulator */ + if (msm_dss_enable_vreg( + &parser->mp[DP_PHY_PM].vreg_config[i], 1, + false)) + DP_ERR("hpd-pwr vreg not disabled\n"); + + break; + } + } +} + +static void dp_lphw_hpd_init(struct dp_lphw_hpd_private *lphw_hpd) +{ + struct dp_pinctrl pinctrl = {0}; + struct dp_parser *parser = lphw_hpd->parser; + int i = 0, rc = 0; + + for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) { + + if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name, + "hpd-pwr")) { + /* enable the hpd-pwr voltage regulator */ + if (msm_dss_enable_vreg( + &parser->mp[DP_PHY_PM].vreg_config[i], 1, + true)) + DP_ERR("hpd-pwr vreg not enabled\n"); + + break; + } + } + + pinctrl.pin = devm_pinctrl_get(lphw_hpd->dev); + + if (!IS_ERR_OR_NULL(pinctrl.pin)) { + pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin, + "mdss_dp_hpd_active"); + + if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) { + rc = pinctrl_select_state(pinctrl.pin, + pinctrl.state_hpd_active); + if (rc) + DP_ERR("failed to set hpd_active state\n"); + } + } +} + +static int dp_lphw_hpd_create_workqueue(struct dp_lphw_hpd_private *lphw_hpd) +{ + lphw_hpd->connect_wq = create_singlethread_workqueue("dp_lphw_work"); + if (IS_ERR_OR_NULL(lphw_hpd->connect_wq)) { + DP_ERR("Error creating connect_wq\n"); + return -EPERM; + } + + INIT_WORK(&lphw_hpd->connect, dp_lphw_hpd_connect); + INIT_WORK(&lphw_hpd->disconnect, dp_lphw_hpd_disconnect); + INIT_WORK(&lphw_hpd->attention, dp_lphw_hpd_attention); + + return 0; +} + +struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser, + struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb) +{ + int rc = 0; + const char *hpd_gpio_name = "qcom,dp-hpd-gpio"; + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dev || !parser || !cb) { + DP_ERR("invalid device\n"); + rc = -EINVAL; + goto error; + } + + lphw_hpd = devm_kzalloc(dev, sizeof(*lphw_hpd), GFP_KERNEL); + if (!lphw_hpd) { + rc = -ENOMEM; + goto error; + } + + lphw_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node, + hpd_gpio_name, 0); + if (!gpio_is_valid(lphw_hpd->gpio_cfg.gpio)) { + DP_ERR("%s gpio not specified\n", hpd_gpio_name); + rc = -EINVAL; + goto gpio_error; + } + + strlcpy(lphw_hpd->gpio_cfg.gpio_name, hpd_gpio_name, + sizeof(lphw_hpd->gpio_cfg.gpio_name)); + lphw_hpd->gpio_cfg.value = 0; + + rc = gpio_request(lphw_hpd->gpio_cfg.gpio, + lphw_hpd->gpio_cfg.gpio_name); + if (rc) { + DP_ERR("%s: failed to request gpio\n", hpd_gpio_name); + goto gpio_error; + } + gpio_direction_input(lphw_hpd->gpio_cfg.gpio); + + lphw_hpd->dev = dev; + lphw_hpd->cb = cb; + lphw_hpd->irq = gpio_to_irq(lphw_hpd->gpio_cfg.gpio); + + rc = dp_lphw_hpd_create_workqueue(lphw_hpd); + if (rc) { + DP_ERR("Failed to create a dp_hpd workqueue\n"); + goto gpio_error; + } + + lphw_hpd->parser = parser; + lphw_hpd->catalog = catalog; + lphw_hpd->base.isr = dp_lphw_hpd_isr; + lphw_hpd->base.host_init = dp_lphw_hpd_host_init; + lphw_hpd->base.host_deinit = dp_lphw_hpd_host_deinit; + lphw_hpd->base.simulate_connect = dp_lphw_hpd_simulate_connect; + lphw_hpd->base.simulate_attention = dp_lphw_hpd_simulate_attention; + lphw_hpd->base.register_hpd = dp_lphw_hpd_register; + + dp_lphw_hpd_init(lphw_hpd); + + return &lphw_hpd->base; + +gpio_error: + devm_kfree(dev, lphw_hpd); +error: + return ERR_PTR(rc); +} + +void dp_lphw_hpd_put(struct dp_hpd *dp_hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) + return; + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + 
dp_lphw_hpd_deinit(lphw_hpd); + gpio_free(lphw_hpd->gpio_cfg.gpio); + devm_kfree(lphw_hpd->dev, lphw_hpd); +} diff --git a/techpack/display/msm/dp/dp_lphw_hpd.h b/techpack/display/msm/dp/dp_lphw_hpd.h new file mode 100644 index 0000000000000000000000000000000000000000..9779331bd295eefed4546746974369f7a167760e --- /dev/null +++ b/techpack/display/msm/dp/dp_lphw_hpd.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_LPHW_HPD_H_ +#define _DP_LPHW_HPD_H_ + +#include "dp_hpd.h" + +#define DP_HPD_PLUG_INT_STATUS BIT(0) +#define DP_IRQ_HPD_INT_STATUS BIT(1) +#define DP_HPD_REPLUG_INT_STATUS BIT(2) +#define DP_HPD_UNPLUG_INT_STATUS BIT(3) + +/** + * dp_lphw_hpd_get() - configure and get the DisplayPlot HPD module data + * + * @dev: device instance of the caller + * return: pointer to allocated gpio hpd module data + * + * This function sets up the lphw hpd module + */ +struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser, + struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb); + +/** + * dp_lphw_hpd_put() + * + * Cleans up dp_hpd instance + * + * @hpd: instance of lphw_hpd + */ +void dp_lphw_hpd_put(struct dp_hpd *hpd); + +#endif /* _DP_LPHW_HPD_H_ */ diff --git a/techpack/display/msm/dp/dp_mst_drm.c b/techpack/display/msm/dp/dp_mst_drm.c new file mode 100644 index 0000000000000000000000000000000000000000..508e1034feb46ffc045d5e11856f1a32d025c358 --- /dev/null +++ b/techpack/display/msm/dp/dp_mst_drm.c @@ -0,0 +1,2258 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + */ + +/* + * Copyright © 2014 Red Hat. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "sde_connector.h" +#include "dp_drm.h" +#include "dp_debug.h" + +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) +#define DP_MST_INFO_LOG(fmt, ...) 
DP_DEBUG(fmt, ##__VA_ARGS__) + +#define MAX_DP_MST_DRM_ENCODERS 2 +#define MAX_DP_MST_DRM_BRIDGES 2 +#define HPD_STRING_SIZE 30 + +struct dp_drm_mst_fw_helper_ops { + int (*calc_pbn_mode)(struct dp_display_mode *dp_mode); + int (*find_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, int pbn); + int (*atomic_find_vcpi_slots)(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn); + bool (*allocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int pbn, int slots); + int (*update_payload_part1)(struct drm_dp_mst_topology_mgr *mgr); + int (*check_act_status)(struct drm_dp_mst_topology_mgr *mgr); + int (*update_payload_part2)(struct drm_dp_mst_topology_mgr *mgr); + enum drm_connector_status (*detect_port)( + struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + struct edid *(*get_edid)(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + int (*topology_mgr_set_mst)(struct drm_dp_mst_topology_mgr *mgr, + bool mst_state); + int (*atomic_release_vcpi_slots)(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + int slots); + void (*get_vcpi_info)(struct drm_dp_mst_topology_mgr *mgr, + int vcpi, int *start_slot, int *num_slots); + void (*reset_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + void (*deallocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +}; + +struct dp_mst_sim_port_data { + bool input_port; + u8 peer_device_type; + u8 port_number; + bool mcs; + bool ddps; + bool legacy_device_plug_status; + u8 dpcd_revision; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; +}; + +struct dp_mst_sim_port_edid { + u8 port_number; + u8 edid[SZ_256]; + bool valid; +}; + +struct dp_mst_sim_mode { + bool mst_state; + struct edid *edid; + struct dp_mst_sim_port_edid port_edids[DP_MST_SIM_MAX_PORTS]; + struct work_struct probe_work; + const struct drm_dp_mst_topology_cbs *cbs; + u32 port_cnt; +}; + +struct dp_mst_bridge { + struct drm_bridge base; + struct drm_private_obj obj; + u32 id; + + bool in_use; + + struct dp_display *display; + struct drm_encoder *encoder; + + struct drm_display_mode drm_mode; + struct dp_display_mode dp_mode; + struct drm_connector *connector; + void *dp_panel; + + int vcpi; + int pbn; + int num_slots; + int start_slot; + + u32 fixed_port_num; + bool fixed_port_added; + struct drm_connector *fixed_connector; +}; + +struct dp_mst_bridge_state { + struct drm_private_state base; + struct drm_connector *connector; + void *dp_panel; + int num_slots; +}; + +struct dp_mst_private { + bool mst_initialized; + struct dp_mst_caps caps; + struct drm_dp_mst_topology_mgr mst_mgr; + struct dp_mst_bridge mst_bridge[MAX_DP_MST_DRM_BRIDGES]; + struct dp_display *dp_display; + const struct dp_drm_mst_fw_helper_ops *mst_fw_cbs; + struct dp_mst_sim_mode simulator; + struct mutex mst_lock; + enum dp_drv_state state; + bool mst_session_state; +}; + +struct dp_mst_encoder_info_cache { + u8 cnt; + struct drm_encoder *mst_enc[MAX_DP_MST_DRM_BRIDGES]; +}; + +#define to_dp_mst_bridge(x) container_of((x), struct dp_mst_bridge, base) +#define to_dp_mst_bridge_priv(x) \ + container_of((x), struct dp_mst_bridge, obj) +#define to_dp_mst_bridge_priv_state(x) \ + container_of((x), struct dp_mst_bridge_state, base) +#define to_dp_mst_bridge_state(x) \ + to_dp_mst_bridge_priv_state((x)->obj.state) + +struct dp_mst_private 
dp_mst; +struct dp_mst_encoder_info_cache dp_mst_enc_cache; + +static struct drm_private_state *dp_mst_duplicate_bridge_state( + struct drm_private_obj *obj) +{ + struct dp_mst_bridge_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void dp_mst_destroy_bridge_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dp_mst_bridge_state *priv_state = + to_dp_mst_bridge_priv_state(state); + + kfree(priv_state); +} + +static const struct drm_private_state_funcs dp_mst_bridge_state_funcs = { + .atomic_duplicate_state = dp_mst_duplicate_bridge_state, + .atomic_destroy_state = dp_mst_destroy_bridge_state, +}; + +static struct dp_mst_bridge_state *dp_mst_get_bridge_atomic_state( + struct drm_atomic_state *state, struct dp_mst_bridge *bridge) +{ + struct drm_device *dev = bridge->base.dev; + + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + + return to_dp_mst_bridge_priv_state( + drm_atomic_get_private_obj_state(state, &bridge->obj)); +} + +static void dp_mst_sim_destroy_port(struct kref *ref) +{ + struct drm_dp_mst_port *port = container_of(ref, + struct drm_dp_mst_port, kref); + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + + if (port->cached_edid) + kfree(port->cached_edid); + + if (port->connector) { + mutex_lock(&mgr->destroy_connector_lock); + kref_get(&port->parent->kref); + list_add(&port->next, &mgr->destroy_connector_list); + mutex_unlock(&mgr->destroy_connector_lock); + schedule_work(&mgr->destroy_connector_work); + return; + } else { + kfree(port); + } +} + +/* DRM DP MST Framework simulator OPs */ +static void dp_mst_sim_add_port(struct dp_mst_private *mst, + struct dp_mst_sim_port_data *port_msg) +{ + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_port *port; + + mstb = mst->mst_mgr.mst_primary; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return; + kref_init(&port->kref); + port->parent = mstb; + port->port_num = port_msg->port_number; + port->mgr = mstb->mgr; + port->aux.name = dp_mst.caps.drm_aux->name; + port->aux.dev = mst->dp_display->drm_dev->dev; + + port->pdt = port_msg->peer_device_type; + port->input = port_msg->input_port; + port->mcs = port_msg->mcs; + port->ddps = port_msg->ddps; + port->ldps = port_msg->legacy_device_plug_status; + port->dpcd_rev = port_msg->dpcd_revision; + port->num_sdp_streams = port_msg->num_sdp_streams; + port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; + + mutex_lock(&mstb->mgr->lock); + kref_get(&port->kref); + list_add(&port->next, &mstb->ports); + mutex_unlock(&mstb->mgr->lock); + + /* use fixed pbn for simulator ports */ + port->available_pbn = 2520; + + if (!port->input) { + port->connector = (*mstb->mgr->cbs->add_connector) + (mstb->mgr, port, NULL); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + kref_put(&port->kref, dp_mst_sim_destroy_port); + goto put_port; + } + (*mstb->mgr->cbs->register_connector)(port->connector); + } + +put_port: + kref_put(&port->kref, dp_mst_sim_destroy_port); +} + +static void dp_mst_sim_link_probe_work(struct work_struct *work) +{ + struct dp_mst_sim_mode *sim; + struct dp_mst_private *mst; + struct dp_mst_sim_port_data port_data; + u8 cnt, i; + + DP_MST_DEBUG("enter\n"); + sim = container_of(work, struct dp_mst_sim_mode, probe_work); + mst = 
container_of(sim, struct dp_mst_private, simulator); + + port_data.input_port = false; + port_data.peer_device_type = DP_PEER_DEVICE_SST_SINK; + port_data.mcs = false; + port_data.ddps = true; + port_data.legacy_device_plug_status = false; + port_data.dpcd_revision = 0; + port_data.num_sdp_streams = 0; + port_data.num_sdp_stream_sinks = 0; + + for (i = 0; i < DP_MST_SIM_MAX_PORTS; i++) + sim->port_edids[i].valid = false; + + for (cnt = 0; cnt < sim->port_cnt; cnt++) { + port_data.port_number = cnt; + + for (i = 0; i < DP_MST_SIM_MAX_PORTS; i++) { + if (sim->port_edids[i].valid) continue; + + sim->port_edids[i].port_number = port_data.port_number; + memcpy(sim->port_edids[i].edid, sim->edid, SZ_256); + sim->port_edids[i].valid = true; + break; + } + + dp_mst_sim_add_port(mst, &port_data); + } + + mst->mst_mgr.cbs->hotplug(&mst->mst_mgr); + DP_MST_DEBUG("completed\n"); +} + +static int dp_mst_sim_no_action(struct drm_dp_mst_topology_mgr *mgr) +{ + return 0; +} + +static int dp_mst_sim_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) +{ + int i, j; + int cur_slots = 1; + struct drm_dp_payload req_payload; + struct drm_dp_mst_port *port; + + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + req_payload.start_slot = cur_slots; + if (mgr->proposed_vcpis[i]) { + port = container_of(mgr->proposed_vcpis[i], + struct drm_dp_mst_port, vcpi); + req_payload.num_slots = + mgr->proposed_vcpis[i]->num_slots; + req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; + } else { + port = NULL; + req_payload.num_slots = 0; + } + + if (mgr->payloads[i].start_slot != req_payload.start_slot) + mgr->payloads[i].start_slot = req_payload.start_slot; + + if (mgr->payloads[i].num_slots != req_payload.num_slots) { + if (req_payload.num_slots) { + req_payload.payload_state = DP_PAYLOAD_LOCAL; + mgr->payloads[i].num_slots = + req_payload.num_slots; + mgr->payloads[i].vcpi = req_payload.vcpi; + } else if (mgr->payloads[i].num_slots) { + mgr->payloads[i].num_slots = 0; + mgr->payloads[i].payload_state = + DP_PAYLOAD_DELETE_LOCAL; + req_payload.payload_state = + mgr->payloads[i].payload_state; + mgr->payloads[i].start_slot = 0; + } else + req_payload.payload_state = + mgr->payloads[i].payload_state; + + mgr->payloads[i].payload_state = + req_payload.payload_state; + } + cur_slots += req_payload.num_slots; + } + + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { + DP_DEBUG("removing payload %d\n", i); + for (j = i; j < mgr->max_payloads - 1; j++) { + memcpy(&mgr->payloads[j], + &mgr->payloads[j + 1], + sizeof(struct drm_dp_payload)); + mgr->proposed_vcpis[j] = + mgr->proposed_vcpis[j + 1]; + if (mgr->proposed_vcpis[j] && + mgr->proposed_vcpis[j]->num_slots) { + set_bit(j + 1, &mgr->payload_mask); + } else { + clear_bit(j + 1, &mgr->payload_mask); + } + } + memset(&mgr->payloads[mgr->max_payloads - 1], 0, + sizeof(struct drm_dp_payload)); + mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; + clear_bit(mgr->max_payloads, &mgr->payload_mask); + } + } + mutex_unlock(&mgr->payload_lock); + return 0; +} + +static int dp_mst_sim_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_mst_port *port; + int i; + + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + + if (!mgr->proposed_vcpis[i]) + continue; + + port = container_of(mgr->proposed_vcpis[i], + struct drm_dp_mst_port, vcpi); + + DP_DEBUG("payload %d %d\n", i, mgr->payloads[i].payload_state); + if (mgr->payloads[i].payload_state == 
DP_PAYLOAD_LOCAL) + mgr->payloads[i].payload_state = DP_PAYLOAD_REMOTE; + else if (mgr->payloads[i].payload_state == + DP_PAYLOAD_DELETE_LOCAL) + mgr->payloads[i].payload_state = 0; + } + mutex_unlock(&mgr->payload_lock); + return 0; +} + +static struct edid *dp_mst_sim_get_edid(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + struct dp_mst_private *mst = container_of(mgr, + struct dp_mst_private, mst_mgr); + int i; + + for (i = 0; i < DP_MST_SIM_MAX_PORTS; i++) { + if (mst->simulator.port_edids[i].valid && + mst->simulator.port_edids[i].port_number == + port->port_num) { + return drm_edid_duplicate((struct edid *) + (mst->simulator.port_edids[i].edid)); + } + } + + DRM_ERROR("edid not found for connector %d\n", connector->base.id); + return NULL; +} + +static int dp_mst_sim_topology_mgr_set_mst( + struct drm_dp_mst_topology_mgr *mgr, + bool mst_state) +{ + int rc; + struct dp_mst_private *mst = container_of(mgr, + struct dp_mst_private, mst_mgr); + + rc = drm_dp_mst_topology_mgr_set_mst(mgr, mst_state); + if (rc < 0) { + DRM_ERROR("unable to set mst topology mgr, rc: %d\n", rc); + return rc; + } + + if (mst_state) + queue_work(system_long_wq, &mst->simulator.probe_work); + + mst->simulator.mst_state = mst_state; + return 0; +} + +static void dp_mst_sim_handle_hpd_irq(void *dp_display, + struct dp_mst_hpd_info *info) +{ + struct dp_display *dp; + struct dp_mst_private *mst; + struct drm_dp_mst_port *port; + struct dp_mst_sim_port_data port_data; + struct drm_dp_mst_branch *mstb; + int i; + bool in_list, port_available; + + dp = dp_display; + mst = dp->dp_mst_prv_info; + + if (info->mst_sim_add_con) { + port_available = false; + for (i = 0; i < DP_MST_SIM_MAX_PORTS; i++) { + if (mst->simulator.port_edids[i].valid) continue; + + port_data.port_number = i; + mst->simulator.port_edids[i].port_number = i; + memcpy(mst->simulator.port_edids[i].edid, info->edid, + SZ_256); + mst->simulator.port_edids[i].valid = true; + port_available = true; + break; + } + + if (!port_available) { + DRM_ERROR("add port failed, limit (%d) reached\n", + DP_MST_SIM_MAX_PORTS); + return; + } + + port_data.input_port = false; + port_data.peer_device_type = DP_PEER_DEVICE_SST_SINK; + port_data.mcs = false; + port_data.ddps = true; + port_data.legacy_device_plug_status = false; + port_data.dpcd_revision = 0; + port_data.num_sdp_streams = 0; + port_data.num_sdp_stream_sinks = 0; + + dp_mst_sim_add_port(mst, &port_data); + } else if (info->mst_sim_remove_con) { + mstb = mst->mst_mgr.mst_primary; + in_list = false; + + mutex_lock(&mst->mst_mgr.lock); + list_for_each_entry(port, + &mstb->ports, next) { + if (port->connector && port->connector->base.id == + info->mst_sim_remove_con_id) { + in_list = true; + list_del(&port->next); + break; + } + } + mutex_unlock(&mst->mst_mgr.lock); + + if (!in_list) { + DRM_ERROR("invalid connector id %d\n", + info->mst_sim_remove_con_id); + return; + } + + for (i = 0; i < DP_MST_SIM_MAX_PORTS; i++) { + if (mst->simulator.port_edids[i].port_number == + port->port_num) { + mst->simulator.port_edids[i].valid = false; + } + } + + kref_put(&port->kref, dp_mst_sim_destroy_port); + } +} + +static void _dp_mst_get_vcpi_info( + struct drm_dp_mst_topology_mgr *mgr, + int vcpi, int *start_slot, int *num_slots) +{ + int i; + + *start_slot = 0; + *num_slots = 0; + + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->payloads[i].vcpi == vcpi) { + *start_slot = mgr->payloads[i].start_slot; + *num_slots = 
mgr->payloads[i].num_slots; + break; + } + } + mutex_unlock(&mgr->payload_lock); + + DP_INFO("vcpi_info. vcpi:%d, start_slot:%d, num_slots:%d\n", + vcpi, *start_slot, *num_slots); +} + +static int dp_mst_calc_pbn_mode(struct dp_display_mode *dp_mode) +{ + int pbn, bpp; + bool dsc_en; + s64 pbn_fp; + + dsc_en = dp_mode->timing.comp_info.comp_ratio ? true : false; + bpp = dsc_en ? dp_mode->timing.comp_info.dsc_info.bpp : + dp_mode->timing.bpp; + + pbn = drm_dp_calc_pbn_mode(dp_mode->timing.pixel_clk_khz, bpp); + pbn_fp = drm_fixp_from_fraction(pbn, 1); + + DP_DEBUG("before overhead pbn:%d, bpp:%d\n", pbn, bpp); + + if (dsc_en) + pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->dsc_overhead_fp); + + if (dp_mode->fec_overhead_fp) + pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->fec_overhead_fp); + + pbn = drm_fixp2int(pbn_fp); + + DP_DEBUG("after overhead pbn:%d, bpp:%d\n", pbn, bpp); + return pbn; +} + +static const struct dp_drm_mst_fw_helper_ops drm_dp_mst_fw_helper_ops = { + .calc_pbn_mode = dp_mst_calc_pbn_mode, + .find_vcpi_slots = drm_dp_find_vcpi_slots, + .atomic_find_vcpi_slots = drm_dp_atomic_find_vcpi_slots, + .allocate_vcpi = drm_dp_mst_allocate_vcpi, + .update_payload_part1 = drm_dp_update_payload_part1, + .check_act_status = drm_dp_check_act_status, + .update_payload_part2 = drm_dp_update_payload_part2, + .detect_port = drm_dp_mst_detect_port, + .get_edid = drm_dp_mst_get_edid, + .topology_mgr_set_mst = drm_dp_mst_topology_mgr_set_mst, + .get_vcpi_info = _dp_mst_get_vcpi_info, + .atomic_release_vcpi_slots = drm_dp_atomic_release_vcpi_slots, + .reset_vcpi_slots = drm_dp_mst_reset_vcpi_slots, + .deallocate_vcpi = drm_dp_mst_deallocate_vcpi, +}; + +static const struct dp_drm_mst_fw_helper_ops drm_dp_sim_mst_fw_helper_ops = { + .calc_pbn_mode = dp_mst_calc_pbn_mode, + .find_vcpi_slots = drm_dp_find_vcpi_slots, + .atomic_find_vcpi_slots = drm_dp_atomic_find_vcpi_slots, + .allocate_vcpi = drm_dp_mst_allocate_vcpi, + .update_payload_part1 = dp_mst_sim_update_payload_part1, + .check_act_status = dp_mst_sim_no_action, + .update_payload_part2 = dp_mst_sim_update_payload_part2, + .detect_port = drm_dp_mst_detect_port, + .get_edid = dp_mst_sim_get_edid, + .topology_mgr_set_mst = dp_mst_sim_topology_mgr_set_mst, + .get_vcpi_info = _dp_mst_get_vcpi_info, + .atomic_release_vcpi_slots = drm_dp_atomic_release_vcpi_slots, + .reset_vcpi_slots = drm_dp_mst_reset_vcpi_slots, + .deallocate_vcpi = drm_dp_mst_deallocate_vcpi, +}; + +/* DP MST Bridge OPs */ + +static int dp_mst_bridge_attach(struct drm_bridge *dp_bridge) +{ + struct dp_mst_bridge *bridge; + + DP_MST_DEBUG("enter\n"); + + if (!dp_bridge) { + DP_ERR("Invalid params\n"); + return -EINVAL; + } + + bridge = to_dp_mst_bridge(dp_bridge); + + DP_MST_DEBUG("mst bridge [%d] attached\n", bridge->id); + + return 0; +} + +static bool dp_mst_bridge_mode_fixup(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ret = true; + struct dp_display_mode dp_mode; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct drm_crtc_state *crtc_state; + struct dp_mst_bridge_state *bridge_state; + + DP_MST_DEBUG("enter\n"); + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + ret = false; + goto end; + } + + bridge = to_dp_mst_bridge(drm_bridge); + + crtc_state = container_of(mode, struct drm_crtc_state, mode); + bridge_state = dp_mst_get_bridge_atomic_state(crtc_state->state, + bridge); + if (IS_ERR(bridge_state)) { + DP_ERR("invalid bridge state\n"); + ret = false; + goto 
end; + } + + if (!bridge_state->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + ret = false; + goto end; + } + + dp = bridge->display; + + dp->convert_to_dp_mode(dp, bridge_state->dp_panel, mode, &dp_mode); + convert_to_drm_mode(&dp_mode, adjusted_mode); + + DP_MST_DEBUG("mst bridge [%d] mode:%s fixup\n", bridge->id, mode->name); +end: + return ret; +} + +static int _dp_mst_compute_config(struct drm_atomic_state *state, + struct dp_mst_private *mst, struct drm_connector *connector, + struct dp_display_mode *mode) +{ + int slots = 0, pbn; + struct sde_connector *c_conn = to_sde_connector(connector); + + DP_MST_DEBUG("enter\n"); + + pbn = mst->mst_fw_cbs->calc_pbn_mode(mode); + + slots = mst->mst_fw_cbs->atomic_find_vcpi_slots(state, + &mst->mst_mgr, c_conn->mst_port, pbn); + if (slots < 0) { + DP_ERR("mst: failed to find vcpi slots. pbn:%d, slots:%d\n", + pbn, slots); + return slots; + } + + DP_MST_DEBUG("exit\n"); + + return slots; +} + +static void _dp_mst_update_timeslots(struct dp_mst_private *mst, + struct dp_mst_bridge *mst_bridge) +{ + int i; + struct dp_mst_bridge *dp_bridge; + int pbn, start_slot, num_slots; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + dp_bridge = &mst->mst_bridge[i]; + + pbn = 0; + start_slot = 0; + num_slots = 0; + + if (dp_bridge->vcpi) { + mst->mst_fw_cbs->get_vcpi_info(&mst->mst_mgr, + dp_bridge->vcpi, + &start_slot, &num_slots); + pbn = dp_bridge->pbn; + } + + if (mst_bridge == dp_bridge) + dp_bridge->num_slots = num_slots; + + mst->dp_display->set_stream_info(mst->dp_display, + dp_bridge->dp_panel, + dp_bridge->id, start_slot, num_slots, pbn, + dp_bridge->vcpi); + + DP_INFO("bridge:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n", + dp_bridge->id, dp_bridge->vcpi, + start_slot, num_slots, pbn); + } +} + +static void _dp_mst_update_single_timeslot(struct dp_mst_private *mst, + struct dp_mst_bridge *mst_bridge) +{ + int pbn = 0, start_slot = 0, num_slots = 0; + + if (mst->state == PM_SUSPEND) { + if (mst_bridge->vcpi) { + mst->mst_fw_cbs->get_vcpi_info(&mst->mst_mgr, + mst_bridge->vcpi, + &start_slot, &num_slots); + pbn = mst_bridge->pbn; + } + + mst_bridge->num_slots = num_slots; + + mst->dp_display->set_stream_info(mst->dp_display, + mst_bridge->dp_panel, + mst_bridge->id, start_slot, num_slots, pbn, + mst_bridge->vcpi); + } +} + +static void _dp_mst_bridge_pre_enable_part1(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct drm_dp_mst_port *port = c_conn->mst_port; + bool ret; + int pbn, slots; + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) { + dp_display->wakeup_phy_layer(dp_display, true); + drm_dp_send_power_updown_phy(&mst->mst_mgr, port, true); + dp_display->wakeup_phy_layer(dp_display, false); + _dp_mst_update_single_timeslot(mst, dp_bridge); + return; + } + + pbn = mst->mst_fw_cbs->calc_pbn_mode(&dp_bridge->dp_mode); + + slots = mst->mst_fw_cbs->find_vcpi_slots(&mst->mst_mgr, pbn); + + DP_INFO("bridge:%d, pbn:%d, slots:%d\n", dp_bridge->id, + dp_bridge->pbn, dp_bridge->num_slots); + + ret = mst->mst_fw_cbs->allocate_vcpi(&mst->mst_mgr, + port, pbn, slots); + if (!ret) { + DP_ERR("mst: failed to allocate vcpi. 
bridge:%d\n", + dp_bridge->id); + return; + } + + dp_bridge->vcpi = port->vcpi.vcpi; + dp_bridge->pbn = pbn; + + ret = mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr); + + _dp_mst_update_timeslots(mst, dp_bridge); +} + +static void _dp_mst_bridge_pre_enable_part2(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + + DP_MST_DEBUG("enter\n"); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) + return; + + mst->mst_fw_cbs->check_act_status(&mst->mst_mgr); + + mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr); + + DP_MST_DEBUG("mst bridge [%d] _pre enable part-2 complete\n", + dp_bridge->id); +} + +static void _dp_mst_bridge_pre_disable_part1(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct drm_dp_mst_port *port = c_conn->mst_port; + + DP_MST_DEBUG("enter\n"); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) { + _dp_mst_update_single_timeslot(mst, dp_bridge); + return; + } + + mst->mst_fw_cbs->reset_vcpi_slots(&mst->mst_mgr, port); + + mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr); + + _dp_mst_update_timeslots(mst, dp_bridge); + + DP_MST_DEBUG("mst bridge [%d] _pre disable part-1 complete\n", + dp_bridge->id); +} + +static void _dp_mst_bridge_pre_disable_part2(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct drm_dp_mst_port *port = c_conn->mst_port; + + DP_MST_DEBUG("enter\n"); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) { + dp_display->wakeup_phy_layer(dp_display, true); + drm_dp_send_power_updown_phy(&mst->mst_mgr, port, false); + dp_display->wakeup_phy_layer(dp_display, false); + return; + } + + mst->mst_fw_cbs->check_act_status(&mst->mst_mgr); + + mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr); + + mst->mst_fw_cbs->deallocate_vcpi(&mst->mst_mgr, port); + + dp_bridge->vcpi = 0; + dp_bridge->pbn = 0; + + DP_MST_DEBUG("mst bridge [%d] _pre disable part-2 complete\n", + dp_bridge->id); +} + +static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct dp_mst_private *mst; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + dp = bridge->display; + + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + mst = dp->dp_mst_prv_info; + + mutex_lock(&mst->mst_lock); + + /* By this point mode should have been validated through mode_fixup */ + rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode); + if (rc) { + DP_ERR("[%d] failed to perform a mode set, rc=%d\n", + bridge->id, rc); + goto end; + } + + rc = dp->prepare(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display prepare failed, rc=%d\n", + bridge->id, rc); + goto end; + } + + _dp_mst_bridge_pre_enable_part1(bridge); + + rc = dp->enable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display enable failed, rc=%d\n", + bridge->id, rc); + dp->unprepare(dp, bridge->dp_panel); + goto end; + } else { + _dp_mst_bridge_pre_enable_part2(bridge); + } 
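+	/* log the mode, DSC state and VCPI allocation for this stream */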
+ + DP_MST_INFO_LOG("mode: id(%d) mode(%s), refresh(%d)\n", + bridge->id, bridge->drm_mode.name, + bridge->drm_mode.vrefresh); + DP_MST_INFO_LOG("dsc: id(%d) dsc(%d)\n", bridge->id, + bridge->dp_mode.timing.comp_info.comp_ratio); + DP_MST_INFO_LOG("channel: id(%d) vcpi(%d) start(%d) tot(%d)\n", + bridge->id, bridge->vcpi, bridge->start_slot, + bridge->num_slots); +end: + mutex_unlock(&mst->mst_lock); +} + +static void dp_mst_bridge_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + dp = bridge->display; + + rc = dp->post_enable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("mst bridge [%d] post enable failed, rc=%d\n", + bridge->id, rc); + return; + } + + DP_MST_INFO_LOG("mst bridge [%d] post enable complete\n", + bridge->id); +} + +static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct dp_mst_private *mst; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + dp = bridge->display; + + mst = dp->dp_mst_prv_info; + + sde_connector_helper_bridge_disable(bridge->connector); + + mutex_lock(&mst->mst_lock); + + _dp_mst_bridge_pre_disable_part1(bridge); + + rc = dp->pre_disable(dp, bridge->dp_panel); + if (rc) + DP_ERR("[%d] DP display pre disable failed, rc=%d\n", + bridge->id, rc); + + _dp_mst_bridge_pre_disable_part2(bridge); + + DP_MST_INFO_LOG("mst bridge [%d] disable complete\n", bridge->id); + + mutex_unlock(&mst->mst_lock); +} + +static void dp_mst_bridge_post_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct dp_mst_private *mst; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + dp = bridge->display; + mst = dp->dp_mst_prv_info; + + rc = dp->disable(dp, bridge->dp_panel); + if (rc) + DP_INFO("[%d] DP display disable failed, rc=%d\n", + bridge->id, rc); + + rc = dp->unprepare(dp, bridge->dp_panel); + if (rc) + DP_INFO("[%d] DP display unprepare failed, rc=%d\n", + bridge->id, rc); + + bridge->connector = NULL; + bridge->dp_panel = NULL; + + DP_MST_INFO_LOG("mst bridge [%d] post disable complete\n", + bridge->id); +} + +static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct dp_mst_bridge *bridge; + struct dp_mst_bridge_state *dp_bridge_state; + struct dp_display *dp; + + DP_MST_DEBUG("enter\n"); + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + + dp_bridge_state = to_dp_mst_bridge_state(bridge); + bridge->connector = dp_bridge_state->connector; + bridge->dp_panel = dp_bridge_state->dp_panel; + + dp = bridge->display; + + memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode)); + memcpy(&bridge->drm_mode, adjusted_mode, sizeof(bridge->drm_mode)); + dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode, + &bridge->dp_mode); + + DP_MST_DEBUG("mst bridge [%d] mode set complete\n", bridge->id); +} + +/* DP MST Bridge APIs */ + 
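+/*
+ * Each MST stream is driven through the drm_bridge_funcs below:
+ * mode_fixup() converts the requested mode into a dp_display_mode for the
+ * panel bound to the bridge state, pre_enable() allocates the VCPI and
+ * updates the payload table (part 1 before the stream is enabled, part 2
+ * after), enable()/disable() map to the dp_display post_enable()/
+ * pre_disable() path, and post_disable() shuts the stream down and drops
+ * the connector/panel binding. dp_mst_drm_fixed_connector_init() is used
+ * only for fixed topology ports, whose connectors are created at bridge
+ * init time rather than on hotplug.
+ */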
+static struct drm_connector * +dp_mst_drm_fixed_connector_init(struct dp_display *dp_display, + struct drm_encoder *encoder); + +static const struct drm_bridge_funcs dp_mst_bridge_ops = { + .attach = dp_mst_bridge_attach, + .mode_fixup = dp_mst_bridge_mode_fixup, + .pre_enable = dp_mst_bridge_pre_enable, + .enable = dp_mst_bridge_enable, + .disable = dp_mst_bridge_disable, + .post_disable = dp_mst_bridge_post_disable, + .mode_set = dp_mst_bridge_mode_set, +}; + +int dp_mst_drm_bridge_init(void *data, struct drm_encoder *encoder) +{ + int rc = 0; + struct dp_mst_bridge *bridge = NULL; + struct dp_mst_bridge_state *state; + struct drm_device *dev; + struct dp_display *display = data; + struct msm_drm_private *priv = NULL; + struct dp_mst_private *mst = display->dp_mst_prv_info; + int i; + + if (!mst || !mst->mst_initialized) { + if (dp_mst_enc_cache.cnt >= MAX_DP_MST_DRM_BRIDGES) { + DP_INFO("exceeding max bridge cnt %d\n", + dp_mst_enc_cache.cnt); + return 0; + } + + dp_mst_enc_cache.mst_enc[dp_mst_enc_cache.cnt] = encoder; + dp_mst_enc_cache.cnt++; + DP_INFO("mst not initialized. cache encoder information\n"); + return 0; + } + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (!mst->mst_bridge[i].in_use) { + bridge = &mst->mst_bridge[i]; + bridge->encoder = encoder; + bridge->in_use = true; + bridge->id = i; + break; + } + } + + if (i == MAX_DP_MST_DRM_BRIDGES) { + DP_ERR("mst supports only %d bridges\n", i); + rc = -EACCES; + goto end; + } + + dev = display->drm_dev; + bridge->display = display; + bridge->base.funcs = &dp_mst_bridge_ops; + bridge->base.encoder = encoder; + + priv = dev->dev_private; + + rc = drm_bridge_attach(encoder, &bridge->base, NULL); + if (rc) { + DP_ERR("failed to attach bridge, rc=%d\n", rc); + goto end; + } + + encoder->bridge = &bridge->base; + priv->bridges[priv->num_bridges++] = &bridge->base; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) { + rc = -ENOMEM; + goto end; + } + + drm_atomic_private_obj_init(&bridge->obj, + &state->base, + &dp_mst_bridge_state_funcs); + + DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i); + + /* + * If fixed topology port is defined, connector will be created + * immediately. 
+ */ + rc = display->mst_get_fixed_topology_port(display, bridge->id, + &bridge->fixed_port_num); + if (!rc) { + bridge->fixed_connector = + dp_mst_drm_fixed_connector_init(display, + bridge->encoder); + if (bridge->fixed_connector == NULL) { + DP_ERR("failed to create fixed connector\n"); + kfree(state); + rc = -ENOMEM; + goto end; + } + } + + return 0; + +end: + return rc; +} + +void dp_mst_drm_bridge_deinit(void *display) +{ + DP_MST_DEBUG("mst bridge deinit\n"); +} + +/* DP MST Connector OPs */ + +static enum drm_connector_status +dp_mst_connector_detect(struct drm_connector *connector, bool force, + void *display) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct dp_display *dp_display = c_conn->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + enum drm_connector_status status; + struct dp_mst_connector mst_conn; + + DP_MST_DEBUG("enter:\n"); + + status = mst->mst_fw_cbs->detect_port(connector, + &mst->mst_mgr, + c_conn->mst_port); + + memset(&mst_conn, 0, sizeof(mst_conn)); + dp_display->mst_get_connector_info(dp_display, connector, &mst_conn); + if (mst_conn.conn == connector && + mst_conn.state != connector_status_unknown) { + status = mst_conn.state; + } + + DP_MST_DEBUG("mst connector:%d detect, status:%d\n", + connector->base.id, status); + + DP_MST_DEBUG("exit:\n"); + + return status; +} + +static int dp_mst_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct edid *edid; + int rc = 0; + + DP_MST_DEBUG("enter:\n"); + + edid = mst->mst_fw_cbs->get_edid(connector, &mst->mst_mgr, + c_conn->mst_port); + + if (edid) + rc = dp_display->mst_connector_update_edid(dp_display, + connector, edid); + + DP_MST_DEBUG("mst connector get modes. 
id: %d\n", connector->base.id); + + DP_MST_DEBUG("exit:\n"); + + return rc; +} + +enum drm_mode_status dp_mst_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode, + void *display, const struct msm_resource_caps_info *avail_res) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst; + struct sde_connector *c_conn; + struct drm_dp_mst_port *mst_port; + struct dp_display_mode dp_mode; + uint16_t available_pbn, required_pbn; + int available_slots, required_slots; + struct dp_mst_bridge_state *dp_bridge_state; + int i, slots_in_use = 0, active_enc_cnt = 0; + const u32 tot_slots = 63; + + if (!connector || !mode || !display) { + DP_ERR("invalid input\n"); + return 0; + } + + mst = dp_display->dp_mst_prv_info; + c_conn = to_sde_connector(connector); + mst_port = c_conn->mst_port; + + /* dp bridge state is protected by drm_mode_config.connection_mutex */ + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + dp_bridge_state = to_dp_mst_bridge_state(&mst->mst_bridge[i]); + if (dp_bridge_state->connector && + dp_bridge_state->connector != connector) { + active_enc_cnt++; + slots_in_use += dp_bridge_state->num_slots; + } + } + + if (active_enc_cnt < DP_STREAM_MAX) { + available_pbn = mst_port->available_pbn; + available_slots = tot_slots - slots_in_use; + } else { + pr_debug("all mst streams are active\n"); + return MODE_BAD; + } + + dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel, + mode, &dp_mode); + + required_pbn = mst->mst_fw_cbs->calc_pbn_mode(&dp_mode); + required_slots = mst->mst_fw_cbs->find_vcpi_slots( + &mst->mst_mgr, required_pbn); + + if (required_pbn > available_pbn || required_slots > available_slots) { + DP_DEBUG("mode:%s not supported\n", mode->name); + return MODE_BAD; + } + + return dp_connector_mode_valid(connector, mode, display, avail_res); +} + +int dp_mst_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, + void *display) +{ + int rc; + enum drm_connector_status status = connector_status_unknown; + + DP_MST_DEBUG("enter:\n"); + + rc = dp_connector_get_info(connector, info, display); + + if (!rc) { + status = dp_mst_connector_detect(connector, false, display); + + if (status == connector_status_connected) + info->is_connected = true; + else + info->is_connected = false; + } + + DP_MST_DEBUG("mst connector:%d get info:%d, rc:%d\n", + connector->base.id, status, rc); + + DP_MST_DEBUG("exit:\n"); + + return rc; +} + +int dp_mst_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_mode_info *mode_info, + void *display, + const struct msm_resource_caps_info *avail_res) +{ + int rc; + + DP_MST_DEBUG("enter:\n"); + + rc = dp_connector_get_mode_info(connector, drm_mode, mode_info, + display, avail_res); + + DP_MST_DEBUG("mst connector:%d get mode info. 
rc:%d\n", + connector->base.id, rc); + + DP_MST_DEBUG("exit:\n"); + + return rc; +} + +static struct drm_encoder * +dp_mst_atomic_best_encoder(struct drm_connector *connector, + void *display, struct drm_connector_state *state) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *conn = to_sde_connector(connector); + struct drm_encoder *enc = NULL; + struct dp_mst_bridge_state *bridge_state; + u32 i; + + if (state->best_encoder) + return state->best_encoder; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + bridge_state = dp_mst_get_bridge_atomic_state( + state->state, &mst->mst_bridge[i]); + if (IS_ERR(bridge_state)) + goto end; + + if (bridge_state->connector == connector) { + enc = mst->mst_bridge[i].encoder; + goto end; + } + } + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (mst->mst_bridge[i].fixed_connector) + continue; + + bridge_state = dp_mst_get_bridge_atomic_state( + state->state, &mst->mst_bridge[i]); + + if (!bridge_state->connector) { + bridge_state->connector = connector; + bridge_state->dp_panel = conn->drv_panel; + enc = mst->mst_bridge[i].encoder; + break; + } + + } + +end: + if (enc) + DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n", + connector->base.id, i); + else + DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n", + connector->base.id); + + return enc; +} + +static int dp_mst_connector_atomic_check(struct drm_connector *connector, + void *display, struct drm_connector_state *new_conn_state) +{ + int rc = 0, slots, i; + struct drm_atomic_state *state; + struct drm_connector_state *old_conn_state; + struct drm_crtc *old_crtc; + struct drm_crtc_state *crtc_state; + struct dp_mst_bridge *bridge; + struct dp_mst_bridge_state *bridge_state; + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *c_conn; + struct dp_display_mode dp_mode; + + DP_MST_DEBUG("enter:\n"); + + if (!new_conn_state) + return rc; + + state = new_conn_state->state; + + old_conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!old_conn_state) + goto mode_set; + + old_crtc = old_conn_state->crtc; + if (!old_crtc) + goto mode_set; + + crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc); + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + bridge = &mst->mst_bridge[i]; + DP_MST_DEBUG("bridge id:%d, vcpi:%d, pbn:%d, slots:%d\n", + bridge->id, bridge->vcpi, bridge->pbn, + bridge->num_slots); + } + + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (WARN_ON(!old_conn_state->best_encoder)) { + rc = -EINVAL; + goto end; + } + + bridge = to_dp_mst_bridge( + old_conn_state->best_encoder->bridge); + + bridge_state = dp_mst_get_bridge_atomic_state(state, bridge); + if (IS_ERR(bridge_state)) { + rc = PTR_ERR(bridge_state); + goto end; + } + + if (WARN_ON(bridge_state->connector != connector)) { + rc = -EINVAL; + goto end; + } + + slots = bridge_state->num_slots; + if (slots > 0) { + rc = mst->mst_fw_cbs->atomic_release_vcpi_slots(state, + &mst->mst_mgr, slots); + if (rc) { + pr_err("failed releasing %d vcpi slots %d\n", + slots, rc); + goto end; + } + } + + bridge_state->num_slots = 0; + + if (!new_conn_state->crtc && mst->state != PM_SUSPEND) { + bridge_state->connector = NULL; + bridge_state->dp_panel = NULL; + + DP_MST_DEBUG("clear best encoder: %d\n", bridge->id); + } + } + +mode_set: + if (!new_conn_state->crtc) + goto end; + + crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); 
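+	/*
+	 * For a modeset on an active CRTC, convert the new mode and reserve
+	 * VCPI slots for it in the atomic state. The slot count is cached in
+	 * the bridge state so it can be released on a later modeset and so
+	 * mode_valid() can account for slots already claimed by other streams.
+	 */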
+ + if (drm_atomic_crtc_needs_modeset(crtc_state) && crtc_state->active) { + c_conn = to_sde_connector(connector); + + if (WARN_ON(!new_conn_state->best_encoder)) { + rc = -EINVAL; + goto end; + } + + bridge = to_dp_mst_bridge( + new_conn_state->best_encoder->bridge); + + bridge_state = dp_mst_get_bridge_atomic_state(state, bridge); + if (IS_ERR(bridge_state)) { + rc = PTR_ERR(bridge_state); + goto end; + } + + if (WARN_ON(bridge_state->connector != connector)) { + rc = -EINVAL; + goto end; + } + + if (WARN_ON(bridge_state->num_slots)) { + rc = -EINVAL; + goto end; + } + + dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel, + &crtc_state->mode, &dp_mode); + + slots = _dp_mst_compute_config(state, mst, connector, &dp_mode); + if (slots < 0) { + rc = slots; + goto end; + } + + bridge_state->num_slots = slots; + } + +end: + DP_MST_DEBUG("mst connector:%d atomic check ret %d\n", + connector->base.id, rc); + return rc; +} + +static int dp_mst_connector_config_hdr(struct drm_connector *connector, + void *display, struct sde_connector_state *c_state) +{ + int rc; + + DP_MST_DEBUG("enter:\n"); + + rc = dp_connector_config_hdr(connector, display, c_state); + + DP_MST_DEBUG("mst connector:%d cfg hdr. rc:%d\n", + connector->base.id, rc); + + DP_MST_DEBUG("exit:\n"); + + return rc; +} + +static void dp_mst_connector_pre_destroy(struct drm_connector *connector, + void *display) +{ + struct dp_display *dp_display = display; + + DP_MST_DEBUG("enter:\n"); + dp_display->mst_connector_uninstall(dp_display, connector); + DP_MST_DEBUG("exit:\n"); +} + +/* DRM MST callbacks */ + +static struct drm_connector * +dp_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, const char *pathprop) +{ + static const struct sde_connector_ops dp_mst_connector_ops = { + .post_init = NULL, + .detect = dp_mst_connector_detect, + .get_modes = dp_mst_connector_get_modes, + .mode_valid = dp_mst_connector_mode_valid, + .get_info = dp_mst_connector_get_info, + .get_mode_info = dp_mst_connector_get_mode_info, + .atomic_best_encoder = dp_mst_atomic_best_encoder, + .atomic_check = dp_mst_connector_atomic_check, + .config_hdr = dp_mst_connector_config_hdr, + .pre_destroy = dp_mst_connector_pre_destroy, + .update_pps = dp_connector_update_pps, + }; + struct dp_mst_private *dp_mst; + struct drm_device *dev; + struct dp_display *dp_display; + struct drm_connector *connector; + struct sde_connector *c_conn; + int rc, i; + + DP_MST_DEBUG("enter\n"); + + dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr); + + dp_display = dp_mst->dp_display; + dev = dp_display->drm_dev; + + /* make sure connector is not accessed before reset */ + drm_modeset_lock_all(dev); + + connector = sde_connector_init(dev, + dp_mst->mst_bridge[0].encoder, + NULL, + dp_display, + &dp_mst_connector_ops, + DRM_CONNECTOR_POLL_HPD, + DRM_MODE_CONNECTOR_DisplayPort); + + if (!connector) { + DP_ERR("mst sde_connector_init failed\n"); + drm_modeset_unlock_all(dev); + return connector; + } + + rc = dp_display->mst_connector_install(dp_display, connector); + if (rc) { + DP_ERR("mst connector install failed\n"); + sde_connector_destroy(connector); + drm_modeset_unlock_all(dev); + return NULL; + } + + c_conn = to_sde_connector(connector); + c_conn->mst_port = port; + + if (connector->funcs->reset) + connector->funcs->reset(connector); + + for (i = 1; i < MAX_DP_MST_DRM_BRIDGES; i++) { + drm_connector_attach_encoder(connector, + dp_mst->mst_bridge[i].encoder); + } + + drm_object_attach_property(&connector->base, + 
dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tile_property, 0); + + /* unlock connector and make it accessible */ + drm_modeset_unlock_all(dev); + + DP_MST_INFO_LOG("add mst connector id:%d\n", connector->base.id); + + return connector; +} + +static void dp_mst_register_connector(struct drm_connector *connector) +{ + DP_MST_DEBUG("enter\n"); + + connector->status = connector->funcs->detect(connector, false); + + DP_MST_INFO_LOG("register mst connector id:%d\n", + connector->base.id); + drm_connector_register(connector); +} + +static void dp_mst_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + DP_MST_DEBUG("enter\n"); + + DP_MST_INFO_LOG("destroy mst connector id:%d\n", connector->base.id); + + drm_connector_unregister(connector); + drm_connector_put(connector); +} + +static enum drm_connector_status +dp_mst_fixed_connector_detect(struct drm_connector *connector, bool force, + void *display) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + int i; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (mst->mst_bridge[i].fixed_connector != connector) + continue; + + if (!mst->mst_bridge[i].fixed_port_added) + break; + + return dp_mst_connector_detect(connector, force, display); + } + + return connector_status_disconnected; +} + +static struct drm_encoder * +dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector, + void *display, struct drm_connector_state *state) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *conn = to_sde_connector(connector); + struct drm_encoder *enc = NULL; + struct dp_mst_bridge_state *bridge_state; + u32 i; + + if (state->best_encoder) + return state->best_encoder; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (mst->mst_bridge[i].fixed_connector == connector) { + bridge_state = dp_mst_get_bridge_atomic_state( + state->state, &mst->mst_bridge[i]); + if (IS_ERR(bridge_state)) + goto end; + + bridge_state->connector = connector; + bridge_state->dp_panel = conn->drv_panel; + enc = mst->mst_bridge[i].encoder; + break; + } + } + +end: + if (enc) + DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n", + connector->base.id, i); + else + DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n", + connector->base.id); + + return enc; +} + +static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *target) +{ + struct drm_dp_mst_port *port; + u32 port_num = 0; + + /* + * search through reversed order of adding sequence, so the port number + * will be unique once topology is fixed + */ + list_for_each_entry_reverse(port, &mstb->ports, next) { + if (port->mstb) + port_num += dp_mst_find_fixed_port_num(port->mstb, + target); + else if (!port->input) { + ++port_num; + if (port == target) + break; + } + } + + return port_num; +} + +static struct drm_connector * +dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst, + struct drm_dp_mst_port *port) +{ + struct dp_display *dp_display = dp_mst->dp_display; + struct drm_connector *connector = NULL; + struct sde_connector *c_conn; + u32 port_num; + int i; + + mutex_lock(&port->mgr->lock); + port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port); + mutex_unlock(&port->mgr->lock); + + if (!port_num) + return NULL; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (dp_mst->mst_bridge[i].fixed_port_num == port_num) 
{ + connector = dp_mst->mst_bridge[i].fixed_connector; + c_conn = to_sde_connector(connector); + c_conn->mst_port = port; + dp_display->mst_connector_update_link_info(dp_display, + connector); + dp_mst->mst_bridge[i].fixed_port_added = true; + DP_MST_DEBUG("found fixed connector %d\n", + DRMID(connector)); + break; + } + } + + return connector; +} + +static int +dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst) +{ + int enc_idx = MAX_DP_MST_DRM_BRIDGES; + int i; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (!dp_mst->mst_bridge[i].fixed_connector) { + enc_idx = i; + break; + } + } + + return enc_idx; +} + +static struct drm_connector * +dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, const char *pathprop) +{ + struct dp_mst_private *dp_mst; + struct drm_device *dev; + struct dp_display *dp_display; + struct drm_connector *connector; + int i, enc_idx; + + DP_MST_DEBUG("enter\n"); + + dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr); + + dp_display = dp_mst->dp_display; + dev = dp_display->drm_dev; + + if (port->input || port->mstb) + enc_idx = MAX_DP_MST_DRM_BRIDGES; + else { + /* if port is already reserved, return immediately */ + connector = dp_mst_find_fixed_connector(dp_mst, port); + if (connector != NULL) + return connector; + + /* first available bridge index for non-reserved port */ + enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst); + } + + /* add normal connector */ + connector = dp_mst_add_connector(mgr, port, pathprop); + if (!connector) { + DP_MST_DEBUG("failed to add connector\n"); + return NULL; + } + + drm_modeset_lock_all(dev); + + /* clear encoder list */ + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) + connector->encoder_ids[i] = 0; + + /* re-attach encoders from first available encoders */ + for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++) + drm_connector_attach_encoder(connector, + dp_mst->mst_bridge[i].encoder); + + drm_modeset_unlock_all(dev); + + DP_MST_DEBUG("add mst connector:%d\n", connector->base.id); + + return connector; +} + +static void dp_mst_register_fixed_connector(struct drm_connector *connector) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct dp_display *dp_display = c_conn->display; + struct dp_mst_private *dp_mst = dp_display->dp_mst_prv_info; + int i; + + DP_MST_DEBUG("enter\n"); + + /* skip connector registered for fixed topology ports */ + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (dp_mst->mst_bridge[i].fixed_connector == connector) { + DP_MST_DEBUG("found fixed connector %d\n", + DRMID(connector)); + return; + } + } + + dp_mst_register_connector(connector); +} + +static void dp_mst_destroy_fixed_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct dp_mst_private *dp_mst; + int i; + + DP_MST_DEBUG("enter\n"); + + dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr); + + /* skip connector destroy for fixed topology ports */ + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (dp_mst->mst_bridge[i].fixed_connector == connector) { + dp_mst->mst_bridge[i].fixed_port_added = false; + DP_MST_DEBUG("destroy fixed connector %d\n", + DRMID(connector)); + return; + } + } + + dp_mst_destroy_connector(mgr, connector); +} + +static struct drm_connector * +dp_mst_drm_fixed_connector_init(struct dp_display *dp_display, + struct drm_encoder *encoder) +{ + static const struct sde_connector_ops dp_mst_connector_ops = { + .post_init = NULL, + .detect = dp_mst_fixed_connector_detect, 
+ .get_modes = dp_mst_connector_get_modes, + .mode_valid = dp_mst_connector_mode_valid, + .get_info = dp_mst_connector_get_info, + .get_mode_info = dp_mst_connector_get_mode_info, + .atomic_best_encoder = dp_mst_fixed_atomic_best_encoder, + .atomic_check = dp_mst_connector_atomic_check, + .config_hdr = dp_mst_connector_config_hdr, + .pre_destroy = dp_mst_connector_pre_destroy, + }; + struct drm_device *dev; + struct drm_connector *connector; + int rc; + + DP_MST_DEBUG("enter\n"); + + dev = dp_display->drm_dev; + + connector = sde_connector_init(dev, + encoder, + NULL, + dp_display, + &dp_mst_connector_ops, + DRM_CONNECTOR_POLL_HPD, + DRM_MODE_CONNECTOR_DisplayPort); + + if (!connector) { + DP_ERR("mst sde_connector_init failed\n"); + return NULL; + } + + rc = dp_display->mst_connector_install(dp_display, connector); + if (rc) { + DP_ERR("mst connector install failed\n"); + sde_connector_destroy(connector); + return NULL; + } + + drm_object_attach_property(&connector->base, + dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tile_property, 0); + + DP_MST_DEBUG("add mst fixed connector:%d\n", connector->base.id); + + return connector; +} + +static void dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct dp_mst_private *mst = container_of(mgr, struct dp_mst_private, + mst_mgr); + struct drm_device *dev = mst->dp_display->drm_dev; + char event_string[] = "MST_HOTPLUG=1"; + char *envp[2]; + + envp[0] = event_string; + envp[1] = NULL; + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); + + DP_MST_INFO_LOG("mst hot plug event\n"); +} + +static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status) +{ + struct drm_device *dev = mst->dp_display->drm_dev; + char event_string[] = "MST_HOTPLUG=1"; + char status[HPD_STRING_SIZE]; + char *envp[3]; + + if (hpd_status) + snprintf(status, HPD_STRING_SIZE, "status=connected"); + else + snprintf(status, HPD_STRING_SIZE, "status=disconnected"); + + envp[0] = event_string; + envp[1] = status; + envp[2] = NULL; + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); + + DP_MST_INFO_LOG("%s finished\n", __func__); +} + +/* DP Driver Callback OPs */ + +static void dp_mst_display_hpd(void *dp_display, bool hpd_status, + struct dp_mst_hpd_info *info) +{ + int rc; + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + + mutex_lock(&mst->mst_lock); + mst->mst_session_state = hpd_status; + mutex_unlock(&mst->mst_lock); + + if (!hpd_status) { + rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr, + hpd_status); + if (rc < 0) + goto fail; + } + + if (info && !info->mst_protocol) { + if (hpd_status) { + mst->simulator.edid = (struct edid *)info->edid; + mst->simulator.port_cnt = info->mst_port_cnt; + } + mst->mst_fw_cbs = &drm_dp_sim_mst_fw_helper_ops; + } else { + mst->mst_fw_cbs = &drm_dp_mst_fw_helper_ops; + } + + if (hpd_status) { + rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr, + hpd_status); + if (rc < 0) + goto fail; + } + + dp_mst_hpd_event_notify(mst, hpd_status); + + DP_MST_INFO_LOG("mst display hpd success. hpd:%d, rc:%d\n", hpd_status, + rc); + return; +fail: + DRM_ERROR("mst display hpd failed. 
hpd: %d, rc: %d\n", + hpd_status, rc); +} + +static void dp_mst_display_hpd_irq(void *dp_display, + struct dp_mst_hpd_info *info) +{ + int rc; + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + u8 esi[14]; + unsigned int esi_res = DP_SINK_COUNT_ESI + 1; + bool handled; + + if (info->mst_hpd_sim) { + if (info->mst_sim_add_con || info->mst_sim_remove_con) { + dp_mst_sim_handle_hpd_irq(dp_display, info); + + /* + * When removing a connector, hpd_irq -> sim_destroy -> + * destroy_connector_work will be executed in a thread. + * This thread will perform the dp_mst_hotplug at the + * appropriate time. Do not perform hotplug here + * because it may be too early. + */ + if (info->mst_sim_remove_con) + return; + } + + dp_mst_hotplug(&mst->mst_mgr); + return; + } + + if (!mst->mst_session_state) { + DP_ERR("mst_hpd_irq received before mst session start\n"); + return; + } + + rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI, + esi, 14); + if (rc != 14) { + DP_ERR("dpcd sink status read failed, rlen=%d\n", rc); + return; + } + + DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n", + esi[1], esi[2], esi[3]); + + rc = drm_dp_mst_hpd_irq(&mst->mst_mgr, esi, &handled); + + /* ack the request */ + if (handled) { + rc = drm_dp_dpcd_write(mst->caps.drm_aux, esi_res, &esi[1], 3); + + if (rc != 3) + DP_ERR("dpcd esi_res failed. rlen=%d\n", rc); + } + + DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc); +} + +static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state) +{ + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + + if (!mst) { + DP_DEBUG("mst not initialized\n"); + return; + } + + mst->state = mst_state; + DP_MST_INFO_LOG("mst power state:%d\n", mst_state); +} + +/* DP MST APIs */ + +static const struct dp_mst_drm_cbs dp_mst_display_cbs = { + .hpd = dp_mst_display_hpd, + .hpd_irq = dp_mst_display_hpd_irq, + .set_drv_state = dp_mst_set_state, +}; + +static const struct drm_dp_mst_topology_cbs dp_mst_drm_cbs = { + .add_connector = dp_mst_add_connector, + .register_connector = dp_mst_register_connector, + .destroy_connector = dp_mst_destroy_connector, + .hotplug = dp_mst_hotplug, +}; + +static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = { + .add_connector = dp_mst_add_fixed_connector, + .register_connector = dp_mst_register_fixed_connector, + .destroy_connector = dp_mst_destroy_fixed_connector, + .hotplug = dp_mst_hotplug, +}; + +static void dp_mst_sim_init(struct dp_mst_private *mst) +{ + INIT_WORK(&mst->simulator.probe_work, dp_mst_sim_link_probe_work); + mst->simulator.cbs = &dp_mst_drm_cbs; +} + +int dp_mst_init(struct dp_display *dp_display) +{ + struct drm_device *dev; + int conn_base_id = 0; + int ret, i; + struct dp_mst_drm_install_info install_info; + + memset(&dp_mst, 0, sizeof(dp_mst)); + + if (!dp_display) { + DP_ERR("invalid params\n"); + return 0; + } + + dev = dp_display->drm_dev; + + /* register with DP driver */ + install_info.dp_mst_prv_info = &dp_mst; + install_info.cbs = &dp_mst_display_cbs; + dp_display->mst_install(dp_display, &install_info); + + dp_display->get_mst_caps(dp_display, &dp_mst.caps); + + if (!dp_mst.caps.has_mst) { + DP_MST_DEBUG("mst not supported\n"); + return 0; + } + + dp_mst.mst_fw_cbs = &drm_dp_mst_fw_helper_ops; + + memset(&dp_mst.mst_mgr, 0, sizeof(dp_mst.mst_mgr)); + dp_mst.mst_mgr.cbs = &dp_mst_drm_cbs; + conn_base_id = dp_display->base_connector->base.id; + dp_mst.dp_display = dp_display; + + 
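+	/*
+	 * Register with the DRM MST topology manager and create bridges for
+	 * any encoders that were cached before MST support was initialized.
+	 */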
mutex_init(&dp_mst.mst_lock); + + ret = drm_dp_mst_topology_mgr_init(&dp_mst.mst_mgr, dev, + dp_mst.caps.drm_aux, + dp_mst.caps.max_dpcd_transaction_bytes, + dp_mst.caps.max_streams_supported, + conn_base_id); + if (ret) { + DP_ERR("dp drm mst topology manager init failed\n"); + goto error; + } + + dp_mst_sim_init(&dp_mst); + + dp_mst.mst_initialized = true; + + /* create drm_bridges for cached mst encoders and clear cache */ + for (i = 0; i < dp_mst_enc_cache.cnt; i++) { + ret = dp_mst_drm_bridge_init(dp_display, + dp_mst_enc_cache.mst_enc[i]); + } + memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache)); + + /* choose fixed callback function if fixed topology is found */ + if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL)) + dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs; + + DP_MST_INFO_LOG("dp drm mst topology manager init completed\n"); + + return ret; + +error: + mutex_destroy(&dp_mst.mst_lock); + return ret; +} + +void dp_mst_deinit(struct dp_display *dp_display) +{ + struct dp_mst_private *mst; + + if (!dp_display) { + DP_ERR("invalid params\n"); + return; + } + + mst = dp_display->dp_mst_prv_info; + + if (!mst->mst_initialized) + return; + + dp_display->mst_uninstall(dp_display); + + drm_dp_mst_topology_mgr_destroy(&mst->mst_mgr); + + dp_mst.mst_initialized = false; + + mutex_destroy(&mst->mst_lock); + + DP_MST_INFO_LOG("dp drm mst topology manager deinit completed\n"); +} + diff --git a/techpack/display/msm/dp/dp_panel.c b/techpack/display/msm/dp/dp_panel.c new file mode 100644 index 0000000000000000000000000000000000000000..1c01b0eee481d8fb4eab844d4931c37e60a80d35 --- /dev/null +++ b/techpack/display/msm/dp/dp_panel.c @@ -0,0 +1,3419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include "dp_panel.h" +#include <drm/drm_dp_helper.h> +#include <drm/drm_fixed.h> +#include "dp_debug.h" +#include <drm/drm_edid.h> + +#define DP_KHZ_TO_HZ 1000 +#define DP_PANEL_DEFAULT_BPP 24 +#define DP_MAX_DS_PORT_COUNT 1 + +#define DPRX_FEATURE_ENUMERATION_LIST 0x2210 +#define DPRX_EXTENDED_DPCD_FIELD 0x2200 +#define VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED BIT(3) +#define VSC_EXT_VESA_SDP_SUPPORTED BIT(4) +#define VSC_EXT_VESA_SDP_CHAINING_SUPPORTED BIT(5) + +enum dp_panel_hdr_pixel_encoding { + RGB, + YCbCr444, + YCbCr422, + YCbCr420, + YONLY, + RAW, +}; + +enum dp_panel_hdr_rgb_colorimetry { + sRGB, + RGB_WIDE_GAMUT_FIXED_POINT, + RGB_WIDE_GAMUT_FLOATING_POINT, + ADOBERGB, + DCI_P3, + CUSTOM_COLOR_PROFILE, + ITU_R_BT_2020_RGB, +}; + +enum dp_panel_hdr_dynamic_range { + VESA, + CEA, +}; + +enum dp_panel_hdr_content_type { + NOT_DEFINED, + GRAPHICS, + PHOTO, + VIDEO, + GAME, +}; + +enum dp_panel_hdr_state { + HDR_DISABLED, + HDR_ENABLED, +}; + +struct dp_panel_private { + struct device *dev; + struct dp_panel dp_panel; + struct dp_aux *aux; + struct dp_link *link; + struct dp_parser *parser; + struct dp_catalog_panel *catalog; + bool custom_edid; + bool custom_dpcd; + bool panel_on; + bool vsc_supported; + bool vscext_supported; + bool vscext_chaining_supported; + enum dp_panel_hdr_state hdr_state; + u8 spd_vendor_name[8]; + u8 spd_product_description[16]; + u8 major; + u8 minor; +}; + +static const struct dp_panel_info fail_safe = { + .h_active = 640, + .v_active = 480, + .h_back_porch = 48, + .h_front_porch = 16, + .h_sync_width = 96, + .h_active_low = 0, + .v_back_porch = 33, + .v_front_porch = 10, + .v_sync_width = 2, + .v_active_low = 0, + .h_skew = 0, + .refresh_rate = 60, + .pixel_clk_khz = 25200, + .bpp = 24, +}; + +/* OEM NAME */ +static const u8 vendor_name[8] = {81, 117, 97, 108, 99, 111, 109, 109}; + +/* MODEL NAME */ +static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103, + 111, 110, 0, 0, 0, 0, 0, 0}; + +struct dp_dhdr_maxpkt_calc_input { + u32 mdp_clk; + u32 lclk; + u32 pclk; + u32 h_active; + u32 nlanes; + s64 mst_target_sc; + bool mst_en; + bool fec_en; +}; + +struct tu_algo_data { + s64 lclk_fp; + s64 pclk_fp; + s64 lwidth; + s64 lwidth_fp; + s64 hbp_relative_to_pclk; + s64 hbp_relative_to_pclk_fp; + int nlanes; + int bpp; + int pixelEnc; + int dsc_en; + int async_en; + int bpc; + + uint delay_start_link_extra_pixclk; + int extra_buffer_margin; + s64 ratio_fp; + s64 original_ratio_fp; + + s64 err_fp; + s64 n_err_fp; + s64 n_n_err_fp; + int tu_size; + int tu_size_desired; + int tu_size_minus1; + + int valid_boundary_link; + s64 resulting_valid_fp; + s64 total_valid_fp; + s64 effective_valid_fp; + s64 effective_valid_recorded_fp; + int n_tus; + int n_tus_per_lane; + int paired_tus; + int remainder_tus; + int remainder_tus_upper; + int remainder_tus_lower; + int extra_bytes; + int filler_size; + int delay_start_link; + + int extra_pclk_cycles; + int extra_pclk_cycles_in_link_clk; + s64 ratio_by_tu_fp; + s64 average_valid2_fp; + int new_valid_boundary_link; + int remainder_symbols_exist; + int n_symbols; + s64 n_remainder_symbols_per_lane_fp; + s64 last_partial_tu_fp; + s64 TU_ratio_err_fp; + + int n_tus_incl_last_incomplete_tu; + int extra_pclk_cycles_tmp; + int extra_pclk_cycles_in_link_clk_tmp; + int extra_required_bytes_new_tmp; + int filler_size_tmp; + int lower_filler_size_tmp; + int delay_start_link_tmp; + + bool boundary_moderation_en; + int boundary_mod_lower_err; + int upper_boundary_count; + int lower_boundary_count; + int i_upper_boundary_count; + int i_lower_boundary_count; + int 
valid_lower_boundary_link; + int even_distribution_BF; + int even_distribution_legacy; + int even_distribution; + int min_hblank_violated; + s64 delay_start_time_fp; + s64 hbp_time_fp; + s64 hactive_time_fp; + s64 diff_abs_fp; + + s64 ratio; +}; + +/** + * Mapper function which outputs colorimetry and dynamic range + * to be used for a given colorspace value when the vsc sdp + * packets are used to change the colorimetry. + */ +static void get_sdp_colorimetry_range(struct dp_panel_private *panel, + u32 colorspace, u32 *colorimetry, u32 *dynamic_range) +{ + + u32 cc; + + /* + * Some rules being used for assignment of dynamic + * range for colorimetry using SDP: + * + * 1) If compliance test is ongoing return sRGB with + * CEA primaries + * 2) For BT2020 cases, dynamic range shall be CEA + * 3) For DCI-P3 cases, as per HW team dynamic range + * shall be VESA for RGB and CEA for YUV content + * Hence defaulting to RGB and picking VESA + * 4) Default shall be sRGB with VESA + */ + + cc = panel->link->get_colorimetry_config(panel->link); + + if (cc) { + *colorimetry = sRGB; + *dynamic_range = CEA; + return; + } + + switch (colorspace) { + case DRM_MODE_COLORIMETRY_BT2020_RGB: + *colorimetry = ITU_R_BT_2020_RGB; + *dynamic_range = CEA; + break; + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + *colorimetry = DCI_P3; + *dynamic_range = VESA; + break; + default: + *colorimetry = sRGB; + *dynamic_range = VESA; + } +} + +/** + * Mapper function which outputs colorimetry to be used for a + * given colorspace value when misc field of MSA is used to + * change the colorimetry. Currently only RGB formats have been + * added. This API will be extended to YUV once its supported on DP. + */ +static u8 get_misc_colorimetry_val(struct dp_panel_private *panel, + u32 colorspace) +{ + u8 colorimetry; + u32 cc; + + cc = panel->link->get_colorimetry_config(panel->link); + /* + * If there is a non-zero value then compliance test-case + * is going on, otherwise we can honor the colorspace setting + */ + if (cc) + return cc; + + switch (colorspace) { + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + colorimetry = 0x7; + break; + case DRM_MODE_DP_COLORIMETRY_SRGB: + colorimetry = 0x4; + break; + case DRM_MODE_DP_COLORIMETRY_RGB_WIDE_GAMUT: + colorimetry = 0x3; + break; + case DRM_MODE_DP_COLORIMETRY_SCRGB: + colorimetry = 0xb; + break; + case DRM_MODE_COLORIMETRY_OPRGB: + colorimetry = 0xc; + break; + default: + colorimetry = 0; + } + + return colorimetry; +} + +static int _tu_param_compare(s64 a, s64 b) +{ + u32 a_int, a_frac, a_sign; + u32 b_int, b_frac, b_sign; + s64 a_temp, b_temp, minus_1; + + if (a == b) + return 0; + + minus_1 = drm_fixp_from_fraction(-1, 1); + + a_int = (a >> 32) & 0x7FFFFFFF; + a_frac = a & 0xFFFFFFFF; + a_sign = (a >> 32) & 0x80000000 ? 1 : 0; + + b_int = (b >> 32) & 0x7FFFFFFF; + b_frac = b & 0xFFFFFFFF; + b_sign = (b >> 32) & 0x80000000 ? 
1 : 0; + + if (a_sign > b_sign) + return 2; + else if (b_sign > a_sign) + return 1; + + if (!a_sign && !b_sign) { /* positive */ + if (a > b) + return 1; + else + return 2; + } else { /* negative */ + a_temp = drm_fixp_mul(a, minus_1); + b_temp = drm_fixp_mul(b, minus_1); + + if (a_temp > b_temp) + return 2; + else + return 1; + } +} + +static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, + struct tu_algo_data *tu) +{ + int nlanes = in->nlanes; + int dsc_num_slices = in->num_of_dsc_slices; + int dsc_num_bytes = 0; + int numerator; + s64 pclk_dsc_fp; + s64 dwidth_dsc_fp; + s64 hbp_dsc_fp; + s64 overhead_dsc; + + int tot_num_eoc_symbols = 0; + int tot_num_hor_bytes = 0; + int tot_num_dummy_bytes = 0; + int dwidth_dsc_bytes = 0; + int eoc_bytes = 0; + + s64 temp1_fp, temp2_fp, temp3_fp; + + tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1); + tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000); + tu->lwidth = in->hactive; + tu->hbp_relative_to_pclk = in->hporch; + tu->nlanes = in->nlanes; + tu->bpp = in->bpp; + tu->pixelEnc = in->pixel_enc; + tu->dsc_en = in->dsc_en; + tu->async_en = in->async_en; + tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1); + tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1); + + if (tu->pixelEnc == 420) { + temp1_fp = drm_fixp_from_fraction(2, 1); + tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp); + tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp); + tu->hbp_relative_to_pclk_fp = + drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2); + } + + if (tu->pixelEnc == 422) { + switch (tu->bpp) { + case 24: + tu->bpp = 16; + tu->bpc = 8; + break; + case 30: + tu->bpp = 20; + tu->bpc = 10; + break; + default: + tu->bpp = 16; + tu->bpc = 8; + break; + } + } else + tu->bpc = tu->bpp/3; + + if (!in->dsc_en) + goto fec_check; + + temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100); + temp2_fp = drm_fixp_from_fraction(in->bpp, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp); + + temp1_fp = drm_fixp_from_fraction(8, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + + numerator = drm_fixp2int(temp3_fp); + + dsc_num_bytes = numerator / dsc_num_slices; + eoc_bytes = dsc_num_bytes % nlanes; + tot_num_eoc_symbols = nlanes * dsc_num_slices; + tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices; + tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices; + + if (dsc_num_bytes == 0) + DP_INFO("incorrect no of bytes per slice=%d\n", dsc_num_bytes); + + dwidth_dsc_bytes = (tot_num_hor_bytes + + tot_num_eoc_symbols + + (eoc_bytes == 0 ? 
0 : tot_num_dummy_bytes)); + overhead_dsc = dwidth_dsc_bytes / tot_num_hor_bytes; + + dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3); + + temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp); + temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp); + pclk_dsc_fp = temp1_fp; + + temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp); + temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp); + hbp_dsc_fp = temp2_fp; + + /* output */ + tu->pclk_fp = pclk_dsc_fp; + tu->lwidth_fp = dwidth_dsc_fp; + tu->hbp_relative_to_pclk_fp = hbp_dsc_fp; + +fec_check: + if (in->fec_en) { + temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */ + tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp); + } +} + +static void _tu_valid_boundary_calc(struct tu_algo_data *tu) +{ + s64 temp1_fp, temp2_fp, temp, temp1, temp2; + int compare_result_1, compare_result_2, compare_result_3; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + + tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp = (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link-1)); + tu->average_valid2_fp = drm_fixp_from_fraction(temp, + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu->n_tus += 1; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp); + temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->n_remainder_symbols_per_lane_fp = temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + tu->last_partial_tu_fp = + drm_fixp_div(tu->n_remainder_symbols_per_lane_fp, + temp1_fp); + + if (tu->n_remainder_symbols_per_lane_fp != 0) + tu->remainder_symbols_exist = 1; + else + tu->remainder_symbols_exist = 0; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes); + tu->n_tus_per_lane = drm_fixp2int(temp1_fp); + + tu->paired_tus = (int)((tu->n_tus_per_lane) / + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus * + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count); + + if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) { + tu->remainder_tus_upper = tu->i_upper_boundary_count; + tu->remainder_tus_lower = tu->remainder_tus - + tu->i_upper_boundary_count; + } else { + tu->remainder_tus_upper = tu->remainder_tus; + tu->remainder_tus_lower = 0; + } + + temp = tu->paired_tus * (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link - 1)) + + (tu->remainder_tus_upper * + tu->new_valid_boundary_link) + + (tu->remainder_tus_lower * + (tu->new_valid_boundary_link - 1)); + tu->total_valid_fp = drm_fixp_from_fraction(temp, 1); + + if (tu->remainder_symbols_exist) { + temp1_fp = tu->total_valid_fp + + tu->n_remainder_symbols_per_lane_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp2_fp = temp2_fp + tu->last_partial_tu_fp; + temp1_fp = drm_fixp_div(temp1_fp, temp2_fp); + } else { + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp1_fp = 
drm_fixp_div(tu->total_valid_fp, temp2_fp); + } + tu->effective_valid_fp = temp1_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_err_fp = tu->average_valid2_fp - temp2_fp; + + tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + + if (temp2_fp) + tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp); + else + tu->n_tus_incl_last_incomplete_tu = 0; + + temp1 = 0; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = tu->average_valid2_fp - temp2_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + temp1 = drm_fixp2int_ceil(temp1_fp); + + temp = tu->i_upper_boundary_count * tu->nlanes; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp2_fp) + temp2 = drm_fixp2int_ceil(temp2_fp); + else + temp2 = 0; + tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2); + + temp1_fp = drm_fixp_from_fraction(8, tu->bpp); + temp2_fp = drm_fixp_from_fraction( + tu->extra_required_bytes_new_tmp, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_tmp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1); + temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_in_link_clk_tmp = + drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_in_link_clk_tmp = 0; + + tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link; + + tu->lower_filler_size_tmp = tu->filler_size_tmp + 1; + + tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp + + tu->lower_filler_size_tmp + + tu->extra_buffer_margin; + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp); + if (compare_result_1 == 2) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp); + if (compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + compare_result_3 = _tu_param_compare(tu->hbp_time_fp, + tu->delay_start_time_fp); + if (compare_result_3 == 2) + compare_result_3 = 0; + else + compare_result_3 = 1; + + if (((tu->even_distribution == 1) || + ((tu->even_distribution_BF == 0) && + (tu->even_distribution_legacy == 0))) && + tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 && + compare_result_2 && + (compare_result_1 || (tu->min_hblank_violated == 1)) && + (tu->new_valid_boundary_link - 1) > 0 && + compare_result_3 && + (tu->delay_start_link_tmp <= 1023)) { + tu->upper_boundary_count = tu->i_upper_boundary_count; + tu->lower_boundary_count = 
tu->i_lower_boundary_count; + tu->err_fp = tu->n_n_err_fp; + tu->boundary_moderation_en = true; + tu->tu_size_desired = tu->tu_size; + tu->valid_boundary_link = tu->new_valid_boundary_link; + tu->effective_valid_recorded_fp = tu->effective_valid_fp; + tu->even_distribution_BF = 1; + tu->delay_start_link = tu->delay_start_link_tmp; + } else if (tu->boundary_mod_lower_err == 0) { + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, + tu->diff_abs_fp); + if (compare_result_1 == 2) + tu->boundary_mod_lower_err = 1; + } +} + +static void _dp_calc_boundary(struct tu_algo_data *tu) +{ + + s64 temp1_fp = 0, temp2_fp = 0; + + do { + tu->err_fp = drm_fixp_from_fraction(1000, 1); + + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction( + tu->delay_start_link_extra_pixclk, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_buffer_margin = + drm_fixp2int_ceil(temp1_fp); + else + tu->extra_buffer_margin = 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp); + + if (temp1_fp) + tu->n_symbols = drm_fixp2int_ceil(temp1_fp); + else + tu->n_symbols = 0; + + for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) { + for (tu->i_upper_boundary_count = 1; + tu->i_upper_boundary_count <= 15; + tu->i_upper_boundary_count++) { + for (tu->i_lower_boundary_count = 1; + tu->i_lower_boundary_count <= 15; + tu->i_lower_boundary_count++) { + _tu_valid_boundary_calc(tu); + } + } + } + tu->delay_start_link_extra_pixclk--; + } while (!tu->boundary_moderation_en && + tu->boundary_mod_lower_err == 1 && + tu->delay_start_link_extra_pixclk != 0); +} + +static void _dp_calc_extra_bytes(struct tu_algo_data *tu) +{ + u64 temp = 0; + s64 temp1_fp = 0, temp2_fp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + temp = drm_fixp2int(temp2_fp); + if (temp && temp2_fp) + tu->extra_bytes = drm_fixp2int_ceil(temp2_fp); + else + tu->extra_bytes = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1); + temp2_fp = drm_fixp_from_fraction(8, tu->bpp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp1_fp) + tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles = drm_fixp2int(temp1_fp); + + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp); +} + +static void _dp_panel_calc_tu(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct tu_algo_data tu; + int compare_result_1, compare_result_2; + u64 temp = 0; + s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0; + + s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */ + s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */ + s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */ + s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000); + + u8 DP_BRUTE_FORCE = 1; + s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */ + uint EXTRA_PIXCLK_CYCLE_DELAY = 4; + uint HBLANK_MARGIN = 4; + + memset(&tu, 0, sizeof(tu)); + + 
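+ /*
+ * The TU (transfer unit) calculation below first derives the ratio of
+ * pixel-data bandwidth to link bandwidth,
+ *     ratio = (pclk * bpp / 8 / nlanes) / lclk,
+ * then searches tu_size in [32, 64] for the value whose
+ * ceil(ratio * tu_size) leaves the smallest rounding error.
+ * For illustration, assuming 1080p60 at 24 bpp over 2 lanes of HBR2
+ * (540 MHz symbol clock): ratio = (148.5 * 3 / 2) / 540 = 0.4125, for
+ * which the search picks tu_size 63 with a valid boundary of
+ * ceil(0.4125 * 63) = 26 symbols.
+ */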
dp_panel_update_tu_timings(in, &tu); + + tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ + + temp1_fp = drm_fixp_from_fraction(4, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp); + temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp); + tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp); + + tu.original_ratio_fp = tu.ratio_fp; + tu.boundary_moderation_en = false; + tu.upper_boundary_count = 0; + tu.lower_boundary_count = 0; + tu.i_upper_boundary_count = 0; + tu.i_lower_boundary_count = 0; + tu.valid_lower_boundary_link = 0; + tu.even_distribution_BF = 0; + tu.even_distribution_legacy = 0; + tu.even_distribution = 0; + tu.delay_start_time_fp = 0; + + tu.err_fp = drm_fixp_from_fraction(1000, 1); + tu.n_err_fp = 0; + tu.n_n_err_fp = 0; + + tu.ratio = drm_fixp2int(tu.ratio_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = tu.lwidth_fp % temp1_fp; + if (temp2_fp != 0 && + !tu.ratio && tu.dsc_en == 0) { + tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp); + tu.ratio = drm_fixp2int(tu.ratio_fp); + if (tu.ratio) + tu.ratio_fp = drm_fixp_from_fraction(1, 1); + } + + if (tu.ratio > 1) + tu.ratio = 1; + + if (tu.ratio == 1) + goto tu_size_calc; + + compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp); + if (!compare_result_1 || compare_result_1 == 1) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp); + if (!compare_result_2 || compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + if (tu.dsc_en && compare_result_1 && compare_result_2) { + HBLANK_MARGIN += 4; + DP_INFO("Info: increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN); + } + +tu_size_calc: + for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { + temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + temp = drm_fixp2int_ceil(temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + tu.n_err_fp = temp1_fp - temp2_fp; + + if (tu.n_err_fp < tu.err_fp) { + tu.err_fp = tu.n_err_fp; + tu.tu_size_desired = tu.tu_size; + } + } + + tu.tu_size_minus1 = tu.tu_size_desired - 1; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = tu.lwidth_fp; + temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu.n_tus += 1; + + tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 
1 : 0; + DP_INFO("Info: n_sym = %d, num_of_tus = %d\n", + tu.valid_boundary_link, tu.n_tus); + + _dp_calc_extra_bytes(&tu); + + tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + + tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk + + tu.filler_size + tu.extra_buffer_margin; + + tu.resulting_valid_fp = + drm_fixp_from_fraction(tu.valid_boundary_link, 1); + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + + temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1); + temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp; + tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp); + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + compare_result_1 = _tu_param_compare(tu.hbp_time_fp, + tu.delay_start_time_fp); + if (compare_result_1 == 2) /* hbp_time_fp < delay_start_time_fp */ + tu.min_hblank_violated = 1; + + tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp); + + compare_result_2 = _tu_param_compare(tu.hactive_time_fp, + tu.delay_start_time_fp); + if (compare_result_2 == 2) + tu.min_hblank_violated = 1; + + tu.delay_start_time_fp = 0; + + /* brute force */ + + tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; + tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp; + + temp = drm_fixp2int(tu.diff_abs_fp); + if (!temp && tu.diff_abs_fp <= 0xffff) + tu.diff_abs_fp = 0; + + /* if(diff_abs < 0) diff_abs *= -1 */ + if (tu.diff_abs_fp < 0) + tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1); + + tu.boundary_mod_lower_err = 0; + if ((tu.diff_abs_fp != 0 && + ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || + (tu.even_distribution_legacy == 0) || + (DP_BRUTE_FORCE == 1))) || + (tu.min_hblank_violated == 1)) { + + _dp_calc_boundary(&tu); + + if (tu.boundary_moderation_en) { + temp1_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count * + tu.valid_boundary_link + + tu.lower_boundary_count * + (tu.valid_boundary_link - 1)), 1); + temp2_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count + + tu.lower_boundary_count), 1); + tu.resulting_valid_fp = + drm_fixp_div(temp1_fp, temp2_fp); + + temp1_fp = drm_fixp_from_fraction( + tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = + drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + + tu.valid_lower_boundary_link = + tu.valid_boundary_link - 1; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, + tu.resulting_valid_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + + tu.tu_size_minus1 = tu.tu_size_desired - 1; + tu.even_distribution_BF = 1; + + temp1_fp = + drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = + drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + } + } + + temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp); + + if (temp2_fp) + temp = drm_fixp2int_ceil(temp2_fp); + else + temp = 0; + + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_div(temp1_fp, temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp2_fp); + + if (tu.async_en) + tu.delay_start_link += 
(int)temp; + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + /* OUTPUTS */ + tu_table->valid_boundary_link = tu.valid_boundary_link; + tu_table->delay_start_link = tu.delay_start_link; + tu_table->boundary_moderation_en = tu.boundary_moderation_en; + tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link; + tu_table->upper_boundary_count = tu.upper_boundary_count; + tu_table->lower_boundary_count = tu.lower_boundary_count; + tu_table->tu_size_minus1 = tu.tu_size_minus1; + + DP_INFO("TU: valid_boundary_link: %d\n", tu_table->valid_boundary_link); + DP_INFO("TU: delay_start_link: %d\n", tu_table->delay_start_link); + DP_INFO("TU: boundary_moderation_en: %d\n", + tu_table->boundary_moderation_en); + DP_INFO("TU: valid_lower_boundary_link: %d\n", + tu_table->valid_lower_boundary_link); + DP_INFO("TU: upper_boundary_count: %d\n", + tu_table->upper_boundary_count); + DP_INFO("TU: lower_boundary_count: %d\n", + tu_table->lower_boundary_count); + DP_INFO("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1); +} + +static void dp_panel_calc_tu_parameters(struct dp_panel *dp_panel, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct dp_tu_calc_input in; + struct dp_panel_info *pinfo; + struct dp_panel_private *panel; + int bw_code; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + pinfo = &dp_panel->pinfo; + bw_code = panel->link->link_params.bw_code; + + in.lclk = drm_dp_bw_code_to_link_rate(bw_code) / 1000; + in.pclk_khz = pinfo->pixel_clk_khz; + in.hactive = pinfo->h_active; + in.hporch = pinfo->h_back_porch + pinfo->h_front_porch + + pinfo->h_sync_width; + in.nlanes = panel->link->link_params.lane_count; + in.bpp = pinfo->bpp; + in.pixel_enc = 444; + in.dsc_en = dp_panel->dsc_en; + in.async_en = 0; + in.fec_en = dp_panel->fec_en; + in.num_of_dsc_slices = pinfo->comp_info.dsc_info.slice_per_pkt; + + switch (pinfo->comp_info.comp_ratio) { + case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: + in.compress_ratio = 200; + break; + case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: + in.compress_ratio = 300; + break; + default: + in.compress_ratio = 100; + } + + _dp_panel_calc_tu(&in, tu_table); +} + +void dp_panel_calc_tu_test(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + _dp_panel_calc_tu(in, tu_table); +} + +static void dp_panel_config_tr_unit(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + u32 dp_tu = 0x0; + u32 valid_boundary = 0x0; + u32 valid_boundary2 = 0x0; + struct dp_vc_tu_mapping_table tu_calc_table; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + if (dp_panel->stream_id != DP_STREAM_0) + return; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + dp_panel_calc_tu_parameters(dp_panel, &tu_calc_table); + + dp_tu |= tu_calc_table.tu_size_minus1; + valid_boundary |= tu_calc_table.valid_boundary_link; + valid_boundary |= (tu_calc_table.delay_start_link << 16); + + valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1); + valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16); + valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20); + + if (tu_calc_table.boundary_moderation_en) + valid_boundary2 |= BIT(0); + + DP_DEBUG("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", + dp_tu, valid_boundary, valid_boundary2); + + catalog->dp_tu = dp_tu; + catalog->valid_boundary = valid_boundary; + catalog->valid_boundary2 
= valid_boundary2; + + catalog->update_transfer_unit(catalog); +} + +enum dp_dsc_ratio_type { + DSC_8BPC_8BPP, + DSC_10BPC_8BPP, + DSC_12BPC_8BPP, + DSC_10BPC_10BPP, + DSC_RATIO_TYPE_MAX +}; + +static u32 dp_dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, + 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e}; + +/* + * DSC 1.1 + * Rate control - Min QP values for each ratio type in dp_dsc_ratio_type + */ +static char dp_dsc_rc_range_min_qp_1_1[][15] = { + {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13}, + {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 17}, + {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21}, + {0, 4, 5, 6, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 15}, + }; + +/* + * DSC 1.1 SCR + * Rate control - Min QP values for each ratio type in dp_dsc_ratio_type + */ +static char dp_dsc_rc_range_min_qp_1_1_scr1[][15] = { + {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 12}, + {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 16}, + {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 17, 20}, + {0, 4, 5, 6, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 15}, + }; + +/* + * DSC 1.1 + * Rate control - Max QP values for each ratio type in dp_dsc_ratio_type + */ +static char dp_dsc_rc_range_max_qp_1_1[][15] = { + {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15}, + {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19}, + {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 19, 20, 21, 21, 23}, + {7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16}, + }; + +/* + * DSC 1.1 SCR + * Rate control - Max QP values for each ratio type in dp_dsc_ratio_type + */ +static char dp_dsc_rc_range_max_qp_1_1_scr1[][15] = { + {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13}, + {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17}, + {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21}, + {7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16}, + }; + +/* + * DSC 1.1 and DSC 1.1 SCR + * Rate control - bpg offset values + */ +static char dp_dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8, + -8, -10, -10, -12, -12, -12, -12}; + +struct dp_dsc_dto_data { + enum msm_display_compression_ratio comp_ratio; + u32 org_bpp; /* bits */ + u32 dto_numerator; + u32 dto_denominator; +}; + +struct dp_dsc_dto_data dto_tbl[] = { + {MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1, 24, 1, 2}, + {MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1, 30, 5, 8}, + {MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1, 24, 1, 3}, + {MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1, 30, 5, 12}, +}; + +static void _dp_panel_get_dto_m_n(enum msm_display_compression_ratio ratio, + u32 org_bpp, u32 *dto_n, u32 *dto_d) +{ + u32 idx; + + for (idx = 0; idx < ARRAY_SIZE(dto_tbl); idx++) { + if (ratio == dto_tbl[idx].comp_ratio && + org_bpp == dto_tbl[idx].org_bpp) { + *dto_n = dto_tbl[idx].dto_numerator; + *dto_d = dto_tbl[idx].dto_denominator; + return; + } + } +} + +static int dp_panel_dsc_create_pps_buf_cmd(struct msm_display_dsc_info *dsc, + char *buf, int pps_id) +{ + char *bp = buf; + char data; + int i, bpp; + + *bp++ = (dsc->version & 0xff); /* pps0 */ + *bp++ = (pps_id & 0xff); /* pps1 */ + bp++; /* pps2, reserved */ + + data = dsc->line_buf_depth & 0x0f; + data |= ((dsc->bpc & 0xf) << 4); + *bp++ = data; /* pps3 */ + + bpp = dsc->bpp; + bpp <<= 4; /* 4 fraction bits */ + data = (bpp >> 8); + data &= 0x03; /* upper two bits */ + data |= ((dsc->block_pred_enable & 0x1) << 5); + data |= ((dsc->convert_rgb & 0x1) << 4); + data |= ((dsc->enable_422 & 0x1) << 3); + data |= ((dsc->vbr_enable & 0x1) << 2); + *bp++ = data; /* pps4 */ + *bp++ = (bpp & 0xff); /* pps5 */ + + *bp++ = ((dsc->pic_height 
>> 8) & 0xff); /* pps6 */ + *bp++ = (dsc->pic_height & 0x0ff); /* pps7 */ + *bp++ = ((dsc->pic_width >> 8) & 0xff); /* pps8 */ + *bp++ = (dsc->pic_width & 0x0ff); /* pps9 */ + + *bp++ = ((dsc->slice_height >> 8) & 0xff);/* pps10 */ + *bp++ = (dsc->slice_height & 0x0ff); /* pps11 */ + *bp++ = ((dsc->slice_width >> 8) & 0xff); /* pps12 */ + *bp++ = (dsc->slice_width & 0x0ff); /* pps13 */ + + *bp++ = ((dsc->chunk_size >> 8) & 0xff);/* pps14 */ + *bp++ = (dsc->chunk_size & 0x0ff); /* pps15 */ + + *bp++ = (dsc->initial_xmit_delay >> 8) & 0x3; /* pps16*/ + *bp++ = (dsc->initial_xmit_delay & 0xff);/* pps17 */ + + *bp++ = ((dsc->initial_dec_delay >> 8) & 0xff); /* pps18 */ + *bp++ = (dsc->initial_dec_delay & 0xff);/* pps19 */ + + bp++; /* pps20, reserved */ + + *bp++ = (dsc->initial_scale_value & 0x3f); /* pps21 */ + + *bp++ = ((dsc->scale_increment_interval >> 8) & 0xff); /* pps22 */ + *bp++ = (dsc->scale_increment_interval & 0xff); /* pps23 */ + + *bp++ = ((dsc->scale_decrement_interval >> 8) & 0xf); /* pps24 */ + *bp++ = (dsc->scale_decrement_interval & 0x0ff);/* pps25 */ + + bp++; /* pps26, reserved */ + + *bp++ = (dsc->first_line_bpg_offset & 0x1f);/* pps27 */ + + *bp++ = ((dsc->nfl_bpg_offset >> 8) & 0xff);/* pps28 */ + *bp++ = (dsc->nfl_bpg_offset & 0x0ff); /* pps29 */ + *bp++ = ((dsc->slice_bpg_offset >> 8) & 0xff);/* pps30 */ + *bp++ = (dsc->slice_bpg_offset & 0x0ff);/* pps31 */ + + *bp++ = ((dsc->initial_offset >> 8) & 0xff);/* pps32 */ + *bp++ = (dsc->initial_offset & 0x0ff); /* pps33 */ + + *bp++ = ((dsc->final_offset >> 8) & 0xff);/* pps34 */ + *bp++ = (dsc->final_offset & 0x0ff); /* pps35 */ + + *bp++ = (dsc->min_qp_flatness & 0x1f); /* pps36 */ + *bp++ = (dsc->max_qp_flatness & 0x1f); /* pps37 */ + + *bp++ = ((dsc->rc_model_size >> 8) & 0xff);/* pps38 */ + *bp++ = (dsc->rc_model_size & 0x0ff); /* pps39 */ + + *bp++ = (dsc->edge_factor & 0x0f); /* pps40 */ + + *bp++ = (dsc->quant_incr_limit0 & 0x1f); /* pps41 */ + *bp++ = (dsc->quant_incr_limit1 & 0x1f); /* pps42 */ + + data = ((dsc->tgt_offset_hi & 0xf) << 4); + data |= (dsc->tgt_offset_lo & 0x0f); + *bp++ = data; /* pps43 */ + + for (i = 0; i < ARRAY_SIZE(dp_dsc_rc_buf_thresh); i++) + *bp++ = (dsc->buf_thresh[i] & 0xff); /* pps44 - pps57 */ + + for (i = 0; i < 15; i++) { /* pps58 - pps87 */ + data = (dsc->range_min_qp[i] & 0x1f); + data <<= 3; + data |= ((dsc->range_max_qp[i] >> 2) & 0x07); + *bp++ = data; + data = (dsc->range_max_qp[i] & 0x03); + data <<= 6; + data |= (dsc->range_bpg_offset[i] & 0x3f); + *bp++ = data; + } + + return 88; +} + +static void dp_panel_dsc_prepare_pps_packet(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_dsc_cfg_data *dsc; + u8 *pps, *parity; + u32 *pps_word, *parity_word; + int i, index_4; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + dsc = &panel->catalog->dsc; + pps = dsc->pps; + pps_word = dsc->pps_word; + parity = dsc->parity; + parity_word = dsc->parity_word; + + memset(parity, 0, sizeof(dsc->parity)); + + dsc->pps_word_len = dsc->pps_len >> 2; + dsc->parity_len = dsc->pps_word_len; + dsc->parity_word_len = (dsc->parity_len >> 2) + 1; + + for (i = 0; i < dsc->pps_word_len; i++) { + index_4 = i << 2; + pps_word[i] = pps[index_4 + 0] << 0 | + pps[index_4 + 1] << 8 | + pps[index_4 + 2] << 16 | + pps[index_4 + 3] << 24; + + parity[i] = dp_header_get_parity(pps_word[i]); + } + + for (i = 0; i < dsc->parity_word_len; i++) { + index_4 = i << 2; + parity_word[i] = parity[index_4 + 0] << 0 | + parity[index_4 + 1] << 8 | + parity[index_4 + 2] << 16 | + 
parity[index_4 + 3] << 24; + } +} + +static void _dp_panel_dsc_get_num_extra_pclk(struct msm_display_dsc_info *dsc, + enum msm_display_compression_ratio ratio) +{ + unsigned int dto_n = 0, dto_d = 0, remainder; + int ack_required, last_few_ack_required, accum_ack; + int last_few_pclk, last_few_pclk_required; + int start, temp, line_width = dsc->pic_width/2; + s64 temp1_fp, temp2_fp; + + _dp_panel_get_dto_m_n(ratio, dsc->bpc * 3, &dto_n, &dto_d); + + ack_required = dsc->pclk_per_line; + + /* number of pclk cycles left outside of the complete DTO set */ + last_few_pclk = line_width % dto_d; + + /* number of pclk cycles outside of the complete dto */ + temp1_fp = drm_fixp_from_fraction(line_width, dto_d); + temp2_fp = drm_fixp_from_fraction(dto_n, 1); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp1_fp); + last_few_ack_required = ack_required - temp; + + /* + * check how many more pclk is needed to + * accommodate the last few ack required + */ + remainder = dto_n; + accum_ack = 0; + last_few_pclk_required = 0; + while (accum_ack < last_few_ack_required) { + last_few_pclk_required++; + + if (remainder >= dto_n) + start = remainder; + else + start = remainder + dto_d; + + remainder = start - dto_n; + if (remainder < dto_n) + accum_ack++; + } + + /* if fewer pclk than required */ + if (last_few_pclk < last_few_pclk_required) + dsc->extra_width = last_few_pclk_required - last_few_pclk; + else + dsc->extra_width = 0; + + DP_DEBUG("extra pclks required: %d\n", dsc->extra_width); +} + +static void _dp_panel_dsc_bw_overhead_calc(struct dp_panel *dp_panel, + struct msm_display_dsc_info *dsc, + struct dp_display_mode *dp_mode, u32 dsc_byte_cnt) +{ + int num_slices, tot_num_eoc_symbols; + int tot_num_hor_bytes, tot_num_dummy_bytes; + int dwidth_dsc_bytes, eoc_bytes; + u32 num_lanes; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + num_lanes = panel->link->link_params.lane_count; + num_slices = dsc->slice_per_pkt; + + eoc_bytes = dsc_byte_cnt % num_lanes; + tot_num_eoc_symbols = num_lanes * num_slices; + tot_num_hor_bytes = dsc_byte_cnt * num_slices; + tot_num_dummy_bytes = (num_lanes - eoc_bytes) * num_slices; + + if (!eoc_bytes) + tot_num_dummy_bytes = 0; + + dwidth_dsc_bytes = tot_num_hor_bytes + tot_num_eoc_symbols + + tot_num_dummy_bytes; + + DP_DEBUG("dwidth_dsc_bytes:%d, tot_num_hor_bytes:%d\n", + dwidth_dsc_bytes, tot_num_hor_bytes); + + dp_mode->dsc_overhead_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, + tot_num_hor_bytes); + dp_mode->timing.dsc_overhead_fp = dp_mode->dsc_overhead_fp; +} + +static void dp_panel_dsc_pclk_param_calc(struct dp_panel *dp_panel, + struct msm_display_dsc_info *dsc, + enum msm_display_compression_ratio ratio, + struct dp_display_mode *dp_mode) +{ + int slice_per_pkt, slice_per_intf, intf_width; + int bytes_in_slice, total_bytes_per_intf; + int comp_ratio; + s64 temp1_fp, temp2_fp; + s64 numerator_fp, denominator_fp; + s64 dsc_byte_count_fp; + u32 dsc_byte_count, temp1, temp2; + + intf_width = dp_mode->timing.h_active; + if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt || + (intf_width < dsc->slice_width)) + return; + + slice_per_pkt = dsc->slice_per_pkt; + slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width); + + if (slice_per_pkt > slice_per_intf) + slice_per_pkt = 1; + + bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8); + total_bytes_per_intf = bytes_in_slice * slice_per_intf; + + dsc->bytes_in_slice = bytes_in_slice; + dsc->bytes_per_pkt = bytes_in_slice * 
slice_per_pkt; + dsc->pkt_per_line = slice_per_intf / slice_per_pkt; + + switch (ratio) { + case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: + comp_ratio = 200; + break; + case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: + comp_ratio = 300; + break; + default: + comp_ratio = 100; + break; + } + + temp1_fp = drm_fixp_from_fraction(comp_ratio, 100); + temp2_fp = drm_fixp_from_fraction(slice_per_pkt * 8, 1); + denominator_fp = drm_fixp_mul(temp1_fp, temp2_fp); + numerator_fp = drm_fixp_from_fraction(intf_width * dsc->bpc * 3, 1); + dsc_byte_count_fp = drm_fixp_div(numerator_fp, denominator_fp); + dsc_byte_count = drm_fixp2int_ceil(dsc_byte_count_fp); + + temp1 = dsc_byte_count * slice_per_intf; + temp2 = temp1; + if (temp1 % 3 != 0) + temp1 += 3 - (temp1 % 3); + + dsc->eol_byte_num = temp1 - temp2; + + temp1_fp = drm_fixp_from_fraction(slice_per_intf, 6); + temp2_fp = drm_fixp_mul(dsc_byte_count_fp, temp1_fp); + dsc->pclk_per_line = drm_fixp2int_ceil(temp2_fp); + + _dp_panel_dsc_get_num_extra_pclk(dsc, ratio); + dsc->pclk_per_line--; + + _dp_panel_dsc_bw_overhead_calc(dp_panel, dsc, dp_mode, dsc_byte_count); +} + +static void dp_panel_dsc_populate_static_params( + struct msm_display_dsc_info *dsc, struct dp_panel *panel) +{ + int bpp, bpc; + int mux_words_size; + int groups_per_line, groups_total; + int min_rate_buffer_size; + int hrd_delay; + int pre_num_extra_mux_bits, num_extra_mux_bits; + int slice_bits; + int data; + int final_value, final_scale; + int ratio_index, mod_offset; + int line_buf_depth_raw, line_buf_depth; + + dsc->version = 0x11; + dsc->scr_rev = 0; + dsc->rc_model_size = 8192; + + if (dsc->version == 0x11 && dsc->scr_rev == 0x1) + dsc->first_line_bpg_offset = 15; + else + dsc->first_line_bpg_offset = 12; + + dsc->edge_factor = 6; + dsc->tgt_offset_hi = 3; + dsc->tgt_offset_lo = 3; + dsc->enable_422 = 0; + dsc->convert_rgb = 1; + dsc->vbr_enable = 0; + + dsc->buf_thresh = dp_dsc_rc_buf_thresh; + + bpp = dsc->bpp; + bpc = dsc->bpc; + + if (bpc == 12 && bpp == 8) + ratio_index = DSC_12BPC_8BPP; + else if (bpc == 10 && bpp == 8) + ratio_index = DSC_10BPC_8BPP; + else if (bpc == 10 && bpp == 10) + ratio_index = DSC_10BPC_10BPP; + else + ratio_index = DSC_8BPC_8BPP; + + if (dsc->version == 0x11 && dsc->scr_rev == 0x1) { + dsc->range_min_qp = + dp_dsc_rc_range_min_qp_1_1_scr1[ratio_index]; + dsc->range_max_qp = + dp_dsc_rc_range_max_qp_1_1_scr1[ratio_index]; + } else { + dsc->range_min_qp = dp_dsc_rc_range_min_qp_1_1[ratio_index]; + dsc->range_max_qp = dp_dsc_rc_range_max_qp_1_1[ratio_index]; + } + dsc->range_bpg_offset = dp_dsc_rc_range_bpg_offset; + + if (bpp == 8) { + dsc->initial_offset = 6144; + dsc->initial_xmit_delay = 512; + } else if (bpp == 10) { + dsc->initial_offset = 5632; + dsc->initial_xmit_delay = 410; + } else { + dsc->initial_offset = 2048; + dsc->initial_xmit_delay = 341; + } + + line_buf_depth_raw = panel->dsc_dpcd[5] & 0x0f; + line_buf_depth = (line_buf_depth_raw == 8) ? 
8 : + (line_buf_depth_raw + 9); + dsc->line_buf_depth = min(line_buf_depth, dsc->bpc + 1); + + if (bpc == 8) { + dsc->input_10_bits = 0; + dsc->min_qp_flatness = 3; + dsc->max_qp_flatness = 12; + dsc->quant_incr_limit0 = 11; + dsc->quant_incr_limit1 = 11; + mux_words_size = 48; + } else if (bpc == 10) { /* 10bpc */ + dsc->input_10_bits = 1; + dsc->min_qp_flatness = 7; + dsc->max_qp_flatness = 16; + dsc->quant_incr_limit0 = 15; + dsc->quant_incr_limit1 = 15; + mux_words_size = 48; + } else { /* 12 bpc */ + dsc->input_10_bits = 0; + dsc->min_qp_flatness = 11; + dsc->max_qp_flatness = 20; + dsc->quant_incr_limit0 = 19; + dsc->quant_incr_limit1 = 19; + mux_words_size = 64; + } + + mod_offset = dsc->slice_width % 3; + switch (mod_offset) { + case 0: + dsc->slice_last_group_size = 2; + break; + case 1: + dsc->slice_last_group_size = 0; + break; + case 2: + dsc->slice_last_group_size = 1; + break; + default: + break; + } + + dsc->det_thresh_flatness = 2 << (bpc - 8); + + groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3); + + dsc->chunk_size = dsc->slice_width * bpp / 8; + if ((dsc->slice_width * bpp) % 8) + dsc->chunk_size++; + + /* rbs-min */ + min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset + + dsc->initial_xmit_delay * bpp + + groups_per_line * dsc->first_line_bpg_offset; + + hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, bpp); + + dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay; + + dsc->initial_scale_value = 8 * dsc->rc_model_size / + (dsc->rc_model_size - dsc->initial_offset); + + slice_bits = 8 * dsc->chunk_size * dsc->slice_height; + + groups_total = groups_per_line * dsc->slice_height; + + data = dsc->first_line_bpg_offset * 2048; + + dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1)); + + pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * bpc + 4) - 2); + + num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size - + ((slice_bits - pre_num_extra_mux_bits) % mux_words_size)); + + data = 2048 * (dsc->rc_model_size - dsc->initial_offset + + num_extra_mux_bits); + dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total); + + data = dsc->initial_xmit_delay * bpp; + final_value = dsc->rc_model_size - data + num_extra_mux_bits; + + final_scale = 8 * dsc->rc_model_size / + (dsc->rc_model_size - final_value); + + dsc->final_offset = final_value; + + data = (final_scale - 9) * (dsc->nfl_bpg_offset + + dsc->slice_bpg_offset); + dsc->scale_increment_interval = (2048 * dsc->final_offset) / data; + + dsc->scale_decrement_interval = groups_per_line / + (dsc->initial_scale_value - 8); +} + +struct dp_dsc_slices_per_line { + u32 min_ppr; + u32 max_ppr; + u8 num_slices; +}; + +struct dp_dsc_peak_throughput { + u32 index; + u32 peak_throughput; +}; + +struct dp_dsc_slice_caps_bit_map { + u32 num_slices; + u32 bit_index; +}; + +const struct dp_dsc_slices_per_line slice_per_line_tbl[] = { + {0, 340, 1 }, + {340, 680, 2 }, + {680, 1360, 4 }, + {1360, 3200, 8 }, + {3200, 4800, 12 }, + {4800, 6400, 16 }, + {6400, 8000, 20 }, + {8000, 9600, 24 } +}; + +const struct dp_dsc_peak_throughput peak_throughput_mode_0_tbl[] = { + {0, 0}, + {1, 340}, + {2, 400}, + {3, 450}, + {4, 500}, + {5, 550}, + {6, 600}, + {7, 650}, + {8, 700}, + {9, 750}, + {10, 800}, + {11, 850}, + {12, 900}, + {13, 950}, + {14, 1000}, +}; + +const struct dp_dsc_slice_caps_bit_map slice_caps_bit_map_tbl[] = { + {1, 0}, + {2, 1}, + {4, 3}, + {6, 4}, + {8, 5}, + {10, 6}, + {12, 7}, + {16, 0}, + {20, 1}, + {24, 2}, +}; + +static bool dp_panel_check_slice_support(u32 num_slices, u32 raw_data_1, + 
u32 raw_data_2) +{ + const struct dp_dsc_slice_caps_bit_map *bcap; + u32 raw_data; + int i; + + if (num_slices <= 12) + raw_data = raw_data_1; + else + raw_data = raw_data_2; + + for (i = 0; i < ARRAY_SIZE(slice_caps_bit_map_tbl); i++) { + bcap = &slice_caps_bit_map_tbl[i]; + + if (bcap->num_slices == num_slices) { + raw_data &= (1 << bcap->bit_index); + + if (raw_data) + return true; + else + return false; + } + } + + return false; +} + +static int dp_panel_dsc_prepare_basic_params( + struct msm_compression_info *comp_info, + const struct dp_display_mode *dp_mode, + struct dp_panel *dp_panel) +{ + int i; + const struct dp_dsc_slices_per_line *rec; + const struct dp_dsc_peak_throughput *tput; + u32 slice_width; + u32 ppr = dp_mode->timing.pixel_clk_khz/1000; + u32 max_slice_width; + u32 ppr_max_index; + u32 peak_throughput; + u32 ppr_per_slice; + u32 slice_caps_1; + u32 slice_caps_2; + + comp_info->dsc_info.slice_per_pkt = 0; + for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) { + rec = &slice_per_line_tbl[i]; + if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) { + comp_info->dsc_info.slice_per_pkt = rec->num_slices; + i++; + break; + } + } + + if (comp_info->dsc_info.slice_per_pkt == 0) + return -EINVAL; + + ppr_max_index = dp_panel->dsc_dpcd[11] &= 0xf; + if (!ppr_max_index || ppr_max_index >= 15) { + DP_DEBUG("Throughput mode 0 not supported"); + return -EINVAL; + } + + tput = &peak_throughput_mode_0_tbl[ppr_max_index]; + peak_throughput = tput->peak_throughput; + + max_slice_width = dp_panel->dsc_dpcd[12] * 320; + slice_width = (dp_mode->timing.h_active / + comp_info->dsc_info.slice_per_pkt); + + ppr_per_slice = ppr/comp_info->dsc_info.slice_per_pkt; + + slice_caps_1 = dp_panel->dsc_dpcd[4]; + slice_caps_2 = dp_panel->dsc_dpcd[13] & 0x7; + + /* + * There are 3 conditions to check for sink support: + * 1. The slice width cannot exceed the maximum. + * 2. The ppr per slice cannot exceed the maximum. + * 3. The number of slices must be explicitly supported. 
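+ * The loop below steps through slice_per_line_tbl, increasing
+ * slice_per_pkt until all three conditions hold, and gives up with
+ * -EINVAL once the table is exhausted.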
+ */ + while (slice_width >= max_slice_width || + ppr_per_slice > peak_throughput || + !dp_panel_check_slice_support( + comp_info->dsc_info.slice_per_pkt, slice_caps_1, + slice_caps_2)) { + if (i == ARRAY_SIZE(slice_per_line_tbl)) + return -EINVAL; + + rec = &slice_per_line_tbl[i]; + comp_info->dsc_info.slice_per_pkt = rec->num_slices; + slice_width = (dp_mode->timing.h_active / + comp_info->dsc_info.slice_per_pkt); + ppr_per_slice = ppr/comp_info->dsc_info.slice_per_pkt; + i++; + } + + comp_info->dsc_info.block_pred_enable = + dp_panel->sink_dsc_caps.block_pred_en; + comp_info->dsc_info.vbr_enable = 0; + comp_info->dsc_info.enable_422 = 0; + comp_info->dsc_info.convert_rgb = 1; + comp_info->dsc_info.input_10_bits = 0; + + comp_info->dsc_info.pic_width = dp_mode->timing.h_active; + comp_info->dsc_info.pic_height = dp_mode->timing.v_active; + comp_info->dsc_info.slice_width = slice_width; + + if (comp_info->dsc_info.pic_height % 16 == 0) + comp_info->dsc_info.slice_height = 16; + else if (comp_info->dsc_info.pic_height % 12 == 0) + comp_info->dsc_info.slice_height = 12; + else + comp_info->dsc_info.slice_height = 15; + + comp_info->dsc_info.bpc = dp_mode->timing.bpp / 3; + comp_info->dsc_info.bpp = comp_info->dsc_info.bpc; + comp_info->dsc_info.full_frame_slices = + DIV_ROUND_UP(dp_mode->timing.h_active, slice_width); + + comp_info->comp_type = MSM_DISPLAY_COMPRESSION_DSC; + comp_info->comp_ratio = MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1; + return 0; +} + +static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func) +{ + int rlen, rc = 0; + struct dp_panel_private *panel; + struct drm_dp_link *link_info; + struct drm_dp_aux *drm_aux; + u8 *dpcd, rx_feature, temp; + u32 dfp_count = 0, offset = DP_DPCD_REV; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + dpcd = dp_panel->dpcd; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + drm_aux = panel->aux->drm_aux; + link_info = &dp_panel->link_info; + + /* reset vsc data */ + panel->vsc_supported = false; + panel->vscext_supported = false; + panel->vscext_chaining_supported = false; + + if (panel->custom_dpcd) { + DP_DEBUG("skip dpcd read in debug mode\n"); + goto skip_dpcd_read; + } + + rlen = drm_dp_dpcd_read(drm_aux, DP_TRAINING_AUX_RD_INTERVAL, &temp, 1); + if (rlen != 1) { + DP_ERR("error reading DP_TRAINING_AUX_RD_INTERVAL\n"); + rc = -EINVAL; + goto end; + } + + /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */ + if (temp & BIT(7)) { + DP_DEBUG("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n"); + offset = DPRX_EXTENDED_DPCD_FIELD; + } + + rlen = drm_dp_dpcd_read(drm_aux, offset, + dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1)); + if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) { + DP_ERR("dpcd read failed, rlen=%d\n", rlen); + if (rlen == -ETIMEDOUT) + rc = rlen; + else + rc = -EINVAL; + + goto end; + } + + print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DPCD: ", + DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false); + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, + DPRX_FEATURE_ENUMERATION_LIST, &rx_feature, 1); + if (rlen != 1) { + DP_DEBUG("failed to read DPRX_FEATURE_ENUMERATION_LIST\n"); + rx_feature = 0; + } + +skip_dpcd_read: + if (panel->custom_dpcd) + rx_feature = dp_panel->dpcd[DP_RECEIVER_CAP_SIZE + 1]; + + panel->vsc_supported = !!(rx_feature & + VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED); + panel->vscext_supported = !!(rx_feature & VSC_EXT_VESA_SDP_SUPPORTED); + panel->vscext_chaining_supported = !!(rx_feature & + VSC_EXT_VESA_SDP_CHAINING_SUPPORTED); + + 
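+ /*
+ * These three flags mirror bits 3-5 of DPRX_FEATURE_ENUMERATION_LIST
+ * (DPCD 0x2210): VSC SDP colorimetry support, VSC extension for VESA
+ * SDPs, and chaining of those extension SDPs.
+ */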
DP_DEBUG("vsc=%d, vscext=%d, vscext_chaining=%d\n", + panel->vsc_supported, panel->vscext_supported, + panel->vscext_chaining_supported); + + link_info->revision = dpcd[DP_DPCD_REV]; + panel->major = (link_info->revision >> 4) & 0x0f; + panel->minor = link_info->revision & 0x0f; + + /* override link params updated in dp_panel_init_panel_info */ + link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz, + drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE])); + + link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + + if (multi_func) + link_info->num_lanes = min_t(unsigned int, + link_info->num_lanes, 2); + + DP_DEBUG("version:%d.%d, rate:%d, lanes:%d\n", panel->major, + panel->minor, link_info->rate, link_info->num_lanes); + + if (drm_dp_enhanced_frame_cap(dpcd)) + link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + dfp_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_DOWN_STREAM_PORT_COUNT; + + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) + && (dpcd[DP_DPCD_REV] > 0x10)) { + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, + DP_DOWNSTREAM_PORT_0, dp_panel->ds_ports, + DP_MAX_DOWNSTREAM_PORTS); + if (rlen < DP_MAX_DOWNSTREAM_PORTS) { + DP_ERR("ds port status failed, rlen=%d\n", rlen); + rc = -EINVAL; + goto end; + } + } + + if (dfp_count > DP_MAX_DS_PORT_COUNT) + DP_DEBUG("DS port count %d greater that max (%d) supported\n", + dfp_count, DP_MAX_DS_PORT_COUNT); + +end: + return rc; +} + +static int dp_panel_set_default_link_params(struct dp_panel *dp_panel) +{ + struct drm_dp_link *link_info; + const int default_bw_code = 162000; + const int default_num_lanes = 1; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + link_info = &dp_panel->link_info; + link_info->rate = default_bw_code; + link_info->num_lanes = default_num_lanes; + DP_DEBUG("link_rate=%d num_lanes=%d\n", + link_info->rate, link_info->num_lanes); + + return 0; +} + +static bool dp_panel_validate_edid(struct edid *edid, size_t edid_size) +{ + if (!edid || (edid_size < EDID_LENGTH)) + return false; + + if (EDID_LENGTH * (edid->extensions + 1) > edid_size) { + DP_ERR("edid size does not match allocated.\n"); + return false; + } + + if (!drm_edid_is_valid(edid)) { + DP_ERR("invalid edid.\n"); + return false; + } + return true; +} + +static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid, + size_t edid_size) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (edid && dp_panel_validate_edid((struct edid *)edid, edid_size)) { + dp_panel->edid_ctrl->edid = (struct edid *)edid; + panel->custom_edid = true; + } else { + panel->custom_edid = false; + dp_panel->edid_ctrl->edid = NULL; + } + + DP_DEBUG("%d\n", panel->custom_edid); + return 0; +} + +static int dp_panel_set_dpcd(struct dp_panel *dp_panel, u8 *dpcd) +{ + struct dp_panel_private *panel; + u8 *dp_dpcd; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_dpcd = dp_panel->dpcd; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dpcd) { + memcpy(dp_dpcd, dpcd, DP_RECEIVER_CAP_SIZE + + DP_RECEIVER_EXT_CAP_SIZE + 1); + panel->custom_dpcd = true; + } else { + panel->custom_dpcd = false; + } + + DP_DEBUG("%d\n", panel->custom_dpcd); + + return 0; +} + +static int dp_panel_read_edid(struct dp_panel *dp_panel, + struct drm_connector *connector) +{ + int ret = 0; + struct dp_panel_private *panel; + struct edid 
*edid; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (panel->custom_edid) { + DP_DEBUG("skip edid read in debug mode\n"); + goto end; + } + + sde_get_edid(connector, &panel->aux->drm_aux->ddc, + (void **)&dp_panel->edid_ctrl); + if (!dp_panel->edid_ctrl->edid) { + DP_ERR("EDID read failed\n"); + ret = -EINVAL; + goto end; + } +end: + edid = dp_panel->edid_ctrl->edid; + dp_panel->audio_supported = drm_detect_monitor_audio(edid); + + return ret; +} + +static void dp_panel_decode_dsc_dpcd(struct dp_panel *dp_panel) +{ + if (dp_panel->dsc_dpcd[0]) { + dp_panel->sink_dsc_caps.dsc_capable = true; + dp_panel->sink_dsc_caps.version = dp_panel->dsc_dpcd[1]; + dp_panel->sink_dsc_caps.block_pred_en = + dp_panel->dsc_dpcd[6] ? true : false; + dp_panel->sink_dsc_caps.color_depth = + dp_panel->dsc_dpcd[10]; + + if (dp_panel->sink_dsc_caps.version >= 0x11) + dp_panel->dsc_en = true; + } else { + dp_panel->sink_dsc_caps.dsc_capable = false; + dp_panel->dsc_en = false; + } + + dp_panel->widebus_en = dp_panel->dsc_en; +} + +static void dp_panel_read_sink_dsc_caps(struct dp_panel *dp_panel) +{ + int rlen; + struct dp_panel_private *panel; + int dpcd_rev; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + dpcd_rev = dp_panel->dpcd[DP_DPCD_REV]; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + if (panel->parser->dsc_feature_enable && dpcd_rev >= 0x14) { + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DSC_SUPPORT, + dp_panel->dsc_dpcd, (DP_RECEIVER_DSC_CAP_SIZE + 1)); + if (rlen < (DP_RECEIVER_DSC_CAP_SIZE + 1)) { + DP_DEBUG("dsc dpcd read failed, rlen=%d\n", rlen); + return; + } + + print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DSC DPCD: ", + DUMP_PREFIX_NONE, 8, 1, dp_panel->dsc_dpcd, rlen, + false); + + dp_panel_decode_dsc_dpcd(dp_panel); + } +} + +static void dp_panel_read_sink_fec_caps(struct dp_panel *dp_panel) +{ + int rlen; + struct dp_panel_private *panel; + s64 fec_overhead_fp = drm_fixp_from_fraction(1, 1); + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + rlen = drm_dp_dpcd_readb(panel->aux->drm_aux, DP_FEC_CAPABILITY, + &dp_panel->fec_dpcd); + if (rlen < 1) { + DP_ERR("fec capability read failed, rlen=%d\n", rlen); + return; + } + + dp_panel->fec_en = dp_panel->fec_dpcd & DP_FEC_CAPABLE; + if (dp_panel->fec_en) + fec_overhead_fp = drm_fixp_from_fraction(100000, 97582); + + dp_panel->fec_overhead_fp = fec_overhead_fp; + + return; +} + +static int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector, bool multi_func) +{ + int rc = 0, rlen, count, downstream_ports; + const int count_len = 1; + struct dp_panel_private *panel; + + if (!dp_panel || !connector) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rc = dp_panel_read_dpcd(dp_panel, multi_func); + if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code( + dp_panel->link_info.rate)) || !is_lane_count_valid( + dp_panel->link_info.num_lanes) || + ((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) > + dp_panel->max_bw_code)) { + if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) { + DP_ERR("DPCD read failed, return early\n"); + goto end; + } + DP_ERR("panel dpcd read failed/incorrect, set default params\n"); + dp_panel_set_default_link_params(dp_panel); + } + + downstream_ports = 
dp_panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT; + + if (downstream_ports) { + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT, + &count, count_len); + if (rlen == count_len) { + count = DP_GET_SINK_COUNT(count); + if (!count) { + DP_ERR("no downstream ports connected\n"); + panel->link->sink_count.count = 0; + rc = -ENOTCONN; + goto end; + } + } + } + + rc = dp_panel_read_edid(dp_panel, connector); + if (rc) { + DP_ERR("panel edid read failed, set failsafe mode\n"); + return rc; + } + + dp_panel->widebus_en = panel->parser->has_widebus; + dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable; + dp_panel->fec_feature_enable = panel->parser->fec_feature_enable; + + dp_panel->fec_en = false; + dp_panel->dsc_en = false; + + if (dp_panel->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && + dp_panel->fec_feature_enable) { + dp_panel_read_sink_fec_caps(dp_panel); + + if (dp_panel->dsc_feature_enable && dp_panel->fec_en) + dp_panel_read_sink_dsc_caps(dp_panel); + } + + DP_INFO("fec_en=%d, dsc_en=%d, widebus_en=%d\n", dp_panel->fec_en, + dp_panel->dsc_en, dp_panel->widebus_en); +end: + return rc; +} + +static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz) +{ + struct dp_link_params *link_params; + struct dp_panel_private *panel; + const u32 max_supported_bpp = 30; + u32 min_supported_bpp = 18; + u32 bpp = 0, data_rate_khz = 0, tmds_max_clock = 0; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->dsc_en) + min_supported_bpp = 24; + + bpp = min_t(u32, mode_edid_bpp, max_supported_bpp); + + link_params = &panel->link->link_params; + + data_rate_khz = link_params->lane_count * + drm_dp_bw_code_to_link_rate(link_params->bw_code) * 8; + tmds_max_clock = dp_panel->connector->display_info.max_tmds_clock; + + for (; bpp > min_supported_bpp; bpp -= 6) { + if (dp_panel->dsc_en) { + if (bpp == 36 && !(dp_panel->sink_dsc_caps.color_depth + & DP_DSC_12_BPC)) + continue; + else if (bpp == 30 && + !(dp_panel->sink_dsc_caps.color_depth & + DP_DSC_10_BPC)) + continue; + else if (bpp == 24 && + !(dp_panel->sink_dsc_caps.color_depth & + DP_DSC_8_BPC)) + continue; + } + + if (tmds_max_clock > 0 && + mult_frac(mode_pclk_khz, bpp, 24) > tmds_max_clock) + continue; + + if (mode_pclk_khz * bpp <= data_rate_khz) + break; + } + + if (bpp < min_supported_bpp) + DP_ERR("bpp %d is below minimum supported bpp %d\n", bpp, + min_supported_bpp); + if (dp_panel->dsc_en && bpp != 24 && bpp != 30 && bpp != 36) + DP_ERR("bpp %d is not supported when dsc is enabled\n", bpp); + + return bpp; +} + +static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz) +{ + struct dp_panel_private *panel; + u32 bpp = mode_edid_bpp; + + if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + DP_ERR("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->video_test) + bpp = dp_link_bit_depth_to_bpp( + panel->link->test_video.test_bit_depth); + else + bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + mode_pclk_khz); + + return bpp; +} + +static void dp_panel_set_test_mode(struct dp_panel_private *panel, + struct dp_display_mode *mode) +{ + struct dp_panel_info *pinfo = NULL; + struct dp_link_test_video *test_info = NULL; + + if (!panel) { + DP_ERR("invalid params\n"); + return; + } + + pinfo = &mode->timing; + test_info = &panel->link->test_video; + + pinfo->h_active = test_info->test_h_width; + pinfo->h_sync_width 
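
A note on the bpp search in dp_panel_get_supported_bpp above: it walks down from 30 bpp in steps of 6 until the raw video rate (pclk_khz * bpp) fits into the link bandwidth (lane_count * link_rate_khz * 8 bits per symbol), with the extra DSC colour-depth and TMDS checks left aside here. The standalone sketch below reproduces only that core arithmetic; the 4-lane HBR link and 1080p60 pixel clock are assumed example values, not taken from this patch.

/* User-space sketch of the bpp selection loop; compile with any C compiler. */
#include <stdio.h>

static unsigned int pick_bpp(unsigned int pclk_khz, unsigned int lanes,
			     unsigned int link_rate_khz, unsigned int max_bpp,
			     unsigned int min_bpp)
{
	/* 8 payload bits per symbol per lane, mirroring data_rate_khz above */
	unsigned long long data_rate_khz =
		(unsigned long long)lanes * link_rate_khz * 8;
	unsigned int bpp;

	for (bpp = max_bpp; bpp > min_bpp; bpp -= 6)
		if ((unsigned long long)pclk_khz * bpp <= data_rate_khz)
			break;

	return bpp;
}

int main(void)
{
	/* assumed example: 1080p60 (148500 kHz) on 4 lanes of HBR (270000 kHz) */
	printf("chosen bpp = %u\n", pick_bpp(148500, 4, 270000, 30, 18));
	return 0;
}
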
= test_info->test_hsync_width; + pinfo->h_back_porch = test_info->test_h_start - + test_info->test_hsync_width; + pinfo->h_front_porch = test_info->test_h_total - + (test_info->test_h_start + test_info->test_h_width); + + pinfo->v_active = test_info->test_v_height; + pinfo->v_sync_width = test_info->test_vsync_width; + pinfo->v_back_porch = test_info->test_v_start - + test_info->test_vsync_width; + pinfo->v_front_porch = test_info->test_v_total - + (test_info->test_v_start + test_info->test_v_height); + + pinfo->bpp = dp_link_bit_depth_to_bpp(test_info->test_bit_depth); + pinfo->h_active_low = test_info->test_hsync_pol; + pinfo->v_active_low = test_info->test_vsync_pol; + + pinfo->refresh_rate = test_info->test_rr_n; + pinfo->pixel_clk_khz = test_info->test_h_total * + test_info->test_v_total * pinfo->refresh_rate; + + if (test_info->test_rr_d == 0) + pinfo->pixel_clk_khz /= 1000; + else + pinfo->pixel_clk_khz /= 1001; + + if (test_info->test_h_width == 640) + pinfo->pixel_clk_khz = 25170; +} + +static int dp_panel_get_modes(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->video_test) { + dp_panel_set_test_mode(panel, mode); + return 1; + } else if (dp_panel->edid_ctrl->edid) { + return _sde_edid_update_modes(connector, dp_panel->edid_ctrl); + } + + /* fail-safe mode */ + memcpy(&mode->timing, &fail_safe, + sizeof(fail_safe)); + return 1; +} + +static void dp_panel_handle_sink_request(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { + u8 checksum; + + if (dp_panel->edid_ctrl->edid) + checksum = sde_get_edid_checksum(dp_panel->edid_ctrl); + else + checksum = dp_panel->connector->checksum; + + panel->link->send_edid_checksum(panel->link, checksum); + panel->link->send_test_response(panel->link); + } +} + +static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) +{ + u32 hsync_start_x, hsync_end_x; + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + struct dp_panel_info *pinfo; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + if (dp_panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", dp_panel->stream_id); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + pinfo = &panel->dp_panel.pinfo; + + if (!panel->panel_on) { + DP_DEBUG("DP panel not enabled, handle TPG on next panel on\n"); + return; + } + + if (!enable) { + panel->catalog->tpg_config(catalog, false); + return; + } + + /* TPG config */ + catalog->hsync_period = pinfo->h_sync_width + pinfo->h_back_porch + + pinfo->h_active + pinfo->h_front_porch; + catalog->vsync_period = pinfo->v_sync_width + pinfo->v_back_porch + + pinfo->v_active + pinfo->v_front_porch; + + catalog->display_v_start = ((pinfo->v_sync_width + + pinfo->v_back_porch) * catalog->hsync_period); + catalog->display_v_end = ((catalog->vsync_period - + pinfo->v_front_porch) * catalog->hsync_period) - 1; + + catalog->display_v_start += pinfo->h_sync_width + pinfo->h_back_porch; + catalog->display_v_end -= pinfo->h_front_porch; + + hsync_start_x = pinfo->h_back_porch + pinfo->h_sync_width; + 
hsync_end_x = catalog->hsync_period - pinfo->h_front_porch - 1; + + catalog->v_sync_width = pinfo->v_sync_width; + + catalog->hsync_ctl = (catalog->hsync_period << 16) | + pinfo->h_sync_width; + catalog->display_hctl = (hsync_end_x << 16) | hsync_start_x; + + panel->catalog->tpg_config(catalog, true); +} + +static int dp_panel_config_timing(struct dp_panel *dp_panel) +{ + int rc = 0; + u32 data, total_ver, total_hor; + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + struct dp_panel_info *pinfo; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + pinfo = &panel->dp_panel.pinfo; + + DP_DEBUG("width=%d hporch= %d %d %d\n", + pinfo->h_active, pinfo->h_back_porch, + pinfo->h_front_porch, pinfo->h_sync_width); + + DP_DEBUG("height=%d vporch= %d %d %d\n", + pinfo->v_active, pinfo->v_back_porch, + pinfo->v_front_porch, pinfo->v_sync_width); + + total_hor = pinfo->h_active + pinfo->h_back_porch + + pinfo->h_front_porch + pinfo->h_sync_width; + + total_ver = pinfo->v_active + pinfo->v_back_porch + + pinfo->v_front_porch + pinfo->v_sync_width; + + data = total_ver; + data <<= 16; + data |= total_hor; + + catalog->total = data; + + data = (pinfo->v_back_porch + pinfo->v_sync_width); + data <<= 16; + data |= (pinfo->h_back_porch + pinfo->h_sync_width); + + catalog->sync_start = data; + + data = pinfo->v_sync_width; + data <<= 16; + data |= (pinfo->v_active_low << 31); + data |= pinfo->h_sync_width; + data |= (pinfo->h_active_low << 15); + + catalog->width_blanking = data; + + data = pinfo->v_active; + data <<= 16; + data |= pinfo->h_active; + + catalog->dp_active = data; + + catalog->widebus_en = pinfo->widebus_en; + + panel->catalog->timing_cfg(catalog); + panel->panel_on = true; +end: + return rc; +} + +static u32 _dp_panel_calc_be_in_lane(struct dp_panel *dp_panel) +{ + struct dp_panel_info *pinfo; + struct msm_compression_info *comp_info; + u32 dsc_htot_byte_cnt, mod_result; + u32 numerator, denominator; + s64 temp_fp; + u32 be_in_lane = 10; + + pinfo = &dp_panel->pinfo; + comp_info = &pinfo->comp_info; + + if (!dp_panel->mst_state) + return be_in_lane; + + switch (pinfo->comp_info.comp_ratio) { + case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: + denominator = 16; /* 2 * bits-in-byte */ + break; + case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: + denominator = 24; /* 3 * bits-in-byte */ + break; + default: + denominator = 8; /* 1 * bits-in-byte */ + } + + numerator = (pinfo->h_active + pinfo->h_back_porch + + pinfo->h_front_porch + pinfo->h_sync_width) * + pinfo->bpp; + temp_fp = drm_fixp_from_fraction(numerator, denominator); + dsc_htot_byte_cnt = drm_fixp2int_ceil(temp_fp); + + mod_result = dsc_htot_byte_cnt % 12; + if (mod_result == 0) + be_in_lane = 8; + else if (mod_result <= 3) + be_in_lane = 1; + else if (mod_result <= 6) + be_in_lane = 2; + else if (mod_result <= 9) + be_in_lane = 4; + else if (mod_result <= 11) + be_in_lane = 8; + else + be_in_lane = 10; + + return be_in_lane; +} + +static void dp_panel_config_dsc(struct dp_panel *dp_panel, bool enable) +{ + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + struct dp_panel_info *pinfo; + struct msm_compression_info *comp_info; + struct dp_dsc_cfg_data *dsc; + int pps_len; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog = panel->catalog; + dsc = &catalog->dsc; + pinfo = &dp_panel->pinfo; + comp_info = &pinfo->comp_info; + + if 
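
For reference, _dp_panel_calc_be_in_lane above (only exercised in MST mode with compression) boils down to integer arithmetic: the compressed horizontal byte count is ceil(htotal * bpp / (8 * compression_ratio)), and its remainder modulo 12 selects the BE-in-lane value. The driver does this with the DRM 32.32 fixed-point helpers; the sketch below uses plain integer ceiling division and assumed timing numbers purely for illustration.

/* Standalone sketch of the be_in_lane lookup; timings are illustrative only. */
#include <stdio.h>

static unsigned int calc_be_in_lane(unsigned int htotal, unsigned int bpp,
				    unsigned int comp_den /* 8, 16 or 24 */)
{
	unsigned int bytes = (htotal * bpp + comp_den - 1) / comp_den; /* ceil */
	unsigned int mod = bytes % 12;

	if (mod == 0)
		return 8;
	else if (mod <= 3)
		return 1;
	else if (mod <= 6)
		return 2;
	else if (mod <= 9)
		return 4;
	else
		return 8; /* mod 10 or 11 */
}

int main(void)
{
	/* assumed 2:1 DSC (denominator 2 * 8), 2200 total pixels, 24 bpp */
	printf("be_in_lane = %u\n", calc_be_in_lane(2200, 24, 16));
	return 0;
}
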
(comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC && enable) { + pps_len = dp_panel_dsc_create_pps_buf_cmd(&comp_info->dsc_info, + dsc->pps, 0); + dsc->pps_len = pps_len; + dp_panel_dsc_prepare_pps_packet(dp_panel); + + dsc->slice_per_pkt = comp_info->dsc_info.slice_per_pkt - 1; + dsc->bytes_per_pkt = comp_info->dsc_info.bytes_per_pkt; + dsc->bytes_per_pkt /= comp_info->dsc_info.slice_per_pkt; + dsc->eol_byte_num = comp_info->dsc_info.eol_byte_num; + dsc->dto_count = comp_info->dsc_info.pclk_per_line; + dsc->be_in_lane = _dp_panel_calc_be_in_lane(dp_panel); + dsc->dsc_en = true; + dsc->dto_en = true; + + _dp_panel_get_dto_m_n(comp_info->comp_ratio, pinfo->bpp, + &dsc->dto_n, &dsc->dto_d); + } else { + dsc->dsc_en = false; + dsc->dto_en = false; + dsc->dto_n = 0; + dsc->dto_d = 0; + } + + catalog->stream_id = dp_panel->stream_id; + catalog->dsc_cfg(catalog); + + if (catalog->dsc.dsc_en && enable) + catalog->pps_flush(catalog); +} + +static int dp_panel_edid_register(struct dp_panel_private *panel) +{ + int rc = 0; + + panel->dp_panel.edid_ctrl = sde_edid_init(); + if (!panel->dp_panel.edid_ctrl) { + DP_ERR("sde edid init for DP failed\n"); + rc = -ENOMEM; + } + + return rc; +} + +static void dp_panel_edid_deregister(struct dp_panel_private *panel) +{ + sde_edid_deinit((void **)&panel->dp_panel.edid_ctrl); +} + +static int dp_panel_set_stream_info(struct dp_panel *dp_panel, + enum dp_stream_id stream_id, u32 ch_start_slot, + u32 ch_tot_slots, u32 pbn, int vcpi) +{ + if (!dp_panel || stream_id > DP_STREAM_MAX) { + DP_ERR("invalid input. stream_id: %d\n", stream_id); + return -EINVAL; + } + + dp_panel->vcpi = vcpi; + dp_panel->stream_id = stream_id; + dp_panel->channel_start_slot = ch_start_slot; + dp_panel->channel_total_slots = ch_tot_slots; + dp_panel->pbn = pbn; + + return 0; +} + +static int dp_panel_init_panel_info(struct dp_panel *dp_panel) +{ + int rc = 0; + struct dp_panel_private *panel; + struct dp_panel_info *pinfo; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + pinfo = &dp_panel->pinfo; + + drm_dp_dpcd_writeb(panel->aux->drm_aux, DP_SET_POWER, DP_SET_POWER_D3); + /* 200us propagation time for the power down to take effect */ + usleep_range(200, 205); + drm_dp_dpcd_writeb(panel->aux->drm_aux, DP_SET_POWER, DP_SET_POWER_D0); + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). 
+ */ + usleep_range(1000, 2000); +end: + return rc; +} + +static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel, u32 flags) +{ + int rc = 0; + struct dp_panel_private *panel; + struct drm_msm_ext_hdr_metadata *hdr_meta; + struct dp_sdp_header *dhdr_vsif_sdp; + struct sde_connector *sde_conn; + struct dp_sdp_header *shdr_if_sdp; + struct dp_catalog_vsc_sdp_colorimetry *vsc_colorimetry; + struct drm_connector *connector; + struct sde_connector_state *c_state; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) { + DP_DEBUG("retain states in src initiated power down request\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + hdr_meta = &panel->catalog->hdr_meta; + dhdr_vsif_sdp = &panel->catalog->dhdr_vsif_sdp; + shdr_if_sdp = &panel->catalog->shdr_if_sdp; + vsc_colorimetry = &panel->catalog->vsc_colorimetry; + + if (!panel->custom_edid && dp_panel->edid_ctrl->edid) + sde_free_edid((void **)&dp_panel->edid_ctrl); + + dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0); + memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo)); + memset(hdr_meta, 0, sizeof(struct drm_msm_ext_hdr_metadata)); + memset(dhdr_vsif_sdp, 0, sizeof(struct dp_sdp_header)); + memset(shdr_if_sdp, 0, sizeof(struct dp_sdp_header)); + memset(vsc_colorimetry, 0, + sizeof(struct dp_catalog_vsc_sdp_colorimetry)); + + panel->panel_on = false; + + connector = dp_panel->connector; + sde_conn = to_sde_connector(connector); + c_state = to_sde_connector_state(connector->state); + + connector->hdr_eotf = 0; + connector->hdr_metadata_type_one = 0; + connector->hdr_max_luminance = 0; + connector->hdr_avg_luminance = 0; + connector->hdr_min_luminance = 0; + connector->hdr_supported = false; + connector->hdr_plus_app_ver = 0; + + sde_conn->colorspace_updated = false; + + memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta)); + memset(&c_state->dyn_hdr_meta, 0, sizeof(c_state->dyn_hdr_meta)); + + return rc; +} + +static bool dp_panel_hdr_supported(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return false; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + return panel->major >= 1 && panel->vsc_supported && + (panel->minor >= 4 || panel->vscext_supported); +} + +static u32 dp_panel_calc_dhdr_pkt_limit(struct dp_panel *dp_panel, + struct dp_dhdr_maxpkt_calc_input *input) +{ + s64 mdpclk_fp = drm_fixp_from_fraction(input->mdp_clk, 1000000); + s64 lclk_fp = drm_fixp_from_fraction(input->lclk, 1000); + s64 pclk_fp = drm_fixp_from_fraction(input->pclk, 1000); + s64 nlanes_fp = drm_int2fixp(input->nlanes); + s64 target_sc = input->mst_target_sc; + s64 hactive_fp = drm_int2fixp(input->h_active); + const s64 i1_fp = DRM_FIXED_ONE; + const s64 i2_fp = drm_int2fixp(2); + const s64 i10_fp = drm_int2fixp(10); + const s64 i56_fp = drm_int2fixp(56); + const s64 i64_fp = drm_int2fixp(64); + s64 mst_bw_fp = i1_fp; + s64 fec_factor_fp = i1_fp; + s64 mst_bw64_fp, mst_bw64_ceil_fp, nlanes56_fp; + u32 f1, f2, f3, f4, f5, deploy_period, target_period; + s64 f3_f5_slot_fp; + u32 calc_pkt_limit; + const u32 max_pkt_limit = 64; + + if (input->fec_en && input->mst_en) + fec_factor_fp = drm_fixp_from_fraction(64000, 65537); + + if (input->mst_en) + mst_bw_fp = drm_fixp_div(target_sc, i64_fp); + + f1 = drm_fixp2int_ceil(drm_fixp_div(drm_fixp_mul(i10_fp, lclk_fp), + mdpclk_fp)); + f2 = drm_fixp2int_ceil(drm_fixp_div(drm_fixp_mul(i2_fp, 
lclk_fp), + mdpclk_fp)) + drm_fixp2int_ceil(drm_fixp_div( + drm_fixp_mul(i1_fp, lclk_fp), mdpclk_fp)); + + mst_bw64_fp = drm_fixp_mul(mst_bw_fp, i64_fp); + if (drm_fixp2int(mst_bw64_fp) == 0) + f3_f5_slot_fp = drm_fixp_div(i1_fp, drm_int2fixp( + drm_fixp2int_ceil(drm_fixp_div( + i1_fp, mst_bw64_fp)))); + else + f3_f5_slot_fp = drm_int2fixp(drm_fixp2int(mst_bw_fp)); + + mst_bw64_ceil_fp = drm_int2fixp(drm_fixp2int_ceil(mst_bw64_fp)); + f3 = drm_fixp2int(drm_fixp_mul(drm_int2fixp(drm_fixp2int( + drm_fixp_div(i2_fp, f3_f5_slot_fp)) + 1), + (i64_fp - mst_bw64_ceil_fp))) + 2; + + if (!input->mst_en) { + f4 = 1 + drm_fixp2int(drm_fixp_div(drm_int2fixp(50), + nlanes_fp)) + drm_fixp2int(drm_fixp_div( + nlanes_fp, i2_fp)); + f5 = 0; + } else { + f4 = 0; + nlanes56_fp = drm_fixp_div(i56_fp, nlanes_fp); + f5 = drm_fixp2int(drm_fixp_mul(drm_int2fixp(drm_fixp2int( + drm_fixp_div(i1_fp + nlanes56_fp, + f3_f5_slot_fp)) + 1), (i64_fp - + mst_bw64_ceil_fp + i1_fp + nlanes56_fp))); + } + + deploy_period = f1 + f2 + f3 + f4 + f5 + 19; + target_period = drm_fixp2int(drm_fixp_mul(fec_factor_fp, drm_fixp_mul( + hactive_fp, drm_fixp_div(lclk_fp, pclk_fp)))); + + calc_pkt_limit = target_period / deploy_period; + + DP_DEBUG("input: %d, %d, %d, %d, %d, 0x%llx, %d, %d\n", + input->mdp_clk, input->lclk, input->pclk, input->h_active, + input->nlanes, input->mst_target_sc, input->mst_en ? 1 : 0, + input->fec_en ? 1 : 0); + DP_DEBUG("factors: %d, %d, %d, %d, %d\n", f1, f2, f3, f4, f5); + DP_DEBUG("d_p: %d, t_p: %d, maxPkts: %d%s\n", deploy_period, + target_period, calc_pkt_limit, calc_pkt_limit > max_pkt_limit ? + " CAPPED" : ""); + + if (calc_pkt_limit > max_pkt_limit) + calc_pkt_limit = max_pkt_limit; + + DP_DEBUG("packet limit per line = %d\n", calc_pkt_limit); + return calc_pkt_limit; +} + +static void dp_panel_setup_colorimetry_sdp(struct dp_panel *dp_panel, + u32 cspace) +{ + struct dp_panel_private *panel; + struct dp_catalog_vsc_sdp_colorimetry *hdr_colorimetry; + u8 bpc; + u32 colorimetry = 0; + u32 dynamic_range = 0; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + hdr_colorimetry = &panel->catalog->vsc_colorimetry; + + hdr_colorimetry->header.HB0 = 0x00; + hdr_colorimetry->header.HB1 = 0x07; + hdr_colorimetry->header.HB2 = 0x05; + hdr_colorimetry->header.HB3 = 0x13; + + get_sdp_colorimetry_range(panel, cspace, &colorimetry, + &dynamic_range); + + /* VSC SDP Payload for DB16 */ + hdr_colorimetry->data[16] = (RGB << 4) | colorimetry; + + /* VSC SDP Payload for DB17 */ + hdr_colorimetry->data[17] = (dynamic_range << 7); + bpc = (dp_panel->pinfo.bpp / 3); + + switch (bpc) { + default: + case 10: + hdr_colorimetry->data[17] |= BIT(1); + break; + case 8: + hdr_colorimetry->data[17] |= BIT(0); + break; + case 6: + hdr_colorimetry->data[17] |= 0; + break; + } + + /* VSC SDP Payload for DB18 */ + hdr_colorimetry->data[18] = GRAPHICS; +} + +static void dp_panel_setup_hdr_if(struct dp_panel_private *panel) +{ + struct dp_sdp_header *shdr_if; + + shdr_if = &panel->catalog->shdr_if_sdp; + + shdr_if->HB0 = 0x00; + shdr_if->HB1 = 0x87; + shdr_if->HB2 = 0x1D; + shdr_if->HB3 = 0x13 << 2; +} + +static void dp_panel_setup_dhdr_vsif(struct dp_panel_private *panel) +{ + struct dp_sdp_header *dhdr_vsif; + + dhdr_vsif = &panel->catalog->dhdr_vsif_sdp; + + dhdr_vsif->HB0 = 0x00; + dhdr_vsif->HB1 = 0x81; + dhdr_vsif->HB2 = 0x1D; + dhdr_vsif->HB3 = 0x13 << 2; +} + +static void dp_panel_setup_misc_colorimetry(struct dp_panel *dp_panel, + u32 colorspace) +{ + struct dp_panel_private *panel; + struct 
dp_catalog_panel *catalog; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + catalog->misc_val &= ~0x1e; + + catalog->misc_val |= (get_misc_colorimetry_val(panel, + colorspace) << 1); +} + +static int dp_panel_set_colorspace(struct dp_panel *dp_panel, + u32 colorspace) +{ + int rc = 0; + struct dp_panel_private *panel; + + if (!dp_panel) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (panel->vsc_supported) + dp_panel_setup_colorimetry_sdp(dp_panel, + colorspace); + else + dp_panel_setup_misc_colorimetry(dp_panel, + colorspace); + + /* + * During the first frame update panel_on will be false and + * the colorspace will be cached in the connector's state which + * shall be used in the dp_panel_hw_cfg + */ + if (panel->panel_on) { + DP_DEBUG("panel is ON programming colorspace\n"); + rc = panel->catalog->set_colorspace(panel->catalog, + panel->vsc_supported); + } + +end: + return rc; +} + +static int dp_panel_setup_hdr(struct dp_panel *dp_panel, + struct drm_msm_ext_hdr_metadata *hdr_meta, + bool dhdr_update, u64 core_clk_rate, bool flush) +{ + int rc = 0, max_pkts = 0; + struct dp_panel_private *panel; + struct dp_dhdr_maxpkt_calc_input input; + struct drm_msm_ext_hdr_metadata *catalog_hdr_meta; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog_hdr_meta = &panel->catalog->hdr_meta; + + /* use cached meta data in case meta data not provided */ + if (!hdr_meta) { + if (catalog_hdr_meta->hdr_state) + goto cached; + else + goto end; + } + + panel->hdr_state = hdr_meta->hdr_state; + + dp_panel_setup_hdr_if(panel); + + if (panel->hdr_state) { + memcpy(catalog_hdr_meta, hdr_meta, + sizeof(struct drm_msm_ext_hdr_metadata)); + } else { + memset(catalog_hdr_meta, 0, + sizeof(struct drm_msm_ext_hdr_metadata)); + } +cached: + if (dhdr_update) { + dp_panel_setup_dhdr_vsif(panel); + + input.mdp_clk = core_clk_rate; + input.lclk = drm_dp_bw_code_to_link_rate( + panel->link->link_params.bw_code); + input.nlanes = panel->link->link_params.lane_count; + input.pclk = dp_panel->pinfo.pixel_clk_khz; + input.h_active = dp_panel->pinfo.h_active; + input.mst_target_sc = dp_panel->mst_target_sc; + input.mst_en = dp_panel->mst_state; + input.fec_en = dp_panel->fec_en; + max_pkts = dp_panel_calc_dhdr_pkt_limit(dp_panel, &input); + } + + if (panel->panel_on) { + panel->catalog->stream_id = dp_panel->stream_id; + panel->catalog->config_hdr(panel->catalog, panel->hdr_state, + max_pkts, flush); + if (dhdr_update) + panel->catalog->dhdr_flush(panel->catalog); + } +end: + return rc; +} + +static int dp_panel_spd_config(struct dp_panel *dp_panel) +{ + int rc = 0; + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + if (dp_panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", dp_panel->stream_id); + return -EINVAL; + } + + if (!dp_panel->spd_enabled) { + DP_DEBUG("SPD Infoframe not enabled\n"); + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + panel->catalog->spd_vendor_name = panel->spd_vendor_name; + panel->catalog->spd_product_description = + panel->spd_product_description; + + panel->catalog->stream_id = dp_panel->stream_id; + panel->catalog->config_spd(panel->catalog); +end: + return rc; +} + +static void dp_panel_config_ctrl(struct 
dp_panel *dp_panel) +{ + u32 config = 0, tbd; + u8 *dpcd = dp_panel->dpcd; + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */ + config |= (0 << 11); /* RGB */ + + tbd = panel->link->get_test_bits_depth(panel->link, + dp_panel->pinfo.bpp); + + if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) + tbd = DP_TEST_BIT_DEPTH_8; + + config |= tbd << 8; + + /* Num of Lanes */ + config |= ((panel->link->link_params.lane_count - 1) << 4); + + if (drm_dp_enhanced_frame_cap(dpcd)) + config |= 0x40; + + config |= 0x04; /* progressive video */ + + config |= 0x03; /* sycn clock & static Mvid */ + + catalog->config_ctrl(catalog, config); +} + +static void dp_panel_config_misc(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + struct drm_connector *connector; + u32 misc_val; + u32 tb, cc, colorspace; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + connector = dp_panel->connector; + cc = 0; + + tb = panel->link->get_test_bits_depth(panel->link, dp_panel->pinfo.bpp); + colorspace = connector->state->colorspace; + + + cc = (get_misc_colorimetry_val(panel, colorspace) << 1); + + misc_val = cc; + misc_val |= (tb << 5); + misc_val |= BIT(0); /* Configure clock to synchronous mode */ + + /* if VSC is supported then set bit 6 of MISC1 */ + if (panel->vsc_supported) + misc_val |= BIT(14); + + catalog->misc_val = misc_val; + catalog->config_misc(catalog); +} + +static void dp_panel_config_msa(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + u32 rate; + u32 stream_rate_khz; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + catalog->widebus_en = dp_panel->widebus_en; + + rate = drm_dp_bw_code_to_link_rate(panel->link->link_params.bw_code); + stream_rate_khz = dp_panel->pinfo.pixel_clk_khz; + + catalog->config_msa(catalog, rate, stream_rate_khz); +} + +static void dp_panel_resolution_info(struct dp_panel_private *panel) +{ + struct dp_panel_info *pinfo = &panel->dp_panel.pinfo; + + /* + * print resolution info as this is a result + * of user initiated action of cable connection + */ + DP_INFO("DP RESOLUTION: active(back|front|width|low)\n"); + DP_INFO("%d(%d|%d|%d|%d)x%d(%d|%d|%d|%d)@%dfps %dbpp %dKhz %dLR %dLn\n", + pinfo->h_active, pinfo->h_back_porch, pinfo->h_front_porch, + pinfo->h_sync_width, pinfo->h_active_low, + pinfo->v_active, pinfo->v_back_porch, pinfo->v_front_porch, + pinfo->v_sync_width, pinfo->v_active_low, + pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz, + panel->link->link_params.bw_code, + panel->link->link_params.lane_count); +} + +static void dp_panel_config_sdp(struct dp_panel *dp_panel, + bool en) +{ + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel->catalog->stream_id = dp_panel->stream_id; + + panel->catalog->config_sdp(panel->catalog, en); +} + +static int dp_panel_hw_cfg(struct dp_panel *dp_panel, bool enable) +{ + struct dp_panel_private *panel; + struct drm_connector *connector; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (dp_panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id: %d\n", dp_panel->stream_id); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + 
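
To make the register packing in dp_panel_config_misc above easier to follow, the sketch below assembles the same 16-bit MISC word from placeholder inputs. The colorimetry and bit-depth codes normally come from get_misc_colorimetry_val() and get_test_bits_depth(), which are outside this hunk, so the values used here are assumptions for illustration only.

/* Sketch of the MISC0/MISC1 packing done in dp_panel_config_misc above. */
#include <stdio.h>

int main(void)
{
	unsigned int cc = 0;           /* assumed colorimetry code */
	unsigned int tb = 1;           /* assumed bit-depth code */
	int vsc_supported = 1;         /* assumed sink VSC support */
	unsigned int misc_val;

	misc_val  = cc << 1;           /* MISC0 colorimetry/format field */
	misc_val |= tb << 5;           /* MISC0 bit-depth field */
	misc_val |= 1u << 0;           /* MISC0[0]: synchronous clock */
	if (vsc_supported)
		misc_val |= 1u << 14;  /* MISC1[6]: format carried in VSC SDP */

	printf("misc_val = 0x%04x\n", misc_val);
	return 0;
}
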
panel->catalog->stream_id = dp_panel->stream_id; + connector = dp_panel->connector; + + if (enable) { + dp_panel_config_ctrl(dp_panel); + dp_panel_config_misc(dp_panel); + dp_panel_config_msa(dp_panel); + if (panel->vsc_supported) { + dp_panel_setup_colorimetry_sdp(dp_panel, + connector->state->colorspace); + dp_panel_config_sdp(dp_panel, true); + } + dp_panel_config_dsc(dp_panel, enable); + dp_panel_config_tr_unit(dp_panel); + dp_panel_config_timing(dp_panel); + dp_panel_resolution_info(panel); + } else { + dp_panel_config_sdp(dp_panel, false); + } + + panel->catalog->config_dto(panel->catalog, !enable); + + return 0; +} + +static int dp_panel_read_sink_sts(struct dp_panel *dp_panel, u8 *sts, u32 size) +{ + int rlen, rc = 0; + struct dp_panel_private *panel; + + if (!dp_panel || !sts || !size) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + return rc; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT_ESI, + sts, size); + if (rlen != size) { + DP_ERR("dpcd sink sts fail rlen:%d size:%d\n", rlen, size); + rc = -EINVAL; + return rc; + } + + return 0; +} + +static int dp_panel_update_edid(struct dp_panel *dp_panel, struct edid *edid) +{ + int rc; + + dp_panel->edid_ctrl->edid = edid; + sde_parse_edid(dp_panel->edid_ctrl); + + rc = _sde_edid_update_modes(dp_panel->connector, dp_panel->edid_ctrl); + dp_panel->audio_supported = drm_detect_monitor_audio(edid); + + return rc; +} + +static bool dp_panel_read_mst_cap(struct dp_panel *dp_panel) +{ + int rlen; + struct dp_panel_private *panel; + u8 dpcd; + bool mst_cap = false; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_MSTM_CAP, + &dpcd, 1); + if (rlen < 1) { + DP_ERR("dpcd mstm_cap read failed, rlen=%d\n", rlen); + goto end; + } + + mst_cap = (dpcd & DP_MST_CAP) ? true : false; + +end: + DP_DEBUG("dp mst-cap: %d\n", mst_cap); + + return mst_cap; +} + +static void dp_panel_convert_to_dp_mode(struct dp_panel *dp_panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode) +{ + const u32 num_components = 3, default_bpp = 24; + struct msm_compression_info *comp_info; + bool dsc_cap = (dp_mode->capabilities & DP_PANEL_CAPS_DSC) ? 
+ true : false; + + dp_mode->timing.h_active = drm_mode->hdisplay; + dp_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end; + dp_mode->timing.h_sync_width = drm_mode->htotal - + (drm_mode->hsync_start + dp_mode->timing.h_back_porch); + dp_mode->timing.h_front_porch = drm_mode->hsync_start - + drm_mode->hdisplay; + dp_mode->timing.h_skew = drm_mode->hskew; + + dp_mode->timing.v_active = drm_mode->vdisplay; + dp_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end; + dp_mode->timing.v_sync_width = drm_mode->vtotal - + (drm_mode->vsync_start + dp_mode->timing.v_back_porch); + + dp_mode->timing.v_front_porch = drm_mode->vsync_start - + drm_mode->vdisplay; + + dp_mode->timing.refresh_rate = drm_mode->vrefresh; + + dp_mode->timing.pixel_clk_khz = drm_mode->clock; + + dp_mode->timing.v_active_low = + !!(drm_mode->flags & DRM_MODE_FLAG_NVSYNC); + + dp_mode->timing.h_active_low = + !!(drm_mode->flags & DRM_MODE_FLAG_NHSYNC); + + dp_mode->timing.bpp = + dp_panel->connector->display_info.bpc * num_components; + if (!dp_mode->timing.bpp) + dp_mode->timing.bpp = default_bpp; + + dp_mode->timing.bpp = dp_panel_get_mode_bpp(dp_panel, + dp_mode->timing.bpp, dp_mode->timing.pixel_clk_khz); + + dp_mode->timing.widebus_en = dp_panel->widebus_en; + dp_mode->timing.dsc_overhead_fp = 0; + + if (dp_panel->dsc_en && dsc_cap) { + comp_info = &dp_mode->timing.comp_info; + + if (dp_panel_dsc_prepare_basic_params(comp_info, + dp_mode, dp_panel)) { + DP_DEBUG("prepare DSC basic params failed\n"); + return; + } + + dp_panel_dsc_populate_static_params(&comp_info->dsc_info, + dp_panel); + dp_panel_dsc_pclk_param_calc(dp_panel, + &comp_info->dsc_info, + comp_info->comp_ratio, + dp_mode); + } + dp_mode->fec_overhead_fp = dp_panel->fec_overhead_fp; +} + +static void dp_panel_update_pps(struct dp_panel *dp_panel, char *pps_cmd) +{ + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog = panel->catalog; + catalog->stream_id = dp_panel->stream_id; + catalog->pps_flush(catalog); +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in) +{ + int rc = 0; + struct dp_panel_private *panel; + struct dp_panel *dp_panel; + struct sde_connector *sde_conn; + + if (!in->dev || !in->catalog || !in->aux || + !in->link || !in->connector) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL); + if (!panel) { + rc = -ENOMEM; + goto error; + } + + panel->dev = in->dev; + panel->aux = in->aux; + panel->catalog = in->catalog; + panel->link = in->link; + panel->parser = in->parser; + + dp_panel = &panel->dp_panel; + dp_panel->max_bw_code = DP_LINK_BW_8_1; + dp_panel->spd_enabled = true; + memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8)); + memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16)); + dp_panel->connector = in->connector; + + dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable; + dp_panel->fec_feature_enable = panel->parser->fec_feature_enable; + + if (in->base_panel) { + memcpy(dp_panel->dpcd, in->base_panel->dpcd, + DP_RECEIVER_CAP_SIZE + 1); + memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd, + DP_RECEIVER_DSC_CAP_SIZE + 1); + memcpy(&dp_panel->link_info, &in->base_panel->link_info, + sizeof(dp_panel->link_info)); + dp_panel->mst_state = in->base_panel->mst_state; + dp_panel->widebus_en = in->base_panel->widebus_en; + dp_panel->fec_en = in->base_panel->fec_en; + dp_panel->dsc_en = 
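
The porch math in dp_panel_convert_to_dp_mode above derives back porch, sync width and front porch from the DRM mode's hdisplay/hsync_start/hsync_end/htotal markers (and applies the same pattern vertically). The sketch below runs that conversion on an assumed CEA 1080p60 mode line so the relationships can be checked by hand.

/* Sketch of the horizontal porch derivation, using an assumed mode line
 * 1920 2008 2052 2200 (CEA 1080p60).
 */
#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920, hsync_start = 2008,
		     hsync_end = 2052, htotal = 2200;

	unsigned int h_back_porch  = htotal - hsync_end;                    /* 148 */
	unsigned int h_sync_width  = htotal - (hsync_start + h_back_porch); /* 44  */
	unsigned int h_front_porch = hsync_start - hdisplay;                /* 88  */

	printf("bp=%u sync=%u fp=%u\n",
	       h_back_porch, h_sync_width, h_front_porch);
	return 0;
}
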
in->base_panel->dsc_en; + dp_panel->fec_overhead_fp = in->base_panel->fec_overhead_fp; + } + + dp_panel->init = dp_panel_init_panel_info; + dp_panel->deinit = dp_panel_deinit_panel_info; + dp_panel->hw_cfg = dp_panel_hw_cfg; + dp_panel->read_sink_caps = dp_panel_read_sink_caps; + dp_panel->get_mode_bpp = dp_panel_get_mode_bpp; + dp_panel->get_modes = dp_panel_get_modes; + dp_panel->handle_sink_request = dp_panel_handle_sink_request; + dp_panel->set_edid = dp_panel_set_edid; + dp_panel->set_dpcd = dp_panel_set_dpcd; + dp_panel->tpg_config = dp_panel_tpg_config; + dp_panel->spd_config = dp_panel_spd_config; + dp_panel->setup_hdr = dp_panel_setup_hdr; + dp_panel->set_colorspace = dp_panel_set_colorspace; + dp_panel->hdr_supported = dp_panel_hdr_supported; + dp_panel->set_stream_info = dp_panel_set_stream_info; + dp_panel->read_sink_status = dp_panel_read_sink_sts; + dp_panel->update_edid = dp_panel_update_edid; + dp_panel->read_mst_cap = dp_panel_read_mst_cap; + dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode; + dp_panel->update_pps = dp_panel_update_pps; + + sde_conn = to_sde_connector(dp_panel->connector); + sde_conn->drv_panel = dp_panel; + + dp_panel_edid_register(panel); + + return dp_panel; +error: + return ERR_PTR(rc); +} + +void dp_panel_put(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct sde_connector *sde_conn; + + if (!dp_panel) + return; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + dp_panel_edid_deregister(panel); + sde_conn = to_sde_connector(dp_panel->connector); + if (sde_conn) + sde_conn->drv_panel = NULL; + + devm_kfree(panel->dev, panel); +} diff --git a/techpack/display/msm/dp/dp_panel.h b/techpack/display/msm/dp/dp_panel.h new file mode 100644 index 0000000000000000000000000000000000000000..36629c3c36a7cc7f2272bd7c1bea94ec2e2bd6b7 --- /dev/null +++ b/techpack/display/msm/dp/dp_panel.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_PANEL_H_ +#define _DP_PANEL_H_ + +#include + +#include "dp_aux.h" +#include "dp_link.h" +#include "dp_usbpd.h" +#include "sde_edid_parser.h" +#include "sde_connector.h" +#include "msm_drv.h" + +#define DP_RECEIVER_DSC_CAP_SIZE 15 +#define DP_RECEIVER_FEC_STATUS_SIZE 3 +#define DP_RECEIVER_EXT_CAP_SIZE 4 +/* + * A source initiated power down flag is set + * when the DP is powered off while physical + * DP cable is still connected i.e. without + * HPD or not initiated by sink like HPD_IRQ. + * This can happen if framework reboots or + * device suspends. 
+ */ +#define DP_PANEL_SRC_INITIATED_POWER_DOWN BIT(0) + +#define DP_EXT_REC_CAP_FIELD BIT(7) + +enum dp_lane_count { + DP_LANE_COUNT_1 = 1, + DP_LANE_COUNT_2 = 2, + DP_LANE_COUNT_4 = 4, +}; + +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +struct dp_panel_info { + u32 h_active; + u32 v_active; + u32 h_back_porch; + u32 h_front_porch; + u32 h_sync_width; + u32 h_active_low; + u32 v_back_porch; + u32 v_front_porch; + u32 v_sync_width; + u32 v_active_low; + u32 h_skew; + u32 refresh_rate; + u32 pixel_clk_khz; + u32 bpp; + bool widebus_en; + struct msm_compression_info comp_info; + s64 dsc_overhead_fp; +}; + +struct dp_display_mode { + struct dp_panel_info timing; + u32 capabilities; + s64 fec_overhead_fp; + s64 dsc_overhead_fp; +}; + +struct dp_panel; + +struct dp_panel_in { + struct device *dev; + struct dp_aux *aux; + struct dp_link *link; + struct dp_catalog_panel *catalog; + struct drm_connector *connector; + struct dp_panel *base_panel; + struct dp_parser *parser; +}; + +struct dp_dsc_caps { + bool dsc_capable; + u8 version; + bool block_pred_en; + u8 color_depth; +}; + +struct dp_audio; + +#define DP_PANEL_CAPS_DSC BIT(0) + +struct dp_panel { + /* dpcd raw data */ + u8 dpcd[DP_RECEIVER_CAP_SIZE + DP_RECEIVER_EXT_CAP_SIZE + 1]; + u8 ds_ports[DP_MAX_DOWNSTREAM_PORTS]; + u8 dsc_dpcd[DP_RECEIVER_DSC_CAP_SIZE + 1]; + u8 fec_dpcd; + u8 fec_sts_dpcd[DP_RECEIVER_FEC_STATUS_SIZE + 1]; + + struct drm_dp_link link_info; + struct sde_edid_ctrl *edid_ctrl; + struct dp_panel_info pinfo; + bool video_test; + bool spd_enabled; + + u32 vic; + u32 max_pclk_khz; + s64 mst_target_sc; + + /* debug */ + u32 max_bw_code; + + /* By default, stream_id is assigned to DP_INVALID_STREAM. + * Client sets the stream id value using set_stream_id interface. + */ + enum dp_stream_id stream_id; + int vcpi; + + u32 channel_start_slot; + u32 channel_total_slots; + u32 pbn; + + u32 tot_dsc_blks_in_use; + /* DRM connector assosiated with this panel */ + struct drm_connector *connector; + + struct dp_audio *audio; + bool audio_supported; + + struct dp_dsc_caps sink_dsc_caps; + bool dsc_feature_enable; + bool fec_feature_enable; + bool dsc_en; + bool fec_en; + bool widebus_en; + bool mst_state; + + s64 fec_overhead_fp; + + int (*init)(struct dp_panel *dp_panel); + int (*deinit)(struct dp_panel *dp_panel, u32 flags); + int (*hw_cfg)(struct dp_panel *dp_panel, bool enable); + int (*read_sink_caps)(struct dp_panel *dp_panel, + struct drm_connector *connector, bool multi_func); + u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp, + u32 mode_pclk_khz); + int (*get_modes)(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode); + void (*handle_sink_request)(struct dp_panel *dp_panel); + int (*set_edid)(struct dp_panel *dp_panel, u8 *edid, size_t edid_size); + int (*set_dpcd)(struct dp_panel *dp_panel, u8 *dpcd); + int (*setup_hdr)(struct dp_panel *dp_panel, + struct drm_msm_ext_hdr_metadata *hdr_meta, + bool dhdr_update, u64 core_clk_rate, bool flush); + int (*set_colorspace)(struct dp_panel *dp_panel, + u32 colorspace); + void (*tpg_config)(struct dp_panel *dp_panel, bool enable); + int (*spd_config)(struct dp_panel *dp_panel); + bool (*hdr_supported)(struct dp_panel *dp_panel); + + int (*set_stream_info)(struct dp_panel *dp_panel, + enum dp_stream_id stream_id, u32 ch_start_slot, + u32 ch_tot_slots, u32 pbn, int vcpi); + + int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size); + int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid); + bool 
(*read_mst_cap)(struct dp_panel *dp_panel); + void (*convert_to_dp_mode)(struct dp_panel *dp_panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode); + void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd); +}; + +struct dp_tu_calc_input { + u64 lclk; /* 162, 270, 540 and 810 */ + u64 pclk_khz; /* in KHz */ + u64 hactive; /* active h-width */ + u64 hporch; /* bp + fp + pulse */ + int nlanes; /* no.of.lanes */ + int bpp; /* bits */ + int pixel_enc; /* 444, 420, 422 */ + int dsc_en; /* dsc on/off */ + int async_en; /* async mode */ + int fec_en; /* fec */ + int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */ + int num_of_dsc_slices; /* number of slices per line */ +}; + +struct dp_vc_tu_mapping_table { + u32 vic; + u8 lanes; + u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ + u8 bpp; + u32 valid_boundary_link; + u32 delay_start_link; + bool boundary_moderation_en; + u32 valid_lower_boundary_link; + u32 upper_boundary_count; + u32 lower_boundary_count; + u32 tu_size_minus1; +}; + +/** + * is_link_rate_valid() - validates the link rate + * @lane_rate: link rate requested by the sink + * + * Returns true if the requested link rate is supported. + */ +static inline bool is_link_rate_valid(u32 bw_code) +{ + return ((bw_code == DP_LINK_BW_1_62) || + (bw_code == DP_LINK_BW_2_7) || + (bw_code == DP_LINK_BW_5_4) || + (bw_code == DP_LINK_BW_8_1)); +} + +/** + * dp_link_is_lane_count_valid() - validates the lane count + * @lane_count: lane count requested by the sink + * + * Returns true if the requested lane count is supported. + */ +static inline bool is_lane_count_valid(u32 lane_count) +{ + return (lane_count == DP_LANE_COUNT_1) || + (lane_count == DP_LANE_COUNT_2) || + (lane_count == DP_LANE_COUNT_4); +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in); +void dp_panel_put(struct dp_panel *dp_panel); +void dp_panel_calc_tu_test(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table); +#endif /* _DP_PANEL_H_ */ diff --git a/techpack/display/msm/dp/dp_parser.c b/techpack/display/msm/dp/dp_parser.c new file mode 100644 index 0000000000000000000000000000000000000000..e33f781404a12b08c347f316a26424de2f3ca846 --- /dev/null +++ b/techpack/display/msm/dp/dp_parser.c @@ -0,0 +1,951 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include + +#include "dp_parser.h" +#include "dp_debug.h" + +static void dp_parser_unmap_io_resources(struct dp_parser *parser) +{ + int i = 0; + struct dp_io *io = &parser->io; + + for (i = 0; i < io->len; i++) + msm_dss_iounmap(&io->data[i].io); +} + +static int dp_parser_reg(struct dp_parser *parser) +{ + int rc = 0, i = 0; + u32 reg_count; + struct platform_device *pdev = parser->pdev; + struct dp_io *io = &parser->io; + struct device *dev = &pdev->dev; + + reg_count = of_property_count_strings(dev->of_node, "reg-names"); + if (reg_count <= 0) { + DP_ERR("no reg defined\n"); + return -EINVAL; + } + + io->len = reg_count; + io->data = devm_kzalloc(dev, sizeof(struct dp_io_data) * reg_count, + GFP_KERNEL); + if (!io->data) + return -ENOMEM; + + for (i = 0; i < reg_count; i++) { + of_property_read_string_index(dev->of_node, + "reg-names", i, &io->data[i].name); + rc = msm_dss_ioremap_byname(pdev, &io->data[i].io, + io->data[i].name); + if (rc) { + DP_ERR("unable to remap %s resources\n", + io->data[i].name); + goto err; + } + } + + return 0; +err: + dp_parser_unmap_io_resources(parser); + return rc; +} + +static const char *dp_get_phy_aux_config_property(u32 cfg_type) +{ + switch (cfg_type) { + case PHY_AUX_CFG0: + return "qcom,aux-cfg0-settings"; + case PHY_AUX_CFG1: + return "qcom,aux-cfg1-settings"; + case PHY_AUX_CFG2: + return "qcom,aux-cfg2-settings"; + case PHY_AUX_CFG3: + return "qcom,aux-cfg3-settings"; + case PHY_AUX_CFG4: + return "qcom,aux-cfg4-settings"; + case PHY_AUX_CFG5: + return "qcom,aux-cfg5-settings"; + case PHY_AUX_CFG6: + return "qcom,aux-cfg6-settings"; + case PHY_AUX_CFG7: + return "qcom,aux-cfg7-settings"; + case PHY_AUX_CFG8: + return "qcom,aux-cfg8-settings"; + case PHY_AUX_CFG9: + return "qcom,aux-cfg9-settings"; + default: + return "unknown"; + } +} + +static void dp_parser_phy_aux_cfg_reset(struct dp_parser *parser) +{ + int i = 0; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + parser->aux_cfg[i] = (const struct dp_aux_cfg){ 0 }; +} + +static int dp_parser_aux(struct dp_parser *parser) +{ + struct device_node *of_node = parser->pdev->dev.of_node; + int len = 0, i = 0, j = 0, config_count = 0; + const char *data; + int const minimum_config_count = 1; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) { + const char *property = dp_get_phy_aux_config_property(i); + + data = of_get_property(of_node, property, &len); + if (!data) { + DP_ERR("Unable to read %s\n", property); + goto error; + } + + config_count = len - 1; + if ((config_count < minimum_config_count) || + (config_count > DP_AUX_CFG_MAX_VALUE_CNT)) { + DP_ERR("Invalid config count (%d) configs for %s\n", + config_count, property); + goto error; + } + + parser->aux_cfg[i].offset = data[0]; + parser->aux_cfg[i].cfg_cnt = config_count; + DP_DEBUG("%s offset=0x%x, cfg_cnt=%d\n", + property, + parser->aux_cfg[i].offset, + parser->aux_cfg[i].cfg_cnt); + for (j = 1; j < len; j++) { + parser->aux_cfg[i].lut[j - 1] = data[j]; + DP_DEBUG("%s lut[%d]=0x%x\n", + property, + i, + parser->aux_cfg[i].lut[j - 1]); + } + } + return 0; + +error: + dp_parser_phy_aux_cfg_reset(parser); + return -EINVAL; +} + +static int dp_parser_misc(struct dp_parser *parser) +{ + int rc = 0, len = 0, i = 0; + const char *data = NULL; + + struct device_node *of_node = parser->pdev->dev.of_node; + + data = of_get_property(of_node, "qcom,logical2physical-lane-map", &len); + if (data && (len == DP_MAX_PHY_LN)) { + for (i = 0; i < len; i++) + parser->l_map[i] = data[i]; + } + + data = of_get_property(of_node, "qcom,pn-swap-lane-map", 
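
dp_parser_aux above treats each qcom,aux-cfgN-settings property as a byte array whose first byte is the PHY AUX register offset and whose remaining bytes (at least one, at most DP_AUX_CFG_MAX_VALUE_CNT) form a lookup table of candidate values. The sketch below decodes a hypothetical byte array in the same way; the bytes are made up for illustration and are not taken from any board file in this patch.

/* Sketch of how one aux-cfg byte array is interpreted by dp_parser_aux above.
 * A hypothetical DT entry such as
 *   qcom,aux-cfg1-settings = [13 13 a4 24 00];
 * would yield offset 0x13 and a 4-entry value LUT.
 */
#include <stdio.h>

int main(void)
{
	const unsigned char prop[] = { 0x13, 0x13, 0xa4, 0x24, 0x00 }; /* assumed */
	const int len = sizeof(prop);
	unsigned int offset = prop[0];
	int cfg_cnt = len - 1;
	int j;

	printf("offset=0x%02x cfg_cnt=%d\n", offset, cfg_cnt);
	for (j = 1; j < len; j++)
		printf("lut[%d]=0x%02x\n", j - 1, prop[j]);
	return 0;
}
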
&len); + if (data && (len == DP_MAX_PHY_LN)) { + for (i = 0; i < len; i++) + parser->l_pnswap |= (data[i] & 0x01) << i; + } + + rc = of_property_read_u32(of_node, + "qcom,max-pclk-frequency-khz", &parser->max_pclk_khz); + if (rc) + parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; + + rc = of_property_read_u32(of_node, + "qcom,max-lclk-frequency-khz", &parser->max_lclk_khz); + if (rc) + parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ; + + parser->display_type = of_get_property(of_node, "label", NULL); + if (!parser->display_type) + parser->display_type = "unknown"; + + rc = of_property_read_u32(of_node, + "qcom,max-hdisplay", &parser->max_hdisplay); + if (rc) + parser->max_hdisplay = 0; + + rc = of_property_read_u32(of_node, + "qcom,max-vdisplay", &parser->max_vdisplay); + if (rc) + parser->max_vdisplay = 0; + + return 0; +} + +static int dp_parser_msm_hdcp_dev(struct dp_parser *parser) +{ + struct device_node *node; + struct platform_device *pdev; + + node = of_find_compatible_node(NULL, NULL, "qcom,msm-hdcp"); + if (!node) { + // This is a non-fatal error, module initialization can proceed + DP_WARN("couldn't find msm-hdcp node\n"); + return 0; + } + + pdev = of_find_device_by_node(node); + if (!pdev) { + // This is a non-fatal error, module initialization can proceed + DP_WARN("couldn't find msm-hdcp pdev\n"); + return 0; + } + + parser->msm_hdcp_dev = &pdev->dev; + + return 0; +} + +static int dp_parser_pinctrl(struct dp_parser *parser) +{ + int rc = 0; + struct dp_pinctrl *pinctrl = &parser->pinctrl; + + pinctrl->pin = devm_pinctrl_get(&parser->pdev->dev); + + if (IS_ERR_OR_NULL(pinctrl->pin)) { + DP_DEBUG("failed to get pinctrl, rc=%d\n", rc); + goto error; + } + + if (parser->no_aux_switch && parser->lphw_hpd) { + pinctrl->state_hpd_tlmm = pinctrl->state_hpd_ctrl = NULL; + + pinctrl->state_hpd_tlmm = pinctrl_lookup_state(pinctrl->pin, + "mdss_dp_hpd_tlmm"); + if (!IS_ERR_OR_NULL(pinctrl->state_hpd_tlmm)) { + pinctrl->state_hpd_ctrl = pinctrl_lookup_state( + pinctrl->pin, "mdss_dp_hpd_ctrl"); + } + + if (!pinctrl->state_hpd_tlmm || !pinctrl->state_hpd_ctrl) { + pinctrl->state_hpd_tlmm = NULL; + pinctrl->state_hpd_ctrl = NULL; + DP_DEBUG("tlmm or ctrl pinctrl state does not exist\n"); + } + } + + pinctrl->state_active = pinctrl_lookup_state(pinctrl->pin, + "mdss_dp_active"); + if (IS_ERR_OR_NULL(pinctrl->state_active)) { + rc = PTR_ERR(pinctrl->state_active); + DP_ERR("failed to get pinctrl active state, rc=%d\n", rc); + goto error; + } + + pinctrl->state_suspend = pinctrl_lookup_state(pinctrl->pin, + "mdss_dp_sleep"); + if (IS_ERR_OR_NULL(pinctrl->state_suspend)) { + rc = PTR_ERR(pinctrl->state_suspend); + DP_ERR("failed to get pinctrl suspend state, rc=%d\n", rc); + goto error; + } +error: + return rc; +} + +static int dp_parser_gpio(struct dp_parser *parser) +{ + int i = 0; + struct device *dev = &parser->pdev->dev; + struct device_node *of_node = dev->of_node; + struct dss_module_power *mp = &parser->mp[DP_CORE_PM]; + static const char * const dp_gpios[] = { + "qcom,aux-en-gpio", + "qcom,aux-sel-gpio", + "qcom,usbplug-cc-gpio", + }; + + if (of_find_property(of_node, "qcom,dp-hpd-gpio", NULL)) { + parser->no_aux_switch = true; + parser->lphw_hpd = of_find_property(of_node, + "qcom,dp-low-power-hw-hpd", NULL); + return 0; + } + + if (of_find_property(of_node, "qcom,dp-gpio-aux-switch", NULL)) + parser->gpio_aux_switch = true; + mp->gpio_config = devm_kzalloc(dev, + sizeof(struct dss_gpio) * ARRAY_SIZE(dp_gpios), GFP_KERNEL); + if (!mp->gpio_config) + return -ENOMEM; + + mp->num_gpio = 
ARRAY_SIZE(dp_gpios); + + for (i = 0; i < ARRAY_SIZE(dp_gpios); i++) { + mp->gpio_config[i].gpio = of_get_named_gpio(of_node, + dp_gpios[i], 0); + + if (!gpio_is_valid(mp->gpio_config[i].gpio)) { + DP_DEBUG("%s gpio not specified\n", dp_gpios[i]); + /* In case any gpio was not specified, we think gpio + * aux switch also was not specified. + */ + parser->gpio_aux_switch = false; + continue; + } + + strlcpy(mp->gpio_config[i].gpio_name, dp_gpios[i], + sizeof(mp->gpio_config[i].gpio_name)); + + mp->gpio_config[i].value = 0; + } + + return 0; +} + +static const char *dp_parser_supply_node_name(enum dp_pm_type module) +{ + switch (module) { + case DP_CORE_PM: return "qcom,core-supply-entries"; + case DP_CTRL_PM: return "qcom,ctrl-supply-entries"; + case DP_PHY_PM: return "qcom,phy-supply-entries"; + default: return "???"; + } +} + +static int dp_parser_get_vreg(struct dp_parser *parser, + enum dp_pm_type module) +{ + int i = 0, rc = 0; + u32 tmp = 0; + const char *pm_supply_name = NULL; + struct device_node *supply_node = NULL; + struct device_node *of_node = parser->pdev->dev.of_node; + struct device_node *supply_root_node = NULL; + struct dss_module_power *mp = &parser->mp[module]; + + mp->num_vreg = 0; + pm_supply_name = dp_parser_supply_node_name(module); + supply_root_node = of_get_child_by_name(of_node, pm_supply_name); + if (!supply_root_node) { + DP_WARN("no supply entry present: %s\n", pm_supply_name); + goto novreg; + } + + mp->num_vreg = of_get_available_child_count(supply_root_node); + + if (mp->num_vreg == 0) { + DP_DEBUG("no vreg\n"); + goto novreg; + } else { + DP_DEBUG("vreg found. count=%d\n", mp->num_vreg); + } + + mp->vreg_config = devm_kzalloc(&parser->pdev->dev, + sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL); + if (!mp->vreg_config) { + rc = -ENOMEM; + goto error; + } + + for_each_child_of_node(supply_root_node, supply_node) { + const char *st = NULL; + /* vreg-name */ + rc = of_property_read_string(supply_node, + "qcom,supply-name", &st); + if (rc) { + DP_ERR("error reading name. rc=%d\n", + rc); + goto error; + } + snprintf(mp->vreg_config[i].vreg_name, + ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st); + /* vreg-min-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-min-voltage", &tmp); + if (rc) { + DP_ERR("error reading min volt. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].min_voltage = tmp; + + /* vreg-max-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-max-voltage", &tmp); + if (rc) { + DP_ERR("error reading max volt. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].max_voltage = tmp; + + /* enable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-enable-load", &tmp); + if (rc) { + DP_ERR("error reading enable load. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].enable_load = tmp; + + /* disable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-disable-load", &tmp); + if (rc) { + DP_ERR("error reading disable load. 
rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].disable_load = tmp; + + DP_DEBUG("%s min=%d, max=%d, enable=%d, disable=%d\n", + mp->vreg_config[i].vreg_name, + mp->vreg_config[i].min_voltage, + mp->vreg_config[i].max_voltage, + mp->vreg_config[i].enable_load, + mp->vreg_config[i].disable_load + ); + ++i; + } + + return rc; + +error: + if (mp->vreg_config) { + devm_kfree(&parser->pdev->dev, mp->vreg_config); + mp->vreg_config = NULL; + } +novreg: + mp->num_vreg = 0; + + return rc; +} + +static void dp_parser_put_vreg_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("invalid input\n"); + return; + } + + if (mp->vreg_config) { + devm_kfree(dev, mp->vreg_config); + mp->vreg_config = NULL; + } + mp->num_vreg = 0; +} + +static int dp_parser_regulator(struct dp_parser *parser) +{ + int i, rc = 0; + struct platform_device *pdev = parser->pdev; + + /* Parse the regulator information */ + for (i = DP_CORE_PM; i < DP_MAX_PM; i++) { + rc = dp_parser_get_vreg(parser, i); + if (rc) { + DP_ERR("get_dt_vreg_data failed for %s. rc=%d\n", + dp_parser_pm_name(i), rc); + i--; + for (; i >= DP_CORE_PM; i--) + dp_parser_put_vreg_data(&pdev->dev, + &parser->mp[i]); + break; + } + } + + return rc; +} + +static bool dp_parser_check_prefix(const char *clk_prefix, const char *clk_name) +{ + return !!strnstr(clk_name, clk_prefix, strlen(clk_name)); +} + +static void dp_parser_put_clk_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + if (mp->clk_config) { + devm_kfree(dev, mp->clk_config); + mp->clk_config = NULL; + } + + mp->num_clk = 0; +} + +static void dp_parser_put_gpio_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + if (mp->gpio_config) { + devm_kfree(dev, mp->gpio_config); + mp->gpio_config = NULL; + } + + mp->num_gpio = 0; +} + +static int dp_parser_init_clk_data(struct dp_parser *parser) +{ + int num_clk = 0, i = 0, rc = 0; + int core_clk_count = 0, link_clk_count = 0; + int strm0_clk_count = 0, strm1_clk_count = 0; + const char *core_clk = "core"; + const char *strm0_clk = "strm0"; + const char *strm1_clk = "strm1"; + const char *link_clk = "link"; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *strm0_power = &parser->mp[DP_STREAM0_PM]; + struct dss_module_power *strm1_power = &parser->mp[DP_STREAM1_PM]; + struct dss_module_power *link_power = &parser->mp[DP_LINK_PM]; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + if (num_clk <= 0) { + DP_ERR("no clocks are defined\n"); + rc = -EINVAL; + goto exit; + } + + for (i = 0; i < num_clk; i++) { + of_property_read_string_index(dev->of_node, + "clock-names", i, &clk_name); + + if (dp_parser_check_prefix(core_clk, clk_name)) + core_clk_count++; + + if (dp_parser_check_prefix(strm0_clk, clk_name)) + strm0_clk_count++; + + if (dp_parser_check_prefix(strm1_clk, clk_name)) + strm1_clk_count++; + + if (dp_parser_check_prefix(link_clk, clk_name)) + link_clk_count++; + } + + /* Initialize the CORE power module */ + if (core_clk_count <= 0) { + DP_ERR("no core clocks are defined\n"); + rc = -EINVAL; + goto exit; + } + + core_power->num_clk = core_clk_count; + core_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * core_power->num_clk, + GFP_KERNEL); + if (!core_power->clk_config) { + rc = -EINVAL; + goto exit; + } + + /* 
Initialize the STREAM0 power module */ + if (strm0_clk_count <= 0) { + DP_DEBUG("no strm0 clocks are defined\n"); + } else { + strm0_power->num_clk = strm0_clk_count; + strm0_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * strm0_power->num_clk, + GFP_KERNEL); + if (!strm0_power->clk_config) { + strm0_power->num_clk = 0; + rc = -EINVAL; + goto strm0_clock_error; + } + } + + /* Initialize the STREAM1 power module */ + if (strm1_clk_count <= 0) { + DP_DEBUG("no strm1 clocks are defined\n"); + } else { + strm1_power->num_clk = strm1_clk_count; + strm1_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * strm1_power->num_clk, + GFP_KERNEL); + if (!strm1_power->clk_config) { + strm1_power->num_clk = 0; + rc = -EINVAL; + goto strm1_clock_error; + } + } + + /* Initialize the link power module */ + if (link_clk_count <= 0) { + DP_ERR("no link clocks are defined\n"); + rc = -EINVAL; + goto link_clock_error; + } + + link_power->num_clk = link_clk_count; + link_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * link_power->num_clk, + GFP_KERNEL); + if (!link_power->clk_config) { + link_power->num_clk = 0; + rc = -EINVAL; + goto link_clock_error; + } + + return rc; + +link_clock_error: + dp_parser_put_clk_data(dev, strm1_power); +strm1_clock_error: + dp_parser_put_clk_data(dev, strm0_power); +strm0_clock_error: + dp_parser_put_clk_data(dev, core_power); +exit: + return rc; +} + +static int dp_parser_clock(struct dp_parser *parser) +{ + int rc = 0, i = 0; + int num_clk = 0; + int core_clk_index = 0, link_clk_index = 0; + int core_clk_count = 0, link_clk_count = 0; + int strm0_clk_index = 0, strm1_clk_index = 0; + int strm0_clk_count = 0, strm1_clk_count = 0; + const char *clk_name; + const char *core_clk = "core"; + const char *strm0_clk = "strm0"; + const char *strm1_clk = "strm1"; + const char *link_clk = "link"; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power; + struct dss_module_power *strm0_power; + struct dss_module_power *strm1_power; + struct dss_module_power *link_power; + + core_power = &parser->mp[DP_CORE_PM]; + strm0_power = &parser->mp[DP_STREAM0_PM]; + strm1_power = &parser->mp[DP_STREAM1_PM]; + link_power = &parser->mp[DP_LINK_PM]; + + rc = dp_parser_init_clk_data(parser); + if (rc) { + DP_ERR("failed to initialize power data\n"); + rc = -EINVAL; + goto exit; + } + + core_clk_count = core_power->num_clk; + link_clk_count = link_power->num_clk; + strm0_clk_count = strm0_power->num_clk; + strm1_clk_count = strm1_power->num_clk; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + + for (i = 0; i < num_clk; i++) { + of_property_read_string_index(dev->of_node, "clock-names", + i, &clk_name); + + if (dp_parser_check_prefix(core_clk, clk_name) && + core_clk_index < core_clk_count) { + struct dss_clk *clk = + &core_power->clk_config[core_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + clk->type = DSS_CLK_AHB; + core_clk_index++; + } else if (dp_parser_check_prefix(link_clk, clk_name) && + link_clk_index < link_clk_count) { + struct dss_clk *clk = + &link_power->clk_config[link_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + link_clk_index++; + + if (!strcmp(clk_name, "link_clk")) + clk->type = DSS_CLK_PCLK; + else + clk->type = DSS_CLK_AHB; + } else if (dp_parser_check_prefix(strm0_clk, clk_name) && + strm0_clk_index < strm0_clk_count) { + struct dss_clk *clk = + &strm0_power->clk_config[strm0_clk_index]; + strlcpy(clk->clk_name, clk_name, 
sizeof(clk->clk_name)); + strm0_clk_index++; + + clk->type = DSS_CLK_PCLK; + } else if (dp_parser_check_prefix(strm1_clk, clk_name) && + strm1_clk_index < strm1_clk_count) { + struct dss_clk *clk = + &strm1_power->clk_config[strm1_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + strm1_clk_index++; + + clk->type = DSS_CLK_PCLK; + } + } + + DP_DEBUG("clock parsing successful\n"); + +exit: + return rc; +} + +static int dp_parser_catalog(struct dp_parser *parser) +{ + int rc; + u32 version; + struct device *dev = &parser->pdev->dev; + + rc = of_property_read_u32(dev->of_node, "qcom,phy-version", &version); + + if (!rc) + parser->hw_cfg.phy_version = version; + + return 0; +} + +static int dp_parser_mst(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + int i; + + parser->has_mst = of_property_read_bool(dev->of_node, + "qcom,mst-enable"); + + parser->no_mst_encoder = of_property_read_bool(dev->of_node, + "qcom,no-mst-encoder"); + + parser->has_mst_sideband = parser->has_mst; + + DP_DEBUG("mst parsing successful. mst:%d\n", parser->has_mst); + + for (i = 0; i < MAX_DP_MST_STREAMS; i++) { + of_property_read_u32_index(dev->of_node, + "qcom,mst-fixed-topology-ports", i, + &parser->mst_fixed_port[i]); + } + + return 0; +} + +static void dp_parser_dsc(struct dp_parser *parser) +{ + int rc; + struct device *dev = &parser->pdev->dev; + + parser->dsc_feature_enable = of_property_read_bool(dev->of_node, + "qcom,dsc-feature-enable"); + + rc = of_property_read_u32(dev->of_node, + "qcom,max-dp-dsc-blks", &parser->max_dp_dsc_blks); + if (rc || !parser->max_dp_dsc_blks) + parser->dsc_feature_enable = false; + + rc = of_property_read_u32(dev->of_node, + "qcom,max-dp-dsc-input-width-pixs", + &parser->max_dp_dsc_input_width_pixs); + if (rc || !parser->max_dp_dsc_input_width_pixs) + parser->dsc_feature_enable = false; + + DP_DEBUG("dsc parsing successful. dsc:%d, blks:%d, width:%d\n", + parser->dsc_feature_enable, + parser->max_dp_dsc_blks, + parser->max_dp_dsc_input_width_pixs); +} + +static void dp_parser_fec(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + + parser->fec_feature_enable = of_property_read_bool(dev->of_node, + "qcom,fec-feature-enable"); + + DP_DEBUG("fec parsing successful. fec:%d\n", + parser->fec_feature_enable); +} + +static void dp_parser_widebus(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + + parser->has_widebus = of_property_read_bool(dev->of_node, + "qcom,widebus-enable"); + + DP_DEBUG("widebus parsing successful. 
widebus:%d\n", + parser->has_widebus); +} + +static int dp_parser_parse(struct dp_parser *parser) +{ + int rc = 0; + + if (!parser) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto err; + } + + rc = dp_parser_reg(parser); + if (rc) + goto err; + + rc = dp_parser_aux(parser); + if (rc) + goto err; + + rc = dp_parser_misc(parser); + if (rc) + goto err; + + rc = dp_parser_clock(parser); + if (rc) + goto err; + + rc = dp_parser_regulator(parser); + if (rc) + goto err; + + rc = dp_parser_gpio(parser); + if (rc) + goto err; + + rc = dp_parser_catalog(parser); + if (rc) + goto err; + + rc = dp_parser_pinctrl(parser); + if (rc) + goto err; + + rc = dp_parser_msm_hdcp_dev(parser); + if (rc) + goto err; + + rc = dp_parser_mst(parser); + if (rc) + goto err; + + dp_parser_dsc(parser); + dp_parser_fec(parser); + dp_parser_widebus(parser); +err: + return rc; +} + +static struct dp_io_data *dp_parser_get_io(struct dp_parser *dp_parser, + char *name) +{ + int i = 0; + struct dp_io *io; + + if (!dp_parser) { + DP_ERR("invalid input\n"); + goto err; + } + + io = &dp_parser->io; + + for (i = 0; i < io->len; i++) { + struct dp_io_data *data = &io->data[i]; + + if (!strcmp(data->name, name)) + return data; + } +err: + return NULL; +} + +static void dp_parser_get_io_buf(struct dp_parser *dp_parser, char *name) +{ + int i = 0; + struct dp_io *io; + + if (!dp_parser) { + DP_ERR("invalid input\n"); + return; + } + + io = &dp_parser->io; + + for (i = 0; i < io->len; i++) { + struct dp_io_data *data = &io->data[i]; + + if (!strcmp(data->name, name)) { + if (!data->buf) + data->buf = devm_kzalloc(&dp_parser->pdev->dev, + data->io.len, GFP_KERNEL); + } + } +} + +static void dp_parser_clear_io_buf(struct dp_parser *dp_parser) +{ + int i = 0; + struct dp_io *io; + + if (!dp_parser) { + DP_ERR("invalid input\n"); + return; + } + + io = &dp_parser->io; + + for (i = 0; i < io->len; i++) { + struct dp_io_data *data = &io->data[i]; + + if (data->buf) + devm_kfree(&dp_parser->pdev->dev, data->buf); + + data->buf = NULL; + } +} + +struct dp_parser *dp_parser_get(struct platform_device *pdev) +{ + struct dp_parser *parser; + + parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL); + if (!parser) + return ERR_PTR(-ENOMEM); + + parser->parse = dp_parser_parse; + parser->get_io = dp_parser_get_io; + parser->get_io_buf = dp_parser_get_io_buf; + parser->clear_io_buf = dp_parser_clear_io_buf; + parser->pdev = pdev; + + return parser; +} + +void dp_parser_put(struct dp_parser *parser) +{ + int i = 0; + struct dss_module_power *power = NULL; + + if (!parser) { + DP_ERR("invalid parser module\n"); + return; + } + + power = parser->mp; + + for (i = 0; i < DP_MAX_PM; i++) { + dp_parser_put_clk_data(&parser->pdev->dev, &power[i]); + dp_parser_put_vreg_data(&parser->pdev->dev, &power[i]); + dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]); + } + + dp_parser_clear_io_buf(parser); + devm_kfree(&parser->pdev->dev, parser->io.data); + devm_kfree(&parser->pdev->dev, parser); +} diff --git a/techpack/display/msm/dp/dp_parser.h b/techpack/display/msm/dp/dp_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..61cbe3839ced10dd184f1ac66a2a311dc33f8509 --- /dev/null +++ b/techpack/display/msm/dp/dp_parser.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define AUX_CFG_LEN 10
+#define DP_MAX_PIXEL_CLK_KHZ 675000
+#define DP_MAX_LINK_CLK_KHZ 810000
+#define MAX_DP_MST_STREAMS 2
+
+enum dp_pm_type {
+	DP_CORE_PM,
+	DP_CTRL_PM,
+	DP_PHY_PM,
+	DP_STREAM0_PM,
+	DP_STREAM1_PM,
+	DP_LINK_PM,
+	DP_MAX_PM
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+	switch (module) {
+	case DP_CORE_PM: return "DP_CORE_PM";
+	case DP_CTRL_PM: return "DP_CTRL_PM";
+	case DP_PHY_PM: return "DP_PHY_PM";
+	case DP_STREAM0_PM: return "DP_STREAM0_PM";
+	case DP_STREAM1_PM: return "DP_STREAM1_PM";
+	case DP_LINK_PM: return "DP_LINK_PM";
+	default: return "???";
+	}
+}
+
+/**
+ * struct dp_display_data - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node: reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+	struct device_node *ctrl_node;
+	struct device_node *phy_node;
+	bool is_active;
+	const char *name;
+	const char *display_type;
+};
+
+/**
+ * struct dp_io_data - data structure to store DP IO related info
+ * @name: name of the IO
+ * @buf: buffer corresponding to IO for debugging
+ * @io: io data which gives len and mapped address
+ */
+struct dp_io_data {
+	const char *name;
+	u8 *buf;
+	struct dss_io_data io;
+};
+
+/**
+ * struct dp_io - data struct to store array of DP IO info
+ * @len: total number of IOs
+ * @data: pointer to an array of DP IO data structures.
+ */
+struct dp_io {
+	u32 len;
+	struct dp_io_data *data;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+	struct pinctrl *pin;
+	struct pinctrl_state *state_active;
+	struct pinctrl_state *state_hpd_active;
+	struct pinctrl_state *state_hpd_tlmm;
+	struct pinctrl_state *state_hpd_ctrl;
+	struct pinctrl_state *state_suspend;
+};
+
+#define DP_ENUM_STR(x) #x
+#define DP_AUX_CFG_MAX_VALUE_CNT 3
+/**
+ * struct dp_aux_cfg - DP's AUX configuration settings
+ *
+ * @cfg_cnt: count of the configurable settings for the AUX register
+ * @current_index: current index of the AUX config lut
+ * @offset: register offset of the AUX config register
+ * @lut: look up table for the AUX config values for this register
+ */
+struct dp_aux_cfg {
+	u32 cfg_cnt;
+	u32 current_index;
+	u32 offset;
+	u32 lut[DP_AUX_CFG_MAX_VALUE_CNT];
+};
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+	PHY_AUX_CFG0,
+	PHY_AUX_CFG1,
+	PHY_AUX_CFG2,
+	PHY_AUX_CFG3,
+	PHY_AUX_CFG4,
+	PHY_AUX_CFG5,
+	PHY_AUX_CFG6,
+	PHY_AUX_CFG7,
+	PHY_AUX_CFG8,
+	PHY_AUX_CFG9,
+	PHY_AUX_CFG_MAX,
+};
+
+/**
+ * enum dp_phy_version - version of the dp phy
+ * @DP_PHY_VERSION_UNKNOWN: Unknown controller version
+ * @DP_PHY_VERSION_4_2_0: DP phy v4.2.0 controller
+ * @DP_PHY_VERSION_MAX: max version
+ */
+enum dp_phy_version {
+	DP_PHY_VERSION_UNKNOWN,
+	DP_PHY_VERSION_2_0_0 = 0x200,
+	DP_PHY_VERSION_4_2_0 = 0x420,
+	DP_PHY_VERSION_MAX
+};
+
+/**
+ * struct dp_hw_cfg - DP HW specific configuration
+ *
+ * @phy_version: DP PHY HW version
+ */
+struct dp_hw_cfg {
+	enum dp_phy_version phy_version;
+};
+
+static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type)
+{
+	switch (cfg_type) {
+	case PHY_AUX_CFG0:
+		return
+			DP_ENUM_STR(PHY_AUX_CFG0);
+	case PHY_AUX_CFG1:
+		return DP_ENUM_STR(PHY_AUX_CFG1);
+	case PHY_AUX_CFG2:
+		return DP_ENUM_STR(PHY_AUX_CFG2);
+	case PHY_AUX_CFG3:
+		return DP_ENUM_STR(PHY_AUX_CFG3);
+	case PHY_AUX_CFG4:
+		return DP_ENUM_STR(PHY_AUX_CFG4);
+	case PHY_AUX_CFG5:
+		return DP_ENUM_STR(PHY_AUX_CFG5);
+	case PHY_AUX_CFG6:
+		return DP_ENUM_STR(PHY_AUX_CFG6);
+	case PHY_AUX_CFG7:
+		return DP_ENUM_STR(PHY_AUX_CFG7);
+	case PHY_AUX_CFG8:
+		return DP_ENUM_STR(PHY_AUX_CFG8);
+	case PHY_AUX_CFG9:
+		return DP_ENUM_STR(PHY_AUX_CFG9);
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @msm_hdcp_dev: device pointer for the HDCP driver
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @disp_data: controller's display related data
+ * @l_pnswap: P/N swap status on each lane
+ * @max_pclk_khz: maximum pixel clock supported for the platform
+ * @max_lclk_khz: maximum link clock supported for the platform
+ * @max_hdisplay: maximum horizontal display supported by the platform for DP
+ * @max_vdisplay: maximum vertical display supported by the platform for DP
+ * @hw_cfg: DP HW specific settings
+ * @has_mst: MST feature enable status
+ * @has_mst_sideband: MST sideband feature enable status
+ * @no_aux_switch: indicates that no AUX switch is present
+ * @no_mst_encoder: only one DP interface available
+ * @gpio_aux_switch: GPIO AUX switch presence status
+ * @dsc_feature_enable: DSC feature enable status
+ * @fec_feature_enable: FEC feature enable status
+ * @max_dp_dsc_blks: maximum DSC blks for DP interface
+ * @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
+ * @has_widebus: widebus (2PPC) feature enable status
+ * @mst_fixed_port: mst port_num reserved for fixed topology
+ * @display_type: display type as defined in device tree.
+ * @parse: function to be called by client to parse device tree.
+ * @get_io: function to be called by client to get io data.
+ * @get_io_buf: function to be called by client to get io buffers.
+ * @clear_io_buf: function to be called by client to clear io buffers.
+ */
+struct dp_parser {
+	struct platform_device *pdev;
+	struct device *msm_hdcp_dev;
+	struct dss_module_power mp[DP_MAX_PM];
+	struct dp_pinctrl pinctrl;
+	struct dp_io io;
+	struct dp_display_data disp_data;
+
+	u8 l_map[4];
+	u8 l_pnswap;
+	struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
+	u32 max_pclk_khz;
+	u32 max_lclk_khz;
+	u32 max_hdisplay;
+	u32 max_vdisplay;
+	struct dp_hw_cfg hw_cfg;
+	bool has_mst;
+	bool has_mst_sideband;
+	bool no_aux_switch;
+	bool no_mst_encoder;
+	bool dsc_feature_enable;
+	bool fec_feature_enable;
+	bool has_widebus;
+	bool gpio_aux_switch;
+	u32 max_dp_dsc_blks;
+	u32 max_dp_dsc_input_width_pixs;
+	bool lphw_hpd;
+	u32 mst_fixed_port[MAX_DP_MST_STREAMS];
+
+	const char *display_type;
+
+	int (*parse)(struct dp_parser *parser);
+	struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
+	void (*get_io_buf)(struct dp_parser *parser, char *name);
+	void (*clear_io_buf)(struct dp_parser *parser);
+};
+
+enum dp_phy_lane_num {
+	DP_PHY_LN0 = 0,
+	DP_PHY_LN1 = 1,
+	DP_PHY_LN2 = 2,
+	DP_PHY_LN3 = 3,
+	DP_MAX_PHY_LN = 4,
+};
+
+enum dp_mainlink_lane_num {
+	DP_ML0 = 0,
+	DP_ML1 = 1,
+	DP_ML2 = 2,
+	DP_ML3 = 3,
+};
+
+/**
+ * dp_parser_get() - get the DP's device tree parser module
+ *
+ * @pdev: platform data of the client
+ * return: pointer to dp_parser structure.
+ * + * This function provides client capability to parse the + * device tree and populate the data structures. The data + * related to clock, regulators, pin-control and other + * can be parsed using this module. + */ +struct dp_parser *dp_parser_get(struct platform_device *pdev); + +/** + * dp_parser_put() - cleans the dp_parser module + * + * @parser: pointer to the parser's data. + */ +void dp_parser_put(struct dp_parser *parser); +#endif diff --git a/techpack/display/msm/dp/dp_power.c b/techpack/display/msm/dp/dp_power.c new file mode 100644 index 0000000000000000000000000000000000000000..e14488f43fa0226bf834f5ce7e5fc317368f0b06 --- /dev/null +++ b/techpack/display/msm/dp/dp_power.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_debug.h" + +#define DP_CLIENT_NAME_SIZE 20 + +struct dp_power_private { + struct dp_parser *parser; + struct platform_device *pdev; + struct clk *pixel_clk_rcg; + struct clk *pixel_parent; + struct clk *pixel1_clk_rcg; + struct clk *pixel1_parent; + + struct dp_power dp_power; + + bool core_clks_on; + bool link_clks_on; + bool strm0_clks_on; + bool strm1_clks_on; +}; + +static int dp_power_regulator_init(struct dp_power_private *power) +{ + int rc = 0, i = 0, j = 0; + struct platform_device *pdev; + struct dp_parser *parser; + + parser = power->parser; + pdev = power->pdev; + + for (i = DP_CORE_PM; !rc && (i < DP_MAX_PM); i++) { + rc = msm_dss_config_vreg(&pdev->dev, + parser->mp[i].vreg_config, + parser->mp[i].num_vreg, 1); + if (rc) { + DP_ERR("failed to init vregs for %s\n", + dp_parser_pm_name(i)); + for (j = i - 1; j >= DP_CORE_PM; j--) { + msm_dss_config_vreg(&pdev->dev, + parser->mp[j].vreg_config, + parser->mp[j].num_vreg, 0); + } + + goto error; + } + } +error: + return rc; +} + +static void dp_power_regulator_deinit(struct dp_power_private *power) +{ + int rc = 0, i = 0; + struct platform_device *pdev; + struct dp_parser *parser; + + parser = power->parser; + pdev = power->pdev; + + for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) { + rc = msm_dss_config_vreg(&pdev->dev, + parser->mp[i].vreg_config, + parser->mp[i].num_vreg, 0); + if (rc) + DP_ERR("failed to deinit vregs for %s\n", + dp_parser_pm_name(i)); + } +} + +static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable) +{ + int rc = 0, i = 0, j = 0; + struct dp_parser *parser; + + parser = power->parser; + + for (i = DP_CORE_PM; i < DP_MAX_PM; i++) { + rc = msm_dss_enable_vreg( + parser->mp[i].vreg_config, + parser->mp[i].num_vreg, enable); + if (rc) { + DP_ERR("failed to '%s' vregs for %s\n", + enable ? "enable" : "disable", + dp_parser_pm_name(i)); + if (enable) { + for (j = i-1; j >= DP_CORE_PM; j--) { + msm_dss_enable_vreg( + parser->mp[j].vreg_config, + parser->mp[j].num_vreg, 0); + } + } + goto error; + } + } +error: + return rc; +} + +static int dp_power_pinctrl_set(struct dp_power_private *power, bool active) +{ + int rc = -EFAULT; + struct pinctrl_state *pin_state; + struct dp_parser *parser; + + parser = power->parser; + + if (IS_ERR_OR_NULL(parser->pinctrl.pin)) + return 0; + + if (parser->no_aux_switch && parser->lphw_hpd) { + pin_state = active ? parser->pinctrl.state_hpd_ctrl + : parser->pinctrl.state_hpd_tlmm; + if (!IS_ERR_OR_NULL(pin_state)) { + rc = pinctrl_select_state(parser->pinctrl.pin, + pin_state); + if (rc) { + DP_ERR("cannot direct hpd line to %s\n", + active ? 
"ctrl" : "tlmm"); + return rc; + } + } + } + + if (parser->no_aux_switch) + return 0; + + pin_state = active ? parser->pinctrl.state_active + : parser->pinctrl.state_suspend; + if (!IS_ERR_OR_NULL(pin_state)) { + rc = pinctrl_select_state(parser->pinctrl.pin, + pin_state); + if (rc) + DP_ERR("can not set %s pins\n", + active ? "dp_active" + : "dp_sleep"); + } else { + DP_ERR("invalid '%s' pinstate\n", + active ? "dp_active" + : "dp_sleep"); + } + + return rc; +} + +static int dp_power_clk_init(struct dp_power_private *power, bool enable) +{ + int rc = 0; + struct device *dev; + enum dp_pm_type module; + + dev = &power->pdev->dev; + + if (enable) { + for (module = DP_CORE_PM; module < DP_MAX_PM; module++) { + struct dss_module_power *pm = + &power->parser->mp[module]; + + if (!pm->num_clk) + continue; + + rc = msm_dss_get_clk(dev, pm->clk_config, pm->num_clk); + if (rc) { + DP_ERR("failed to get %s clk. err=%d\n", + dp_parser_pm_name(module), rc); + goto exit; + } + } + + power->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg"); + if (IS_ERR(power->pixel_clk_rcg)) { + DP_DEBUG("Unable to get DP pixel clk RCG\n"); + power->pixel_clk_rcg = NULL; + } + + power->pixel_parent = devm_clk_get(dev, "pixel_parent"); + if (IS_ERR(power->pixel_parent)) { + DP_DEBUG("Unable to get DP pixel RCG parent\n"); + power->pixel_parent = NULL; + } + + power->pixel1_clk_rcg = devm_clk_get(dev, "pixel1_clk_rcg"); + if (IS_ERR(power->pixel1_clk_rcg)) { + DP_DEBUG("Unable to get DP pixel1 clk RCG\n"); + power->pixel1_clk_rcg = NULL; + } + + power->pixel1_parent = devm_clk_get(dev, "pixel1_parent"); + if (IS_ERR(power->pixel1_parent)) { + DP_DEBUG("Unable to get DP pixel1 RCG parent\n"); + power->pixel1_parent = NULL; + } + } else { + if (power->pixel_parent) + devm_clk_put(dev, power->pixel_parent); + + if (power->pixel_clk_rcg) + devm_clk_put(dev, power->pixel_clk_rcg); + + if (power->pixel1_parent) + devm_clk_put(dev, power->pixel1_parent); + + if (power->pixel1_clk_rcg) + devm_clk_put(dev, power->pixel1_clk_rcg); + + for (module = DP_CORE_PM; module < DP_MAX_PM; module++) { + struct dss_module_power *pm = + &power->parser->mp[module]; + + if (!pm->num_clk) + continue; + + msm_dss_put_clk(pm->clk_config, pm->num_clk); + } + } +exit: + return rc; +} + +static int dp_power_clk_set_rate(struct dp_power_private *power, + enum dp_pm_type module, bool enable) +{ + int rc = 0; + struct dss_module_power *mp; + + if (!power) { + DP_ERR("invalid power data\n"); + rc = -EINVAL; + goto exit; + } + + mp = &power->parser->mp[module]; + + if (enable) { + rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (rc) { + DP_ERR("failed to set clks rate.\n"); + goto exit; + } + + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1); + if (rc) { + DP_ERR("failed to enable clks\n"); + goto exit; + } + } else { + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 0); + if (rc) { + DP_ERR("failed to disable clks\n"); + goto exit; + } + } +exit: + return rc; +} + +static int dp_power_clk_enable(struct dp_power *dp_power, + enum dp_pm_type pm_type, bool enable) +{ + int rc = 0; + struct dss_module_power *mp; + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + rc = -EINVAL; + goto error; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + mp = &power->parser->mp[pm_type]; + + if (pm_type >= DP_MAX_PM) { + DP_ERR("unsupported power module: %s\n", + dp_parser_pm_name(pm_type)); + return -EINVAL; + } + + if (enable) { + if (pm_type == DP_CORE_PM && 
power->core_clks_on) { + DP_DEBUG("core clks already enabled\n"); + return 0; + } + + if ((pm_type == DP_STREAM0_PM) && (power->strm0_clks_on)) { + DP_DEBUG("strm0 clks already enabled\n"); + return 0; + } + + if ((pm_type == DP_STREAM1_PM) && (power->strm1_clks_on)) { + DP_DEBUG("strm1 clks already enabled\n"); + return 0; + } + + if ((pm_type == DP_CTRL_PM) && (!power->core_clks_on)) { + DP_DEBUG("Need to enable core clks before link clks\n"); + + rc = dp_power_clk_set_rate(power, pm_type, enable); + if (rc) { + DP_ERR("failed to enable clks: %s. err=%d\n", + dp_parser_pm_name(DP_CORE_PM), rc); + goto error; + } else { + power->core_clks_on = true; + } + } + + if (pm_type == DP_LINK_PM && power->link_clks_on) { + DP_DEBUG("links clks already enabled\n"); + return 0; + } + } + + rc = dp_power_clk_set_rate(power, pm_type, enable); + if (rc) { + DP_ERR("failed to '%s' clks for: %s. err=%d\n", + enable ? "enable" : "disable", + dp_parser_pm_name(pm_type), rc); + goto error; + } + + if (pm_type == DP_CORE_PM) + power->core_clks_on = enable; + else if (pm_type == DP_STREAM0_PM) + power->strm0_clks_on = enable; + else if (pm_type == DP_STREAM1_PM) + power->strm1_clks_on = enable; + else if (pm_type == DP_LINK_PM) + power->link_clks_on = enable; + + /* + * This log is printed only when user connects or disconnects + * a DP cable. As this is a user-action and not a frequent + * usecase, it is not going to flood the kernel logs. Also, + * helpful in debugging the NOC issues. + */ + DP_INFO("core:%s link:%s strm0:%s strm1:%s\n", + power->core_clks_on ? "on" : "off", + power->link_clks_on ? "on" : "off", + power->strm0_clks_on ? "on" : "off", + power->strm1_clks_on ? "on" : "off"); +error: + return rc; +} + +static int dp_power_request_gpios(struct dp_power_private *power) +{ + int rc = 0, i; + struct device *dev; + struct dss_module_power *mp; + static const char * const gpio_names[] = { + "aux_enable", "aux_sel", "usbplug_cc", + }; + + if (!power) { + DP_ERR("invalid power data\n"); + return -EINVAL; + } + + dev = &power->pdev->dev; + mp = &power->parser->mp[DP_CORE_PM]; + + for (i = 0; i < ARRAY_SIZE(gpio_names); i++) { + unsigned int gpio = mp->gpio_config[i].gpio; + + if (gpio_is_valid(gpio)) { + rc = devm_gpio_request(dev, gpio, gpio_names[i]); + if (rc) { + DP_ERR("request %s gpio failed, rc=%d\n", + gpio_names[i], rc); + goto error; + } + } + } + return 0; +error: + for (i = 0; i < ARRAY_SIZE(gpio_names); i++) { + unsigned int gpio = mp->gpio_config[i].gpio; + + if (gpio_is_valid(gpio)) + gpio_free(gpio); + } + return rc; +} + +static bool dp_power_find_gpio(const char *gpio1, const char *gpio2) +{ + return !!strnstr(gpio1, gpio2, strlen(gpio1)); +} + +static void dp_power_set_gpio(struct dp_power_private *power, bool flip) +{ + int i; + struct dss_module_power *mp = &power->parser->mp[DP_CORE_PM]; + struct dss_gpio *config = mp->gpio_config; + + for (i = 0; i < mp->num_gpio; i++) { + if (dp_power_find_gpio(config->gpio_name, "aux-sel")) + config->value = flip; + + if (gpio_is_valid(config->gpio)) { + DP_DEBUG("gpio %s, value %d\n", config->gpio_name, + config->value); + + if (dp_power_find_gpio(config->gpio_name, "aux-en") || + dp_power_find_gpio(config->gpio_name, "aux-sel")) + gpio_direction_output(config->gpio, + config->value); + else + gpio_set_value(config->gpio, config->value); + + } + config++; + } +} + +static int dp_power_config_gpios(struct dp_power_private *power, bool flip, + bool enable) +{ + int rc = 0, i; + struct dss_module_power *mp; + struct dss_gpio *config; + + if 
(power->parser->no_aux_switch) + return 0; + + mp = &power->parser->mp[DP_CORE_PM]; + config = mp->gpio_config; + + if (enable) { + rc = dp_power_request_gpios(power); + if (rc) { + DP_ERR("gpio request failed\n"); + return rc; + } + + dp_power_set_gpio(power, flip); + } else { + for (i = 0; i < mp->num_gpio; i++) { + if (gpio_is_valid(config[i].gpio)) { + gpio_set_value(config[i].gpio, 0); + gpio_free(config[i].gpio); + } + } + } + + return 0; +} + +static int dp_power_client_init(struct dp_power *dp_power, + struct sde_power_handle *phandle, struct drm_device *drm_dev) +{ + int rc = 0; + struct dp_power_private *power; + + if (!drm_dev) { + DP_ERR("invalid drm_dev\n"); + return -EINVAL; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + rc = dp_power_regulator_init(power); + if (rc) { + DP_ERR("failed to init regulators\n"); + goto error_power; + } + + rc = dp_power_clk_init(power, true); + if (rc) { + DP_ERR("failed to init clocks\n"); + goto error_clk; + } + dp_power->phandle = phandle; + dp_power->drm_dev = drm_dev; + + return 0; + +error_clk: + dp_power_regulator_deinit(power); +error_power: + return rc; +} + +static void dp_power_client_deinit(struct dp_power *dp_power) +{ + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + return; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + dp_power_clk_init(power, false); + dp_power_regulator_deinit(power); +} + +static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power, u32 strm_id) +{ + int rc = 0; + struct dp_power_private *power; + + if (!dp_power || strm_id >= DP_STREAM_MAX) { + DP_ERR("invalid power data. stream %d\n", strm_id); + rc = -EINVAL; + goto exit; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + if (strm_id == DP_STREAM_0) { + if (power->pixel_clk_rcg && power->pixel_parent) + clk_set_parent(power->pixel_clk_rcg, + power->pixel_parent); + } else if (strm_id == DP_STREAM_1) { + if (power->pixel1_clk_rcg && power->pixel1_parent) + clk_set_parent(power->pixel1_clk_rcg, + power->pixel1_parent); + } +exit: + return rc; +} + +static u64 dp_power_clk_get_rate(struct dp_power *dp_power, char *clk_name) +{ + size_t i; + enum dp_pm_type j; + struct dss_module_power *mp; + struct dp_power_private *power; + bool clk_found = false; + u64 rate = 0; + + if (!clk_name) { + DP_ERR("invalid pointer for clk_name\n"); + return 0; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + mp = &dp_power->phandle->mp; + for (i = 0; i < mp->num_clk; i++) { + if (!strcmp(mp->clk_config[i].clk_name, clk_name)) { + rate = clk_get_rate(mp->clk_config[i].clk); + clk_found = true; + break; + } + } + + for (j = DP_CORE_PM; j < DP_MAX_PM && !clk_found; j++) { + mp = &power->parser->mp[j]; + for (i = 0; i < mp->num_clk; i++) { + if (!strcmp(mp->clk_config[i].clk_name, clk_name)) { + rate = clk_get_rate(mp->clk_config[i].clk); + clk_found = true; + break; + } + } + } + + return rate; +} + +static int dp_power_init(struct dp_power *dp_power, bool flip) +{ + int rc = 0; + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + rc = -EINVAL; + goto exit; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + rc = dp_power_regulator_ctrl(power, true); + if (rc) { + DP_ERR("failed to enable regulators\n"); + goto exit; + } + + rc = dp_power_pinctrl_set(power, true); + if (rc) { + DP_ERR("failed to set pinctrl state\n"); + goto err_pinctrl; + } + + rc = 
dp_power_config_gpios(power, flip, true); + if (rc) { + DP_ERR("failed to enable gpios\n"); + goto err_gpio; + } + + rc = pm_runtime_get_sync(dp_power->drm_dev->dev); + if (rc < 0) { + DP_ERR("Power resource enable failed\n"); + goto err_sde_power; + } + + rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true); + if (rc) { + DP_ERR("failed to enable DP core clocks\n"); + goto err_clk; + } + + return 0; + +err_clk: + pm_runtime_put_sync(dp_power->drm_dev->dev); +err_sde_power: + dp_power_config_gpios(power, flip, false); +err_gpio: + dp_power_pinctrl_set(power, false); +err_pinctrl: + dp_power_regulator_ctrl(power, false); +exit: + return rc; +} + +static int dp_power_deinit(struct dp_power *dp_power) +{ + int rc = 0; + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + rc = -EINVAL; + goto exit; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + if (power->link_clks_on) + dp_power_clk_enable(dp_power, DP_LINK_PM, false); + + dp_power_clk_enable(dp_power, DP_CORE_PM, false); + pm_runtime_put_sync(dp_power->drm_dev->dev); + + dp_power_config_gpios(power, false, false); + dp_power_pinctrl_set(power, false); + dp_power_regulator_ctrl(power, false); +exit: + return rc; +} + +struct dp_power *dp_power_get(struct dp_parser *parser) +{ + int rc = 0; + struct dp_power_private *power; + struct dp_power *dp_power; + + if (!parser) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL); + if (!power) { + rc = -ENOMEM; + goto error; + } + + power->parser = parser; + power->pdev = parser->pdev; + + dp_power = &power->dp_power; + + dp_power->init = dp_power_init; + dp_power->deinit = dp_power_deinit; + dp_power->clk_enable = dp_power_clk_enable; + dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent; + dp_power->clk_get_rate = dp_power_clk_get_rate; + dp_power->power_client_init = dp_power_client_init; + dp_power->power_client_deinit = dp_power_client_deinit; + + return dp_power; +error: + return ERR_PTR(rc); +} + +void dp_power_put(struct dp_power *dp_power) +{ + struct dp_power_private *power = NULL; + + if (!dp_power) + return; + + power = container_of(dp_power, struct dp_power_private, dp_power); + + devm_kfree(&power->pdev->dev, power); +} diff --git a/techpack/display/msm/dp/dp_power.h b/techpack/display/msm/dp/dp_power.h new file mode 100644 index 0000000000000000000000000000000000000000..a5e5f5d93e90288e7288e1e6f9216b9a8b99ded8 --- /dev/null +++ b/techpack/display/msm/dp/dp_power.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+#include "sde_power_handle.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @init: initializes the regulators/core clocks/GPIOs/pinctrl
+ * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
+ * @clk_enable: enable/disable the DP clocks
+ * @set_pixel_clk_parent: set the parent of DP pixel clock
+ * @clk_get_rate: get the current rate for provided clk_name
+ */
+struct dp_power {
+	struct drm_device *drm_dev;
+	struct sde_power_handle *phandle;
+	int (*init)(struct dp_power *power, bool flip);
+	int (*deinit)(struct dp_power *power);
+	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
+				bool enable);
+	int (*set_pixel_clk_parent)(struct dp_power *power, u32 stream_id);
+	u64 (*clk_get_rate)(struct dp_power *power, char *clk_name);
+	int (*power_client_init)(struct dp_power *power,
+				struct sde_power_handle *phandle,
+				struct drm_device *drm_dev);
+	void (*power_client_deinit)(struct dp_power *power);
+};
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provide
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
+
+/**
+ * dp_power_put() - release the power related resources
+ *
+ * @power: pointer to the power module's data
+ */
+void dp_power_put(struct dp_power *power);
+#endif /* _DP_POWER_H_ */
diff --git a/techpack/display/msm/dp/dp_reg.h b/techpack/display/msm/dp/dp_reg.h
new file mode 100644
index 0000000000000000000000000000000000000000..df6a8d3ce8c4e5301d4209b1fa575394cdec2d27
--- /dev/null
+++ b/techpack/display/msm/dp/dp_reg.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _DP_REG_H_ +#define _DP_REG_H_ + +/* DP_TX Registers */ +#define DP_HW_VERSION (0x00000000) +#define DP_SW_RESET (0x00000010) +#define DP_PHY_CTRL (0x00000014) +#define DP_CLK_CTRL (0x00000018) +#define DP_CLK_ACTIVE (0x0000001C) +#define DP_INTR_STATUS (0x00000020) +#define DP_INTR_STATUS2 (0x00000024) +#define DP_INTR_STATUS3 (0x00000028) +#define DP_INTR_STATUS5 (0x00000034) + +#define DP_DP_HPD_CTRL (0x00000000) +#define DP_DP_HPD_INT_STATUS (0x00000004) +#define DP_DP_HPD_INT_ACK (0x00000008) +#define DP_DP_HPD_INT_MASK (0x0000000C) +#define DP_DP_HPD_REFTIMER (0x00000018) +#define DP_DP_HPD_EVENT_TIME_0 (0x0000001C) +#define DP_DP_HPD_EVENT_TIME_1 (0x00000020) +#define DP_AUX_CTRL (0x00000030) +#define DP_AUX_DATA (0x00000034) +#define DP_AUX_TRANS_CTRL (0x00000038) +#define DP_TIMEOUT_COUNT (0x0000003C) +#define DP_AUX_LIMITS (0x00000040) +#define DP_AUX_STATUS (0x00000044) + +#define DP_DPCD_CP_IRQ (0x201) +#define DP_DPCD_RXSTATUS (0x69493) + +#define DP_INTERRUPT_TRANS_NUM (0x000000A0) + +#define DP_MAINLINK_CTRL (0x00000000) +#define DP_STATE_CTRL (0x00000004) +#define DP_CONFIGURATION_CTRL (0x00000008) +#define DP_SOFTWARE_MVID (0x00000010) +#define DP_SOFTWARE_NVID (0x00000018) +#define DP_TOTAL_HOR_VER (0x0000001C) +#define DP_START_HOR_VER_FROM_SYNC (0x00000020) +#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024) +#define DP_ACTIVE_HOR_VER (0x00000028) +#define DP_MISC1_MISC0 (0x0000002C) +#define DP_VALID_BOUNDARY (0x00000030) +#define DP_VALID_BOUNDARY_2 (0x00000034) +#define DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038) + +#define DP1_CONFIGURATION_CTRL (0x00000400) +#define DP_DP0_TIMESLOT_1_32 (0x00000404) +#define DP_DP0_TIMESLOT_33_63 (0x00000408) +#define DP_DP1_TIMESLOT_1_32 (0x0000040C) +#define DP_DP1_TIMESLOT_33_63 (0x00000410) +#define DP1_SOFTWARE_MVID (0x00000414) +#define DP1_SOFTWARE_NVID (0x00000418) +#define DP1_TOTAL_HOR_VER (0x0000041C) +#define DP1_START_HOR_VER_FROM_SYNC (0x00000420) +#define DP1_HSYNC_VSYNC_WIDTH_POLARITY (0x00000424) +#define DP1_ACTIVE_HOR_VER (0x00000428) +#define DP1_MISC1_MISC0 (0x0000042C) +#define DP_DP0_RG (0x000004F8) +#define DP_DP1_RG (0x000004FC) + +#define DP_MST_ACT (0x00000500) +#define DP_MST_MAINLINK_READY (0x00000504) + +#define DP_MAINLINK_READY (0x00000040) +#define DP_MAINLINK_LEVELS (0x00000044) +#define DP_TU (0x0000004C) + +#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054) +#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0) +#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4) +#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8) + +#define MMSS_DP_MISC1_MISC0 (0x0000002C) +#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080) +#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084) +#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088) +#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C) +#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090) +#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094) +#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098) + +#define MMSS_DP_PSR_CRC_RG (0x00000154) +#define MMSS_DP_PSR_CRC_B (0x00000158) + +#define DP_COMPRESSION_MODE_CTRL (0x00000180) +#define DP_PPS_HB_0_3 (0x00000184) +#define DP_PPS_PB_0_3 (0x00000188) +#define DP_PPS_PB_4_7 (0x0000018C) +#define DP_PPS_PB_8_11 (0x00000190) +#define DP_PPS_PB_12_15 (0x00000194) +#define DP_PPS_PB_16_19 (0x00000198) +#define DP_PPS_PB_20_23 (0x0000019C) +#define DP_PPS_PB_24_27 (0x000001A0) +#define DP_PPS_PB_28_31 (0x000001A4) +#define DP_PPS_PPS_0_3 (0x000001A8) +#define DP_PPS_PPS_4_7 (0x000001AC) +#define DP_PPS_PPS_8_11 (0x000001B0) 
+#define DP_PPS_PPS_12_15 (0x000001B4) +#define DP_PPS_PPS_16_19 (0x000001B8) +#define DP_PPS_PPS_20_23 (0x000001BC) +#define DP_PPS_PPS_24_27 (0x000001C0) +#define DP_PPS_PPS_28_31 (0x000001C4) +#define DP_PPS_PPS_32_35 (0x000001C8) +#define DP_PPS_PPS_36_39 (0x000001CC) +#define DP_PPS_PPS_40_43 (0x000001D0) +#define DP_PPS_PPS_44_47 (0x000001D4) +#define DP_PPS_PPS_48_51 (0x000001D8) +#define DP_PPS_PPS_52_55 (0x000001DC) +#define DP_PPS_PPS_56_59 (0x000001E0) +#define DP_PPS_PPS_60_63 (0x000001E4) +#define DP_PPS_PPS_64_67 (0x000001E8) +#define DP_PPS_PPS_68_71 (0x000001EC) +#define DP_PPS_PPS_72_75 (0x000001F0) +#define DP_PPS_PPS_76_79 (0x000001F4) +#define DP_PPS_PPS_80_83 (0x000001F8) +#define DP_PPS_PPS_84_87 (0x000001FC) + +#define MMSS_DP_AUDIO_CFG (0x00000200) +#define MMSS_DP_AUDIO_STATUS (0x00000204) +#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208) +#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C) +#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210) +#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214) + +#define MMSS_DP_SDP_CFG (0x00000228) +#define MMSS_DP_SDP_CFG2 (0x0000022C) +#define MMSS_DP_SDP_CFG3 (0x0000024C) +#define MMSS_DP_SDP_CFG4 (0x000004EC) +#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230) +#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234) + +#define MMSS_DP_AUDIO_STREAM_0 (0x00000240) +#define MMSS_DP_AUDIO_STREAM_1 (0x00000244) + +#define MMSS_DP_EXTENSION_0 (0x00000250) +#define MMSS_DP_EXTENSION_1 (0x00000254) +#define MMSS_DP_EXTENSION_2 (0x00000258) +#define MMSS_DP_EXTENSION_3 (0x0000025C) +#define MMSS_DP_EXTENSION_4 (0x00000260) +#define MMSS_DP_EXTENSION_5 (0x00000264) +#define MMSS_DP_EXTENSION_6 (0x00000268) +#define MMSS_DP_EXTENSION_7 (0x0000026C) +#define MMSS_DP_EXTENSION_8 (0x00000270) +#define MMSS_DP_EXTENSION_9 (0x00000274) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C) +#define MMSS_DP_AUDIO_ISRC_0 (0x00000290) +#define MMSS_DP_AUDIO_ISRC_1 (0x00000294) +#define MMSS_DP_AUDIO_ISRC_2 (0x00000298) +#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C) +#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0) +#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4) +#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8) +#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC) +#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0) + +#define MMSS_DP_FLUSH (0x000002F8) +#define MMSS_DP1_FLUSH (0x000002FC) + +#define MMSS_DP_GENERIC0_0 (0x00000300) +#define MMSS_DP_GENERIC0_1 (0x00000304) +#define MMSS_DP_GENERIC0_2 (0x00000308) +#define MMSS_DP_GENERIC0_3 (0x0000030C) +#define MMSS_DP_GENERIC0_4 (0x00000310) +#define MMSS_DP_GENERIC0_5 (0x00000314) +#define MMSS_DP_GENERIC0_6 (0x00000318) +#define MMSS_DP_GENERIC0_7 (0x0000031C) +#define MMSS_DP_GENERIC0_8 (0x00000320) +#define MMSS_DP_GENERIC0_9 (0x00000324) +#define MMSS_DP_GENERIC1_0 (0x00000328) +#define MMSS_DP_GENERIC1_1 (0x0000032C) +#define MMSS_DP_GENERIC1_2 (0x00000330) +#define MMSS_DP_GENERIC1_3 (0x00000334) +#define MMSS_DP_GENERIC1_4 (0x00000338) +#define MMSS_DP_GENERIC1_5 (0x0000033C) +#define MMSS_DP_GENERIC1_6 (0x00000340) +#define MMSS_DP_GENERIC1_7 (0x00000344) +#define MMSS_DP_GENERIC1_8 (0x00000348) +#define MMSS_DP_GENERIC1_9 (0x0000034C) + +#define MMSS_DP1_GENERIC0_0 (0x00000490) +#define MMSS_DP1_GENERIC0_1 (0x00000494) +#define MMSS_DP1_GENERIC0_2 (0x00000498) +#define MMSS_DP1_GENERIC0_3 
(0x0000049C) +#define MMSS_DP1_GENERIC0_4 (0x000004A0) +#define MMSS_DP1_GENERIC0_5 (0x000004A4) +#define MMSS_DP1_GENERIC0_6 (0x000004A8) +#define MMSS_DP1_GENERIC0_7 (0x000004AC) +#define MMSS_DP1_GENERIC0_8 (0x000004B0) +#define MMSS_DP1_GENERIC0_9 (0x000004B4) +#define MMSS_DP1_GENERIC1_0 (0x000004B8) +#define MMSS_DP1_GENERIC1_1 (0x000004BC) +#define MMSS_DP1_GENERIC1_2 (0x000004C0) +#define MMSS_DP1_GENERIC1_3 (0x000004C4) +#define MMSS_DP1_GENERIC1_4 (0x000004C8) +#define MMSS_DP1_GENERIC1_5 (0x000004CC) +#define MMSS_DP1_GENERIC1_6 (0x000004D0) +#define MMSS_DP1_GENERIC1_7 (0x000004D4) +#define MMSS_DP1_GENERIC1_8 (0x000004D8) +#define MMSS_DP1_GENERIC1_9 (0x000004DC) + +#define MMSS_DP_GENERIC2_0 (0x000003d8) +#define MMSS_DP_GENERIC2_1 (0x000003dc) +#define MMSS_DP_GENERIC2_2 (0x000003e0) +#define MMSS_DP_GENERIC2_3 (0x000003e4) +#define MMSS_DP_GENERIC2_4 (0x000003e8) +#define MMSS_DP_GENERIC2_5 (0x000003ec) +#define MMSS_DP_GENERIC2_6 (0x000003f0) +#define MMSS_DP_GENERIC2_7 (0x000003f4) +#define MMSS_DP_GENERIC2_8 (0x000003f8) +#define MMSS_DP_GENERIC2_9 (0x000003fc) +#define MMSS_DP1_GENERIC2_0 (0x00000510) +#define MMSS_DP1_GENERIC2_1 (0x00000514) +#define MMSS_DP1_GENERIC2_2 (0x00000518) +#define MMSS_DP1_GENERIC2_3 (0x0000051c) +#define MMSS_DP1_GENERIC2_4 (0x00000520) +#define MMSS_DP1_GENERIC2_5 (0x00000524) +#define MMSS_DP1_GENERIC2_6 (0x00000528) +#define MMSS_DP1_GENERIC2_7 (0x0000052C) +#define MMSS_DP1_GENERIC2_8 (0x00000530) +#define MMSS_DP1_GENERIC2_9 (0x00000534) + +#define MMSS_DP1_SDP_CFG (0x000004E0) +#define MMSS_DP1_SDP_CFG2 (0x000004E4) +#define MMSS_DP1_SDP_CFG3 (0x000004E8) +#define MMSS_DP1_SDP_CFG4 (0x000004F0) + +#define DP1_COMPRESSION_MODE_CTRL (0x00000560) +#define DP1_PPS_HB_0_3 (0x00000564) +#define DP1_PPS_PB_0_3 (0x00000568) +#define DP1_PPS_PB_4_7 (0x0000056C) +#define DP1_PPS_PB_8_11 (0x00000570) +#define DP1_PPS_PB_12_15 (0x00000574) +#define DP1_PPS_PB_16_19 (0x00000578) +#define DP1_PPS_PB_20_23 (0x0000057C) +#define DP1_PPS_PB_24_27 (0x00000580) +#define DP1_PPS_PB_28_31 (0x00000584) +#define DP1_PPS_PPS_0_3 (0x00000588) +#define DP1_PPS_PPS_4_7 (0x0000058C) +#define DP1_PPS_PPS_8_11 (0x00000590) +#define DP1_PPS_PPS_12_15 (0x00000594) +#define DP1_PPS_PPS_16_19 (0x00000598) +#define DP1_PPS_PPS_20_23 (0x0000059C) +#define DP1_PPS_PPS_24_27 (0x000005A0) +#define DP1_PPS_PPS_28_31 (0x000005A4) +#define DP1_PPS_PPS_32_35 (0x000005A8) +#define DP1_PPS_PPS_36_39 (0x000005AC) +#define DP1_PPS_PPS_40_43 (0x000005B0) +#define DP1_PPS_PPS_44_47 (0x000005B4) +#define DP1_PPS_PPS_48_51 (0x000005B8) +#define DP1_PPS_PPS_52_55 (0x000005BC) +#define DP1_PPS_PPS_56_59 (0x000005C0) +#define DP1_PPS_PPS_60_63 (0x000005C4) +#define DP1_PPS_PPS_64_67 (0x000005C8) +#define DP1_PPS_PPS_68_71 (0x000005CC) +#define DP1_PPS_PPS_72_75 (0x000005D0) +#define DP1_PPS_PPS_76_79 (0x000005D4) +#define DP1_PPS_PPS_80_83 (0x000005D8) +#define DP1_PPS_PPS_84_87 (0x000005DC) + +#define MMSS_DP_VSCEXT_0 (0x000002D0) +#define MMSS_DP_VSCEXT_1 (0x000002D4) +#define MMSS_DP_VSCEXT_2 (0x000002D8) +#define MMSS_DP_VSCEXT_3 (0x000002DC) +#define MMSS_DP_VSCEXT_4 (0x000002E0) +#define MMSS_DP_VSCEXT_5 (0x000002E4) +#define MMSS_DP_VSCEXT_6 (0x000002E8) +#define MMSS_DP_VSCEXT_7 (0x000002EC) +#define MMSS_DP_VSCEXT_8 (0x000002F0) +#define MMSS_DP_VSCEXT_9 (0x000002F4) + +#define MMSS_DP1_VSCEXT_0 (0x00000468) +#define MMSS_DP1_VSCEXT_1 (0x0000046c) +#define MMSS_DP1_VSCEXT_2 (0x00000470) +#define MMSS_DP1_VSCEXT_3 (0x00000474) +#define MMSS_DP1_VSCEXT_4 (0x00000478) +#define 
MMSS_DP1_VSCEXT_5 (0x0000047c) +#define MMSS_DP1_VSCEXT_6 (0x00000480) +#define MMSS_DP1_VSCEXT_7 (0x00000484) +#define MMSS_DP1_VSCEXT_8 (0x00000488) +#define MMSS_DP1_VSCEXT_9 (0x0000048c) + +#define MMSS_DP_BIST_ENABLE (0x00000000) +#define MMSS_DP_TIMING_ENGINE_EN (0x00000010) +#define MMSS_DP_INTF_CONFIG (0x00000014) +#define MMSS_DP_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048) +#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP_INTF_POLARITY_CTL (0x00000058) +#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064) +#define MMSS_DP_DSC_DTO (0x0000007C) +#define MMSS_DP_DSC_DTO_COUNT (0x00000084) +#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088) + +#define MMSS_DP1_BIST_ENABLE (0x00000000) +#define MMSS_DP1_TIMING_ENGINE_EN (0x00000010) +#define MMSS_DP1_INTF_CONFIG (0x00000014) +#define MMSS_DP1_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP1_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP1_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_DP1_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_DP1_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP1_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP1_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP1_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP1_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP1_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP1_INTF_ACTIVE_V_END_F1 (0x00000048) +#define MMSS_DP1_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP1_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP1_INTF_POLARITY_CTL (0x00000058) +#define MMSS_DP1_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP1_TPG_VIDEO_CONFIG (0x00000064) +#define MMSS_DP1_DSC_DTO (0x0000007C) +#define MMSS_DP1_DSC_DTO_COUNT (0x00000084) +#define MMSS_DP1_ASYNC_FIFO_CONFIG (0x00000088) + +/*DP PHY Register offsets */ +#define DP_PHY_REVISION_ID0 (0x00000000) +#define DP_PHY_REVISION_ID1 (0x00000004) +#define DP_PHY_REVISION_ID2 (0x00000008) +#define DP_PHY_REVISION_ID3 (0x0000000C) + +#define DP_PHY_CFG (0x00000010) +#define DP_PHY_PD_CTL (0x00000018) +#define DP_PHY_MODE (0x0000001C) + +#define DP_PHY_AUX_CFG0 (0x00000020) +#define DP_PHY_AUX_CFG1 (0x00000024) +#define DP_PHY_AUX_CFG2 (0x00000028) +#define DP_PHY_AUX_CFG3 (0x0000002C) +#define DP_PHY_AUX_CFG4 (0x00000030) +#define DP_PHY_AUX_CFG5 (0x00000034) +#define DP_PHY_AUX_CFG6 (0x00000038) +#define DP_PHY_AUX_CFG7 (0x0000003C) +#define DP_PHY_AUX_CFG8 (0x00000040) +#define DP_PHY_AUX_CFG9 (0x00000044) +#define DP_PHY_AUX_INTERRUPT_MASK (0x00000048) +#define DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C) +#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC) +#define DP_PHY_AUX_INTERRUPT_MASK_V200 (0x00000048) +#define DP_PHY_AUX_INTERRUPT_CLEAR_V200 (0x0000004C) +#define DP_PHY_AUX_INTERRUPT_STATUS_V200 
(0x000000BC) + +#define DP_PHY_SPARE0 (0x00AC) + +#define TXn_TX_EMP_POST1_LVL (0x000C) +#define TXn_TX_DRV_LVL (0x001C) +#define TXn_TX_POL_INV (0x0064) + +#define TXn_TRANSCEIVER_BIAS_EN (0x005C) +#define TXn_HIGHZ_DRVR_EN (0x0060) + +#define DP_PHY_STATUS_V420 (0x00DC) +#define DP_PHY_AUX_INTERRUPT_MASK_V420 (0x0054) +#define DP_PHY_AUX_INTERRUPT_CLEAR_V420 (0x0058) +#define DP_PHY_AUX_INTERRUPT_STATUS_V420 (0x00D8) +#define DP_PHY_SPARE0_V420 (0x00C8) +#define TXn_TX_DRV_LVL_V420 (0x0014) +#define TXn_TRANSCEIVER_BIAS_EN_V420 (0x0054) +#define TXn_HIGHZ_DRVR_EN_V420 (0x0058) +#define TXn_TX_POL_INV_V420 (0x005C) + +#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004) + +/* DP MMSS_CC registers */ +#define MMSS_DP_LINK_CMD_RCGR (0x0138) +#define MMSS_DP_LINK_CFG_RCGR (0x013C) +#define MMSS_DP_PIXEL_M (0x0134) +#define MMSS_DP_PIXEL_N (0x0138) +#define MMSS_DP_PIXEL1_M (0x01CC) +#define MMSS_DP_PIXEL1_N (0x01D0) +#define MMSS_DP_PIXEL_M_V200 (0x0130) +#define MMSS_DP_PIXEL_N_V200 (0x0134) +#define MMSS_DP_PIXEL1_M_V200 (0x0148) +#define MMSS_DP_PIXEL1_N_V200 (0x014C) +#define MMSS_DP_PIXEL_M_V420 (0x01B4) +#define MMSS_DP_PIXEL_N_V420 (0x01B8) +#define MMSS_DP_PIXEL1_M_V420 (0x01CC) +#define MMSS_DP_PIXEL1_N_V420 (0x01D0) + +/* DP HDCP 1.3 registers */ +#define DP_HDCP_CTRL (0x0A0) +#define DP_HDCP_STATUS (0x0A4) +#define DP_HDCP_SW_UPPER_AKSV (0x098) +#define DP_HDCP_SW_LOWER_AKSV (0x09C) +#define DP_HDCP_ENTROPY_CTRL0 (0x350) +#define DP_HDCP_ENTROPY_CTRL1 (0x35C) +#define DP_HDCP_SHA_STATUS (0x0C8) +#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0) +#define DP_HDCP_RCVPORT_DATA3 (0x0A4) +#define DP_HDCP_RCVPORT_DATA4 (0x0A8) +#define DP_HDCP_RCVPORT_DATA5 (0x0C0) +#define DP_HDCP_RCVPORT_DATA6 (0x0C4) + +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020) + +/* USB3 DP COM registers */ +#define USB3_DP_COM_RESET_OVRD_CTRL (0x1C) +#define USB3_DP_COM_PHY_MODE_CTRL (0x00) +#define USB3_DP_COM_SW_RESET (0x04) +#define USB3_DP_COM_TYPEC_CTRL (0x10) +#define USB3_DP_COM_SWI_CTRL (0x0c) +#define USB3_DP_COM_POWER_DOWN_CTRL (0x08) + + + +#endif /* _DP_REG_H_ */ diff --git a/techpack/display/msm/dp/dp_usbpd.c b/techpack/display/msm/dp/dp_usbpd.c new file mode 100644 index 0000000000000000000000000000000000000000..f49df593ecefa350ef4e4877e679d3485d6feee0 --- /dev/null +++ b/techpack/display/msm/dp/dp_usbpd.c @@ -0,0 +1,582 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "dp_usbpd.h" +#include "dp_debug.h" + +/* DP specific VDM commands */ +#define DP_USBPD_VDM_STATUS 0x10 +#define DP_USBPD_VDM_CONFIGURE 0x11 + +/* USBPD-TypeC specific Macros */ +#define VDM_VERSION 0x0 +#define USB_C_DP_SID 0xFF01 + +enum dp_usbpd_pin_assignment { + DP_USBPD_PIN_A, + DP_USBPD_PIN_B, + DP_USBPD_PIN_C, + DP_USBPD_PIN_D, + DP_USBPD_PIN_E, + DP_USBPD_PIN_F, + DP_USBPD_PIN_MAX, +}; + +enum dp_usbpd_events { + DP_USBPD_EVT_DISCOVER, + DP_USBPD_EVT_ENTER, + DP_USBPD_EVT_STATUS, + DP_USBPD_EVT_CONFIGURE, + DP_USBPD_EVT_CC_PIN_POLARITY, + DP_USBPD_EVT_EXIT, + DP_USBPD_EVT_ATTENTION, +}; + +enum dp_usbpd_alt_mode { + DP_USBPD_ALT_MODE_NONE = 0, + DP_USBPD_ALT_MODE_INIT = BIT(0), + DP_USBPD_ALT_MODE_DISCOVER = BIT(1), + DP_USBPD_ALT_MODE_ENTER = BIT(2), + DP_USBPD_ALT_MODE_STATUS = BIT(3), + DP_USBPD_ALT_MODE_CONFIGURE = BIT(4), +}; + +struct dp_usbpd_capabilities { + enum dp_usbpd_port port; + bool receptacle_state; + u8 ulink_pin_config; + u8 dlink_pin_config; +}; + +struct dp_usbpd_private { + bool forced_disconnect; + u32 vdo; + struct device *dev; + struct usbpd *pd; + struct usbpd_svid_handler svid_handler; + struct dp_hpd_cb *dp_cb; + struct dp_usbpd_capabilities cap; + struct dp_usbpd dp_usbpd; + enum dp_usbpd_alt_mode alt_mode; + u32 dp_usbpd_config; +}; + +static const char *dp_usbpd_pin_name(u8 pin) +{ + switch (pin) { + case DP_USBPD_PIN_A: return "DP_USBPD_PIN_ASSIGNMENT_A"; + case DP_USBPD_PIN_B: return "DP_USBPD_PIN_ASSIGNMENT_B"; + case DP_USBPD_PIN_C: return "DP_USBPD_PIN_ASSIGNMENT_C"; + case DP_USBPD_PIN_D: return "DP_USBPD_PIN_ASSIGNMENT_D"; + case DP_USBPD_PIN_E: return "DP_USBPD_PIN_ASSIGNMENT_E"; + case DP_USBPD_PIN_F: return "DP_USBPD_PIN_ASSIGNMENT_F"; + default: return "UNKNOWN"; + } +} + +static const char *dp_usbpd_port_name(enum dp_usbpd_port port) +{ + switch (port) { + case DP_USBPD_PORT_NONE: return "DP_USBPD_PORT_NONE"; + case DP_USBPD_PORT_UFP_D: return "DP_USBPD_PORT_UFP_D"; + case DP_USBPD_PORT_DFP_D: return "DP_USBPD_PORT_DFP_D"; + case DP_USBPD_PORT_D_UFP_D: return "DP_USBPD_PORT_D_UFP_D"; + default: return "DP_USBPD_PORT_NONE"; + } +} + +static const char *dp_usbpd_cmd_name(u8 cmd) +{ + switch (cmd) { + case USBPD_SVDM_DISCOVER_MODES: return "USBPD_SVDM_DISCOVER_MODES"; + case USBPD_SVDM_ENTER_MODE: return "USBPD_SVDM_ENTER_MODE"; + case USBPD_SVDM_ATTENTION: return "USBPD_SVDM_ATTENTION"; + case DP_USBPD_VDM_STATUS: return "DP_USBPD_VDM_STATUS"; + case DP_USBPD_VDM_CONFIGURE: return "DP_USBPD_VDM_CONFIGURE"; + default: return "DP_USBPD_VDM_ERROR"; + } +} + +static void dp_usbpd_init_port(enum dp_usbpd_port *port, u32 in_port) +{ + switch (in_port) { + case 0: + *port = DP_USBPD_PORT_NONE; + break; + case 1: + *port = DP_USBPD_PORT_UFP_D; + break; + case 2: + *port = DP_USBPD_PORT_DFP_D; + break; + case 3: + *port = DP_USBPD_PORT_D_UFP_D; + break; + default: + *port = DP_USBPD_PORT_NONE; + } + DP_DEBUG("port:%s\n", dp_usbpd_port_name(*port)); +} + +static void dp_usbpd_get_capabilities(struct dp_usbpd_private *pd) +{ + struct dp_usbpd_capabilities *cap = &pd->cap; + u32 buf = pd->vdo; + int port = buf & 0x3; + + cap->receptacle_state = (buf & BIT(6)) ? 
true : false; + cap->dlink_pin_config = (buf >> 8) & 0xff; + cap->ulink_pin_config = (buf >> 16) & 0xff; + + dp_usbpd_init_port(&cap->port, port); +} + +static void dp_usbpd_get_status(struct dp_usbpd_private *pd) +{ + struct dp_usbpd *status = &pd->dp_usbpd; + u32 buf = pd->vdo; + int port = buf & 0x3; + + status->low_pow_st = (buf & BIT(2)) ? true : false; + status->adaptor_dp_en = (buf & BIT(3)) ? true : false; + status->base.multi_func = (buf & BIT(4)) ? true : false; + status->usb_config_req = (buf & BIT(5)) ? true : false; + status->exit_dp_mode = (buf & BIT(6)) ? true : false; + status->base.hpd_high = (buf & BIT(7)) ? true : false; + status->base.hpd_irq = (buf & BIT(8)) ? true : false; + + DP_DEBUG("low_pow_st = %d, adaptor_dp_en = %d, multi_func = %d\n", + status->low_pow_st, status->adaptor_dp_en, + status->base.multi_func); + DP_DEBUG("usb_config_req = %d, exit_dp_mode = %d, hpd_high =%d\n", + status->usb_config_req, + status->exit_dp_mode, status->base.hpd_high); + DP_DEBUG("hpd_irq = %d\n", status->base.hpd_irq); + + dp_usbpd_init_port(&status->port, port); +} + +static u32 dp_usbpd_gen_config_pkt(struct dp_usbpd_private *pd) +{ + u8 pin_cfg, pin; + u32 config = 0; + const u32 ufp_d_config = 0x2, dp_ver = 0x1; + + if (pd->cap.receptacle_state) + pin_cfg = pd->cap.ulink_pin_config; + else + pin_cfg = pd->cap.dlink_pin_config; + + for (pin = DP_USBPD_PIN_A; pin < DP_USBPD_PIN_MAX; pin++) { + if (pin_cfg & BIT(pin)) { + if (pd->dp_usbpd.base.multi_func) { + if (pin == DP_USBPD_PIN_D) + break; + } else { + break; + } + } + } + + if (pin == DP_USBPD_PIN_MAX) + pin = DP_USBPD_PIN_C; + + DP_DEBUG("pin assignment: %s\n", dp_usbpd_pin_name(pin)); + + config |= BIT(pin) << 8; + + config |= (dp_ver << 2); + config |= ufp_d_config; + + DP_DEBUG("config = 0x%x\n", config); + return config; +} + +static void dp_usbpd_send_event(struct dp_usbpd_private *pd, + enum dp_usbpd_events event) +{ + u32 config; + + switch (event) { + case DP_USBPD_EVT_DISCOVER: + usbpd_send_svdm(pd->pd, USB_C_DP_SID, + USBPD_SVDM_DISCOVER_MODES, + SVDM_CMD_TYPE_INITIATOR, 0x0, 0x0, 0x0); + break; + case DP_USBPD_EVT_ENTER: + usbpd_send_svdm(pd->pd, USB_C_DP_SID, + USBPD_SVDM_ENTER_MODE, + SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0); + break; + case DP_USBPD_EVT_EXIT: + usbpd_send_svdm(pd->pd, USB_C_DP_SID, + USBPD_SVDM_EXIT_MODE, + SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0); + break; + case DP_USBPD_EVT_STATUS: + config = 0x1; /* DFP_D connected */ + usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_STATUS, + SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1); + break; + case DP_USBPD_EVT_CONFIGURE: + config = dp_usbpd_gen_config_pkt(pd); + usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_CONFIGURE, + SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1); + break; + default: + DP_ERR("unknown event:%d\n", event); + } +} + +static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr, + bool peer_usb_comm) +{ + struct dp_usbpd_private *pd; + + pd = container_of(hdlr, struct dp_usbpd_private, svid_handler); + if (!pd) { + DP_ERR("get_usbpd phandle failed\n"); + return; + } + + DP_DEBUG("peer_usb_comm: %d\n", peer_usb_comm); + pd->dp_usbpd.base.peer_usb_comm = peer_usb_comm; + dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER); +} + +static void dp_usbpd_disconnect_cb(struct usbpd_svid_handler *hdlr) +{ + struct dp_usbpd_private *pd; + + pd = container_of(hdlr, struct dp_usbpd_private, svid_handler); + if (!pd) { + DP_ERR("get_usbpd phandle failed\n"); + return; + } + + pd->alt_mode = DP_USBPD_ALT_MODE_NONE; + 
pd->dp_usbpd.base.alt_mode_cfg_done = false; + DP_DEBUG("\n"); + + if (pd->dp_cb && pd->dp_cb->disconnect) + pd->dp_cb->disconnect(pd->dev); +} + +static int dp_usbpd_validate_callback(u8 cmd, + enum usbpd_svdm_cmd_type cmd_type, int num_vdos) +{ + int ret = 0; + + if (cmd_type == SVDM_CMD_TYPE_RESP_NAK) { + DP_ERR("error: NACK\n"); + ret = -EINVAL; + goto end; + } + + if (cmd_type == SVDM_CMD_TYPE_RESP_BUSY) { + DP_ERR("error: BUSY\n"); + ret = -EBUSY; + goto end; + } + + if (cmd == USBPD_SVDM_ATTENTION) { + if (cmd_type != SVDM_CMD_TYPE_INITIATOR) { + DP_ERR("error: invalid cmd type for attention\n"); + ret = -EINVAL; + goto end; + } + + if (!num_vdos) { + DP_ERR("error: no vdo provided\n"); + ret = -EINVAL; + goto end; + } + } else { + if (cmd_type != SVDM_CMD_TYPE_RESP_ACK) { + DP_ERR("error: invalid cmd type\n"); + ret = -EINVAL; + } + } +end: + return ret; +} + + +static int dp_usbpd_get_ss_lanes(struct dp_usbpd_private *pd) +{ + int rc = 0; + int timeout = 250; + + /* + * By default, USB reserves two lanes for Super Speed. + * Which means DP has remaining two lanes to operate on. + * If multi-function is not supported, request USB to + * release the Super Speed lanes so that DP can use + * all four lanes in case DPCD indicates support for + * four lanes. + */ + if (!pd->dp_usbpd.base.multi_func) { + while (timeout) { + rc = pd->svid_handler.request_usb_ss_lane( + pd->pd, &pd->svid_handler); + if (rc != -EBUSY) + break; + + DP_WARN("USB busy, retry\n"); + + /* wait for hw recommended delay for usb */ + msleep(20); + timeout--; + } + } + + return rc; +} + +static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd, + enum usbpd_svdm_cmd_type cmd_type, + const u32 *vdos, int num_vdos) +{ + struct dp_usbpd_private *pd; + int rc = 0; + + pd = container_of(hdlr, struct dp_usbpd_private, svid_handler); + + DP_DEBUG("callback -> cmd: %s, *vdos = 0x%x, num_vdos = %d\n", + dp_usbpd_cmd_name(cmd), *vdos, num_vdos); + + if (dp_usbpd_validate_callback(cmd, cmd_type, num_vdos)) { + DP_DEBUG("invalid callback received\n"); + return; + } + + switch (cmd) { + case USBPD_SVDM_DISCOVER_MODES: + pd->vdo = *vdos; + dp_usbpd_get_capabilities(pd); + + pd->alt_mode |= DP_USBPD_ALT_MODE_DISCOVER; + + if (pd->cap.port & BIT(0)) + dp_usbpd_send_event(pd, DP_USBPD_EVT_ENTER); + break; + case USBPD_SVDM_ENTER_MODE: + pd->alt_mode |= DP_USBPD_ALT_MODE_ENTER; + + dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS); + break; + case USBPD_SVDM_ATTENTION: + if (pd->forced_disconnect) + break; + + pd->vdo = *vdos; + dp_usbpd_get_status(pd); + + if (!pd->dp_usbpd.base.alt_mode_cfg_done) { + if (pd->dp_usbpd.port & BIT(1)) + dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE); + break; + } + + if (pd->dp_cb && pd->dp_cb->attention) + pd->dp_cb->attention(pd->dev); + + break; + case DP_USBPD_VDM_STATUS: + pd->vdo = *vdos; + dp_usbpd_get_status(pd); + + if (!(pd->alt_mode & DP_USBPD_ALT_MODE_CONFIGURE)) { + pd->alt_mode |= DP_USBPD_ALT_MODE_STATUS; + + if (pd->dp_usbpd.port & BIT(1)) + dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE); + } + break; + case DP_USBPD_VDM_CONFIGURE: + pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE; + pd->dp_usbpd.base.alt_mode_cfg_done = true; + pd->forced_disconnect = false; + dp_usbpd_get_status(pd); + + pd->dp_usbpd.base.orientation = + usbpd_get_plug_orientation(pd->pd); + + rc = dp_usbpd_get_ss_lanes(pd); + if (rc) { + DP_ERR("failed to get SuperSpeed lanes\n"); + break; + } + + if (pd->dp_cb && pd->dp_cb->configure) + pd->dp_cb->configure(pd->dev); + break; + default: + DP_ERR("unknown 
cmd: %d\n", cmd); + break; + } +} + +static int dp_usbpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + int rc = 0; + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *pd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + dp_usbpd->base.hpd_high = hpd; + pd->forced_disconnect = !hpd; + pd->dp_usbpd.base.alt_mode_cfg_done = hpd; + + DP_DEBUG("hpd_high=%d, forced_disconnect=%d, orientation=%d\n", + dp_usbpd->base.hpd_high, pd->forced_disconnect, + pd->dp_usbpd.base.orientation); + if (hpd) + pd->dp_cb->configure(pd->dev); + else + pd->dp_cb->disconnect(pd->dev); + +error: + return rc; +} + +static int dp_usbpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + int rc = 0; + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *pd; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + if (!dp_usbpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + pd->vdo = vdo; + dp_usbpd_get_status(pd); + + if (pd->dp_cb && pd->dp_cb->attention) + pd->dp_cb->attention(pd->dev); +error: + return rc; +} + +int dp_usbpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *usbpd; + int rc = 0; + + if (!dp_hpd) + return -EINVAL; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + + usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + rc = usbpd_register_svid(usbpd->pd, &usbpd->svid_handler); + if (rc) + DP_ERR("pd registration failed\n"); + + return rc; +} + +static void dp_usbpd_wakeup_phy(struct dp_hpd *dp_hpd, bool wakeup) +{ + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *usbpd; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + if (!usbpd->pd) { + DP_ERR("usbpd pointer invalid"); + return; + } + + usbpd_vdm_in_suspend(usbpd->pd, wakeup); +} + +struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb) +{ + int rc = 0; + const char *pd_phandle = "qcom,dp-usbpd-detection"; + struct usbpd *pd = NULL; + struct dp_usbpd_private *usbpd; + struct dp_usbpd *dp_usbpd; + struct usbpd_svid_handler svid_handler = { + .svid = USB_C_DP_SID, + .vdm_received = NULL, + .connect = &dp_usbpd_connect_cb, + .svdm_received = &dp_usbpd_response_cb, + .disconnect = &dp_usbpd_disconnect_cb, + }; + + if (!cb) { + DP_ERR("invalid cb data\n"); + rc = -EINVAL; + goto error; + } + + pd = devm_usbpd_get_by_phandle(dev, pd_phandle); + if (IS_ERR(pd)) { + DP_ERR("usbpd phandle failed (%ld)\n", PTR_ERR(pd)); + rc = PTR_ERR(pd); + goto error; + } + + usbpd = devm_kzalloc(dev, sizeof(*usbpd), GFP_KERNEL); + if (!usbpd) { + rc = -ENOMEM; + goto error; + } + + usbpd->dev = dev; + usbpd->pd = pd; + usbpd->svid_handler = svid_handler; + usbpd->dp_cb = cb; + + dp_usbpd = &usbpd->dp_usbpd; + dp_usbpd->base.simulate_connect = dp_usbpd_simulate_connect; + dp_usbpd->base.simulate_attention = dp_usbpd_simulate_attention; + dp_usbpd->base.register_hpd = dp_usbpd_register; + dp_usbpd->base.wakeup_phy = dp_usbpd_wakeup_phy; + + return &dp_usbpd->base; +error: + return ERR_PTR(rc); +} + +void dp_usbpd_put(struct dp_hpd *dp_hpd) +{ + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *usbpd; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + if (!dp_usbpd) + return; + + usbpd = container_of(dp_usbpd, struct 
dp_usbpd_private, dp_usbpd); + + usbpd_unregister_svid(usbpd->pd, &usbpd->svid_handler); + + devm_kfree(usbpd->dev, usbpd); +} diff --git a/techpack/display/msm/dp/dp_usbpd.h b/techpack/display/msm/dp/dp_usbpd.h new file mode 100644 index 0000000000000000000000000000000000000000..899ac4c5960ccf0ef94fd0c23d3f6a9b94ac63aa --- /dev/null +++ b/techpack/display/msm/dp/dp_usbpd.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_USBPD_H_ +#define _DP_USBPD_H_ + +#include +#include "dp_hpd.h" + +struct device; + +/** + * enum dp_usbpd_port - usb/dp port type + * @DP_USBPD_PORT_NONE: port not configured + * @DP_USBPD_PORT_UFP_D: Upstream Facing Port - DisplayPort + * @DP_USBPD_PORT_DFP_D: Downstream Facing Port - DisplayPort + * @DP_USBPD_PORT_D_UFP_D: Both UFP & DFP - DisplayPort + */ + +enum dp_usbpd_port { + DP_USBPD_PORT_NONE, + DP_USBPD_PORT_UFP_D, + DP_USBPD_PORT_DFP_D, + DP_USBPD_PORT_D_UFP_D, +}; + +/** + * struct dp_usbpd - DisplayPort status + * + * @port: port configured + * @low_pow_st: low power state + * @adaptor_dp_en: adaptor functionality enabled + * @usb_config_req: request to switch to usb + * @exit_dp_mode: request exit from displayport mode + * @debug_en: bool to specify debug mode + */ +struct dp_usbpd { + struct dp_hpd base; + enum dp_usbpd_port port; + bool low_pow_st; + bool adaptor_dp_en; + bool usb_config_req; + bool exit_dp_mode; + bool debug_en; +}; + +/** + * dp_usbpd_get() - setup usbpd module + * + * @dev: device instance of the caller + * @cb: struct containing callback function pointers. + * + * This function allows the client to initialize the usbpd + * module. The module will communicate with usb driver and + * handles the power delivery (PD) communication with the + * sink/usb device. This module will notify the client using + * the callback functions about the connection and status. + */ +struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb); + +void dp_usbpd_put(struct dp_hpd *pd); +#endif /* _DP_USBPD_H_ */ diff --git a/techpack/display/msm/dsi/dsi_catalog.c b/techpack/display/msm/dsi/dsi_catalog.c new file mode 100644 index 0000000000000000000000000000000000000000..19fb900c147532583708774fcc595b0888a51bb1 --- /dev/null +++ b/techpack/display/msm/dsi/dsi_catalog.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include + +#include "dsi_catalog.h" + +/** + * dsi_catalog_cmn_init() - catalog init for dsi controller v1.4 + */ +static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version) +{ + /* common functions */ + ctrl->ops.host_setup = dsi_ctrl_hw_cmn_host_setup; + ctrl->ops.video_engine_en = dsi_ctrl_hw_cmn_video_engine_en; + ctrl->ops.video_engine_setup = dsi_ctrl_hw_cmn_video_engine_setup; + ctrl->ops.set_video_timing = dsi_ctrl_hw_cmn_set_video_timing; + ctrl->ops.set_timing_db = dsi_ctrl_hw_cmn_set_timing_db; + ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_cmn_cmd_engine_setup; + ctrl->ops.setup_cmd_stream = dsi_ctrl_hw_cmn_setup_cmd_stream; + ctrl->ops.ctrl_en = dsi_ctrl_hw_cmn_ctrl_en; + ctrl->ops.cmd_engine_en = dsi_ctrl_hw_cmn_cmd_engine_en; + ctrl->ops.phy_sw_reset = dsi_ctrl_hw_cmn_phy_sw_reset; + ctrl->ops.soft_reset = dsi_ctrl_hw_cmn_soft_reset; + ctrl->ops.kickoff_command = dsi_ctrl_hw_cmn_kickoff_command; + ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_cmn_kickoff_fifo_command; + ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_cmn_reset_cmd_fifo; + ctrl->ops.trigger_command_dma = dsi_ctrl_hw_cmn_trigger_command_dma; + ctrl->ops.get_interrupt_status = dsi_ctrl_hw_cmn_get_interrupt_status; + ctrl->ops.get_error_status = dsi_ctrl_hw_cmn_get_error_status; + ctrl->ops.clear_error_status = dsi_ctrl_hw_cmn_clear_error_status; + ctrl->ops.clear_interrupt_status = + dsi_ctrl_hw_cmn_clear_interrupt_status; + ctrl->ops.enable_status_interrupts = + dsi_ctrl_hw_cmn_enable_status_interrupts; + ctrl->ops.enable_error_interrupts = + dsi_ctrl_hw_cmn_enable_error_interrupts; + ctrl->ops.video_test_pattern_setup = + dsi_ctrl_hw_cmn_video_test_pattern_setup; + ctrl->ops.cmd_test_pattern_setup = + dsi_ctrl_hw_cmn_cmd_test_pattern_setup; + ctrl->ops.test_pattern_enable = dsi_ctrl_hw_cmn_test_pattern_enable; + ctrl->ops.trigger_cmd_test_pattern = + dsi_ctrl_hw_cmn_trigger_cmd_test_pattern; + ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err; + ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config; + ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr; + ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr; + ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus; + ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data; + ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg; + ctrl->ops.ctrl_reset = dsi_ctrl_hw_cmn_ctrl_reset; + ctrl->ops.mask_error_intr = dsi_ctrl_hw_cmn_mask_error_intr; + ctrl->ops.error_intr_ctrl = dsi_ctrl_hw_cmn_error_intr_ctrl; + ctrl->ops.get_error_mask = dsi_ctrl_hw_cmn_get_error_mask; + ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version; + ctrl->ops.wait_for_cmd_mode_mdp_idle = + dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle; + ctrl->ops.setup_avr = dsi_ctrl_hw_cmn_setup_avr; + ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk; + ctrl->ops.wait4dynamic_refresh_done = + dsi_ctrl_hw_cmn_wait4dynamic_refresh_done; + ctrl->ops.hs_req_sel = dsi_ctrl_hw_cmn_hs_req_sel; + + switch (version) { + case DSI_CTRL_VERSION_1_4: + ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map; + ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request; + ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit; + ctrl->ops.wait_for_lane_idle = + dsi_ctrl_hw_14_wait_for_lane_idle; + ctrl->ops.ulps_ops.get_lanes_in_ulps = + dsi_ctrl_hw_cmn_get_lanes_in_ulps; + ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable; + ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable; + ctrl->ops.reg_dump_to_buffer = + 
dsi_ctrl_hw_14_reg_dump_to_buffer; + ctrl->ops.schedule_dma_cmd = NULL; + ctrl->ops.kickoff_command_non_embedded_mode = NULL; + ctrl->ops.config_clk_gating = NULL; + ctrl->ops.map_mdp_regs = NULL; + ctrl->ops.log_line_count = NULL; + break; + case DSI_CTRL_VERSION_2_0: + ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map; + ctrl->ops.wait_for_lane_idle = + dsi_ctrl_hw_20_wait_for_lane_idle; + ctrl->ops.reg_dump_to_buffer = + dsi_ctrl_hw_20_reg_dump_to_buffer; + ctrl->ops.ulps_ops.ulps_request = NULL; + ctrl->ops.ulps_ops.ulps_exit = NULL; + ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL; + ctrl->ops.clamp_enable = NULL; + ctrl->ops.clamp_disable = NULL; + ctrl->ops.schedule_dma_cmd = NULL; + ctrl->ops.kickoff_command_non_embedded_mode = NULL; + ctrl->ops.config_clk_gating = NULL; + ctrl->ops.map_mdp_regs = NULL; + ctrl->ops.log_line_count = NULL; + break; + case DSI_CTRL_VERSION_2_2: + case DSI_CTRL_VERSION_2_3: + case DSI_CTRL_VERSION_2_4: + ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config; + ctrl->ops.config_clk_gating = dsi_ctrl_hw_22_config_clk_gating; + ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map; + ctrl->ops.wait_for_lane_idle = + dsi_ctrl_hw_20_wait_for_lane_idle; + ctrl->ops.reg_dump_to_buffer = + dsi_ctrl_hw_20_reg_dump_to_buffer; + ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request; + ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit; + ctrl->ops.ulps_ops.get_lanes_in_ulps = + dsi_ctrl_hw_cmn_get_lanes_in_ulps; + ctrl->ops.clamp_enable = NULL; + ctrl->ops.clamp_disable = NULL; + ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd; + ctrl->ops.kickoff_command_non_embedded_mode = + dsi_ctrl_hw_kickoff_non_embedded_mode; + ctrl->ops.map_mdp_regs = dsi_ctrl_hw_22_map_mdp_regs; + ctrl->ops.log_line_count = dsi_ctrl_hw_22_log_line_count; + break; + default: + break; + } +} + +/** + * dsi_catalog_ctrl_setup() - return catalog info for dsi controller + * @ctrl: Pointer to DSI controller hw object. + * @version: DSI controller version. + * @index: DSI controller instance ID. + * @phy_isolation_enabled: DSI controller works isolated from phy. + * @null_insertion_enabled: DSI controller inserts null packet. + * + * This function setups the catalog information in the dsi_ctrl_hw object. + * + * return: error code for failure and 0 for success. 
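+ *
+ * Illustrative call (a hedged sketch, not taken from the original sources;
+ * the version, index and flag values are assumptions for the example):
+ *
+ *        rc = dsi_catalog_ctrl_setup(ctrl, DSI_CTRL_VERSION_2_4, 0, true, false);
+ *        if (rc)
+ *                DSI_ERR("ctrl catalog setup failed, rc=%d\n", rc);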
+ */ +int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version, u32 index, + bool phy_isolation_enabled, bool null_insertion_enabled) +{ + int rc = 0; + + if (version == DSI_CTRL_VERSION_UNKNOWN || + version >= DSI_CTRL_VERSION_MAX) { + DSI_ERR("Unsupported version: %d\n", version); + return -ENOTSUPP; + } + + ctrl->index = index; + ctrl->null_insertion_enabled = null_insertion_enabled; + set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map); + set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map); + set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map); + set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map); + set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map); + set_bit(DSI_CTRL_DPHY, ctrl->feature_map); + + switch (version) { + case DSI_CTRL_VERSION_1_4: + dsi_catalog_cmn_init(ctrl, version); + break; + case DSI_CTRL_VERSION_2_0: + case DSI_CTRL_VERSION_2_2: + case DSI_CTRL_VERSION_2_3: + case DSI_CTRL_VERSION_2_4: + ctrl->phy_isolation_enabled = phy_isolation_enabled; + dsi_catalog_cmn_init(ctrl, version); + break; + default: + return -ENOTSUPP; + } + + return rc; +} + +/** + * dsi_catalog_phy_2_0_init() - catalog init for DSI PHY 14nm + */ +static void dsi_catalog_phy_2_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = dsi_phy_hw_v2_0_regulator_enable; + phy->ops.regulator_disable = dsi_phy_hw_v2_0_regulator_disable; + phy->ops.enable = dsi_phy_hw_v2_0_enable; + phy->ops.disable = dsi_phy_hw_v2_0_disable; + phy->ops.calculate_timing_params = + dsi_phy_hw_calculate_timing_params; + phy->ops.phy_idle_on = dsi_phy_hw_v2_0_idle_on; + phy->ops.phy_idle_off = dsi_phy_hw_v2_0_idle_off; + phy->ops.calculate_timing_params = + dsi_phy_hw_calculate_timing_params; + phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v2_0; + phy->ops.clamp_ctrl = dsi_phy_hw_v2_0_clamp_ctrl; + phy->ops.dyn_refresh_ops.dyn_refresh_config = + dsi_phy_hw_v2_0_dyn_refresh_config; + phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = + dsi_phy_hw_v2_0_dyn_refresh_pipe_delay; + phy->ops.dyn_refresh_ops.dyn_refresh_helper = + dsi_phy_hw_v2_0_dyn_refresh_helper; + phy->ops.dyn_refresh_ops.cache_phy_timings = + dsi_phy_hw_v2_0_cache_phy_timings; +} + +/** + * dsi_catalog_phy_3_0_init() - catalog init for DSI PHY 10nm + */ +static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = dsi_phy_hw_v3_0_regulator_enable; + phy->ops.regulator_disable = dsi_phy_hw_v3_0_regulator_disable; + phy->ops.enable = dsi_phy_hw_v3_0_enable; + phy->ops.disable = dsi_phy_hw_v3_0_disable; + phy->ops.calculate_timing_params = + dsi_phy_hw_calculate_timing_params; + phy->ops.ulps_ops.wait_for_lane_idle = + dsi_phy_hw_v3_0_wait_for_lane_idle; + phy->ops.ulps_ops.ulps_request = + dsi_phy_hw_v3_0_ulps_request; + phy->ops.ulps_ops.ulps_exit = + dsi_phy_hw_v3_0_ulps_exit; + phy->ops.ulps_ops.get_lanes_in_ulps = + dsi_phy_hw_v3_0_get_lanes_in_ulps; + phy->ops.ulps_ops.is_lanes_in_ulps = + dsi_phy_hw_v3_0_is_lanes_in_ulps; + phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0; + phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl; + phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset; + phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo; + phy->ops.dyn_refresh_ops.dyn_refresh_config = + dsi_phy_hw_v3_0_dyn_refresh_config; + phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = + dsi_phy_hw_v3_0_dyn_refresh_pipe_delay; + phy->ops.dyn_refresh_ops.dyn_refresh_helper = + dsi_phy_hw_v3_0_dyn_refresh_helper; + phy->ops.dyn_refresh_ops.cache_phy_timings = + dsi_phy_hw_v3_0_cache_phy_timings; 
+} + +/** + * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY 7nm + */ +static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = NULL; + phy->ops.regulator_disable = NULL; + phy->ops.enable = dsi_phy_hw_v4_0_enable; + phy->ops.disable = dsi_phy_hw_v4_0_disable; + phy->ops.calculate_timing_params = + dsi_phy_hw_calculate_timing_params; + phy->ops.ulps_ops.wait_for_lane_idle = + dsi_phy_hw_v4_0_wait_for_lane_idle; + phy->ops.ulps_ops.ulps_request = + dsi_phy_hw_v4_0_ulps_request; + phy->ops.ulps_ops.ulps_exit = + dsi_phy_hw_v4_0_ulps_exit; + phy->ops.ulps_ops.get_lanes_in_ulps = + dsi_phy_hw_v4_0_get_lanes_in_ulps; + phy->ops.ulps_ops.is_lanes_in_ulps = + dsi_phy_hw_v4_0_is_lanes_in_ulps; + phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v4_0; + phy->ops.phy_lane_reset = dsi_phy_hw_v4_0_lane_reset; + phy->ops.toggle_resync_fifo = dsi_phy_hw_v4_0_toggle_resync_fifo; + phy->ops.reset_clk_en_sel = dsi_phy_hw_v4_0_reset_clk_en_sel; + + phy->ops.dyn_refresh_ops.dyn_refresh_config = + dsi_phy_hw_v4_0_dyn_refresh_config; + phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = + dsi_phy_hw_v4_0_dyn_refresh_pipe_delay; + phy->ops.dyn_refresh_ops.dyn_refresh_helper = + dsi_phy_hw_v4_0_dyn_refresh_helper; + phy->ops.dyn_refresh_ops.cache_phy_timings = + dsi_phy_hw_v4_0_cache_phy_timings; + phy->ops.set_continuous_clk = dsi_phy_hw_v4_0_set_continuous_clk; + phy->ops.commit_phy_timing = dsi_phy_hw_v4_0_commit_phy_timing; +} + +/** + * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware + * @ctrl: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * @index: DSI PHY instance ID. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_phy_setup(struct dsi_phy_hw *phy, + enum dsi_phy_version version, + u32 index) +{ + int rc = 0; + + if (version == DSI_PHY_VERSION_UNKNOWN || + version >= DSI_PHY_VERSION_MAX) { + DSI_ERR("Unsupported version: %d\n", version); + return -ENOTSUPP; + } + + phy->index = index; + phy->version = version; + set_bit(DSI_PHY_DPHY, phy->feature_map); + + dsi_phy_timing_calc_init(phy, version); + + switch (version) { + case DSI_PHY_VERSION_2_0: + dsi_catalog_phy_2_0_init(phy); + break; + case DSI_PHY_VERSION_3_0: + dsi_catalog_phy_3_0_init(phy); + break; + case DSI_PHY_VERSION_4_0: + case DSI_PHY_VERSION_4_1: + dsi_catalog_phy_4_0_init(phy); + break; + case DSI_PHY_VERSION_0_0_HPM: + case DSI_PHY_VERSION_0_0_LPM: + case DSI_PHY_VERSION_1_0: + default: + return -ENOTSUPP; + } + + return rc; +} diff --git a/techpack/display/msm/dsi/dsi_catalog.h b/techpack/display/msm/dsi/dsi_catalog.h new file mode 100644 index 0000000000000000000000000000000000000000..3118cb573301120427e7928074b9713a7866327d --- /dev/null +++ b/techpack/display/msm/dsi/dsi_catalog.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DSI_CATALOG_H_ +#define _DSI_CATALOG_H_ + +#include "dsi_ctrl_hw.h" +#include "dsi_phy_hw.h" + +/** + * dsi_catalog_ctrl_setup() - return catalog info for dsi controller + * @ctrl: Pointer to DSI controller hw object. + * @version: DSI controller version. + * @index: DSI controller instance ID. + * @phy_isolation_enabled: DSI controller works isolated from phy. + * @null_insertion_enabled: DSI controller inserts null packet. + * + * This function setups the catalog information in the dsi_ctrl_hw object. 
+ * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version, u32 index, + bool phy_isolation_enabled, bool null_insertion_enabled); + +/** + * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware + * @phy: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * @index: DSI PHY instance ID. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_phy_setup(struct dsi_phy_hw *phy, + enum dsi_phy_version version, + u32 index); + +/** + * dsi_phy_timing_calc_init() - initialize info for DSI PHY timing calculations + * @phy: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy, + enum dsi_phy_version version); + +/** + * dsi_phy_hw_calculate_timing_params() - DSI PHY timing parameter calculations + * @phy: Pointer to DSI PHY hw object. + * @mode: DSI mode information. + * @host: DSI host configuration. + * @timing: DSI phy lane configurations. + * @use_mode_bit_clk: Boolean to indicate whether to recalculate bit clk. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy, + struct dsi_mode_info *mode, + struct dsi_host_common_cfg *host, + struct dsi_phy_per_lane_cfgs *timing, + bool use_mode_bit_clk); + +/* Definitions for 14nm PHY hardware driver */ +void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *cfg); +void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy); +void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy); +int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg, + u32 *timing_val, u32 size); +void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable); +void dsi_phy_hw_v2_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v2_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v2_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct dsi_dyn_clk_delay *delay); +int dsi_phy_hw_v2_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 size); + +/* Definitions for 10nm PHY hardware driver */ +void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *cfg); +void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy); +void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +int dsi_phy_hw_v3_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes); +void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy); +bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes); +int 
dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg, + u32 *timing_val, u32 size); +void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable); +int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy); +void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy); + +/* Definitions for 7nm PHY hardware driver */ +void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +int dsi_phy_hw_v4_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes); +void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy); +bool dsi_phy_hw_v4_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes); +int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg, + u32 *timing_val, u32 size); +int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable); +void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *timing); + +/* DSI controller common ops */ +u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, + u32 size); +void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints); +void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, + u32 ints); + +u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors); +void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl, + u64 errors); + +void dsi_ctrl_hw_cmn_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val); +void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val, + u32 stream_id); +void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable); +void dsi_ctrl_hw_cmn_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl, + u32 stream_id); + +void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *config); +void dsi_ctrl_hw_cmn_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on); +void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_video_engine_cfg *cfg); + +void dsi_ctrl_hw_cmn_setup_avr(struct dsi_ctrl_hw *ctrl, bool enable); + +void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode); +void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl, + bool enable); +void dsi_ctrl_hw_cmn_cmd_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_cmd_engine_cfg *cfg); + +void dsi_ctrl_hw_cmn_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on); +void dsi_ctrl_hw_cmn_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on); + +void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode, + u32 h_stride, + u32 vc_id, + struct dsi_rect *roi); +void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl); + +void 
dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl, + enum dsi_op_mode panel_mode, + bool enable, u32 frame_count); +u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl, + enum dsi_op_mode panel_mode); + +void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags); + +void dsi_ctrl_hw_cmn_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_fifo_info *cmd, + u32 flags); +void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl, + bool enable); +void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl, + bool enable); +u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl, + u8 *rd_buf, + u32 read_offset, + u32 rx_byte, + u32 pkt_size, u32 *hw_read_cnt); +void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_on); +int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl, + int mask); +void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, + bool en); +void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en); +u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl); +u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl); +int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl); + +/* Definitions specific to 1.4 DSI controller hardware */ +int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes); +void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_map *lane_map); +void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes); +void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes); +u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl); + +void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool enable_ulps); + +void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool disable_ulps); +ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl, + char *buf, + u32 size); + +/* Definitions specific to 2.0 DSI controller hardware */ +void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_map *lane_map); +int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes); +ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl, + char *buf, + u32 size); +void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags); + +/* Definitions specific to 2.2 DSI controller hardware */ +void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable, + enum dsi_clk_gate_type clk_selection); + +void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable); +void dsi_ctrl_hw_cmn_hs_req_sel(struct dsi_ctrl_hw *ctrl, bool sel_phy); + +/* dynamic refresh specific functions */ +void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct dsi_dyn_clk_delay *delay); + +int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl); +int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 
size); + +void dsi_phy_hw_v4_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v4_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct dsi_dyn_clk_delay *delay); + +int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 size); + +int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev, + struct dsi_ctrl_hw *ctrl); + +u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode); +#endif /* _DSI_CATALOG_H_ */ diff --git a/techpack/display/msm/dsi/dsi_clk.h b/techpack/display/msm/dsi/dsi_clk.h new file mode 100644 index 0000000000000000000000000000000000000000..fcd352d19b7b9fe9cfad26cf828d32634684706b --- /dev/null +++ b/techpack/display/msm/dsi/dsi_clk.h @@ -0,0 +1,328 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DSI_CLK_H_ +#define _DSI_CLK_H_ + +#include +#include +#include +#include +#include + +#define MAX_STRING_LEN 32 +#define MAX_DSI_CTRL 2 + +enum dsi_clk_state { + DSI_CLK_OFF, + DSI_CLK_ON, + DSI_CLK_EARLY_GATE, +}; + +enum clk_req_client { + DSI_CLK_REQ_MDP_CLIENT = 0, + DSI_CLK_REQ_DSI_CLIENT, +}; + +enum dsi_link_clk_type { + DSI_LINK_ESC_CLK, + DSI_LINK_BYTE_CLK, + DSI_LINK_PIX_CLK, + DSI_LINK_BYTE_INTF_CLK, + DSI_LINK_CLK_MAX, +}; + +enum dsi_link_clk_op_type { + DSI_LINK_CLK_SET_RATE = BIT(0), + DSI_LINK_CLK_PREPARE = BIT(1), + DSI_LINK_CLK_ENABLE = BIT(2), + DSI_LINK_CLK_START = BIT(0) | BIT(1) | BIT(2), +}; + +enum dsi_clk_type { + DSI_CORE_CLK = BIT(0), + DSI_LINK_CLK = BIT(1), + DSI_ALL_CLKS = (BIT(0) | BIT(1)), + DSI_CLKS_MAX = BIT(2), +}; + +enum dsi_lclk_type { + DSI_LINK_NONE = 0, + DSI_LINK_LP_CLK = BIT(0), + DSI_LINK_HS_CLK = BIT(1), +}; + +struct dsi_clk_ctrl_info { + enum dsi_clk_type clk_type; + enum dsi_clk_state clk_state; + enum clk_req_client client; +}; + +struct clk_ctrl_cb { + void *priv; + int (*dsi_clk_cb)(void *priv, struct dsi_clk_ctrl_info clk_ctrl_info); +}; + +/** + * struct dsi_core_clk_info - Core clock information for DSI hardware + * @mdp_core_clk: Handle to MDP core clock. + * @iface_clk: Handle to MDP interface clock. + * @core_mmss_clk: Handle to MMSS core clock. + * @bus_clk: Handle to bus clock. + * @mnoc_clk: Handle to MMSS NOC clock. + * @drm: Pointer to drm device node + */ +struct dsi_core_clk_info { + struct clk *mdp_core_clk; + struct clk *iface_clk; + struct clk *core_mmss_clk; + struct clk *bus_clk; + struct clk *mnoc_clk; + struct drm_device *drm; +}; + +/** + * struct dsi_link_hs_clk_info - Set of high speed link clocks for DSI HW + * @byte_clk: Handle to DSI byte_clk. + * @pixel_clk: Handle to DSI pixel_clk. + * @byte_intf_clk: Handle to DSI byte intf. clock. + */ +struct dsi_link_hs_clk_info { + struct clk *byte_clk; + struct clk *pixel_clk; + struct clk *byte_intf_clk; +}; + +/** + * struct dsi_link_lp_clk_info - Set of low power link clocks for DSI HW. + * @esc_clk: Handle to DSI escape clock. + */ +struct dsi_link_lp_clk_info { + struct clk *esc_clk; +}; + +/** + * struct link_clk_freq - Clock frequency information for Link clocks + * @byte_clk_rate: Frequency of DSI byte_clk in Hz. + * @byte_intf_clk_rate: Frequency of DSI byte_intf_clk in Hz. + * @pixel_clk_rate: Frequency of DSI pixel_clk in Hz. + * @esc_clk_rate: Frequency of DSI escape clock in Hz. 
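+ *
+ * These per-controller rates are cached by dsi_clk_set_link_frequencies()
+ * and applied with clk_set_rate() by the clock manager when the link
+ * clocks are started.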
+ */ +struct link_clk_freq { + u32 byte_clk_rate; + u32 byte_intf_clk_rate; + u32 pix_clk_rate; + u32 esc_clk_rate; +}; + +/** + * typedef *pre_clockoff_cb() - Callback before clock is turned off + * @priv: private data pointer. + * @clk_type: clock which is being turned off. + * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks. + * @new_state: next state for the clock. + * + * @return: error code. + */ +typedef int (*pre_clockoff_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state new_state); + +/** + * typedef *post_clockoff_cb() - Callback after clock is turned off + * @priv: private data pointer. + * @clk_type: clock which was turned off. + * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks. + * @curr_state: current state for the clock. + * + * @return: error code. + */ +typedef int (*post_clockoff_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state curr_state); + +/** + * typedef *post_clockon_cb() - Callback after clock is turned on + * @priv: private data pointer. + * @clk_type: clock which was turned on. + * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks. + * @curr_state: current state for the clock. + * + * @return: error code. + */ +typedef int (*post_clockon_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state curr_state); + +/** + * typedef *pre_clockon_cb() - Callback before clock is turned on + * @priv: private data pointer. + * @clk_type: clock which is being turned on. + * @l_type: specifies if the clock is HS or LP type.Valid only for link clocks. + * @new_state: next state for the clock. + * + * @return: error code. + */ +typedef int (*pre_clockon_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state new_state); + + +/** + * struct dsi_clk_info - clock information for DSI hardware. + * @name: client name. + * @c_clks[MAX_DSI_CTRL] array of core clock configurations + * @l_lp_clks[MAX_DSI_CTRL] array of low power(esc) clock configurations + * @l_hs_clks[MAX_DSI_CTRL] array of high speed clock configurations + * @bus_handle[MAX_DSI_CTRL] array of bus handles + * @ctrl_index[MAX_DSI_CTRL] array of DSI controller indexes mapped + * to core and link clock configurations + * @pre_clkoff_cb callback before clock is turned off + * @post_clkoff_cb callback after clock is turned off + * @post_clkon_cb callback after clock is turned on + * @pre_clkon_cb callback before clock is turned on + * @priv_data pointer to private data + * @master_ndx master DSI controller index + * @dsi_ctrl_count number of DSI controllers + */ +struct dsi_clk_info { + char name[MAX_STRING_LEN]; + struct dsi_core_clk_info c_clks[MAX_DSI_CTRL]; + struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL]; + struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL]; + u32 bus_handle[MAX_DSI_CTRL]; + u32 ctrl_index[MAX_DSI_CTRL]; + pre_clockoff_cb pre_clkoff_cb; + post_clockoff_cb post_clkoff_cb; + post_clockon_cb post_clkon_cb; + pre_clockon_cb pre_clkon_cb; + void *priv_data; + u32 master_ndx; + u32 dsi_ctrl_count; +}; + +/** + * struct dsi_clk_link_set - Pair of clock handles to describe link clocks + * @byte_clk: Handle to DSi byte_clk. + * @pixel_clk: Handle to DSI pixel_clk. 
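+ *
+ * Used by dsi_clk_update_parent() when switching byte/pixel clock parents,
+ * and by dsi_clk_prepare_enable()/dsi_clk_disable_unprepare() to control
+ * the source clocks.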
+ */ +struct dsi_clk_link_set { + struct clk *byte_clk; + struct clk *pixel_clk; +}; + +/** + * dsi_display_clk_mngr_update_splash_status() - Update splash stattus + * @clk_mngr: Structure containing DSI clock information + * @status: Splash status + */ +void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status); + +/** + * dsi_display_clk_mgr_register() - Register DSI clock manager + * @info: Structure containing DSI clock information + */ +void *dsi_display_clk_mngr_register(struct dsi_clk_info *info); + +/** + * dsi_display_clk_mngr_deregister() - Deregister DSI clock manager + * @clk_mngr: DSI clock manager pointer + */ +int dsi_display_clk_mngr_deregister(void *clk_mngr); + +/** + * dsi_register_clk_handle() - Register clock handle with DSI clock manager + * @clk_mngr: DSI clock manager pointer + * @client: DSI clock client pointer. + */ +void *dsi_register_clk_handle(void *clk_mngr, char *client); + +/** + * dsi_deregister_clk_handle() - Deregister clock handle from DSI clock manager + * @client: DSI clock client pointer. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_deregister_clk_handle(void *client); + +/** + * dsi_display_link_clk_force_update_ctrl() - force to set link clks + * @handle: Handle of desired DSI clock client. + * + * return: error code in case of failure or 0 for success. + */ + +int dsi_display_link_clk_force_update_ctrl(void *handle); + +/** + * dsi_display_clk_ctrl() - set frequencies for link clks + * @handle: Handle of desired DSI clock client. + * @clk_type: Clock which is being controlled. + * @clk_state: Desired state of clock + * + * return: error code in case of failure or 0 for success. + */ +int dsi_display_clk_ctrl(void *handle, + enum dsi_clk_type clk_type, enum dsi_clk_state clk_state); + +/** + * dsi_clk_set_link_frequencies() - set frequencies for link clks + * @client: DSI clock client pointer. + * @freq: Structure containing link clock frequencies. + * @index: Index of the DSI controller. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq, + u32 index); + + +/** + * dsi_clk_set_pixel_clk_rate() - set frequency for pixel_clk + * @client: DSI clock client pointer. + * @pixel_clk: Pixel_clk rate in Hz. + * @index: Index of the DSI controller. + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index); + + +/** + * dsi_clk_set_byte_clk_rate() - set frequency for byte clock + * @client: DSI clock client pointer. + * @byte_clk: Pixel clock rate in Hz. + * @byte_intf_clk: Byte interface clock rate in Hz. + * @index: Index of the DSI controller. + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, + u64 byte_intf_clk, u32 index); + +/** + * dsi_clk_update_parent() - update parent clocks for specified clock + * @parent: link clock pair which are set as parent. + * @child: link clock pair whose parent has to be set. + */ +int dsi_clk_update_parent(struct dsi_clk_link_set *parent, + struct dsi_clk_link_set *child); + +/** + * dsi_clk_prepare_enable() - prepare and enable dsi src clocks + * @clk: list of src clocks. + * + * @return: Zero on success and err no on failure + */ +int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk); + +/** + * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks + * @clk: list of src clocks. 
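+ *
+ * Counterpart of dsi_clk_prepare_enable(). A typical caller pairs the two,
+ * e.g. (illustrative sketch only; src_clks is a hypothetical
+ * struct dsi_clk_link_set):
+ *
+ *        rc = dsi_clk_prepare_enable(&src_clks);
+ *        ...
+ *        dsi_clk_disable_unprepare(&src_clks);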
+ */ +void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk); +#endif /* _DSI_CLK_H_ */ diff --git a/techpack/display/msm/dsi/dsi_clk_manager.c b/techpack/display/msm/dsi/dsi_clk_manager.c new file mode 100644 index 0000000000000000000000000000000000000000..ec2df96605a93ebacbd2f376b0cdf309add195c1 --- /dev/null +++ b/techpack/display/msm/dsi/dsi_clk_manager.c @@ -0,0 +1,1481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "dsi_clk.h" +#include "dsi_defs.h" + +struct dsi_core_clks { + struct dsi_core_clk_info clks; + u32 bus_handle; +}; + +struct dsi_link_clks { + struct dsi_link_hs_clk_info hs_clks; + struct dsi_link_lp_clk_info lp_clks; + struct link_clk_freq freq; +}; + +struct dsi_clk_mngr { + char name[MAX_STRING_LEN]; + struct mutex clk_mutex; + struct list_head client_list; + + u32 dsi_ctrl_count; + u32 master_ndx; + struct dsi_core_clks core_clks[MAX_DSI_CTRL]; + struct dsi_link_clks link_clks[MAX_DSI_CTRL]; + u32 ctrl_index[MAX_DSI_CTRL]; + u32 core_clk_state; + u32 link_clk_state; + + pre_clockoff_cb pre_clkoff_cb; + post_clockoff_cb post_clkoff_cb; + post_clockon_cb post_clkon_cb; + pre_clockon_cb pre_clkon_cb; + + bool is_cont_splash_enabled; + void *priv_data; +}; + +struct dsi_clk_client_info { + char name[MAX_STRING_LEN]; + u32 core_refcount; + u32 link_refcount; + u32 core_clk_state; + u32 link_clk_state; + struct list_head list; + struct dsi_clk_mngr *mngr; +}; + +static int _get_clk_mngr_index(struct dsi_clk_mngr *mngr, + u32 dsi_ctrl_index, + u32 *clk_mngr_index) +{ + int i; + + for (i = 0; i < mngr->dsi_ctrl_count; i++) { + if (mngr->ctrl_index[i] == dsi_ctrl_index) { + *clk_mngr_index = i; + return 0; + } + } + + return -EINVAL; +} + +/** + * dsi_clk_set_link_frequencies() - set frequencies for link clks + * @clks: Link clock information + * @pixel_clk: pixel clock frequency in KHz. + * @byte_clk: Byte clock frequency in KHz. + * @esc_clk: Escape clock frequency in KHz. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq, + u32 index) +{ + int rc = 0, clk_mngr_index = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + if (!client) { + DSI_ERR("invalid params\n"); + return -EINVAL; + } + + mngr = c->mngr; + rc = _get_clk_mngr_index(mngr, index, &clk_mngr_index); + if (rc) { + DSI_ERR("failed to map control index %d\n", index); + return -EINVAL; + } + + memcpy(&mngr->link_clks[clk_mngr_index].freq, &freq, + sizeof(struct link_clk_freq)); + + return rc; +} + +/** + * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock + * @clks: DSI link clock information. + * @pixel_clk: Pixel clock rate in KHz. + * @index: Index of the DSI controller. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + mngr = c->mngr; + rc = clk_set_rate(mngr->link_clks[index].hs_clks.pixel_clk, pixel_clk); + if (rc) + DSI_ERR("failed to set clk rate for pixel clk, rc=%d\n", rc); + else + mngr->link_clks[index].freq.pix_clk_rate = pixel_clk; + + return rc; +} + +/** + * dsi_clk_set_byte_clk_rate() - set frequency for byte clock + * @client: DSI clock client pointer. + * @byte_clk: Byte clock rate in Hz. + * @byte_intf_clk: Byte interface clock rate in Hz. 
+ * @index: Index of the DSI controller. + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, + u64 byte_intf_clk, u32 index) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + mngr = c->mngr; + rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk); + if (rc) + DSI_ERR("failed to set clk rate for byte clk, rc=%d\n", rc); + else + mngr->link_clks[index].freq.byte_clk_rate = byte_clk; + + if (mngr->link_clks[index].hs_clks.byte_intf_clk) { + rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_intf_clk, + byte_intf_clk); + if (rc) + DSI_ERR("failed to set clk rate for byte intf clk=%d\n", + rc); + else + mngr->link_clks[index].freq.byte_intf_clk_rate = + byte_intf_clk; + } + + return rc; +} + +/** + * dsi_clk_update_parent() - update parent clocks for specified clock + * @parent: link clock pair which are set as parent. + * @child: link clock pair whose parent has to be set. + */ +int dsi_clk_update_parent(struct dsi_clk_link_set *parent, + struct dsi_clk_link_set *child) +{ + int rc = 0; + + rc = clk_set_parent(child->byte_clk, parent->byte_clk); + if (rc) { + DSI_ERR("failed to set byte clk parent\n"); + goto error; + } + + rc = clk_set_parent(child->pixel_clk, parent->pixel_clk); + if (rc) { + DSI_ERR("failed to set pixel clk parent\n"); + goto error; + } +error: + return rc; +} + +/** + * dsi_clk_prepare_enable() - prepare and enable dsi src clocks + * @clk: list of src clocks. + * + * @return: Zero on success and err no on failure. + */ +int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk) +{ + int rc; + + rc = clk_prepare_enable(clk->byte_clk); + if (rc) { + DSI_ERR("failed to enable byte src clk %d\n", rc); + return rc; + } + + rc = clk_prepare_enable(clk->pixel_clk); + if (rc) { + DSI_ERR("failed to enable pixel src clk %d\n", rc); + return rc; + } + + return 0; +} + +/** + * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks + * @clk: list of src clocks. 
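+ *
+ * Releases the clocks in the reverse order of dsi_clk_prepare_enable():
+ * pixel_clk is disabled and unprepared first, followed by byte_clk.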
+ */ +void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk) +{ + clk_disable_unprepare(clk->pixel_clk); + clk_disable_unprepare(clk->byte_clk); +} + +int dsi_core_clk_start(struct dsi_core_clks *c_clks) +{ + int rc = 0; + + if (c_clks->clks.mdp_core_clk) { + rc = clk_prepare_enable(c_clks->clks.mdp_core_clk); + if (rc) { + DSI_ERR("failed to enable mdp_core_clk, rc=%d\n", rc); + goto error; + } + } + + if (c_clks->clks.mnoc_clk) { + rc = clk_prepare_enable(c_clks->clks.mnoc_clk); + if (rc) { + DSI_ERR("failed to enable mnoc_clk, rc=%d\n", rc); + goto error_disable_core_clk; + } + } + + if (c_clks->clks.iface_clk) { + rc = clk_prepare_enable(c_clks->clks.iface_clk); + if (rc) { + DSI_ERR("failed to enable iface_clk, rc=%d\n", rc); + goto error_disable_mnoc_clk; + } + } + + if (c_clks->clks.bus_clk) { + rc = clk_prepare_enable(c_clks->clks.bus_clk); + if (rc) { + DSI_ERR("failed to enable bus_clk, rc=%d\n", rc); + goto error_disable_iface_clk; + } + } + + if (c_clks->clks.core_mmss_clk) { + rc = clk_prepare_enable(c_clks->clks.core_mmss_clk); + if (rc) { + DSI_ERR("failed to enable core_mmss_clk, rc=%d\n", rc); + goto error_disable_bus_clk; + } + } + + if (c_clks->bus_handle) { + rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1); + if (rc) { + DSI_ERR("bus scale client enable failed, rc=%d\n", rc); + goto error_disable_mmss_clk; + } + } + + return rc; + +error_disable_mmss_clk: + if (c_clks->clks.core_mmss_clk) + clk_disable_unprepare(c_clks->clks.core_mmss_clk); +error_disable_bus_clk: + if (c_clks->clks.bus_clk) + clk_disable_unprepare(c_clks->clks.bus_clk); +error_disable_iface_clk: + if (c_clks->clks.iface_clk) + clk_disable_unprepare(c_clks->clks.iface_clk); +error_disable_mnoc_clk: + if (c_clks->clks.mnoc_clk) + clk_disable_unprepare(c_clks->clks.mnoc_clk); +error_disable_core_clk: + if (c_clks->clks.mdp_core_clk) + clk_disable_unprepare(c_clks->clks.mdp_core_clk); +error: + return rc; +} + +int dsi_core_clk_stop(struct dsi_core_clks *c_clks) +{ + int rc = 0; + + if (c_clks->bus_handle) { + rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 0); + if (rc) { + DSI_ERR("bus scale client disable failed, rc=%d\n", rc); + return rc; + } + } + + if (c_clks->clks.core_mmss_clk) + clk_disable_unprepare(c_clks->clks.core_mmss_clk); + + if (c_clks->clks.bus_clk) + clk_disable_unprepare(c_clks->clks.bus_clk); + + if (c_clks->clks.iface_clk) + clk_disable_unprepare(c_clks->clks.iface_clk); + + if (c_clks->clks.mnoc_clk) + clk_disable_unprepare(c_clks->clks.mnoc_clk); + + if (c_clks->clks.mdp_core_clk) + clk_disable_unprepare(c_clks->clks.mdp_core_clk); + + return rc; +} + +static int dsi_link_hs_clk_set_rate(struct dsi_link_hs_clk_info *link_hs_clks, + int index) +{ + int rc = 0; + struct dsi_clk_mngr *mngr; + struct dsi_link_clks *l_clks; + + if (index >= MAX_DSI_CTRL) { + DSI_ERR("Invalid DSI ctrl index\n"); + return -EINVAL; + } + + l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks); + mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]); + + /* + * In an ideal world, cont_splash_enabled should not be required inside + * the clock manager. But, in the current driver cont_splash_enabled + * flag is set inside mdp driver and there is no interface event + * associated with this flag setting. 
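+ *
+ * Until that is reworked, the clk_set_rate() calls below are skipped while
+ * continuous splash is active; the link clock rates are expected to have
+ * been configured already (e.g. by the bootloader before the handoff).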
+ */ + if (mngr->is_cont_splash_enabled) + return 0; + + rc = clk_set_rate(link_hs_clks->byte_clk, + l_clks->freq.byte_clk_rate); + if (rc) { + DSI_ERR("clk_set_rate failed for byte_clk rc = %d\n", rc); + goto error; + } + + rc = clk_set_rate(link_hs_clks->pixel_clk, + l_clks->freq.pix_clk_rate); + if (rc) { + DSI_ERR("clk_set_rate failed for pixel_clk rc = %d\n", rc); + goto error; + } + + /* + * If byte_intf_clk is present, set rate for that too. + */ + if (link_hs_clks->byte_intf_clk) { + rc = clk_set_rate(link_hs_clks->byte_intf_clk, + l_clks->freq.byte_intf_clk_rate); + if (rc) { + DSI_ERR("set_rate failed for byte_intf_clk rc = %d\n", + rc); + goto error; + } + } +error: + return rc; +} + +static int dsi_link_hs_clk_prepare(struct dsi_link_hs_clk_info *link_hs_clks) +{ + int rc = 0; + + rc = clk_prepare(link_hs_clks->byte_clk); + if (rc) { + DSI_ERR("Failed to prepare dsi byte clk, rc=%d\n", rc); + goto byte_clk_err; + } + + rc = clk_prepare(link_hs_clks->pixel_clk); + if (rc) { + DSI_ERR("Failed to prepare dsi pixel clk, rc=%d\n", rc); + goto pixel_clk_err; + } + + if (link_hs_clks->byte_intf_clk) { + rc = clk_prepare(link_hs_clks->byte_intf_clk); + if (rc) { + DSI_ERR("Failed to prepare dsi byte intf clk, rc=%d\n", + rc); + goto byte_intf_clk_err; + } + } + + return rc; + +byte_intf_clk_err: + clk_unprepare(link_hs_clks->pixel_clk); +pixel_clk_err: + clk_unprepare(link_hs_clks->byte_clk); +byte_clk_err: + return rc; +} + +static void dsi_link_hs_clk_unprepare(struct dsi_link_hs_clk_info *link_hs_clks) +{ + if (link_hs_clks->byte_intf_clk) + clk_unprepare(link_hs_clks->byte_intf_clk); + clk_unprepare(link_hs_clks->pixel_clk); + clk_unprepare(link_hs_clks->byte_clk); +} + +static int dsi_link_hs_clk_enable(struct dsi_link_hs_clk_info *link_hs_clks) +{ + int rc = 0; + + rc = clk_enable(link_hs_clks->byte_clk); + if (rc) { + DSI_ERR("Failed to enable dsi byte clk, rc=%d\n", rc); + goto byte_clk_err; + } + + rc = clk_enable(link_hs_clks->pixel_clk); + if (rc) { + DSI_ERR("Failed to enable dsi pixel clk, rc=%d\n", rc); + goto pixel_clk_err; + } + + if (link_hs_clks->byte_intf_clk) { + rc = clk_enable(link_hs_clks->byte_intf_clk); + if (rc) { + DSI_ERR("Failed to enable dsi byte intf clk, rc=%d\n", + rc); + goto byte_intf_clk_err; + } + } + + return rc; + +byte_intf_clk_err: + clk_disable(link_hs_clks->pixel_clk); +pixel_clk_err: + clk_disable(link_hs_clks->byte_clk); +byte_clk_err: + return rc; +} + +static void dsi_link_hs_clk_disable(struct dsi_link_hs_clk_info *link_hs_clks) +{ + if (link_hs_clks->byte_intf_clk) + clk_disable(link_hs_clks->byte_intf_clk); + clk_disable(link_hs_clks->pixel_clk); + clk_disable(link_hs_clks->byte_clk); +} + +/** + * dsi_link_clk_start() - enable dsi link clocks + */ +static int dsi_link_hs_clk_start(struct dsi_link_hs_clk_info *link_hs_clks, + enum dsi_link_clk_op_type op_type, int index) +{ + int rc = 0; + + if (index >= MAX_DSI_CTRL) { + DSI_ERR("Invalid DSI ctrl index\n"); + return -EINVAL; + } + + if (op_type & DSI_LINK_CLK_SET_RATE) { + rc = dsi_link_hs_clk_set_rate(link_hs_clks, index); + if (rc) { + DSI_ERR("failed to set HS clk rates, rc = %d\n", rc); + goto error; + } + } + + if (op_type & DSI_LINK_CLK_PREPARE) { + rc = dsi_link_hs_clk_prepare(link_hs_clks); + if (rc) { + DSI_ERR("failed to prepare link HS clks, rc = %d\n", + rc); + goto error; + } + } + + if (op_type & DSI_LINK_CLK_ENABLE) { + rc = dsi_link_hs_clk_enable(link_hs_clks); + if (rc) { + DSI_ERR("failed to enable link HS clks, rc = %d\n", rc); + goto error_unprepare; + } + } + + 
DSI_DEBUG("HS Link clocks are enabled\n"); + return rc; +error_unprepare: + dsi_link_hs_clk_unprepare(link_hs_clks); +error: + return rc; +} + +/** + * dsi_link_clk_stop() - Stop DSI link clocks. + */ +static int dsi_link_hs_clk_stop(struct dsi_link_hs_clk_info *link_hs_clks) +{ + struct dsi_link_clks *l_clks; + + l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks); + + dsi_link_hs_clk_disable(link_hs_clks); + dsi_link_hs_clk_unprepare(link_hs_clks); + + DSI_DEBUG("HS Link clocks disabled\n"); + + return 0; +} + +static int dsi_link_lp_clk_start(struct dsi_link_lp_clk_info *link_lp_clks, + int index) +{ + int rc = 0; + struct dsi_clk_mngr *mngr; + struct dsi_link_clks *l_clks; + + if (index >= MAX_DSI_CTRL) { + DSI_ERR("Invalid DSI ctrl index\n"); + return -EINVAL; + } + + l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks); + + mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]); + if (!mngr) + return -EINVAL; + + /* + * In an ideal world, cont_splash_enabled should not be required inside + * the clock manager. But, in the current driver cont_splash_enabled + * flag is set inside mdp driver and there is no interface event + * associated with this flag setting. Also, set rate for clock need not + * be called for every enable call. It should be done only once when + * coming out of suspend. + */ + if (mngr->is_cont_splash_enabled) + goto prepare; + + rc = clk_set_rate(link_lp_clks->esc_clk, l_clks->freq.esc_clk_rate); + if (rc) { + DSI_ERR("clk_set_rate failed for esc_clk rc = %d\n", rc); + goto error; + } + +prepare: + rc = clk_prepare_enable(link_lp_clks->esc_clk); + if (rc) { + DSI_ERR("Failed to enable dsi esc clk\n"); + clk_unprepare(l_clks->lp_clks.esc_clk); + } +error: + DSI_DEBUG("LP Link clocks are enabled\n"); + return rc; +} + +static int dsi_link_lp_clk_stop( + struct dsi_link_lp_clk_info *link_lp_clks) +{ + struct dsi_link_clks *l_clks; + + l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks); + + clk_disable_unprepare(l_clks->lp_clks.esc_clk); + + DSI_DEBUG("LP Link clocks are disabled\n"); + return 0; +} + +static int dsi_display_core_clk_enable(struct dsi_core_clks *clks, + u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_core_clks *clk, *m_clks; + + /* + * In case of split DSI usecases, the clock for master controller should + * be enabled before the other controller. Master controller in the + * clock context refers to the controller that sources the clock. + */ + + m_clks = &clks[master_ndx]; + + rc = dsi_core_clk_start(m_clks); + if (rc) { + DSI_ERR("failed to turn on master clocks, rc=%d\n", rc); + goto error; + } + + /* Turn on rest of the core clocks */ + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + rc = dsi_core_clk_start(clk); + if (rc) { + DSI_ERR("failed to turn on clocks, rc=%d\n", rc); + goto error_disable_master; + } + } + return rc; +error_disable_master: + (void)dsi_core_clk_stop(m_clks); + +error: + return rc; +} + +static int dsi_display_link_clk_enable(struct dsi_link_clks *clks, + enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_link_clks *clk, *m_clks; + + /* + * In case of split DSI usecases, the clock for master controller should + * be enabled before the other controller. Master controller in the + * clock context refers to the controller that sources the clock. 
+ */ + + m_clks = &clks[master_ndx]; + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_start(&m_clks->lp_clks, master_ndx); + if (rc) { + DSI_ERR("failed to turn on master lp link clocks, rc=%d\n", + rc); + goto error; + } + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_start(&m_clks->hs_clks, + DSI_LINK_CLK_START, master_ndx); + if (rc) { + DSI_ERR("failed to turn on master hs link clocks, rc=%d\n", + rc); + goto error; + } + } + + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_start(&clk->lp_clks, i); + if (rc) { + DSI_ERR("failed to turn on lp link clocks, rc=%d\n", + rc); + goto error_disable_master; + } + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_start(&clk->hs_clks, + DSI_LINK_CLK_START, i); + if (rc) { + DSI_ERR("failed to turn on hs link clocks, rc=%d\n", + rc); + goto error_disable_master; + } + } + } + return rc; + +error_disable_master: + if (l_type == DSI_LINK_LP_CLK) + (void)dsi_link_lp_clk_stop(&m_clks->lp_clks); + else if (l_type == DSI_LINK_HS_CLK) + (void)dsi_link_hs_clk_stop(&m_clks->hs_clks); +error: + return rc; +} + +static int dsi_display_core_clk_disable(struct dsi_core_clks *clks, + u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_core_clks *clk, *m_clks; + + /* + * In case of split DSI usecases, clock for slave DSI controllers should + * be disabled first before disabling clock for master controller. Slave + * controllers in the clock context refer to controller which source + * clock from another controller. + */ + + m_clks = &clks[master_ndx]; + + /* Turn off non-master core clocks */ + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + rc = dsi_core_clk_stop(clk); + if (rc) { + DSI_DEBUG("failed to turn off clocks, rc=%d\n", rc); + goto error; + } + } + + rc = dsi_core_clk_stop(m_clks); + if (rc) { + DSI_ERR("failed to turn off master clocks, rc=%d\n", rc); + goto error; + } + +error: + return rc; +} + +static int dsi_display_link_clk_disable(struct dsi_link_clks *clks, + enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_link_clks *clk, *m_clks; + + /* + * In case of split DSI usecases, clock for slave DSI controllers should + * be disabled first before disabling clock for master controller. Slave + * controllers in the clock context refer to controller which source + * clock from another controller. 
+ */ + + m_clks = &clks[master_ndx]; + + /* Turn off non-master link clocks */ + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_stop(&clk->lp_clks); + if (rc) + DSI_ERR("failed to turn off lp link clocks, rc=%d\n", + rc); + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_stop(&clk->hs_clks); + if (rc) + DSI_ERR("failed to turn off hs link clocks, rc=%d\n", + rc); + } + } + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_stop(&m_clks->lp_clks); + if (rc) + DSI_ERR("failed to turn off master lp link clocks, rc=%d\n", + rc); + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_stop(&m_clks->hs_clks); + if (rc) + DSI_ERR("failed to turn off master hs link clocks, rc=%d\n", + rc); + } + + return rc; +} + +static int dsi_clk_update_link_clk_state(struct dsi_clk_mngr *mngr, + struct dsi_link_clks *l_clks, enum dsi_lclk_type l_type, u32 l_state, + bool enable) +{ + int rc = 0; + + if (!mngr) + return -EINVAL; + + if (enable) { + if (mngr->pre_clkon_cb) { + rc = mngr->pre_clkon_cb(mngr->priv_data, DSI_LINK_CLK, + l_type, l_state); + if (rc) { + DSI_ERR("pre link clk on cb failed for type %d\n", + l_type); + goto error; + } + } + rc = dsi_display_link_clk_enable(l_clks, l_type, + mngr->dsi_ctrl_count, mngr->master_ndx); + if (rc) { + DSI_ERR("failed to start link clk type %d rc=%d\n", + l_type, rc); + goto error; + } + + if (mngr->post_clkon_cb) { + rc = mngr->post_clkon_cb(mngr->priv_data, DSI_LINK_CLK, + l_type, l_state); + if (rc) { + DSI_ERR("post link clk on cb failed for type %d\n", + l_type); + goto error; + } + } + } else { + if (mngr->pre_clkoff_cb) { + rc = mngr->pre_clkoff_cb(mngr->priv_data, + DSI_LINK_CLK, l_type, l_state); + if (rc) + DSI_ERR("pre link clk off cb failed\n"); + } + + rc = dsi_display_link_clk_disable(l_clks, l_type, + mngr->dsi_ctrl_count, mngr->master_ndx); + if (rc) { + DSI_ERR("failed to stop link clk type %d, rc = %d\n", + l_type, rc); + goto error; + } + + if (mngr->post_clkoff_cb) { + rc = mngr->post_clkoff_cb(mngr->priv_data, + DSI_LINK_CLK, l_type, l_state); + if (rc) + DSI_ERR("post link clk off cb failed\n"); + } + } + +error: + return rc; +} + +static int dsi_update_core_clks(struct dsi_clk_mngr *mngr, + struct dsi_core_clks *c_clks) +{ + int rc = 0; + + if (mngr->core_clk_state == DSI_CLK_OFF) { + rc = mngr->pre_clkon_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + DSI_CLK_ON); + if (rc) { + DSI_ERR("failed to turn on MDP FS rc= %d\n", rc); + goto error; + } + } + rc = dsi_display_core_clk_enable(c_clks, mngr->dsi_ctrl_count, + mngr->master_ndx); + if (rc) { + DSI_ERR("failed to turn on core clks rc = %d\n", rc); + goto error; + } + + if (mngr->post_clkon_cb) { + rc = mngr->post_clkon_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + DSI_CLK_ON); + if (rc) + DSI_ERR("post clk on cb failed, rc = %d\n", rc); + } + mngr->core_clk_state = DSI_CLK_ON; +error: + return rc; +} + +static int dsi_update_clk_state(struct dsi_clk_mngr *mngr, + struct dsi_core_clks *c_clks, u32 c_state, + struct dsi_link_clks *l_clks, u32 l_state) +{ + int rc = 0; + bool l_c_on = false; + + if (!mngr) + return -EINVAL; + + DSI_DEBUG("c_state = %d, l_state = %d\n", + c_clks ? c_state : -1, l_clks ? l_state : -1); + /* + * Below is the sequence to toggle DSI clocks: + * 1. For ON sequence, Core clocks before link clocks + * 2. For OFF sequence, Link clocks before core clocks. 
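+ *
+ * For example, a client request of DSI_CLK_ON for
+ * (DSI_CORE_CLK | DSI_LINK_CLK) results in: core clocks on -> LP link
+ * clocks on -> HS link clocks on, and a DSI_CLK_OFF request tears them
+ * down in the reverse order.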
+     */
+    if (c_clks && (c_state == DSI_CLK_ON))
+        rc = dsi_update_core_clks(mngr, c_clks);
+
+    if (rc)
+        goto error;
+
+    if (l_clks) {
+        if (l_state == DSI_CLK_ON) {
+            rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+                DSI_LINK_LP_CLK, l_state, true);
+            if (rc)
+                goto error;
+
+            rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+                DSI_LINK_HS_CLK, l_state, true);
+            if (rc)
+                goto error;
+        } else {
+            /*
+             * Two conditions need to be checked for Link clocks:
+             * 1. Link clocks need core clocks to be on when
+             *    transitioning from EARLY_GATE to OFF state.
+             * 2. ULPS mode might have to be enabled in case of OFF
+             *    state. For ULPS, Link clocks should be turned ON
+             *    first before they are turned off again.
+             *
+             * If Link is going from EARLY_GATE to OFF state AND
+             * Core clock is already in EARLY_GATE or OFF state,
+             * turn on Core clocks and link clocks.
+             *
+             * ULPS state is managed as part of the pre_clkoff_cb.
+             */
+            if ((l_state == DSI_CLK_OFF) &&
+                (mngr->link_clk_state == DSI_CLK_EARLY_GATE) &&
+                (mngr->core_clk_state != DSI_CLK_ON)) {
+                rc = dsi_display_core_clk_enable(
+                    mngr->core_clks, mngr->dsi_ctrl_count,
+                    mngr->master_ndx);
+                if (rc) {
+                    DSI_ERR("core clks did not start\n");
+                    goto error;
+                }
+
+                rc = dsi_display_link_clk_enable(l_clks,
+                    (DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
+                    mngr->dsi_ctrl_count, mngr->master_ndx);
+                if (rc) {
+                    DSI_ERR("Link clks did not start\n");
+                    goto error;
+                }
+                l_c_on = true;
+                DSI_DEBUG("ECG: core and Link_on\n");
+            }
+
+            rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+                DSI_LINK_HS_CLK, l_state, false);
+            if (rc)
+                goto error;
+
+            rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+                DSI_LINK_LP_CLK, l_state, false);
+            if (rc)
+                goto error;
+
+            /*
+             * This check saves an unnecessary clock state change
+             * when going from EARLY_GATE to OFF. In the case where
+             * the request happens for both Core and Link clocks in
+             * the same call, core clocks need to be turned on first
+             * before OFF state can be entered.
+             *
+             * Core clocks were turned on above only so that the Link
+             * clocks could go to OFF state. If a core clock request
+             * is also present, the core clocks are instead turned
+             * off below, when they are transitioned to OFF state.
+             */
+            if (l_c_on && (!(c_clks && (c_state == DSI_CLK_OFF)
+                && (mngr->core_clk_state == DSI_CLK_EARLY_GATE)))) {
+                rc = dsi_display_core_clk_disable(
+                    mngr->core_clks, mngr->dsi_ctrl_count,
+                    mngr->master_ndx);
+                if (rc) {
+                    DSI_ERR("core clks did not stop\n");
+                    goto error;
+                }
+
+                l_c_on = false;
+                DSI_DEBUG("ECG: core off\n");
+            } else
+                DSI_DEBUG("ECG: core off skip\n");
+        }
+
+        mngr->link_clk_state = l_state;
+    }
+
+    if (c_clks && (c_state != DSI_CLK_ON)) {
+        /*
+         * When going to OFF state from EARLY GATE state, Core clocks
+         * should be turned on first so that the IOs can be clamped.
+         * If the l_c_on flag is set, the core clocks were already
+         * turned on above so that the Link clocks could go to OFF
+         * state; in that case they are still ON and this step can be
+         * skipped.
+         *
+         * IOs are clamped in the pre_clkoff_cb callback.
+ */ + if ((c_state == DSI_CLK_OFF) && + (mngr->core_clk_state == + DSI_CLK_EARLY_GATE) && !l_c_on) { + rc = dsi_display_core_clk_enable(mngr->core_clks, + mngr->dsi_ctrl_count, mngr->master_ndx); + if (rc) { + DSI_ERR("core clks did not start\n"); + goto error; + } + DSI_DEBUG("ECG: core on\n"); + } else + DSI_DEBUG("ECG: core on skip\n"); + + if (mngr->pre_clkoff_cb) { + rc = mngr->pre_clkoff_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + c_state); + if (rc) + DSI_ERR("pre core clk off cb failed\n"); + } + + rc = dsi_display_core_clk_disable(c_clks, mngr->dsi_ctrl_count, + mngr->master_ndx); + if (rc) { + DSI_ERR("failed to turn off core clks rc = %d\n", rc); + goto error; + } + + if (c_state == DSI_CLK_OFF) { + if (mngr->post_clkoff_cb) { + rc = mngr->post_clkoff_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + DSI_CLK_OFF); + if (rc) + DSI_ERR("post clkoff cb fail, rc = %d\n", + rc); + } + } + mngr->core_clk_state = c_state; + } + +error: + return rc; +} + +static int dsi_recheck_clk_state(struct dsi_clk_mngr *mngr) +{ + int rc = 0; + struct list_head *pos = NULL; + struct dsi_clk_client_info *c; + u32 new_core_clk_state = DSI_CLK_OFF; + u32 new_link_clk_state = DSI_CLK_OFF; + u32 old_c_clk_state = DSI_CLK_OFF; + u32 old_l_clk_state = DSI_CLK_OFF; + struct dsi_core_clks *c_clks = NULL; + struct dsi_link_clks *l_clks = NULL; + + /* + * Conditions to maintain DSI manager clock state based on + * clock states of various clients: + * 1. If any client has clock in ON state, DSI manager clock state + * should be ON. + * 2. If any client is in ECG state with rest of them turned OFF, + * go to Early gate state. + * 3. If all clients have clocks as OFF, then go to OFF state. + */ + list_for_each(pos, &mngr->client_list) { + c = list_entry(pos, struct dsi_clk_client_info, list); + if (c->core_clk_state == DSI_CLK_ON) { + new_core_clk_state = DSI_CLK_ON; + break; + } else if (c->core_clk_state == DSI_CLK_EARLY_GATE) { + new_core_clk_state = DSI_CLK_EARLY_GATE; + } + } + + list_for_each(pos, &mngr->client_list) { + c = list_entry(pos, struct dsi_clk_client_info, list); + if (c->link_clk_state == DSI_CLK_ON) { + new_link_clk_state = DSI_CLK_ON; + break; + } else if (c->link_clk_state == DSI_CLK_EARLY_GATE) { + new_link_clk_state = DSI_CLK_EARLY_GATE; + } + } + + if (new_core_clk_state != mngr->core_clk_state) + c_clks = mngr->core_clks; + + if (new_link_clk_state != mngr->link_clk_state) + l_clks = mngr->link_clks; + + old_c_clk_state = mngr->core_clk_state; + old_l_clk_state = mngr->link_clk_state; + + DSI_DEBUG("c_clk_state (%d -> %d)\n", old_c_clk_state, + new_core_clk_state); + DSI_DEBUG("l_clk_state (%d -> %d)\n", old_l_clk_state, + new_link_clk_state); + + if (c_clks || l_clks) { + rc = dsi_update_clk_state(mngr, c_clks, new_core_clk_state, + l_clks, new_link_clk_state); + if (rc) { + DSI_ERR("failed to update clock state, rc = %d\n", rc); + goto error; + } + } + +error: + return rc; +} + +int dsi_clk_req_state(void *client, enum dsi_clk_type clk, + enum dsi_clk_state state) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + bool changed = false; + + if (!client || !clk || clk > (DSI_CORE_CLK | DSI_LINK_CLK) || + state > DSI_CLK_EARLY_GATE) { + DSI_ERR("Invalid params, client = %pK, clk = 0x%x, state = %d\n", + client, clk, state); + return -EINVAL; + } + + mngr = c->mngr; + mutex_lock(&mngr->clk_mutex); + + DSI_DEBUG("[%s]%s: CLK=%d, new_state=%d, core=%d, linkl=%d\n", + mngr->name, c->name, clk, state, c->core_clk_state, + 
c->link_clk_state); + + /* + * Clock refcount handling as below: + * i. Increment refcount whenever ON is called. + * ii. Decrement refcount when transitioning from ON state to + * either OFF or EARLY_GATE. + * iii. Do not decrement refcount when changing from + * EARLY_GATE to OFF. + */ + if (state == DSI_CLK_ON) { + if (clk & DSI_CORE_CLK) { + c->core_refcount++; + if (c->core_clk_state != DSI_CLK_ON) { + c->core_clk_state = DSI_CLK_ON; + changed = true; + } + } + if (clk & DSI_LINK_CLK) { + c->link_refcount++; + if (c->link_clk_state != DSI_CLK_ON) { + c->link_clk_state = DSI_CLK_ON; + changed = true; + } + } + } else if ((state == DSI_CLK_EARLY_GATE) || + (state == DSI_CLK_OFF)) { + if (clk & DSI_CORE_CLK) { + if (c->core_refcount == 0) { + if ((c->core_clk_state == + DSI_CLK_EARLY_GATE) && + (state == DSI_CLK_OFF)) { + changed = true; + c->core_clk_state = DSI_CLK_OFF; + } else { + DSI_WARN("Core refcount is zero for %s\n", + c->name); + } + } else { + c->core_refcount--; + if (c->core_refcount == 0) { + c->core_clk_state = state; + changed = true; + } + } + } + if (clk & DSI_LINK_CLK) { + if (c->link_refcount == 0) { + if ((c->link_clk_state == + DSI_CLK_EARLY_GATE) && + (state == DSI_CLK_OFF)) { + changed = true; + c->link_clk_state = DSI_CLK_OFF; + } else { + DSI_WARN("Link refcount is zero for %s\n", + c->name); + } + } else { + c->link_refcount--; + if (c->link_refcount == 0) { + c->link_clk_state = state; + changed = true; + } + } + } + } + DSI_DEBUG("[%s]%s: change=%d, Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n", + mngr->name, c->name, changed, c->core_refcount, + c->core_clk_state, c->link_refcount, c->link_clk_state); + + if (changed) { + rc = dsi_recheck_clk_state(mngr); + if (rc) + DSI_ERR("Failed to adjust clock state rc = %d\n", rc); + } + + mutex_unlock(&mngr->clk_mutex); + return rc; +} + +DEFINE_MUTEX(dsi_mngr_clk_mutex); + +static int dsi_display_link_clk_force_update(void *client) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + struct dsi_link_clks *l_clks; + + mngr = c->mngr; + mutex_lock(&mngr->clk_mutex); + + l_clks = mngr->link_clks; + + /* + * When link_clk_state is DSI_CLK_OFF, don't change DSI clock rate + * since it is possible to be overwritten, and return -EAGAIN to + * dynamic DSI writing interface to defer the reenabling to the next + * drm commit. 
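+ * Callers (e.g. the dynamic DSI clock-rate update path) are therefore
+ * expected to treat -EAGAIN as "retry on the next commit" rather than
+ * as a hard failure.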
+ */ + if (mngr->link_clk_state == DSI_CLK_OFF) { + rc = -EAGAIN; + goto error; + } + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, (DSI_LINK_LP_CLK | + DSI_LINK_HS_CLK), DSI_CLK_OFF, false); + if (rc) + goto error; + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, (DSI_LINK_LP_CLK | + DSI_LINK_HS_CLK), DSI_CLK_ON, true); + if (rc) + goto error; + +error: + mutex_unlock(&mngr->clk_mutex); + return rc; + +} + +int dsi_display_link_clk_force_update_ctrl(void *handle) +{ + int rc = 0; + + if (!handle) { + DSI_ERR("Invalid arg\n"); + return -EINVAL; + } + + mutex_lock(&dsi_mngr_clk_mutex); + + rc = dsi_display_link_clk_force_update(handle); + + mutex_unlock(&dsi_mngr_clk_mutex); + + return rc; +} + +int dsi_display_clk_ctrl(void *handle, + enum dsi_clk_type clk_type, enum dsi_clk_state clk_state) +{ + int rc = 0; + + if (!handle) { + DSI_ERR("Invalid arg\n"); + return -EINVAL; + } + + mutex_lock(&dsi_mngr_clk_mutex); + rc = dsi_clk_req_state(handle, clk_type, clk_state); + if (rc) + DSI_ERR("failed set clk state, rc = %d\n", rc); + mutex_unlock(&dsi_mngr_clk_mutex); + + return rc; +} + +void *dsi_register_clk_handle(void *clk_mngr, char *client) +{ + void *handle = NULL; + struct dsi_clk_mngr *mngr = clk_mngr; + struct dsi_clk_client_info *c; + + if (!mngr) { + DSI_ERR("bad params\n"); + return ERR_PTR(-EINVAL); + } + + mutex_lock(&mngr->clk_mutex); + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) { + handle = ERR_PTR(-ENOMEM); + goto error; + } + + strlcpy(c->name, client, MAX_STRING_LEN); + c->mngr = mngr; + + list_add(&c->list, &mngr->client_list); + + DSI_DEBUG("[%s]: Added new client (%s)\n", mngr->name, c->name); + handle = c; +error: + mutex_unlock(&mngr->clk_mutex); + return handle; +} + +int dsi_deregister_clk_handle(void *client) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + struct list_head *pos = NULL; + struct list_head *tmp = NULL; + struct dsi_clk_client_info *node = NULL; + + if (!client) { + DSI_ERR("Invalid params\n"); + return -EINVAL; + } + + mngr = c->mngr; + DSI_DEBUG("%s: ENTER\n", mngr->name); + mutex_lock(&mngr->clk_mutex); + c->core_clk_state = DSI_CLK_OFF; + c->link_clk_state = DSI_CLK_OFF; + + rc = dsi_recheck_clk_state(mngr); + if (rc) { + DSI_ERR("clock state recheck failed rc = %d\n", rc); + goto error; + } + + list_for_each_safe(pos, tmp, &mngr->client_list) { + node = list_entry(pos, struct dsi_clk_client_info, + list); + if (node == c) { + list_del(&node->list); + DSI_DEBUG("Removed device (%s)\n", node->name); + kfree(node); + break; + } + } + +error: + mutex_unlock(&mngr->clk_mutex); + DSI_DEBUG("%s: EXIT, rc = %d\n", mngr->name, rc); + return rc; +} + +void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status) +{ + struct dsi_clk_mngr *mngr; + + if (!clk_mgr) { + DSI_ERR("Invalid params\n"); + return; + } + + mngr = (struct dsi_clk_mngr *)clk_mgr; + mngr->is_cont_splash_enabled = status; +} + +void *dsi_display_clk_mngr_register(struct dsi_clk_info *info) +{ + struct dsi_clk_mngr *mngr; + int i = 0; + + if (!info) { + DSI_ERR("Invalid params\n"); + return ERR_PTR(-EINVAL); + } + + mngr = kzalloc(sizeof(*mngr), GFP_KERNEL); + if (!mngr) { + mngr = ERR_PTR(-ENOMEM); + goto error; + } + + mutex_init(&mngr->clk_mutex); + mngr->dsi_ctrl_count = info->dsi_ctrl_count; + mngr->master_ndx = info->master_ndx; + + if (mngr->dsi_ctrl_count > MAX_DSI_CTRL) { + kfree(mngr); + return ERR_PTR(-EINVAL); + } + + for (i = 0; i < mngr->dsi_ctrl_count; i++) { + memcpy(&mngr->core_clks[i].clks, 
&info->c_clks[i], + sizeof(struct dsi_core_clk_info)); + memcpy(&mngr->link_clks[i].hs_clks, &info->l_hs_clks[i], + sizeof(struct dsi_link_hs_clk_info)); + memcpy(&mngr->link_clks[i].lp_clks, &info->l_lp_clks[i], + sizeof(struct dsi_link_lp_clk_info)); + mngr->core_clks[i].bus_handle = info->bus_handle[i]; + mngr->ctrl_index[i] = info->ctrl_index[i]; + } + + INIT_LIST_HEAD(&mngr->client_list); + mngr->pre_clkon_cb = info->pre_clkon_cb; + mngr->post_clkon_cb = info->post_clkon_cb; + mngr->pre_clkoff_cb = info->pre_clkoff_cb; + mngr->post_clkoff_cb = info->post_clkoff_cb; + mngr->priv_data = info->priv_data; + memcpy(mngr->name, info->name, MAX_STRING_LEN); + +error: + DSI_DEBUG("EXIT, rc = %ld\n", PTR_ERR(mngr)); + return mngr; +} + +int dsi_display_clk_mngr_deregister(void *clk_mngr) +{ + int rc = 0; + struct dsi_clk_mngr *mngr = clk_mngr; + struct list_head *position = NULL; + struct list_head *tmp = NULL; + struct dsi_clk_client_info *node = NULL; + + if (!mngr) { + DSI_ERR("Invalid params\n"); + return -EINVAL; + } + + DSI_DEBUG("%s: ENTER\n", mngr->name); + mutex_lock(&mngr->clk_mutex); + + list_for_each_safe(position, tmp, &mngr->client_list) { + node = list_entry(position, struct dsi_clk_client_info, + list); + list_del(&node->list); + DSI_DEBUG("Removed device (%s)\n", node->name); + kfree(node); + } + + rc = dsi_recheck_clk_state(mngr); + if (rc) + DSI_ERR("failed to disable all clocks\n"); + + mutex_unlock(&mngr->clk_mutex); + DSI_DEBUG("%s: EXIT, rc = %d\n", mngr->name, rc); + kfree(mngr); + return rc; +} diff --git a/techpack/display/msm/dsi/dsi_ctrl.c b/techpack/display/msm/dsi/dsi_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..71b11e5a5c81dcb3a8464dc3c554826fad8ffab7 --- /dev/null +++ b/techpack/display/msm/dsi/dsi_ctrl.c @@ -0,0 +1,4043 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include
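+/*
+ * Editor's note: illustrative usage sketch of the DSI clock manager API
+ * defined in dsi_clk_manager.c above, not part of this patch. The client
+ * name "display" is hypothetical, and the dsi_clk_info setup (clock
+ * handles, bus handles, callbacks) and error handling are elided.
+ *
+ *	struct dsi_clk_info info = { ... };	// filled from controller data
+ *	void *mngr, *clk_handle;
+ *
+ *	mngr = dsi_display_clk_mngr_register(&info);
+ *	clk_handle = dsi_register_clk_handle(mngr, "display");
+ *
+ *	// Turn everything on before touching the hardware ...
+ *	dsi_display_clk_ctrl(clk_handle, DSI_CORE_CLK | DSI_LINK_CLK,
+ *			     DSI_CLK_ON);
+ *	// ... program the controller, then release the clocks.
+ *	dsi_display_clk_ctrl(clk_handle, DSI_CORE_CLK | DSI_LINK_CLK,
+ *			     DSI_CLK_OFF);
+ *
+ *	dsi_deregister_clk_handle(clk_handle);
+ *	dsi_display_clk_mngr_deregister(mngr);
+ */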