From 6b173218454019b296ccc3f61f30daefc6407ae5 Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Sat, 17 Jan 2015 09:53:15 -0500 Subject: [PATCH 001/310] drm/msm: remove clock framework dependency Remove dependency on Clock Framework for msm DRM driver. Change-Id: I4f18bac4427de498e5c0adff14c176f6d614abc5 Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 84d3ec98e6b9..7f29f3644fb6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -3,7 +3,7 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || (ARM && COMPILE_TEST) - depends on OF && COMMON_CLK + depends on OF select REGULATOR select DRM_KMS_HELPER select DRM_PANEL -- GitLab From 7680a4dd5931279d8addf6673cebd8db8506a5db Mon Sep 17 00:00:00 2001 From: Ajay Singh Parmar Date: Mon, 16 May 2016 16:36:59 -0700 Subject: [PATCH 002/310] drm/msm/dsi-staging: add dsi definitions for new dsi driver Add header with definitions which are common to all dsi driver components. Change-Id: Ic460f81ba11438c7f0032303824144cc913f20fd Signed-off-by: Ajay Singh Parmar --- drivers/gpu/drm/msm/dsi-staging/dsi_defs.h | 357 +++++++++++++++++++++ drivers/gpu/drm/msm/dsi-staging/dsi_hw.h | 39 +++ 2 files changed, 396 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_defs.h create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_hw.h diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h new file mode 100644 index 000000000000..ded7ed3710ee --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DSI_DEFS_H_ +#define _DSI_DEFS_H_ + +#include + +#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \ + ((t)->h_sync_width) + ((t)->h_front_porch)) + +#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \ + ((t)->v_sync_width) + ((t)->v_front_porch)) + +/** + * enum dsi_pixel_format - DSI pixel formats + * @DSI_PIXEL_FORMAT_RGB565: + * @DSI_PIXEL_FORMAT_RGB666: + * @DSI_PIXEL_FORMAT_RGB666_LOOSE: + * @DSI_PIXEL_FORMAT_RGB888: + * @DSI_PIXEL_FORMAT_RGB111: + * @DSI_PIXEL_FORMAT_RGB332: + * @DSI_PIXEL_FORMAT_RGB444: + * @DSI_PIXEL_FORMAT_MAX: + */ +enum dsi_pixel_format { + DSI_PIXEL_FORMAT_RGB565 = 0, + DSI_PIXEL_FORMAT_RGB666, + DSI_PIXEL_FORMAT_RGB666_LOOSE, + DSI_PIXEL_FORMAT_RGB888, + DSI_PIXEL_FORMAT_RGB111, + DSI_PIXEL_FORMAT_RGB332, + DSI_PIXEL_FORMAT_RGB444, + DSI_PIXEL_FORMAT_MAX +}; + +/** + * enum dsi_op_mode - dsi operation mode + * @DSI_OP_VIDEO_MODE: DSI video mode operation + * @DSI_OP_CMD_MODE: DSI Command mode operation + * @DSI_OP_MODE_MAX: + */ +enum dsi_op_mode { + DSI_OP_VIDEO_MODE = 0, + DSI_OP_CMD_MODE, + DSI_OP_MODE_MAX +}; + +/** + * enum dsi_data_lanes - dsi physical lanes + * @DSI_DATA_LANE_0: Physical lane 0 + * @DSI_DATA_LANE_1: Physical lane 1 + * @DSI_DATA_LANE_2: Physical lane 2 + * @DSI_DATA_LANE_3: Physical lane 3 + * @DSI_CLOCK_LANE: Physical clock lane + */ +enum dsi_data_lanes { + DSI_DATA_LANE_0 = BIT(0), + DSI_DATA_LANE_1 = BIT(1), + DSI_DATA_LANE_2 = BIT(2), + DSI_DATA_LANE_3 = BIT(3), + DSI_CLOCK_LANE = BIT(4) +}; + +/** + 
* enum dsi_logical_lane - dsi logical lanes + * @DSI_LOGICAL_LANE_0: Logical lane 0 + * @DSI_LOGICAL_LANE_1: Logical lane 1 + * @DSI_LOGICAL_LANE_2: Logical lane 2 + * @DSI_LOGICAL_LANE_3: Logical lane 3 + * @DSI_LOGICAL_CLOCK_LANE: Clock lane + * @DSI_LANE_MAX: Maximum lanes supported + */ +enum dsi_logical_lane { + DSI_LOGICAL_LANE_0 = 0, + DSI_LOGICAL_LANE_1, + DSI_LOGICAL_LANE_2, + DSI_LOGICAL_LANE_3, + DSI_LOGICAL_CLOCK_LANE, + DSI_LANE_MAX +}; + +/** + * enum dsi_trigger_type - dsi trigger type + * @DSI_TRIGGER_NONE: No trigger. + * @DSI_TRIGGER_TE: TE trigger. + * @DSI_TRIGGER_SEOF: Start or End of frame. + * @DSI_TRIGGER_SW: Software trigger. + * @DSI_TRIGGER_SW_SEOF: Software trigger and start/end of frame. + * @DSI_TRIGGER_SW_TE: Software and TE triggers. + * @DSI_TRIGGER_MAX: Max trigger values. + */ +enum dsi_trigger_type { + DSI_TRIGGER_NONE = 0, + DSI_TRIGGER_TE, + DSI_TRIGGER_SEOF, + DSI_TRIGGER_SW, + DSI_TRIGGER_SW_SEOF, + DSI_TRIGGER_SW_TE, + DSI_TRIGGER_MAX +}; + +/** + * enum dsi_color_swap_mode - color swap mode + * @DSI_COLOR_SWAP_RGB: + * @DSI_COLOR_SWAP_RBG: + * @DSI_COLOR_SWAP_BGR: + * @DSI_COLOR_SWAP_BRG: + * @DSI_COLOR_SWAP_GRB: + * @DSI_COLOR_SWAP_GBR: + */ +enum dsi_color_swap_mode { + DSI_COLOR_SWAP_RGB = 0, + DSI_COLOR_SWAP_RBG, + DSI_COLOR_SWAP_BGR, + DSI_COLOR_SWAP_BRG, + DSI_COLOR_SWAP_GRB, + DSI_COLOR_SWAP_GBR +}; + +/** + * enum dsi_dfps_type - Dynamic FPS support type + * @DSI_DFPS_NONE: Dynamic FPS is not supported. 
+ * @DSI_DFPS_SUSPEND_RESUME:
+ * @DSI_DFPS_IMMEDIATE_CLK:
+ * @DSI_DFPS_IMMEDIATE_HFP:
+ * @DSI_DFPS_IMMEDIATE_VFP:
+ * @DSI_DFPS_MAX:
+ */
+enum dsi_dfps_type {
+ DSI_DFPS_NONE = 0,
+ DSI_DFPS_SUSPEND_RESUME,
+ DSI_DFPS_IMMEDIATE_CLK,
+ DSI_DFPS_IMMEDIATE_HFP,
+ DSI_DFPS_IMMEDIATE_VFP,
+ DSI_DFPS_MAX
+};
+
+/**
+ * enum dsi_phy_type - DSI phy types
+ * @DSI_PHY_TYPE_DPHY:
+ * @DSI_PHY_TYPE_CPHY:
+ */
+enum dsi_phy_type {
+ DSI_PHY_TYPE_DPHY,
+ DSI_PHY_TYPE_CPHY
+};
+
+/**
+ * enum dsi_te_mode - dsi te source
+ * @DSI_TE_ON_DATA_LINK: TE read from DSI link
+ * @DSI_TE_ON_EXT_PIN: TE signal on an external GPIO
+ */
+enum dsi_te_mode {
+ DSI_TE_ON_DATA_LINK = 0,
+ DSI_TE_ON_EXT_PIN,
+};
+
+/**
+ * enum dsi_video_traffic_mode - video mode pixel transmission type
+ * @DSI_VIDEO_TRAFFIC_SYNC_PULSES: Non-burst mode with sync pulses.
+ * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
+ * @DSI_VIDEO_TRAFFIC_BURST_MODE: Burst mode using sync start events.
+ */
+enum dsi_video_traffic_mode {
+ DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
+ DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
+ DSI_VIDEO_TRAFFIC_BURST_MODE,
+};
+
+/**
+ * struct dsi_mode_info - video mode information dsi frame
+ * @h_active: Active width of one frame in pixels.
+ * @h_back_porch: Horizontal back porch in pixels.
+ * @h_sync_width: HSYNC width in pixels.
+ * @h_front_porch: Horizontal front porch in pixels.
+ * @h_skew:
+ * @h_sync_polarity: Polarity of HSYNC (false is active low).
+ * @v_active: Active height of one frame in lines.
+ * @v_back_porch: Vertical back porch in lines.
+ * @v_sync_width: VSYNC width in lines.
+ * @v_front_porch: Vertical front porch in lines.
+ * @v_sync_polarity: Polarity of VSYNC (false is active low).
+ * @refresh_rate: Refresh rate in Hz. 
+ */
+struct dsi_mode_info {
+ u32 h_active;
+ u32 h_back_porch;
+ u32 h_sync_width;
+ u32 h_front_porch;
+ u32 h_skew;
+ bool h_sync_polarity;
+
+ u32 v_active;
+ u32 v_back_porch;
+ u32 v_sync_width;
+ u32 v_front_porch;
+ bool v_sync_polarity;
+
+ u32 refresh_rate;
+};
+
+/**
+ * struct dsi_lane_mapping - Mapping between DSI logical and physical lanes
+ * @physical_lane0: Logical lane to which physical lane 0 is mapped.
+ * @physical_lane1: Logical lane to which physical lane 1 is mapped.
+ * @physical_lane2: Logical lane to which physical lane 2 is mapped.
+ * @physical_lane3: Logical lane to which physical lane 3 is mapped.
+ */
+struct dsi_lane_mapping {
+ enum dsi_logical_lane physical_lane0;
+ enum dsi_logical_lane physical_lane1;
+ enum dsi_logical_lane physical_lane2;
+ enum dsi_logical_lane physical_lane3;
+};
+
+/**
+ * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
+ * @dst_format: Destination pixel format.
+ * @data_lanes: Physical data lanes to be enabled.
+ * @en_crc_check: Enable CRC checks.
+ * @en_ecc_check: Enable ECC checks.
+ * @te_mode: Source for TE signalling.
+ * @mdp_cmd_trigger: MDP frame update trigger for command mode.
+ * @dma_cmd_trigger: Command DMA trigger.
+ * @cmd_trigger_stream: Command mode stream to trigger.
+ * @bit_swap_red: Is red color bit swapped.
+ * @bit_swap_green: Is green color bit swapped.
+ * @bit_swap_blue: Is blue color bit swapped.
+ * @t_clk_post: Number of byte clock cycles that the transmitter shall
+ * continue sending after last data lane has transitioned
+ * to LP mode.
+ * @t_clk_pre: Number of byte clock cycles that the high speed clock
+ * shall be driven prior to data lane transitions from LP
+ * to HS mode.
+ * @ignore_rx_eot: Ignore Rx EOT packets if set to true.
+ * @append_tx_eot: Append EOT packets for forward transmissions if set to
+ * true. 
+ */ +struct dsi_host_common_cfg { + enum dsi_pixel_format dst_format; + enum dsi_data_lanes data_lanes; + bool en_crc_check; + bool en_ecc_check; + enum dsi_te_mode te_mode; + enum dsi_trigger_type mdp_cmd_trigger; + enum dsi_trigger_type dma_cmd_trigger; + u32 cmd_trigger_stream; + enum dsi_color_swap_mode swap_mode; + bool bit_swap_red; + bool bit_swap_green; + bool bit_swap_blue; + u32 t_clk_post; + u32 t_clk_pre; + bool ignore_rx_eot; + bool append_tx_eot; +}; + +/** + * struct dsi_video_engine_cfg - DSI video engine configuration + * @host_cfg: Pointer to host common configuration. + * @last_line_interleave_en: Allow command mode op interleaved on last line of + * video stream. + * @pulse_mode_hsa_he: Send HSA and HE following VS/VE packet if set to + * true. + * @hfp_lp11_en: Enter low power stop mode (LP-11) during HFP. + * @hbp_lp11_en: Enter low power stop mode (LP-11) during HBP. + * @hsa_lp11_en: Enter low power stop mode (LP-11) during HSA. + * @eof_bllp_lp11_en: Enter low power stop mode (LP-11) during BLLP of + * last line of a frame. + * @bllp_lp11_en: Enter low power stop mode (LP-11) during BLLP. + * @traffic_mode: Traffic mode for video stream. + * @vc_id: Virtual channel identifier. + */ +struct dsi_video_engine_cfg { + bool last_line_interleave_en; + bool pulse_mode_hsa_he; + bool hfp_lp11_en; + bool hbp_lp11_en; + bool hsa_lp11_en; + bool eof_bllp_lp11_en; + bool bllp_lp11_en; + enum dsi_video_traffic_mode traffic_mode; + u32 vc_id; +}; + +/** + * struct dsi_cmd_engine_cfg - DSI command engine configuration + * @host_cfg: Pointer to host common configuration. + * @host_cfg: Common host configuration + * @max_cmd_packets_interleave Maximum number of command mode RGB packets to + * send with in one horizontal blanking period + * of the video mode frame. + * @wr_mem_start: DCS command for write_memory_start. + * @wr_mem_continue: DCS command for write_memory_continue. 
+ * @insert_dcs_command: Insert DCS command as first byte of payload + * of the pixel data. + */ +struct dsi_cmd_engine_cfg { + u32 max_cmd_packets_interleave; + u32 wr_mem_start; + u32 wr_mem_continue; + bool insert_dcs_command; +}; + +/** + * struct dsi_host_config - DSI host configuration parameters. + * @panel_mode: Operation mode for panel (video or cmd mode). + * @common_config: Host configuration common to both Video and Cmd mode. + * @video_engine: Video engine configuration if panel is in video mode. + * @cmd_engine: Cmd engine configuration if panel is in cmd mode. + * @esc_clk_rate_khz: Esc clock frequency in Hz. + * @bit_clk_rate_hz: Bit clock frequency in Hz. + * @video_timing: Video timing information of a frame. + * @lane_map: Mapping between logical and physical lanes. + * @phy_type: PHY type to be used. + */ +struct dsi_host_config { + enum dsi_op_mode panel_mode; + struct dsi_host_common_cfg common_config; + union { + struct dsi_video_engine_cfg video_engine; + struct dsi_cmd_engine_cfg cmd_engine; + } u; + u64 esc_clk_rate_hz; + u64 bit_clk_rate_hz; + struct dsi_mode_info video_timing; + struct dsi_lane_mapping lane_map; +}; + +/** + * struct dsi_display_mode - specifies mode for dsi display + * @timing: Timing parameters for the panel. + * @pixel_clk_khz: Pixel clock in Khz. + * @panel_mode: Panel operation mode. + * @flags: Additional flags. + */ +struct dsi_display_mode { + struct dsi_mode_info timing; + u32 pixel_clk_khz; + enum dsi_op_mode panel_mode; + + u32 flags; +}; + +#endif /* _DSI_DEFS_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h new file mode 100644 index 000000000000..01535c02a7f8 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DSI_HW_H_ +#define _DSI_HW_H_ +#include + +#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off)) +#define DSI_W32(dsi_hw, off, val) \ + do {\ + pr_debug("[DSI_%d][%s] - [0x%08x]\n", \ + (dsi_hw)->index, #off, val); \ + writel_relaxed((val), (dsi_hw)->base + (off)); \ + } while (0) + +#define DSI_MMSS_MISC_R32(dsi_hw, off) \ + readl_relaxed((dsi_hw)->mmss_misc_base + (off)) +#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \ + do {\ + pr_debug("[DSI_%d][%s] - [0x%08x]\n", \ + (dsi_hw)->index, #off, val); \ + writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \ + } while (0) + +#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off)) +#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off)) + +#endif /* _DSI_HW_H_ */ -- GitLab From 7636db7d4593497f7181a30662bd8f8dc9b7396a Mon Sep 17 00:00:00 2001 From: Ajay Singh Parmar Date: Mon, 16 May 2016 17:09:59 -0700 Subject: [PATCH 003/310] drm/msm/dsi-staging: add hardware driver for dsi phy Add hardware driver for dsi phy v4.0. 
Change-Id: I41afbec7621e2d08326188b507c57ef09bb1602b Signed-off-by: Ajay Singh Parmar --- drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h | 164 ++++ .../gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c | 858 ++++++++++++++++++ 2 files changed, 1022 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h new file mode 100644 index 000000000000..5edfd5e62738 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DSI_PHY_HW_H_ +#define _DSI_PHY_HW_H_ + +#include "dsi_defs.h" + +#define DSI_MAX_SETTINGS 8 + +/** + * enum dsi_phy_version - DSI PHY version enumeration + * @DSI_PHY_VERSION_UNKNOWN: Unknown version. + * @DSI_PHY_VERSION_1_0: 28nm-HPM. + * @DSI_PHY_VERSION_2_0: 28nm-LPM. + * @DSI_PHY_VERSION_3_0: 20nm. + * @DSI_PHY_VERSION_4_0: 14nm. 
+ * @DSI_PHY_VERSION_MAX: + */ +enum dsi_phy_version { + DSI_PHY_VERSION_UNKNOWN, + DSI_PHY_VERSION_1_0, /* 28nm-HPM */ + DSI_PHY_VERSION_2_0, /* 28nm-LPM */ + DSI_PHY_VERSION_3_0, /* 20nm */ + DSI_PHY_VERSION_4_0, /* 14nm */ + DSI_PHY_VERSION_MAX +}; + +/** + * enum dsi_phy_hw_features - features supported by DSI PHY hardware + * @DSI_PHY_DPHY: Supports DPHY + * @DSI_PHY_CPHY: Supports CPHY + */ +enum dsi_phy_hw_features { + DSI_PHY_DPHY, + DSI_PHY_CPHY, + DSI_PHY_MAX_FEATURES +}; + +/** + * enum dsi_phy_pll_source - pll clock source for PHY. + * @DSI_PLL_SOURCE_STANDALONE: Clock is sourced from native PLL and is not + * shared by other PHYs. + * @DSI_PLL_SOURCE_NATIVE: Clock is sourced from native PLL and is + * shared by other PHYs. + * @DSI_PLL_SOURCE_NON_NATIVE: Clock is sourced from other PHYs. + * @DSI_PLL_SOURCE_MAX: + */ +enum dsi_phy_pll_source { + DSI_PLL_SOURCE_STANDALONE = 0, + DSI_PLL_SOURCE_NATIVE, + DSI_PLL_SOURCE_NON_NATIVE, + DSI_PLL_SOURCE_MAX +}; + +/** + * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters + * @lane: A set of maximum 8 values for each lane. + * @count_per_lane: Number of values per each lane. + */ +struct dsi_phy_per_lane_cfgs { + u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS]; + u32 count_per_lane; +}; + +/** + * struct dsi_phy_cfg - DSI PHY configuration + * @lanecfg: Lane configuration settings. + * @strength: Strength settings for lanes. + * @timing: Timing parameters for lanes. + * @regulators: Regulator settings for lanes. + * @pll_source: PLL source. + */ +struct dsi_phy_cfg { + struct dsi_phy_per_lane_cfgs lanecfg; + struct dsi_phy_per_lane_cfgs strength; + struct dsi_phy_per_lane_cfgs timing; + struct dsi_phy_per_lane_cfgs regulators; + enum dsi_phy_pll_source pll_source; +}; + +struct dsi_phy_hw; + +/** + * struct dsi_phy_hw_ops - Operations for DSI PHY hardware. + * @regulator_enable: Enable PHY regulators. + * @regulator_disable: Disable PHY regulators. + * @enable: Enable PHY. 
+ * @disable: Disable PHY. + * @calculate_timing_params: Calculate PHY timing params from mode information + */ +struct dsi_phy_hw_ops { + /** + * regulator_enable() - enable regulators for DSI PHY + * @phy: Pointer to DSI PHY hardware object. + * @reg_cfg: Regulator configuration for all DSI lanes. + */ + void (*regulator_enable)(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *reg_cfg); + + /** + * regulator_disable() - disable regulators + * @phy: Pointer to DSI PHY hardware object. + */ + void (*regulator_disable)(struct dsi_phy_hw *phy); + + /** + * enable() - Enable PHY hardware + * @phy: Pointer to DSI PHY hardware object. + * @cfg: Per lane configurations for timing, strength and lane + * configurations. + */ + void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); + + /** + * disable() - Disable PHY hardware + * @phy: Pointer to DSI PHY hardware object. + */ + void (*disable)(struct dsi_phy_hw *phy); + + /** + * calculate_timing_params() - calculates timing parameters. + * @phy: Pointer to DSI PHY hardware object. + * @mode: Mode information for which timing has to be calculated. + * @config: DSI host configuration for this mode. + * @timing: Timing parameters for each lane which will be returned. + */ + int (*calculate_timing_params)(struct dsi_phy_hw *phy, + struct dsi_mode_info *mode, + struct dsi_host_common_cfg *config, + struct dsi_phy_per_lane_cfgs *timing); +}; + +/** + * struct dsi_phy_hw - DSI phy hardware object specific to an instance + * @base: VA for the DSI PHY base address. + * @length: Length of the DSI PHY register base map. + * @index: Instance ID of the controller. + * @version: DSI PHY version. + * @feature_map: Features supported by DSI PHY. + * @ops: Function pointer to PHY operations. 
+ */ +struct dsi_phy_hw { + void __iomem *base; + u32 length; + u32 index; + + enum dsi_phy_version version; + + DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES); + struct dsi_phy_hw_ops ops; +}; + +#endif /* _DSI_PHY_HW_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c new file mode 100644 index 000000000000..512352d96f98 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c @@ -0,0 +1,858 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "dsi-phy-hw:" fmt +#include +#include +#include "dsi_hw.h" +#include "dsi_phy_hw.h" + +#define DSIPHY_CMN_REVISION_ID0 0x0000 +#define DSIPHY_CMN_REVISION_ID1 0x0004 +#define DSIPHY_CMN_REVISION_ID2 0x0008 +#define DSIPHY_CMN_REVISION_ID3 0x000C +#define DSIPHY_CMN_CLK_CFG0 0x0010 +#define DSIPHY_CMN_CLK_CFG1 0x0014 +#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018 +#define DSIPHY_CMN_CTRL_0 0x001C +#define DSIPHY_CMN_CTRL_1 0x0020 +#define DSIPHY_CMN_CAL_HW_TRIGGER 0x0024 +#define DSIPHY_CMN_CAL_SW_CFG0 0x0028 +#define DSIPHY_CMN_CAL_SW_CFG1 0x002C +#define DSIPHY_CMN_CAL_SW_CFG2 0x0030 +#define DSIPHY_CMN_CAL_HW_CFG0 0x0034 +#define DSIPHY_CMN_CAL_HW_CFG1 0x0038 +#define DSIPHY_CMN_CAL_HW_CFG2 0x003C +#define DSIPHY_CMN_CAL_HW_CFG3 0x0040 +#define DSIPHY_CMN_CAL_HW_CFG4 0x0044 +#define DSIPHY_CMN_PLL_CNTRL 0x0048 +#define DSIPHY_CMN_LDO_CNTRL 0x004C + +#define DSIPHY_CMN_REGULATOR_CAL_STATUS0 0x0064 +#define DSIPHY_CMN_REGULATOR_CAL_STATUS1 0x0068 + +/* n = 0..3 for data lanes and n = 4 for clock lane */ +#define DSIPHY_DLNX_CFG0(n) (0x100 + ((n) * 0x80)) +#define DSIPHY_DLNX_CFG1(n) (0x104 + ((n) * 0x80)) +#define DSIPHY_DLNX_CFG2(n) (0x108 + ((n) * 0x80)) +#define DSIPHY_DLNX_CFG3(n) (0x10C + ((n) * 0x80)) +#define DSIPHY_DLNX_TEST_DATAPATH(n) (0x110 + ((n) * 0x80)) +#define DSIPHY_DLNX_TEST_STR(n) (0x114 + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_4(n) (0x118 + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_5(n) (0x11C + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_6(n) (0x120 + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_7(n) (0x124 + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_8(n) (0x128 + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_9(n) (0x12C + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_10(n) (0x130 + ((n) * 0x80)) +#define DSIPHY_DLNX_TIMING_CTRL_11(n) (0x134 + ((n) * 0x80)) +#define DSIPHY_DLNX_STRENGTH_CTRL_0(n) (0x138 + ((n) * 0x80)) +#define DSIPHY_DLNX_STRENGTH_CTRL_1(n) (0x13C + ((n) * 0x80)) +#define 
DSIPHY_DLNX_BIST_POLY(n) (0x140 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_SEED0(n) (0x144 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_SEED1(n) (0x148 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_HEAD(n) (0x14C + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_SOT(n) (0x150 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_CTRL0(n) (0x154 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_CTRL1(n) (0x158 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_CTRL2(n) (0x15C + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_CTRL3(n) (0x160 + ((n) * 0x80)) +#define DSIPHY_DLNX_VREG_CNTRL(n) (0x164 + ((n) * 0x80)) +#define DSIPHY_DLNX_HSTX_STR_STATUS(n) (0x168 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_STATUS0(n) (0x16C + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_STATUS1(n) (0x170 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_STATUS2(n) (0x174 + ((n) * 0x80)) +#define DSIPHY_DLNX_BIST_STATUS3(n) (0x178 + ((n) * 0x80)) +#define DSIPHY_DLNX_MISR_STATUS(n) (0x17C + ((n) * 0x80)) + +#define DSIPHY_PLL_CLKBUFLR_EN 0x041C +#define DSIPHY_PLL_PLL_BANDGAP 0x0508 + +/** + * struct timing_entry - Calculated values for each timing parameter. + * @mipi_min: + * @mipi_max: + * @rec_min: + * @rec_max: + * @rec: + * @reg_value: Value to be programmed in register. + */ +struct timing_entry { + s32 mipi_min; + s32 mipi_max; + s32 rec_min; + s32 rec_max; + s32 rec; + u8 reg_value; +}; + +/** + * struct phy_timing_desc - Timing parameters for DSI PHY. + */ +struct phy_timing_desc { + struct timing_entry clk_prepare; + struct timing_entry clk_zero; + struct timing_entry clk_trail; + struct timing_entry hs_prepare; + struct timing_entry hs_zero; + struct timing_entry hs_trail; + struct timing_entry hs_rqst; + struct timing_entry hs_rqst_clk; + struct timing_entry hs_exit; + struct timing_entry ta_go; + struct timing_entry ta_sure; + struct timing_entry ta_set; + struct timing_entry clk_post; + struct timing_entry clk_pre; +}; + +/** + * struct phy_clk_params - Clock parameters for PHY timing calculations. 
+ */ +struct phy_clk_params { + u32 bitclk_mbps; + u32 escclk_numer; + u32 escclk_denom; + u32 tlpx_numer_ns; + u32 treot_ns; +}; + +/** + * regulator_enable() - enable regulators for DSI PHY + * @phy: Pointer to DSI PHY hardware object. + * @reg_cfg: Regulator configuration for all DSI lanes. + */ +void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *reg_cfg) +{ + int i; + + for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) + DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]); + + /* make sure all values are written to hardware */ + wmb(); + + pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index); +} + +/** + * regulator_disable() - disable regulators + * @phy: Pointer to DSI PHY hardware object. + */ +void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy) +{ + pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index); +} + +/** + * enable() - Enable PHY hardware + * @phy: Pointer to DSI PHY hardware object. + * @cfg: Per lane configurations for timing, strength and lane + * configurations. 
+ */ +void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg) +{ + int i; + struct dsi_phy_per_lane_cfgs *timing = &cfg->timing; + u32 data; + + DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C); + + DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1); + for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) { + + DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]); + DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]); + DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]); + DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]); + + DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88); + + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]); + DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]); + + DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i), + cfg->strength.lane[i][0]); + DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i), + cfg->strength.lane[i][1]); + } + + /* make sure all values are written to hardware before enabling phy */ + wmb(); + + DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80); + udelay(100); + DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00); + + data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL); + + switch (cfg->pll_source) { + case DSI_PLL_SOURCE_STANDALONE: + DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01); + data &= ~BIT(2); + break; + case DSI_PLL_SOURCE_NATIVE: + DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03); + data &= ~BIT(2); + break; + case DSI_PLL_SOURCE_NON_NATIVE: + DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00); + data |= BIT(2); + break; + default: + break; + } + + DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data); + + /* Enable bias current for 
pll1 during split display case */ + if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE) + DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3); + + pr_debug("[DSI_%d]Phy enabled ", phy->index); +} + +/** + * disable() - Disable PHY hardware + * @phy: Pointer to DSI PHY hardware object. + */ +void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy) +{ + DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0); + DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0); + DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0); + pr_debug("[DSI_%d]Phy disabled ", phy->index); +} + +static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = { + 16, 18, 18, 24, 3, 8, 12 }; + +/** + * calc_clk_prepare - calculates prepare timing params for clk lane. + */ +static int calc_clk_prepare(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc, + s32 *actual_frac, + s64 *actual_intermediate) +{ + u32 const min_prepare_frac = 50; + u64 const multiplier = BIT(20); + + struct timing_entry *t = &desc->clk_prepare; + int rc = 0; + u64 dividend, temp, temp_multiple; + s32 frac = 0; + s64 intermediate; + s64 clk_prep_actual; + + dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier); + temp = roundup(div_s64(dividend, 100), multiplier); + temp += (t->rec_min * multiplier); + t->rec = div_s64(temp, multiplier); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor clk_prepare\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + /* calculate theoretical value */ + temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns + * multiplier; + intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps); + div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac); + clk_prep_actual = div_s64((intermediate + frac), multiplier); + + pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max); + pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual); + + *actual_frac = frac; + *actual_intermediate = intermediate; + + return rc; 
+} + +/** + * calc_clk_zero - calculates zero timing params for clk lane. + */ +static int calc_clk_zero(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc, + s32 actual_frac, + s64 actual_intermediate) +{ + u32 const clk_zero_min_frac = 2; + u64 const multiplier = BIT(20); + + int rc = 0; + struct timing_entry *t = &desc->clk_zero; + s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min; + + mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac)); + t->mipi_min = div_s64(mipi_min, multiplier); + + rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps), + clk_params->tlpx_numer_ns); + rec_temp2 = (rec_temp1 - (11 * multiplier)); + rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier); + rec_min = (div_s64(rec_temp3, multiplier) - 3); + t->rec_min = rec_min; + t->rec_max = ((t->rec_min > 255) ? 511 : 255); + + t->rec = DIV_ROUND_UP( + (((t->rec_max - t->rec_min) * clk_zero_min_frac) + + (t->rec_min * 100)), + 100); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor clk_zero\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + return rc; +} + +/** + * calc_clk_trail - calculates prepare trail params for clk lane. 
+ */ +static int calc_clk_trail(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc, + s64 *teot_clk_lane) +{ + u64 const multiplier = BIT(20); + u32 const phy_timing_frac = 30; + + int rc = 0; + struct timing_entry *t = &desc->clk_trail; + u64 temp_multiple; + s32 frac; + s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max; + s64 teot_clk_lane1; + + temp_multiple = div_s64( + (12 * multiplier * clk_params->tlpx_numer_ns), + clk_params->bitclk_mbps); + div_s64_rem(temp_multiple, multiplier, &frac); + + mipi_max_tr = ((105 * multiplier) + + (temp_multiple + frac)); + teot_clk_lane1 = div_s64(mipi_max_tr, multiplier); + + mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier)); + t->mipi_max = div_s64(mipi_max, multiplier); + + temp_multiple = div_s64( + (t->mipi_min * multiplier * clk_params->bitclk_mbps), + clk_params->tlpx_numer_ns); + + div_s64_rem(temp_multiple, multiplier, &frac); + rec_temp1 = temp_multiple + frac + (3 * multiplier); + rec_temp2 = div_s64(rec_temp1, 8); + rec_temp3 = roundup(rec_temp2, multiplier); + + t->rec_min = div_s64(rec_temp3, multiplier); + + /* recommended max */ + rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps), + clk_params->tlpx_numer_ns); + rec_temp2 = rec_temp1 + (3 * multiplier); + rec_temp3 = rec_temp2 / 8; + t->rec_max = div_s64(rec_temp3, multiplier); + + t->rec = DIV_ROUND_UP( + (((t->rec_max - t->rec_min) * phy_timing_frac) + + (t->rec_min * 100)), + 100); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor clk_zero\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + *teot_clk_lane = teot_clk_lane1; + pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + return rc; + +} + +/** + * calc_hs_prepare - calculates prepare timing params for data lanes in HS. 
+ */ +static int calc_hs_prepare(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc, + u64 *temp_mul) +{ + u64 const multiplier = BIT(20); + u32 const min_prepare_frac = 50; + int rc = 0; + struct timing_entry *t = &desc->hs_prepare; + u64 temp_multiple, dividend, temp; + s32 frac; + s64 rec_temp1, rec_temp2, mipi_max, mipi_min; + u32 low_clk_multiplier = 0; + + if (clk_params->bitclk_mbps <= 120) + low_clk_multiplier = 2; + /* mipi min */ + temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns), + clk_params->bitclk_mbps); + div_s64_rem(temp_multiple, multiplier, &frac); + mipi_min = (40 * multiplier) + (temp_multiple + frac); + t->mipi_min = div_s64(mipi_min, multiplier); + + /* mipi_max */ + temp_multiple = div_s64( + (6 * multiplier * clk_params->tlpx_numer_ns), + clk_params->bitclk_mbps); + div_s64_rem(temp_multiple, multiplier, &frac); + mipi_max = (85 * multiplier) + temp_multiple; + t->mipi_max = div_s64(mipi_max, multiplier); + + /* recommended min */ + temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps), + clk_params->tlpx_numer_ns); + temp_multiple -= (low_clk_multiplier * multiplier); + div_s64_rem(temp_multiple, multiplier, &frac); + rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier); + t->rec_min = div_s64(rec_temp1, multiplier); + + /* recommended max */ + temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps), + clk_params->tlpx_numer_ns); + temp_multiple -= (low_clk_multiplier * multiplier); + div_s64_rem(temp_multiple, multiplier, &frac); + rec_temp2 = rounddown((temp_multiple / 8), multiplier); + t->rec_max = div_s64(rec_temp2, multiplier); + + /* register value */ + dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac); + temp = roundup(div_u64(dividend, 100), multiplier); + t->rec = div_s64((temp + rec_temp1), multiplier); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor hs_prepare\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + temp_multiple = div_s64( + 
(8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns), + clk_params->bitclk_mbps); + + *temp_mul = temp_multiple; + pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + return rc; +} + +/** + * calc_hs_zero - calculates zero timing params for data lanes in HS. + */ +static int calc_hs_zero(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc, + u64 temp_multiple) +{ + u32 const hs_zero_min_frac = 10; + u64 const multiplier = BIT(20); + int rc = 0; + struct timing_entry *t = &desc->hs_zero; + s64 rec_temp1, rec_temp2, rec_temp3, mipi_min; + s64 rec_min; + + mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier), + clk_params->bitclk_mbps); + rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple; + t->mipi_min = div_s64(rec_temp1, multiplier); + + /* recommended min */ + rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps), + clk_params->tlpx_numer_ns); + rec_temp2 = rec_temp1 - (11 * multiplier); + rec_temp3 = roundup((rec_temp2 / 8), multiplier); + rec_min = rec_temp3 - (3 * multiplier); + t->rec_min = div_s64(rec_min, multiplier); + t->rec_max = ((t->rec_min > 255) ? 511 : 255); + + t->rec = DIV_ROUND_UP( + (((t->rec_max - t->rec_min) * hs_zero_min_frac) + + (t->rec_min * 100)), + 100); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor hs_zero\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + + return rc; +} + +/** + * calc_hs_trail - calculates trail timing params for data lanes in HS. 
+ */ +static int calc_hs_trail(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc, + u64 teot_clk_lane) +{ + u32 const phy_timing_frac = 30; + int rc = 0; + struct timing_entry *t = &desc->hs_trail; + s64 rec_temp1; + + t->mipi_min = 60 + + mult_frac(clk_params->tlpx_numer_ns, 4, + clk_params->bitclk_mbps); + + t->mipi_max = teot_clk_lane - clk_params->treot_ns; + + t->rec_min = DIV_ROUND_UP( + ((t->mipi_min * clk_params->bitclk_mbps) + + (3 * clk_params->tlpx_numer_ns)), + (8 * clk_params->tlpx_numer_ns)); + + rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) + + (3 * clk_params->tlpx_numer_ns)); + t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns)); + rec_temp1 = DIV_ROUND_UP( + ((t->rec_max - t->rec_min) * phy_timing_frac), + 100); + t->rec = rec_temp1 + t->rec_min; + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor hs_trail\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + + return rc; +} + +/** + * calc_hs_rqst - calculates rqst timing params for data lanes in HS. + */ +static int calc_hs_rqst(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc) +{ + int rc = 0; + struct timing_entry *t = &desc->hs_rqst; + + t->rec = DIV_ROUND_UP( + ((t->mipi_min * clk_params->bitclk_mbps) - + (8 * clk_params->tlpx_numer_ns)), + (8 * clk_params->tlpx_numer_ns)); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor hs_rqst, %d\n", t->rec); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + + return rc; +} + +/** + * calc_hs_exit - calculates exit timing params for data lanes in HS. 
+ */ +static int calc_hs_exit(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc) +{ + u32 const hs_exit_min_frac = 10; + int rc = 0; + struct timing_entry *t = &desc->hs_exit; + + t->rec_min = (DIV_ROUND_UP( + (t->mipi_min * clk_params->bitclk_mbps), + (8 * clk_params->tlpx_numer_ns)) - 1); + + t->rec = DIV_ROUND_UP( + (((t->rec_max - t->rec_min) * hs_exit_min_frac) + + (t->rec_min * 100)), + 100); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor hs_exit\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + + return rc; +} + +/** + * calc_hs_rqst_clk - calculates rqst timing params for clock lane.. + */ +static int calc_hs_rqst_clk(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc) +{ + int rc = 0; + struct timing_entry *t = &desc->hs_rqst_clk; + + t->rec = DIV_ROUND_UP( + ((t->mipi_min * clk_params->bitclk_mbps) - + (8 * clk_params->tlpx_numer_ns)), + (8 * clk_params->tlpx_numer_ns)); + + if (t->rec & 0xffffff00) { + pr_err("Incorrect rec valuefor hs_rqst_clk\n"); + rc = -EINVAL; + } else { + t->reg_value = t->rec; + } + + pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n", + t->mipi_min, t->mipi_max, t->rec_min, t->rec_max, + t->reg_value); + + return rc; +} + +/** + * dsi_phy_calc_timing_params - calculates timing paramets for a given bit clock + */ +static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params, + struct phy_timing_desc *desc) +{ + int rc = 0; + s32 actual_frac = 0; + s64 actual_intermediate = 0; + u64 temp_multiple; + s64 teot_clk_lane; + + rc = calc_clk_prepare(clk_params, desc, &actual_frac, + &actual_intermediate); + if (rc) { + pr_err("clk_prepare calculations failed, rc=%d\n", rc); + goto error; + } + + rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate); + if (rc) { + 
pr_err("clk_zero calculations failed, rc=%d\n", rc);
                goto error;
        }

        /*
         * The remaining parameters are computed in dependency order:
         * clk_trail produces teot_clk_lane (needed by hs_trail) and
         * hs_prepare produces temp_multiple (needed by hs_zero).
         */
        rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
        if (rc) {
                pr_err("clk_trail calculations failed, rc=%d\n", rc);
                goto error;
        }

        rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
        if (rc) {
                pr_err("hs_prepare calculations failed, rc=%d\n", rc);
                goto error;
        }

        rc = calc_hs_zero(clk_params, desc, temp_multiple);
        if (rc) {
                pr_err("hs_zero calculations failed, rc=%d\n", rc);
                goto error;
        }

        rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
        if (rc) {
                pr_err("hs_trail calculations failed, rc=%d\n", rc);
                goto error;
        }

        rc = calc_hs_rqst(clk_params, desc);
        if (rc) {
                pr_err("hs_rqst calculations failed, rc=%d\n", rc);
                goto error;
        }

        rc = calc_hs_exit(clk_params, desc);
        if (rc) {
                pr_err("hs_exit calculations failed, rc=%d\n", rc);
                goto error;
        }

        rc = calc_hs_rqst_clk(clk_params, desc);
        if (rc) {
                pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
                goto error;
        }
error:
        return rc;
}

/**
 * calculate_timing_params() - calculates timing parameters.
 * @phy:    Pointer to DSI PHY hardware object.
 * @mode:   Mode information for which timing has to be calculated.
 * @host:   DSI host configuration for this mode.
 * @timing: Timing parameters for each lane which will be returned.
+ */ +int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy, + struct dsi_mode_info *mode, + struct dsi_host_common_cfg *host, + struct dsi_phy_per_lane_cfgs *timing) +{ + /* constants */ + u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */ + u32 const esc_clk_mmss_cc_prediv = 10; + u32 const tlpx_numer = 1000; + u32 const tr_eot = 20; + u32 const clk_prepare_spec_min = 38; + u32 const clk_prepare_spec_max = 95; + u32 const clk_trail_spec_min = 60; + u32 const hs_exit_spec_min = 100; + u32 const hs_exit_reco_max = 255; + u32 const hs_rqst_spec_min = 50; + + /* local vars */ + int rc = 0; + int i; + u32 h_total, v_total; + u64 inter_num; + u32 num_of_lanes = 0; + u32 bpp; + u64 x, y; + struct phy_timing_desc desc; + struct phy_clk_params clk_params = {0}; + + memset(&desc, 0x0, sizeof(desc)); + h_total = DSI_H_TOTAL(mode); + v_total = DSI_V_TOTAL(mode); + + bpp = bits_per_pixel[host->dst_format]; + + inter_num = bpp * mode->refresh_rate; + + if (host->data_lanes & DSI_DATA_LANE_0) + num_of_lanes++; + if (host->data_lanes & DSI_DATA_LANE_1) + num_of_lanes++; + if (host->data_lanes & DSI_DATA_LANE_2) + num_of_lanes++; + if (host->data_lanes & DSI_DATA_LANE_3) + num_of_lanes++; + + + x = mult_frac(v_total * h_total, inter_num, num_of_lanes); + y = rounddown(x, 1); + + clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1); + clk_params.escclk_numer = esc_clk_mhz; + clk_params.escclk_denom = esc_clk_mmss_cc_prediv; + clk_params.tlpx_numer_ns = tlpx_numer; + clk_params.treot_ns = tr_eot; + + + /* Setup default parameters */ + desc.clk_prepare.mipi_min = clk_prepare_spec_min; + desc.clk_prepare.mipi_max = clk_prepare_spec_max; + desc.clk_trail.mipi_min = clk_trail_spec_min; + desc.hs_exit.mipi_min = hs_exit_spec_min; + desc.hs_exit.rec_max = hs_exit_reco_max; + + desc.clk_prepare.rec_min = DIV_ROUND_UP( + (desc.clk_prepare.mipi_min * clk_params.bitclk_mbps), + (8 * clk_params.tlpx_numer_ns) + ); + + desc.clk_prepare.rec_max = rounddown( 
+ mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps), + 1, (8 * clk_params.tlpx_numer_ns)), + 1); + + desc.hs_rqst.mipi_min = hs_rqst_spec_min; + desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min; + + pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n", + clk_params.bitclk_mbps, clk_params.tlpx_numer_ns, + clk_params.treot_ns); + rc = dsi_phy_calc_timing_params(&clk_params, &desc); + if (rc) { + pr_err("Timing calc failed, rc=%d\n", rc); + goto error; + } + + + for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) { + timing->lane[i][0] = desc.hs_exit.reg_value; + + if (i == DSI_LOGICAL_CLOCK_LANE) + timing->lane[i][1] = desc.clk_zero.reg_value; + else + timing->lane[i][1] = desc.hs_zero.reg_value; + + if (i == DSI_LOGICAL_CLOCK_LANE) + timing->lane[i][2] = desc.clk_prepare.reg_value; + else + timing->lane[i][2] = desc.hs_prepare.reg_value; + + if (i == DSI_LOGICAL_CLOCK_LANE) + timing->lane[i][3] = desc.clk_trail.reg_value; + else + timing->lane[i][3] = desc.hs_trail.reg_value; + + if (i == DSI_LOGICAL_CLOCK_LANE) + timing->lane[i][4] = desc.hs_rqst_clk.reg_value; + else + timing->lane[i][4] = desc.hs_rqst.reg_value; + + timing->lane[i][5] = 0x3; + timing->lane[i][6] = 0x4; + timing->lane[i][7] = 0xA0; + pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0], + timing->lane[i][1], + timing->lane[i][2], + timing->lane[i][3], + timing->lane[i][4]); + } + timing->count_per_lane = 8; + +error: + return rc; +} -- GitLab From 0aa6b7bc60f61315a44f9e1a7635b0f5ba4de40c Mon Sep 17 00:00:00 2001 From: Ajay Singh Parmar Date: Mon, 16 May 2016 17:43:17 -0700 Subject: [PATCH 004/310] drm/msm/dsi-staging: add hardware driver for dsi controller Add hardware driver for DSI controller v1.4 Change-Id: I74a3b5ebbde1ca43b060d6e5ba2462fb66f0a3a8 Signed-off-by: Ajay Singh Parmar --- drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h | 558 +++++++ .../gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c | 1321 +++++++++++++++++ .../drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h | 192 +++ 3 files 
changed, 2071 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h new file mode 100644 index 000000000000..b5ddfbb4ef72 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h @@ -0,0 +1,558 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DSI_CTRL_HW_H_ +#define _DSI_CTRL_HW_H_ + +#include +#include +#include +#include + +#include "dsi_defs.h" + +/** + * Modifier flag for command transmission. If this flag is set, command + * information is programmed to hardware and transmission is not triggered. + * Caller should call the trigger_command_dma() to start the transmission. This + * flag is valed for kickoff_command() and kickoff_fifo_command() operations. 
+ */ +#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER 0x1 + +/** + * enum dsi_ctrl_version - version of the dsi host controller + * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version + * @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller + * @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller + * @DSI_CTRL_VERSION_MAX: max version + */ +enum dsi_ctrl_version { + DSI_CTRL_VERSION_UNKNOWN, + DSI_CTRL_VERSION_1_4, + DSI_CTRL_VERSION_2_0, + DSI_CTRL_VERSION_MAX +}; + +/** + * enum dsi_ctrl_hw_features - features supported by dsi host controller + * @DSI_CTRL_VIDEO_TPG: Test pattern support for video mode. + * @DSI_CTRL_CMD_TPG: Test pattern support for command mode. + * @DSI_CTRL_VARIABLE_REFRESH_RATE: variable panel timing + * @DSI_CTRL_DYNAMIC_REFRESH: variable pixel clock rate + * @DSI_CTRL_NULL_PACKET_INSERTION: NULL packet insertion + * @DSI_CTRL_DESKEW_CALIB: Deskew calibration support + * @DSI_CTRL_DPHY: Controller support for DPHY + * @DSI_CTRL_CPHY: Controller support for CPHY + * @DSI_CTRL_MAX_FEATURES: + */ +enum dsi_ctrl_hw_features { + DSI_CTRL_VIDEO_TPG, + DSI_CTRL_CMD_TPG, + DSI_CTRL_VARIABLE_REFRESH_RATE, + DSI_CTRL_DYNAMIC_REFRESH, + DSI_CTRL_NULL_PACKET_INSERTION, + DSI_CTRL_DESKEW_CALIB, + DSI_CTRL_DPHY, + DSI_CTRL_CPHY, + DSI_CTRL_MAX_FEATURES +}; + +/** + * enum dsi_test_pattern - test pattern type + * @DSI_TEST_PATTERN_FIXED: Test pattern is fixed, based on init value. + * @DSI_TEST_PATTERN_INC: Incremental test pattern, base on init value. + * @DSI_TEST_PATTERN_POLY: Pattern generated from polynomial and init val. + * @DSI_TEST_PATTERN_MAX: + */ +enum dsi_test_pattern { + DSI_TEST_PATTERN_FIXED = 0, + DSI_TEST_PATTERN_INC, + DSI_TEST_PATTERN_POLY, + DSI_TEST_PATTERN_MAX +}; + +/** + * enum dsi_status_int_type - status interrupts generated by DSI controller + * @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out. + * @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out. 
+ * @DSI_CMD_STREAM1_FRAME_DONE: A frame of command mode stream1 is sent out. + * @DSI_CMD_STREAM2_FRAME_DONE: A frame of command mode stream2 is sent out. + * @DSI_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out. + * @DSI_BTA_DONE: A BTA is completed. + * @DSI_CMD_FRAME_DONE: A frame of selected command mode stream is + * sent out by MDP. + * @DSI_DYN_REFRESH_DONE: The dynamic refresh operation has completed. + * @DSI_DESKEW_DONE: The deskew calibration operation has completed + * @DSI_DYN_BLANK_DMA_DONE: The dynamic blankin DMA operation has + * completed. + */ +enum dsi_status_int_type { + DSI_CMD_MODE_DMA_DONE = BIT(0), + DSI_CMD_STREAM0_FRAME_DONE = BIT(1), + DSI_CMD_STREAM1_FRAME_DONE = BIT(2), + DSI_CMD_STREAM2_FRAME_DONE = BIT(3), + DSI_VIDEO_MODE_FRAME_DONE = BIT(4), + DSI_BTA_DONE = BIT(5), + DSI_CMD_FRAME_DONE = BIT(6), + DSI_DYN_REFRESH_DONE = BIT(7), + DSI_DESKEW_DONE = BIT(8), + DSI_DYN_BLANK_DMA_DONE = BIT(9) +}; + +/** + * enum dsi_error_int_type - error interrupts generated by DSI controller + * @DSI_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet. + * @DSI_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet. + * @DSI_RDBK_CRC_ERR: CRC error in read packet. + * @DSI_RDBK_INCOMPLETE_PKT: Incomplete read packet. + * @DSI_PERIPH_ERROR_PKT: Error packet returned from peripheral, + * @DSI_LP_RX_TIMEOUT: Low power reverse transmission timeout. + * @DSI_HS_TX_TIMEOUT: High speed forward transmission timeout. + * @DSI_BTA_TIMEOUT: BTA timeout. + * @DSI_PLL_UNLOCK: PLL has unlocked. + * @DSI_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry. + * @DSI_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned. + * @DSI_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence. + * @DSI_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout. + * @DSI_INTERLEAVE_OP_CONTENTION: Interleave operation contention. + * @DSI_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow. 
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW:  Command MDP FIFO underflow (failed to
+ *                               receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW:   High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW:   High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW:   High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW:   High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW:  High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW:  High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW:  High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW:  High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION:     PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION:     PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION:     PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION:     PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION:     PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION:     PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION:     PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION:     PHY level contention while lane 3 is high.
+ */ +enum dsi_error_int_type { + DSI_RDBK_SINGLE_ECC_ERR = BIT(0), + DSI_RDBK_MULTI_ECC_ERR = BIT(1), + DSI_RDBK_CRC_ERR = BIT(2), + DSI_RDBK_INCOMPLETE_PKT = BIT(3), + DSI_PERIPH_ERROR_PKT = BIT(4), + DSI_LP_RX_TIMEOUT = BIT(5), + DSI_HS_TX_TIMEOUT = BIT(6), + DSI_BTA_TIMEOUT = BIT(7), + DSI_PLL_UNLOCK = BIT(8), + DSI_DLN0_ESC_ENTRY_ERR = BIT(9), + DSI_DLN0_ESC_SYNC_ERR = BIT(10), + DSI_DLN0_LP_CONTROL_ERR = BIT(11), + DSI_PENDING_HS_TX_TIMEOUT = BIT(12), + DSI_INTERLEAVE_OP_CONTENTION = BIT(13), + DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14), + DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15), + DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16), + DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17), + DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18), + DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19), + DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20), + DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21), + DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22), + DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23), + DSI_DLN0_LP0_CONTENTION = BIT(24), + DSI_DLN1_LP0_CONTENTION = BIT(25), + DSI_DLN2_LP0_CONTENTION = BIT(26), + DSI_DLN3_LP0_CONTENTION = BIT(27), + DSI_DLN0_LP1_CONTENTION = BIT(28), + DSI_DLN1_LP1_CONTENTION = BIT(29), + DSI_DLN2_LP1_CONTENTION = BIT(30), + DSI_DLN3_LP1_CONTENTION = BIT(31), +}; + +/** + * struct dsi_ctrl_cmd_dma_info - command buffer information + * @offset: IOMMU VA for command buffer address. + * @length: Length of the command buffer. + * @en_broadcast: Enable broadcast mode if set to true. + * @is_master: Is master in broadcast mode. + * @use_lpm: Use low power mode for command transmission. + */ +struct dsi_ctrl_cmd_dma_info { + u32 offset; + u32 length; + bool en_broadcast; + bool is_master; + bool use_lpm; +}; + +/** + * struct dsi_ctrl_cmd_dma_fifo_info - command payload tp be sent using FIFO + * @command: VA for command buffer. + * @size: Size of the command buffer. + * @en_broadcast: Enable broadcast mode if set to true. + * @is_master: Is master in broadcast mode. + * @use_lpm: Use low power mode for command transmission. 
+ */ +struct dsi_ctrl_cmd_dma_fifo_info { + u32 *command; + u32 size; + bool en_broadcast; + bool is_master; + bool use_lpm; +}; + +struct dsi_ctrl_hw; + +/** + * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware + */ +struct dsi_ctrl_hw_ops { + + /** + * host_setup() - Setup DSI host configuration + * @ctrl: Pointer to controller host hardware. + * @config: Configuration for DSI host controller + */ + void (*host_setup)(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *config); + + /** + * video_engine_en() - enable DSI video engine + * @ctrl: Pointer to controller host hardware. + * @on: Enable/disabel video engine. + */ + void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on); + + /** + * video_engine_setup() - Setup dsi host controller for video mode + * @ctrl: Pointer to controller host hardware. + * @common_cfg: Common configuration parameters. + * @cfg: Video mode configuration. + * + * Set up DSI video engine with a specific configuration. Controller and + * video engine are not enabled as part of this function. + */ + void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_video_engine_cfg *cfg); + + /** + * set_video_timing() - set up the timing for video frame + * @ctrl: Pointer to controller host hardware. + * @mode: Video mode information. + * + * Set up the video timing parameters for the DSI video mode operation. + */ + void (*set_video_timing)(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode); + + /** + * cmd_engine_setup() - setup dsi host controller for command mode + * @ctrl: Pointer to the controller host hardware. + * @common_cfg: Common configuration parameters. + * @cfg: Command mode configuration. + * + * Setup DSI CMD engine with a specific configuration. Controller and + * command engine are not enabled as part of this function. 
+ */ + void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_cmd_engine_cfg *cfg); + + /** + * ctrl_en() - enable DSI controller engine + * @ctrl: Pointer to the controller host hardware. + * @on: turn on/off the DSI controller engine. + */ + void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on); + + /** + * cmd_engine_en() - enable DSI controller command engine + * @ctrl: Pointer to the controller host hardware. + * @on: Turn on/off the DSI command engine. + */ + void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on); + + /** + * phy_sw_reset() - perform a soft reset on the PHY. + * @ctrl: Pointer to the controller host hardware. + */ + void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl); + + /** + * soft_reset() - perform a soft reset on DSI controller + * @ctrl: Pointer to the controller host hardware. + * + * The video, command and controller engines will be disable before the + * reset is triggered. These engines will not be enabled after the reset + * is complete. Caller must re-enable the engines. + * + * If the reset is done while MDP timing engine is turned on, the video + * enigne should be re-enabled only during the vertical blanking time. + */ + void (*soft_reset)(struct dsi_ctrl_hw *ctrl); + + /** + * setup_lane_map() - setup mapping between logical and physical lanes + * @ctrl: Pointer to the controller host hardware. + * @lane_map: Structure defining the mapping between DSI logical + * lanes and physical lanes. + */ + void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_mapping *lane_map); + + /** + * kickoff_command() - transmits commands stored in memory + * @ctrl: Pointer to the controller host hardware. + * @cmd: Command information. + * @flags: Modifiers for command transmission. + * + * The controller hardware is programmed with address and size of the + * command buffer. The transmission is kicked off if + * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. 
If this flag is + * set, caller should make a separate call to trigger_command_dma() to + * transmit the command. + */ + void (*kickoff_command)(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags); + + /** + * kickoff_fifo_command() - transmits a command using FIFO in dsi + * hardware. + * @ctrl: Pointer to the controller host hardware. + * @cmd: Command information. + * @flags: Modifiers for command transmission. + * + * The controller hardware FIFO is programmed with command header and + * payload. The transmission is kicked off if + * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is + * set, caller should make a separate call to trigger_command_dma() to + * transmit the command. + */ + void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_fifo_info *cmd, + u32 flags); + + void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl); + /** + * trigger_command_dma() - trigger transmission of command buffer. + * @ctrl: Pointer to the controller host hardware. + * + * This trigger can be only used if there was a prior call to + * kickoff_command() of kickoff_fifo_command() with + * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag. + */ + void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl); + + /** + * get_cmd_read_data() - get data read from the peripheral + * @ctrl: Pointer to the controller host hardware. + * @rd_buf: Buffer where data will be read into. + * @total_read_len: Number of bytes to read. + */ + u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl, + u8 *rd_buf, + u32 total_read_len); + + /** + * ulps_request() - request ulps entry for specified lanes + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes (enum dsi_data_lanes) which need + * to enter ULPS. + * + * Caller should check if lanes are in ULPS mode by calling + * get_lanes_in_ulps() operation. 
+ */ + void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes); + + /** + * ulps_exit() - exit ULPS on specified lanes + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes (enum dsi_data_lanes) which need + * to exit ULPS. + * + * Caller should check if lanes are in active mode by calling + * get_lanes_in_ulps() operation. + */ + void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes); + + /** + * clear_ulps_request() - clear ulps request once all lanes are active + * @ctrl: Pointer to controller host hardware. + * @lanes: ORed list of lanes (enum dsi_data_lanes). + * + * ULPS request should be cleared after the lanes have exited ULPS. + */ + void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes); + + /** + * get_lanes_in_ulps() - returns the list of lanes in ULPS mode + * @ctrl: Pointer to the controller host hardware. + * + * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS + * state. If 0 is returned, all the lanes are active. + * + * Return: List of lanes in ULPS state. + */ + u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl); + + /** + * clamp_enable() - enable DSI clamps to keep PHY driving a stable link + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes which need to be clamped. + * @enable_ulps: TODO:?? + */ + void (*clamp_enable)(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool enable_ulps); + + /** + * clamp_disable() - disable DSI clamps + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes which need to have clamps released. + * @disable_ulps: TODO:?? + */ + void (*clamp_disable)(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool disable_ulps); + + /** + * get_interrupt_status() - returns the interrupt status + * @ctrl: Pointer to the controller host hardware. + * + * Returns the ORed list of interrupts(enum dsi_status_int_type) that + * are active. This list does not include any error interrupts. 
Caller + * should call get_error_status for error interrupts. + * + * Return: List of active interrupts. + */ + u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl); + + /** + * clear_interrupt_status() - clears the specified interrupts + * @ctrl: Pointer to the controller host hardware. + * @ints: List of interrupts to be cleared. + */ + void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints); + + /** + * enable_status_interrupts() - enable the specified interrupts + * @ctrl: Pointer to the controller host hardware. + * @ints: List of interrupts to be enabled. + * + * Enables the specified interrupts. This list will override the + * previous interrupts enabled through this function. Caller has to + * maintain the state of the interrupts enabled. To disable all + * interrupts, set ints to 0. + */ + void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints); + + /** + * get_error_status() - returns the error status + * @ctrl: Pointer to the controller host hardware. + * + * Returns the ORed list of errors(enum dsi_error_int_type) that are + * active. This list does not include any status interrupts. Caller + * should call get_interrupt_status for status interrupts. + * + * Return: List of active error interrupts. + */ + u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl); + + /** + * clear_error_status() - clears the specified errors + * @ctrl: Pointer to the controller host hardware. + * @errors: List of errors to be cleared. + */ + void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors); + + /** + * enable_error_interrupts() - enable the specified interrupts + * @ctrl: Pointer to the controller host hardware. + * @errors: List of errors to be enabled. + * + * Enables the specified interrupts. This list will override the + * previous interrupts enabled through this function. Caller has to + * maintain the state of the interrupts enabled. To disable all + * interrupts, set errors to 0. 
+ */ + void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors); + + /** + * video_test_pattern_setup() - setup test pattern engine for video mode + * @ctrl: Pointer to the controller host hardware. + * @type: Type of test pattern. + * @init_val: Initial value to use for generating test pattern. + */ + void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val); + + /** + * cmd_test_pattern_setup() - setup test patttern engine for cmd mode + * @ctrl: Pointer to the controller host hardware. + * @type: Type of test pattern. + * @init_val: Initial value to use for generating test pattern. + * @stream_id: Stream Id on which packets are generated. + */ + void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val, + u32 stream_id); + + /** + * test_pattern_enable() - enable test pattern engine + * @ctrl: Pointer to the controller host hardware. + * @enable: Enable/Disable test pattern engine. + */ + void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable); + + /** + * trigger_cmd_test_pattern() - trigger a command mode frame update with + * test pattern + * @ctrl: Pointer to the controller host hardware. + * @stream_id: Stream on which frame update is sent. + */ + void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl, + u32 stream_id); +}; + +/* + * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance + * @base: VA for the DSI controller base address. + * @length: Length of the DSI controller register map. + * @index: Instance ID of the controller. + * @feature_map: Features supported by the DSI controller. + * @ops: Function pointers to the operations supported by the + * controller. 
+ */ +struct dsi_ctrl_hw { + void __iomem *base; + u32 length; + void __iomem *mmss_misc_base; + u32 mmss_misc_length; + u32 index; + + /* features */ + DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES); + struct dsi_ctrl_hw_ops ops; + + /* capabilities */ + u32 supported_interrupts; + u64 supported_errors; +}; + +#endif /* _DSI_CTRL_HW_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c new file mode 100644 index 000000000000..8326024f76ec --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c @@ -0,0 +1,1321 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "dsi-hw:" fmt +#include + +#include "dsi_ctrl_hw.h" +#include "dsi_ctrl_reg_1_4.h" +#include "dsi_hw.h" + +#define MMSS_MISC_CLAMP_REG_OFF 0x0014 + +/* Unsupported formats default to RGB888 */ +static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = { + 0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 }; +static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = { + 0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 }; + + +/** + * dsi_setup_trigger_controls() - setup dsi trigger configurations + * @ctrl: Pointer to the controller host hardware. + * @cfg: DSI host configuration that is common to both video and + * command modes. 
+ */ +static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *cfg) +{ + u32 reg = 0; + const u8 trigger_map[DSI_TRIGGER_MAX] = { + 0x0, 0x2, 0x1, 0x4, 0x5, 0x6 }; + + reg |= (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0; + reg |= (trigger_map[cfg->dma_cmd_trigger] & 0x7); + reg |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4; + DSI_W32(ctrl, DSI_TRIG_CTRL, reg); +} + +/** + * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration + * @ctrl: Pointer to the controller host hardware. + * @cfg: DSI host configuration that is common to both video and + * command modes. + */ +void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *cfg) +{ + u32 reg_value = 0; + + dsi_setup_trigger_controls(ctrl, cfg); + + /* Setup clocking timing controls */ + reg_value = ((cfg->t_clk_post & 0x3F) << 8); + reg_value |= (cfg->t_clk_pre & 0x3F); + DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value); + + /* EOT packet control */ + reg_value = cfg->append_tx_eot ? 1 : 0; + reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0); + DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value); + + /* Turn on dsi clocks */ + DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F); + + /* Setup DSI control register */ + reg_value = 0; + reg_value |= (cfg->en_crc_check ? BIT(24) : 0); + reg_value |= (cfg->en_ecc_check ? BIT(20) : 0); + reg_value |= BIT(8); /* Clock lane */ + reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0); + reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0); + reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0); + reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0); + + DSI_W32(ctrl, DSI_CTRL, reg_value); + + /* Enable Timing double buffering */ + DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1); + + pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index); +} + +/** + * phy_sw_reset() - perform a soft reset on the PHY. + * @ctrl: Pointer to the controller host hardware. 
+ */ +void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl) +{ + DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1); + udelay(1000); + DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0); + udelay(100); + + pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index); +} + +/** + * soft_reset() - perform a soft reset on DSI controller + * @ctrl: Pointer to the controller host hardware. + * + * The video, command and controller engines will be disable before the + * reset is triggered. These engines will not be enabled after the reset + * is complete. Caller must re-enable the engines. + * + * If the reset is done while MDP timing engine is turned on, the video + * enigne should be re-enabled only during the vertical blanking time. + */ +void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl) +{ + u32 reg = 0; + u32 reg_ctrl = 0; + + /* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */ + reg_ctrl = DSI_R32(ctrl, DSI_CTRL); + DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7); + + /* Force enable PCLK, BYTECLK, AHBM_HCLK */ + reg = DSI_R32(ctrl, DSI_CLK_CTRL); + reg |= 0x23F; + DSI_W32(ctrl, DSI_CLK_CTRL, reg); + + /* Trigger soft reset */ + DSI_W32(ctrl, DSI_SOFT_RESET, 0x1); + udelay(1); + DSI_W32(ctrl, DSI_SOFT_RESET, 0x0); + + /* Disable force clock on */ + reg &= ~(BIT(20) | BIT(11)); + DSI_W32(ctrl, DSI_CLK_CTRL, reg); + + /* Re-enable DSI controller */ + DSI_W32(ctrl, DSI_CTRL, reg_ctrl); + pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index); +} + +/** + * set_video_timing() - set up the timing for video frame + * @ctrl: Pointer to controller host hardware. + * @mode: Video mode information. + * + * Set up the video timing parameters for the DSI video mode operation. 
+ */ +void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode) +{ + u32 reg = 0; + u32 hs_start = 0; + u32 hs_end, active_h_start, active_h_end, h_total; + u32 vs_start = 0, vs_end = 0; + u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total; + + hs_end = mode->h_sync_width; + active_h_start = mode->h_sync_width + mode->h_back_porch; + active_h_end = active_h_start + mode->h_active; + h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active + + mode->h_front_porch) - 1; + + vpos_end = mode->v_sync_width; + active_v_start = mode->v_sync_width + mode->v_back_porch; + active_v_end = active_v_start + mode->v_active; + v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active + + mode->v_front_porch) - 1; + + reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF); + DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg); + + reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF); + DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg); + + reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF); + DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg); + + reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF); + DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg); + + reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF); + DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg); + + reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF); + DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg); + + /* TODO: HS TIMER value? */ + DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08); + DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100); + DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1); + pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index); +} + +/** + * video_engine_setup() - Setup dsi host controller for video mode + * @ctrl: Pointer to controller host hardware. + * @common_cfg: Common configuration parameters. + * @cfg: Video mode configuration. + * + * Set up DSI video engine with a specific configuration. 
Controller and + * video engine are not enabled as part of this function. + */ +void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_video_engine_cfg *cfg) +{ + u32 reg = 0; + + reg |= (cfg->last_line_interleave_en ? BIT(31) : 0); + reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0); + reg |= (cfg->hfp_lp11_en ? BIT(24) : 0); + reg |= (cfg->hbp_lp11_en ? BIT(20) : 0); + reg |= (cfg->hsa_lp11_en ? BIT(16) : 0); + reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0); + reg |= (cfg->bllp_lp11_en ? BIT(12) : 0); + reg |= (cfg->traffic_mode & 0x3) << 8; + reg |= (cfg->vc_id & 0x3); + reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4; + DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg); + + reg = (common_cfg->swap_mode & 0x7) << 12; + reg |= (common_cfg->bit_swap_red ? BIT(0) : 0); + reg |= (common_cfg->bit_swap_green ? BIT(4) : 0); + reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0); + DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg); + + pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index); +} + +/** + * cmd_engine_setup() - setup dsi host controller for command mode + * @ctrl: Pointer to the controller host hardware. + * @common_cfg: Common configuration parameters. + * @cfg: Command mode configuration. + * + * Setup DSI CMD engine with a specific configuration. Controller and + * command engine are not enabled as part of this function. + */ +void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_cmd_engine_cfg *cfg) +{ + u32 reg = 0; + + reg = (cfg->max_cmd_packets_interleave & 0xF) << 20; + reg |= (common_cfg->bit_swap_red ? BIT(4) : 0); + reg |= (common_cfg->bit_swap_green ? BIT(8) : 0); + reg |= (common_cfg->bit_swap_blue ? 
BIT(12) : 0); + reg |= cmd_mode_format_map[common_cfg->dst_format]; + DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg); + + reg = cfg->wr_mem_start & 0xFF; + reg |= (cfg->wr_mem_continue & 0xFF) << 8; + reg |= (cfg->insert_dcs_command ? BIT(16) : 0); + DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg); + + pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index); +} + +/** + * video_engine_en() - enable DSI video engine + * @ctrl: Pointer to controller host hardware. + * @on: Enable/disabel video engine. + */ +void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on) +{ + u32 reg = 0; + + /* Set/Clear VIDEO_MODE_EN bit */ + reg = DSI_R32(ctrl, DSI_CTRL); + if (on) + reg |= BIT(1); + else + reg &= ~BIT(1); + + DSI_W32(ctrl, DSI_CTRL, reg); + + pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on); +} + +/** + * ctrl_en() - enable DSI controller engine + * @ctrl: Pointer to the controller host hardware. + * @on: turn on/off the DSI controller engine. + */ +void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on) +{ + u32 reg = 0; + + /* Set/Clear DSI_EN bit */ + reg = DSI_R32(ctrl, DSI_CTRL); + if (on) + reg |= BIT(0); + else + reg &= ~BIT(0); + + DSI_W32(ctrl, DSI_CTRL, reg); + + pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on); +} + +/** + * cmd_engine_en() - enable DSI controller command engine + * @ctrl: Pointer to the controller host hardware. + * @on: Turn on/off the DSI command engine. + */ +void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on) +{ + u32 reg = 0; + + /* Set/Clear CMD_MODE_EN bit */ + reg = DSI_R32(ctrl, DSI_CTRL); + if (on) + reg |= BIT(2); + else + reg &= ~BIT(2); + + DSI_W32(ctrl, DSI_CTRL, reg); + + pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on); +} + +/** + * setup_lane_map() - setup mapping between logical and physical lanes + * @ctrl: Pointer to the controller host hardware. + * @lane_map: Structure defining the mapping between DSI logical + * lanes and physical lanes. 
+ */ +void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_mapping *lane_map) +{ + u32 reg_value = 0; + u32 lane_number = ((lane_map->physical_lane0 * 1000)+ + (lane_map->physical_lane1 * 100) + + (lane_map->physical_lane2 * 10) + + (lane_map->physical_lane3)); + + if (lane_number == 123) + reg_value = 0; + else if (lane_number == 3012) + reg_value = 1; + else if (lane_number == 2301) + reg_value = 2; + else if (lane_number == 1230) + reg_value = 3; + else if (lane_number == 321) + reg_value = 4; + else if (lane_number == 1032) + reg_value = 5; + else if (lane_number == 2103) + reg_value = 6; + else if (lane_number == 3210) + reg_value = 7; + + DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value); + + pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index); +} + +/** + * kickoff_command() - transmits commands stored in memory + * @ctrl: Pointer to the controller host hardware. + * @cmd: Command information. + * @flags: Modifiers for command transmission. + * + * The controller hardware is programmed with address and size of the + * command buffer. The transmission is kicked off if + * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is + * set, caller should make a separate call to trigger_command_dma() to + * transmit the command. 
+ */ +void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags) +{ + u32 reg = 0; + + /*Set BROADCAST_EN and EMBEDDED_MODE */ + reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL); + if (cmd->en_broadcast) + reg |= BIT(31); + else + reg &= ~BIT(31); + + if (cmd->is_master) + reg |= BIT(30); + else + reg &= ~BIT(30); + + if (cmd->use_lpm) + reg |= BIT(26); + else + reg &= ~BIT(26); + + reg |= BIT(28); + DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg); + + DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset); + DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF)); + + /* wait for writes to complete before kick off */ + wmb(); + + if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER)) + DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1); +} + +/** + * kickoff_fifo_command() - transmits a command using FIFO in dsi + * hardware. + * @ctrl: Pointer to the controller host hardware. + * @cmd: Command information. + * @flags: Modifiers for command transmission. + * + * The controller hardware FIFO is programmed with command header and + * payload. The transmission is kicked off if + * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is + * set, caller should make a separate call to trigger_command_dma() to + * transmit the command. + */ +void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_fifo_info *cmd, + u32 flags) +{ + u32 reg = 0, i = 0; + u32 *ptr = cmd->command; + /* + * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and + * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO + */ + reg = (BIT(1) | BIT(2) | (0x3 << 16)); + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg); + + /* + * Program the FIFO with command buffer. Hardware requires an extra + * DWORD (set to zero) if the length of command buffer is odd DWORDS. 
+ */ + for (i = 0; i < cmd->size; i += 4) { + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr); + ptr++; + } + + if ((cmd->size / 4) & 0x1) + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0); + + /*Set BROADCAST_EN and EMBEDDED_MODE */ + reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL); + if (cmd->en_broadcast) + reg |= BIT(31); + else + reg &= ~BIT(31); + + if (cmd->is_master) + reg |= BIT(30); + else + reg &= ~BIT(30); + + if (cmd->use_lpm) + reg |= BIT(26); + else + reg &= ~BIT(26); + + reg |= BIT(28); + + DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg); + + DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFFFF)); + /* Finish writes before command trigger */ + wmb(); + + if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER)) + DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1); + + pr_debug("[DSI_%d]size=%d, trigger = %d\n", + ctrl->index, cmd->size, + (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true); +} + +void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl) +{ + /* disable cmd dma tpg */ + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0); + + DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1); + udelay(1); + DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0); +} + +/** + * trigger_command_dma() - trigger transmission of command buffer. + * @ctrl: Pointer to the controller host hardware. + * + * This trigger can be only used if there was a prior call to + * kickoff_command() of kickoff_fifo_command() with + * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag. + */ +void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl) +{ + DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1); + pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index); +} + +/** + * get_cmd_read_data() - get data read from the peripheral + * @ctrl: Pointer to the controller host hardware. + * @rd_buf: Buffer where data will be read into. + * @total_read_len: Number of bytes to read. + * + * return: number of bytes read. 
+ */ +u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl, + u8 *rd_buf, + u32 read_offset, + u32 total_read_len) +{ + u32 *lp, *temp, data; + int i, j = 0, cnt; + u32 read_cnt; + u32 rx_byte = 0; + u32 repeated_bytes = 0; + u8 reg[16]; + u32 pkt_size = 0; + int buf_offset = read_offset; + + lp = (u32 *)rd_buf; + temp = (u32 *)reg; + cnt = (rx_byte + 3) >> 2; + + if (cnt > 4) + cnt = 4; + + if (rx_byte == 4) + read_cnt = 4; + else + read_cnt = pkt_size + 6; + + if (read_cnt > 16) { + int bytes_shifted; + + bytes_shifted = read_cnt - 16; + repeated_bytes = buf_offset - bytes_shifted; + } + + for (i = cnt - 1; i >= 0; i--) { + data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4); + *temp++ = ntohl(data); + } + + for (i = repeated_bytes; i < 16; i++) + rd_buf[j++] = reg[i]; + + pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j); + return j; +} +/** + * ulps_request() - request ulps entry for specified lanes + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes (enum dsi_data_lanes) which need + * to enter ULPS. + * + * Caller should check if lanes are in ULPS mode by calling + * get_lanes_in_ulps() operation. + */ +void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes) +{ + u32 reg = 0; + + if (lanes & DSI_CLOCK_LANE) + reg = BIT(4); + if (lanes & DSI_DATA_LANE_0) + reg |= BIT(0); + if (lanes & DSI_DATA_LANE_1) + reg |= BIT(1); + if (lanes & DSI_DATA_LANE_2) + reg |= BIT(2); + if (lanes & DSI_DATA_LANE_3) + reg |= BIT(3); + + DSI_W32(ctrl, DSI_LANE_CTRL, reg); + + pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index, + lanes); +} + +/** + * ulps_exit() - exit ULPS on specified lanes + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes (enum dsi_data_lanes) which need + * to exit ULPS. + * + * Caller should check if lanes are in active mode by calling + * get_lanes_in_ulps() operation. 
+ */ +void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes) +{ + u32 reg = 0; + + reg = DSI_R32(ctrl, DSI_LANE_CTRL); + if (lanes & DSI_CLOCK_LANE) + reg |= BIT(12); + if (lanes & DSI_DATA_LANE_0) + reg |= BIT(8); + if (lanes & DSI_DATA_LANE_1) + reg |= BIT(9); + if (lanes & DSI_DATA_LANE_2) + reg |= BIT(10); + if (lanes & DSI_DATA_LANE_3) + reg |= BIT(11); + + DSI_W32(ctrl, DSI_LANE_CTRL, reg); + + pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n", + ctrl->index, lanes); +} + +/** + * clear_ulps_request() - clear ulps request once all lanes are active + * @ctrl: Pointer to controller host hardware. + * @lanes: ORed list of lanes (enum dsi_data_lanes). + * + * ULPS request should be cleared after the lanes have exited ULPS. + */ +void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes) +{ + u32 reg = 0; + + reg = DSI_R32(ctrl, DSI_LANE_CTRL); + reg &= ~BIT(4); /* clock lane */ + if (lanes & DSI_DATA_LANE_0) + reg &= ~BIT(0); + if (lanes & DSI_DATA_LANE_1) + reg &= ~BIT(1); + if (lanes & DSI_DATA_LANE_2) + reg &= ~BIT(2); + if (lanes & DSI_DATA_LANE_3) + reg &= ~BIT(3); + + DSI_W32(ctrl, DSI_LANE_CTRL, reg); + /* + * HPG recommends separate writes for clearing ULPS_REQUEST and + * ULPS_EXIT. + */ + DSI_W32(ctrl, DSI_LANE_CTRL, 0x0); + + pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index); +} + +/** + * get_lanes_in_ulps() - returns the list of lanes in ULPS mode + * @ctrl: Pointer to the controller host hardware. + * + * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS + * state. If 0 is returned, all the lanes are active. + * + * Return: List of lanes in ULPS state. 
+ */ +u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl) +{ + u32 reg = 0; + u32 lanes = 0; + + reg = DSI_R32(ctrl, DSI_LANE_STATUS); + if (!(reg & BIT(8))) + lanes |= DSI_DATA_LANE_0; + if (!(reg & BIT(9))) + lanes |= DSI_DATA_LANE_1; + if (!(reg & BIT(10))) + lanes |= DSI_DATA_LANE_2; + if (!(reg & BIT(11))) + lanes |= DSI_DATA_LANE_3; + if (!(reg & BIT(12))) + lanes |= DSI_CLOCK_LANE; + + pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes); + return lanes; +} + +/** + * clamp_enable() - enable DSI clamps to keep PHY driving a stable link + * @ctrl: Pointer to the controller host hardware. + * @lanes: ORed list of lanes which need to be clamped. + * @enable_ulps: TODO:?? + */ +void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool enable_ulps) +{ + u32 clamp_reg = 0; + u32 bit_shift = 0; + u32 reg = 0; + + if (ctrl->index == 1) + bit_shift = 16; + + if (lanes & DSI_CLOCK_LANE) { + clamp_reg |= BIT(9); + if (enable_ulps) + clamp_reg |= BIT(8); + } + + if (lanes & DSI_DATA_LANE_0) { + clamp_reg |= BIT(7); + if (enable_ulps) + clamp_reg |= BIT(6); + } + + if (lanes & DSI_DATA_LANE_1) { + clamp_reg |= BIT(5); + if (enable_ulps) + clamp_reg |= BIT(4); + } + + if (lanes & DSI_DATA_LANE_2) { + clamp_reg |= BIT(3); + if (enable_ulps) + clamp_reg |= BIT(2); + } + + if (lanes & DSI_DATA_LANE_3) { + clamp_reg |= BIT(1); + if (enable_ulps) + clamp_reg |= BIT(0); + } + + clamp_reg |= BIT(15); /* Enable clamp */ + + reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF); + reg |= (clamp_reg << bit_shift); + DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg); + + + reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF); + reg |= BIT(30); + DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg); + + pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index, + lanes); +} + +/** + * clamp_disable() - disable DSI clamps + * @ctrl: Pointer to the controller host hardware. 
+ * @lanes: ORed list of lanes which need to have clamps released. + * @disable_ulps: TODO:?? + */ +void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool disable_ulps) +{ + u32 clamp_reg = 0; + u32 bit_shift = 0; + u32 reg = 0; + + if (ctrl->index == 1) + bit_shift = 16; + + if (lanes & DSI_CLOCK_LANE) { + clamp_reg |= BIT(9); + if (disable_ulps) + clamp_reg |= BIT(8); + } + + if (lanes & DSI_DATA_LANE_0) { + clamp_reg |= BIT(7); + if (disable_ulps) + clamp_reg |= BIT(6); + } + + if (lanes & DSI_DATA_LANE_1) { + clamp_reg |= BIT(5); + if (disable_ulps) + clamp_reg |= BIT(4); + } + + if (lanes & DSI_DATA_LANE_2) { + clamp_reg |= BIT(3); + if (disable_ulps) + clamp_reg |= BIT(2); + } + + if (lanes & DSI_DATA_LANE_3) { + clamp_reg |= BIT(1); + if (disable_ulps) + clamp_reg |= BIT(0); + } + + clamp_reg |= BIT(15); /* Enable clamp */ + clamp_reg <<= bit_shift; + + /* Disable PHY reset skip */ + reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF); + reg &= ~BIT(30); + DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg); + + reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF); + reg &= ~(clamp_reg); + DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg); + + pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes); +} + +/** + * get_interrupt_status() - returns the interrupt status + * @ctrl: Pointer to the controller host hardware. + * + * Returns the ORed list of interrupts(enum dsi_status_int_type) that + * are active. This list does not include any error interrupts. Caller + * should call get_error_status for error interrupts. + * + * Return: List of active interrupts. 
+ */ +u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl) +{ + u32 reg = 0; + u32 ints = 0; + + reg = DSI_R32(ctrl, DSI_INT_CTRL); + + if (reg & BIT(0)) + ints |= DSI_CMD_MODE_DMA_DONE; + if (reg & BIT(8)) + ints |= DSI_CMD_FRAME_DONE; + if (reg & BIT(10)) + ints |= DSI_CMD_STREAM0_FRAME_DONE; + if (reg & BIT(12)) + ints |= DSI_CMD_STREAM1_FRAME_DONE; + if (reg & BIT(14)) + ints |= DSI_CMD_STREAM2_FRAME_DONE; + if (reg & BIT(16)) + ints |= DSI_VIDEO_MODE_FRAME_DONE; + if (reg & BIT(20)) + ints |= DSI_BTA_DONE; + if (reg & BIT(28)) + ints |= DSI_DYN_REFRESH_DONE; + if (reg & BIT(30)) + ints |= DSI_DESKEW_DONE; + + pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n", + ctrl->index, ints, reg); + return ints; +} + +/** + * clear_interrupt_status() - clears the specified interrupts + * @ctrl: Pointer to the controller host hardware. + * @ints: List of interrupts to be cleared. + */ +void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints) +{ + u32 reg = 0; + + if (ints & DSI_CMD_MODE_DMA_DONE) + reg |= BIT(0); + if (ints & DSI_CMD_FRAME_DONE) + reg |= BIT(8); + if (ints & DSI_CMD_STREAM0_FRAME_DONE) + reg |= BIT(10); + if (ints & DSI_CMD_STREAM1_FRAME_DONE) + reg |= BIT(12); + if (ints & DSI_CMD_STREAM2_FRAME_DONE) + reg |= BIT(14); + if (ints & DSI_VIDEO_MODE_FRAME_DONE) + reg |= BIT(16); + if (ints & DSI_BTA_DONE) + reg |= BIT(20); + if (ints & DSI_DYN_REFRESH_DONE) + reg |= BIT(28); + if (ints & DSI_DESKEW_DONE) + reg |= BIT(30); + + DSI_W32(ctrl, DSI_INT_CTRL, reg); + + pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n", + ctrl->index, ints, reg); +} + +/** + * enable_status_interrupts() - enable the specified interrupts + * @ctrl: Pointer to the controller host hardware. + * @ints: List of interrupts to be enabled. + * + * Enables the specified interrupts. This list will override the + * previous interrupts enabled through this function. Caller has to + * maintain the state of the interrupts enabled. 
To disable all + * interrupts, set ints to 0. + */ +void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints) +{ + u32 reg = 0; + + /* Do not change value of DSI_ERROR_MASK bit */ + reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25)); + if (ints & DSI_CMD_MODE_DMA_DONE) + reg |= BIT(1); + if (ints & DSI_CMD_FRAME_DONE) + reg |= BIT(9); + if (ints & DSI_CMD_STREAM0_FRAME_DONE) + reg |= BIT(11); + if (ints & DSI_CMD_STREAM1_FRAME_DONE) + reg |= BIT(13); + if (ints & DSI_CMD_STREAM2_FRAME_DONE) + reg |= BIT(15); + if (ints & DSI_VIDEO_MODE_FRAME_DONE) + reg |= BIT(17); + if (ints & DSI_BTA_DONE) + reg |= BIT(21); + if (ints & DSI_DYN_REFRESH_DONE) + reg |= BIT(29); + if (ints & DSI_DESKEW_DONE) + reg |= BIT(31); + + DSI_W32(ctrl, DSI_INT_CTRL, reg); + + pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n", + ctrl->index, ints, reg); +} + +/** + * get_error_status() - returns the error status + * @ctrl: Pointer to the controller host hardware. + * + * Returns the ORed list of errors(enum dsi_error_int_type) that are + * active. This list does not include any status interrupts. Caller + * should call get_interrupt_status for status interrupts. + * + * Return: List of active error interrupts. 
+ */ +u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl) +{ + u32 dln0_phy_err; + u32 fifo_status; + u32 ack_error; + u32 timeout_errors; + u32 clk_error; + u32 dsi_status; + u64 errors = 0; + + dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR); + if (dln0_phy_err & BIT(0)) + errors |= DSI_DLN0_ESC_ENTRY_ERR; + if (dln0_phy_err & BIT(4)) + errors |= DSI_DLN0_ESC_SYNC_ERR; + if (dln0_phy_err & BIT(8)) + errors |= DSI_DLN0_LP_CONTROL_ERR; + if (dln0_phy_err & BIT(12)) + errors |= DSI_DLN0_LP0_CONTENTION; + if (dln0_phy_err & BIT(16)) + errors |= DSI_DLN0_LP1_CONTENTION; + + fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS); + if (fifo_status & BIT(7)) + errors |= DSI_CMD_MDP_FIFO_UNDERFLOW; + if (fifo_status & BIT(10)) + errors |= DSI_CMD_DMA_FIFO_UNDERFLOW; + if (fifo_status & BIT(18)) + errors |= DSI_DLN0_HS_FIFO_OVERFLOW; + if (fifo_status & BIT(19)) + errors |= DSI_DLN0_HS_FIFO_UNDERFLOW; + if (fifo_status & BIT(22)) + errors |= DSI_DLN1_HS_FIFO_OVERFLOW; + if (fifo_status & BIT(23)) + errors |= DSI_DLN1_HS_FIFO_UNDERFLOW; + if (fifo_status & BIT(26)) + errors |= DSI_DLN2_HS_FIFO_OVERFLOW; + if (fifo_status & BIT(27)) + errors |= DSI_DLN2_HS_FIFO_UNDERFLOW; + if (fifo_status & BIT(30)) + errors |= DSI_DLN3_HS_FIFO_OVERFLOW; + if (fifo_status & BIT(31)) + errors |= DSI_DLN3_HS_FIFO_UNDERFLOW; + + ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS); + if (ack_error & BIT(16)) + errors |= DSI_RDBK_SINGLE_ECC_ERR; + if (ack_error & BIT(17)) + errors |= DSI_RDBK_MULTI_ECC_ERR; + if (ack_error & BIT(20)) + errors |= DSI_RDBK_CRC_ERR; + if (ack_error & BIT(23)) + errors |= DSI_RDBK_INCOMPLETE_PKT; + if (ack_error & BIT(24)) + errors |= DSI_PERIPH_ERROR_PKT; + + timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS); + if (timeout_errors & BIT(0)) + errors |= DSI_HS_TX_TIMEOUT; + if (timeout_errors & BIT(4)) + errors |= DSI_LP_RX_TIMEOUT; + if (timeout_errors & BIT(8)) + errors |= DSI_BTA_TIMEOUT; + + clk_error = DSI_R32(ctrl, DSI_CLK_STATUS); + if (clk_error & BIT(16)) + 
errors |= DSI_PLL_UNLOCK; + + dsi_status = DSI_R32(ctrl, DSI_STATUS); + if (dsi_status & BIT(31)) + errors |= DSI_INTERLEAVE_OP_CONTENTION; + + pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x", + ctrl->index, errors, dln0_phy_err, fifo_status); + pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n", + ctrl->index, ack_error, timeout_errors, clk_error, dsi_status); + return errors; +} + +/** + * clear_error_status() - clears the specified errors + * @ctrl: Pointer to the controller host hardware. + * @errors: List of errors to be cleared. + */ +void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors) +{ + u32 dln0_phy_err = 0; + u32 fifo_status = 0; + u32 ack_error = 0; + u32 timeout_error = 0; + u32 clk_error = 0; + u32 dsi_status = 0; + u32 int_ctrl = 0; + + if (errors & DSI_RDBK_SINGLE_ECC_ERR) + ack_error |= BIT(16); + if (errors & DSI_RDBK_MULTI_ECC_ERR) + ack_error |= BIT(17); + if (errors & DSI_RDBK_CRC_ERR) + ack_error |= BIT(20); + if (errors & DSI_RDBK_INCOMPLETE_PKT) + ack_error |= BIT(23); + if (errors & DSI_PERIPH_ERROR_PKT) + ack_error |= BIT(24); + + if (errors & DSI_LP_RX_TIMEOUT) + timeout_error |= BIT(4); + if (errors & DSI_HS_TX_TIMEOUT) + timeout_error |= BIT(0); + if (errors & DSI_BTA_TIMEOUT) + timeout_error |= BIT(8); + + if (errors & DSI_PLL_UNLOCK) + clk_error |= BIT(16); + + if (errors & DSI_DLN0_LP0_CONTENTION) + dln0_phy_err |= BIT(12); + if (errors & DSI_DLN0_LP1_CONTENTION) + dln0_phy_err |= BIT(16); + if (errors & DSI_DLN0_ESC_ENTRY_ERR) + dln0_phy_err |= BIT(0); + if (errors & DSI_DLN0_ESC_SYNC_ERR) + dln0_phy_err |= BIT(4); + if (errors & DSI_DLN0_LP_CONTROL_ERR) + dln0_phy_err |= BIT(8); + + if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW) + fifo_status |= BIT(10); + if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW) + fifo_status |= BIT(7); + if (errors & DSI_DLN0_HS_FIFO_OVERFLOW) + fifo_status |= BIT(18); + if (errors & DSI_DLN1_HS_FIFO_OVERFLOW) + fifo_status |= BIT(22); + if (errors & 
DSI_DLN2_HS_FIFO_OVERFLOW) + fifo_status |= BIT(26); + if (errors & DSI_DLN3_HS_FIFO_OVERFLOW) + fifo_status |= BIT(30); + if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW) + fifo_status |= BIT(19); + if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW) + fifo_status |= BIT(23); + if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW) + fifo_status |= BIT(27); + if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW) + fifo_status |= BIT(31); + + if (errors & DSI_INTERLEAVE_OP_CONTENTION) + dsi_status |= BIT(31); + + DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err); + DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status); + DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error); + DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error); + DSI_W32(ctrl, DSI_CLK_STATUS, clk_error); + DSI_W32(ctrl, DSI_STATUS, dsi_status); + + int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL); + int_ctrl |= BIT(24); + DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl); + pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x", + ctrl->index, errors, dln0_phy_err, fifo_status); + pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n", + ctrl->index, ack_error, timeout_error, clk_error, dsi_status); +} + +/** + * enable_error_interrupts() - enable the specified interrupts + * @ctrl: Pointer to the controller host hardware. + * @errors: List of errors to be enabled. + * + * Enables the specified interrupts. This list will override the + * previous interrupts enabled through this function. Caller has to + * maintain the state of the interrupts enabled. To disable all + * interrupts, set errors to 0. 
+ */ +void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl, + u64 errors) +{ + u32 int_ctrl = 0; + u32 int_mask0 = 0x7FFF3BFF; + + int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL); + if (errors) + int_ctrl |= BIT(25); + else + int_ctrl &= ~BIT(25); + + if (errors & DSI_RDBK_SINGLE_ECC_ERR) + int_mask0 &= ~BIT(0); + if (errors & DSI_RDBK_MULTI_ECC_ERR) + int_mask0 &= ~BIT(1); + if (errors & DSI_RDBK_CRC_ERR) + int_mask0 &= ~BIT(2); + if (errors & DSI_RDBK_INCOMPLETE_PKT) + int_mask0 &= ~BIT(3); + if (errors & DSI_PERIPH_ERROR_PKT) + int_mask0 &= ~BIT(4); + + if (errors & DSI_LP_RX_TIMEOUT) + int_mask0 &= ~BIT(5); + if (errors & DSI_HS_TX_TIMEOUT) + int_mask0 &= ~BIT(6); + if (errors & DSI_BTA_TIMEOUT) + int_mask0 &= ~BIT(7); + + if (errors & DSI_PLL_UNLOCK) + int_mask0 &= ~BIT(28); + + if (errors & DSI_DLN0_LP0_CONTENTION) + int_mask0 &= ~BIT(24); + if (errors & DSI_DLN0_LP1_CONTENTION) + int_mask0 &= ~BIT(25); + if (errors & DSI_DLN0_ESC_ENTRY_ERR) + int_mask0 &= ~BIT(21); + if (errors & DSI_DLN0_ESC_SYNC_ERR) + int_mask0 &= ~BIT(22); + if (errors & DSI_DLN0_LP_CONTROL_ERR) + int_mask0 &= ~BIT(23); + + if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW) + int_mask0 &= ~BIT(9); + if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW) + int_mask0 &= ~BIT(11); + if (errors & DSI_DLN0_HS_FIFO_OVERFLOW) + int_mask0 &= ~BIT(16); + if (errors & DSI_DLN1_HS_FIFO_OVERFLOW) + int_mask0 &= ~BIT(17); + if (errors & DSI_DLN2_HS_FIFO_OVERFLOW) + int_mask0 &= ~BIT(18); + if (errors & DSI_DLN3_HS_FIFO_OVERFLOW) + int_mask0 &= ~BIT(19); + if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW) + int_mask0 &= ~BIT(26); + if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW) + int_mask0 &= ~BIT(27); + if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW) + int_mask0 &= ~BIT(29); + if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW) + int_mask0 &= ~BIT(30); + + if (errors & DSI_INTERLEAVE_OP_CONTENTION) + int_mask0 &= ~BIT(8); + + DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl); + DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0); + + pr_debug("[DSI_%d] enable errors = 
0x%llx, int_mask0=0x%x\n", + ctrl->index, errors, int_mask0); +} + +/** + * video_test_pattern_setup() - setup test pattern engine for video mode + * @ctrl: Pointer to the controller host hardware. + * @type: Type of test pattern. + * @init_val: Initial value to use for generating test pattern. + */ +void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val) +{ + u32 reg = 0; + + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val); + + switch (type) { + case DSI_TEST_PATTERN_FIXED: + reg |= (0x2 << 4); + break; + case DSI_TEST_PATTERN_INC: + reg |= (0x1 << 4); + break; + case DSI_TEST_PATTERN_POLY: + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F); + break; + default: + break; + } + + DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100); + DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5); + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg); + + pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index); +} + +/** + * cmd_test_pattern_setup() - setup test pattern engine for cmd mode + * @ctrl: Pointer to the controller host hardware. + * @type: Type of test pattern. + * @init_val: Initial value to use for generating test pattern. + * @stream_id: Stream Id on which packets are generated.
+ */ +void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val, + u32 stream_id) +{ + u32 reg = 0; + u32 init_offset; + u32 poly_offset; + u32 pattern_sel_shift; + + switch (stream_id) { + case 0: + init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0; + poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY; + pattern_sel_shift = 8; + break; + case 1: + init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1; + poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY; + pattern_sel_shift = 12; + break; + case 2: + init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2; + poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY; + pattern_sel_shift = 20; + break; + default: + return; + } + + DSI_W32(ctrl, init_offset, init_val); + + switch (type) { + case DSI_TEST_PATTERN_FIXED: + reg |= (0x2 << pattern_sel_shift); + break; + case DSI_TEST_PATTERN_INC: + reg |= (0x1 << pattern_sel_shift); + break; + case DSI_TEST_PATTERN_POLY: + DSI_W32(ctrl, poly_offset, 0xF0F0F); + break; + default: + break; + } + + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg); + pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index); +} + +/** + * test_pattern_enable() - enable test pattern engine + * @ctrl: Pointer to the controller host hardware. + * @enable: Enable/Disable test pattern engine. + */ +void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, + bool enable) +{ + u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL); + + if (enable) + reg |= BIT(0); + else + reg &= ~BIT(0); + + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg); + + pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable); +} + +/** + * trigger_cmd_test_pattern() - trigger a command mode frame update with + * test pattern + * @ctrl: Pointer to the controller host hardware. + * @stream_id: Stream on which frame update is sent. 
+ */ +void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl, + u32 stream_id) +{ + switch (stream_id) { + case 0: + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1); + break; + case 1: + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1); + break; + case 2: + DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1); + break; + default: + break; + } + + pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index); +} diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h new file mode 100644 index 000000000000..028ad46664a7 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _DSI_CTRL_REG_H_ +#define _DSI_CTRL_REG_H_ + +#define DSI_HW_VERSION (0x0000) +#define DSI_CTRL (0x0004) +#define DSI_STATUS (0x0008) +#define DSI_FIFO_STATUS (0x000C) +#define DSI_VIDEO_MODE_CTRL (0x0010) +#define DSI_VIDEO_MODE_SYNC_DATATYPE (0x0014) +#define DSI_VIDEO_MODE_PIXEL_DATATYPE (0x0018) +#define DSI_VIDEO_MODE_BLANKING_DATATYPE (0x001C) +#define DSI_VIDEO_MODE_DATA_CTRL (0x0020) +#define DSI_VIDEO_MODE_ACTIVE_H (0x0024) +#define DSI_VIDEO_MODE_ACTIVE_V (0x0028) +#define DSI_VIDEO_MODE_TOTAL (0x002C) +#define DSI_VIDEO_MODE_HSYNC (0x0030) +#define DSI_VIDEO_MODE_VSYNC (0x0034) +#define DSI_VIDEO_MODE_VSYNC_VPOS (0x0038) +#define DSI_COMMAND_MODE_DMA_CTRL (0x003C) +#define DSI_COMMAND_MODE_MDP_CTRL (0x0040) +#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL (0x0044) +#define DSI_DMA_CMD_OFFSET (0x0048) +#define DSI_DMA_CMD_LENGTH (0x004C) +#define DSI_DMA_FIFO_CTRL (0x0050) +#define DSI_DMA_NULL_PACKET_DATA (0x0054) +#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL (0x0058) +#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL (0x005C) +#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL (0x0060) +#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL (0x0064) +#define DSI_ACK_ERR_STATUS (0x0068) +#define DSI_RDBK_DATA0 (0x006C) +#define DSI_RDBK_DATA1 (0x0070) +#define DSI_RDBK_DATA2 (0x0074) +#define DSI_RDBK_DATA3 (0x0078) +#define DSI_RDBK_DATATYPE0 (0x007C) +#define DSI_RDBK_DATATYPE1 (0x0080) +#define DSI_TRIG_CTRL (0x0084) +#define DSI_EXT_MUX (0x0088) +#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL (0x008C) +#define DSI_CMD_MODE_DMA_SW_TRIGGER (0x0090) +#define DSI_CMD_MODE_MDP_SW_TRIGGER (0x0094) +#define DSI_CMD_MODE_BTA_SW_TRIGGER (0x0098) +#define DSI_RESET_SW_TRIGGER (0x009C) +#define DSI_MISR_CMD_CTRL (0x00A0) +#define DSI_MISR_VIDEO_CTRL (0x00A4) +#define DSI_LANE_STATUS (0x00A8) +#define DSI_LANE_CTRL (0x00AC) +#define DSI_LANE_SWAP_CTRL (0x00B0) +#define DSI_DLN0_PHY_ERR (0x00B4) +#define DSI_LP_TIMER_CTRL (0x00B8) +#define DSI_HS_TIMER_CTRL (0x00BC) +#define 
DSI_TIMEOUT_STATUS (0x00C0) +#define DSI_CLKOUT_TIMING_CTRL (0x00C4) +#define DSI_EOT_PACKET (0x00C8) +#define DSI_EOT_PACKET_CTRL (0x00CC) +#define DSI_GENERIC_ESC_TX_TRIGGER (0x00D0) +#define DSI_CAM_BIST_CTRL (0x00D4) +#define DSI_CAM_BIST_FRAME_SIZE (0x00D8) +#define DSI_CAM_BIST_BLOCK_SIZE (0x00DC) +#define DSI_CAM_BIST_FRAME_CONFIG (0x00E0) +#define DSI_CAM_BIST_LSFR_CTRL (0x00E4) +#define DSI_CAM_BIST_LSFR_INIT (0x00E8) +#define DSI_CAM_BIST_START (0x00EC) +#define DSI_CAM_BIST_STATUS (0x00F0) +#define DSI_ERR_INT_MASK0 (0x010C) +#define DSI_INT_CTRL (0x0110) +#define DSI_IOBIST_CTRL (0x0114) +#define DSI_SOFT_RESET (0x0118) +#define DSI_CLK_CTRL (0x011C) +#define DSI_CLK_STATUS (0x0120) +#define DSI_PHY_SW_RESET (0x012C) +#define DSI_AXI2AHB_CTRL (0x0130) +#define DSI_MISR_CMD_MDP0_32BIT (0x0134) +#define DSI_MISR_CMD_MDP1_32BIT (0x0138) +#define DSI_MISR_CMD_DMA_32BIT (0x013C) +#define DSI_MISR_VIDEO_32BIT (0x0140) +#define DSI_LANE_MISR_CTRL (0x0144) +#define DSI_LANE0_MISR (0x0148) +#define DSI_LANE1_MISR (0x014C) +#define DSI_LANE2_MISR (0x0150) +#define DSI_LANE3_MISR (0x0154) +#define DSI_TEST_PATTERN_GEN_CTRL (0x015C) +#define DSI_TEST_PATTERN_GEN_VIDEO_POLY (0x0160) +#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL (0x0164) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY (0x0168) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 (0x016C) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY (0x0170) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1 (0x0174) +#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY (0x0178) +#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL (0x017C) +#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE (0x0180) +#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER (0x0184) +#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER (0x0188) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2 (0x018C) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY (0x0190) +#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY (0x0190) +#define DSI_COMMAND_MODE_MDP_IDLE_CTRL (0x0194) 
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER (0x0198) +#define DSI_TPG_MAIN_CONTROL (0x019C) +#define DSI_TPG_MAIN_CONTROL2 (0x01A0) +#define DSI_TPG_VIDEO_CONFIG (0x01A4) +#define DSI_TPG_COMPONENT_LIMITS (0x01A8) +#define DSI_TPG_RECTANGLE (0x01AC) +#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES (0x01B0) +#define DSI_TPG_RGB_MAPPING (0x01B4) +#define DSI_COMMAND_MODE_MDP_CTRL2 (0x01B8) +#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL (0x01BC) +#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL (0x01C0) +#define DSI_MISR_CMD_MDP2_8BIT (0x01C4) +#define DSI_MISR_CMD_MDP2_32BIT (0x01C8) +#define DSI_VBIF_CTRL (0x01CC) +#define DSI_AES_CTRL (0x01D0) +#define DSI_RDBK_DATA_CTRL (0x01D4) +#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2 (0x01D8) +#define DSI_TPG_DMA_FIFO_STATUS (0x01DC) +#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER (0x01E0) +#define DSI_DSI_TIMING_FLUSH (0x01E4) +#define DSI_DSI_TIMING_DB_MODE (0x01E8) +#define DSI_TPG_DMA_FIFO_RESET (0x01EC) +#define DSI_SCRATCH_REGISTER_0 (0x01F0) +#define DSI_VERSION (0x01F4) +#define DSI_SCRATCH_REGISTER_1 (0x01F8) +#define DSI_SCRATCH_REGISTER_2 (0x01FC) +#define DSI_DYNAMIC_REFRESH_CTRL (0x0200) +#define DSI_DYNAMIC_REFRESH_PIPE_DELAY (0x0204) +#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 (0x0208) +#define DSI_DYNAMIC_REFRESH_PLL_DELAY (0x020C) +#define DSI_DYNAMIC_REFRESH_STATUS (0x0210) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x0214) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x0218) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x021C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x0220) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x0224) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x0228) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x022C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x0230) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x0234) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x0238) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x023C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x0240) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x0244) +#define 
DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x0248) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x024C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x0250) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x0254) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x0258) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x025C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x0260) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x0264) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x0268) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x026C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x0270) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x0274) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x0278) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x027C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x0280) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x0284) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x0288) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x028C) +#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x0290) +#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x0294) +#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x0298) +#define DSI_VIDEO_COMPRESSION_MODE_CTRL (0x02A0) +#define DSI_VIDEO_COMPRESSION_MODE_CTRL2 (0x02A4) +#define DSI_COMMAND_COMPRESSION_MODE_CTRL (0x02A8) +#define DSI_COMMAND_COMPRESSION_MODE_CTRL2 (0x02AC) +#define DSI_COMMAND_COMPRESSION_MODE_CTRL3 (0x02B0) +#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL (0x02B4) +#define DSI_READ_BACK_DISABLE_STATUS (0x02B8) +#define DSI_DESKEW_CTRL (0x02BC) +#define DSI_DESKEW_DELAY_CTRL (0x02C0) +#define DSI_DESKEW_SW_TRIGGER (0x02C4) +#define DSI_SECURE_DISPLAY_STATUS (0x02CC) +#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR (0x02D0) +#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR (0x02D4) + + +#endif /* _DSI_CTRL_REG_H_ */ -- GitLab From 95e37a3c23a6607e8e76f0ff5f33679e089e03b9 Mon Sep 17 00:00:00 2001 From: Ajay Singh Parmar Date: Mon, 16 May 2016 17:45:31 -0700 Subject: [PATCH 005/310] drm/msm/dsi-staging: add catalog for dsi driver Add catalog for DSI controller and phy drivers. 
Change-Id: Iff7f55592834fef0230982282af5b8b2890f97a5 Signed-off-by: Ajay Singh Parmar --- drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c | 167 ++++++++++++++++++ drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h | 125 +++++++++++++ 2 files changed, 292 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c new file mode 100644 index 000000000000..114998fb8fc5 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__ +#include + +#include "dsi_catalog.h" + +/** + * dsi_catalog_14_init() - catalog init for dsi controller v1.4 + */ +static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl) +{ + ctrl->ops.host_setup = dsi_ctrl_hw_14_host_setup; + ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map; + ctrl->ops.video_engine_en = dsi_ctrl_hw_14_video_engine_en; + ctrl->ops.video_engine_setup = dsi_ctrl_hw_14_video_engine_setup; + ctrl->ops.set_video_timing = dsi_ctrl_hw_14_set_video_timing; + ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_14_cmd_engine_setup; + ctrl->ops.ctrl_en = dsi_ctrl_hw_14_ctrl_en; + ctrl->ops.cmd_engine_en = dsi_ctrl_hw_14_cmd_engine_en; + ctrl->ops.phy_sw_reset = dsi_ctrl_hw_14_phy_sw_reset; + ctrl->ops.soft_reset = dsi_ctrl_hw_14_soft_reset; + ctrl->ops.kickoff_command = dsi_ctrl_hw_14_kickoff_command; + ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_14_kickoff_fifo_command; + ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_14_reset_cmd_fifo; + ctrl->ops.trigger_command_dma = dsi_ctrl_hw_14_trigger_command_dma; + ctrl->ops.ulps_request = dsi_ctrl_hw_14_ulps_request; + ctrl->ops.ulps_exit = dsi_ctrl_hw_14_ulps_exit; + ctrl->ops.clear_ulps_request = dsi_ctrl_hw_14_clear_ulps_request; + ctrl->ops.get_lanes_in_ulps = dsi_ctrl_hw_14_get_lanes_in_ulps; + ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable; + ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable; + ctrl->ops.get_interrupt_status = dsi_ctrl_hw_14_get_interrupt_status; + ctrl->ops.get_error_status = dsi_ctrl_hw_14_get_error_status; + ctrl->ops.clear_error_status = dsi_ctrl_hw_14_clear_error_status; + ctrl->ops.clear_interrupt_status = + dsi_ctrl_hw_14_clear_interrupt_status; + ctrl->ops.enable_status_interrupts = + dsi_ctrl_hw_14_enable_status_interrupts; + ctrl->ops.enable_error_interrupts = + dsi_ctrl_hw_14_enable_error_interrupts; + ctrl->ops.video_test_pattern_setup = + dsi_ctrl_hw_14_video_test_pattern_setup; + 
ctrl->ops.cmd_test_pattern_setup = + dsi_ctrl_hw_14_cmd_test_pattern_setup; + ctrl->ops.test_pattern_enable = dsi_ctrl_hw_14_test_pattern_enable; + ctrl->ops.trigger_cmd_test_pattern = + dsi_ctrl_hw_14_trigger_cmd_test_pattern; +} + +/** + * dsi_catalog_20_init() - catalog init for dsi controller v2.0 + */ +static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl) +{ + set_bit(DSI_CTRL_CPHY, ctrl->feature_map); +} + +/** + * dsi_catalog_ctrl_setup() - return catalog info for dsi controller + * @ctrl: Pointer to DSI controller hw object. + * @version: DSI controller version. + * @index: DSI controller instance ID. + * + * This function setups the catalog information in the dsi_ctrl_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version, + u32 index) +{ + int rc = 0; + + if (version == DSI_CTRL_VERSION_UNKNOWN || + version >= DSI_CTRL_VERSION_MAX) { + pr_err("Unsupported version: %d\n", version); + return -ENOTSUPP; + } + + ctrl->index = index; + set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map); + set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map); + set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map); + set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map); + set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map); + set_bit(DSI_CTRL_DPHY, ctrl->feature_map); + + switch (version) { + case DSI_CTRL_VERSION_1_4: + dsi_catalog_14_init(ctrl); + break; + case DSI_CTRL_VERSION_2_0: + dsi_catalog_20_init(ctrl); + break; + default: + return -ENOTSUPP; + } + + return rc; +} + +/** + * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0 + */ +static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable; + phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable; + phy->ops.enable = dsi_phy_hw_v4_0_enable; + phy->ops.disable = dsi_phy_hw_v4_0_disable; + phy->ops.calculate_timing_params = + 
dsi_phy_hw_v4_0_calculate_timing_params; +} + +/** + * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware + * @phy: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * @index: DSI PHY instance ID. + * + * This function sets up the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_phy_setup(struct dsi_phy_hw *phy, + enum dsi_phy_version version, + u32 index) +{ + int rc = 0; + + if (version == DSI_PHY_VERSION_UNKNOWN || + version >= DSI_PHY_VERSION_MAX) { + pr_err("Unsupported version: %d\n", version); + return -ENOTSUPP; + } + + phy->index = index; + set_bit(DSI_PHY_DPHY, phy->feature_map); + + switch (version) { + case DSI_PHY_VERSION_4_0: + dsi_catalog_phy_4_0_init(phy); + break; + case DSI_PHY_VERSION_1_0: + case DSI_PHY_VERSION_2_0: + case DSI_PHY_VERSION_3_0: + default: + return -ENOTSUPP; + } + + return rc; +} + + diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h new file mode 100644 index 000000000000..e4b33c259540 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DSI_CATALOG_H_ +#define _DSI_CATALOG_H_ + +#include "dsi_ctrl_hw.h" +#include "dsi_phy_hw.h" + +/** + * dsi_catalog_ctrl_setup() - return catalog info for dsi controller + * @ctrl: Pointer to DSI controller hw object.
+ * @version: DSI controller version. + * @index: DSI controller instance ID. + * + * This function setups the catalog information in the dsi_ctrl_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version, + u32 index); + +/** + * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware + * @ctrl: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * @index: DSI PHY instance ID. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_phy_setup(struct dsi_phy_hw *phy, + enum dsi_phy_version version, + u32 index); + +/* Definitions for 4.0 PHY hardware driver */ +void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *cfg); +void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy); +int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy, + struct dsi_mode_info *mode, + struct dsi_host_common_cfg *cfg, + struct dsi_phy_per_lane_cfgs + *timing); + +/* Definitions for 1.4 controller hardware driver */ +void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *config); +void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on); +void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_video_engine_cfg *cfg); +void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode); + +void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_cmd_engine_cfg *cfg); + +void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on); +void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw 
*ctrl, bool on); + +void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl); + +void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_mapping *lane_map); +void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags); + +void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_fifo_info *cmd, + u32 flags); +void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl); + +void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes); +void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes); +void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes); +u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl); + +void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool enable_ulps); + +void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool disable_ulps); +u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints); +void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, + u32 ints); + +u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors); +void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl, + u64 errors); + +void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val); +void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val, + u32 stream_id); +void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable); +void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl, + u32 stream_id); +#endif /* 
_DSI_CATALOG_H_ */ -- GitLab From ca84bcaf9cedb83f9f5b67bb0023790a6e865d3a Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Tue, 29 Sep 2015 10:16:51 -0700 Subject: [PATCH 006/310] drm/msm/sde: add driver for sde support Initial DRM/KMS driver to support snapdragon display engine. Change-Id: I2f93d7cd24acf77359682f90b6b9647017ed62ba Signed-off-by: Abhijit Kulkarni --- .../devicetree/bindings/display/msm/sde.txt | 55 ++ drivers/gpu/drm/msm/Makefile | 17 + drivers/gpu/drm/msm/msm_drv.c | 21 +- drivers/gpu/drm/msm/msm_drv.h | 17 +- drivers/gpu/drm/msm/msm_kms.h | 1 + drivers/gpu/drm/msm/sde/sde_crtc.c | 148 +++++ drivers/gpu/drm/msm/sde/sde_encoder.c | 95 +++ drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 37 ++ drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 468 ++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c | 295 +++++++++ drivers/gpu/drm/msm/sde/sde_hw_cdm.c | 296 +++++++++ drivers/gpu/drm/msm/sde/sde_hw_cdm.h | 115 ++++ drivers/gpu/drm/msm/sde/sde_hw_dspp.c | 105 ++++ drivers/gpu/drm/msm/sde/sde_hw_dspp.h | 127 ++++ drivers/gpu/drm/msm/sde/sde_hw_intf.c | 373 +++++++++++ drivers/gpu/drm/msm/sde/sde_hw_intf.h | 103 +++ drivers/gpu/drm/msm/sde/sde_hw_lm.c | 192 ++++++ drivers/gpu/drm/msm/sde/sde_hw_lm.h | 96 +++ drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c | 338 ++++++++++ drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h | 99 +++ drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h | 0 drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c | 73 +++ drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h | 56 ++ drivers/gpu/drm/msm/sde/sde_hw_mdss.h | 320 ++++++++++ drivers/gpu/drm/msm/sde/sde_hw_pingpong.c | 159 +++++ drivers/gpu/drm/msm/sde/sde_hw_pingpong.h | 115 ++++ drivers/gpu/drm/msm/sde/sde_hw_sspp.c | 591 ++++++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_sspp.h | 266 ++++++++ drivers/gpu/drm/msm/sde/sde_hw_wb.c | 120 ++++ drivers/gpu/drm/msm/sde/sde_hw_wb.h | 85 +++ drivers/gpu/drm/msm/sde/sde_hwio.h | 56 ++ drivers/gpu/drm/msm/sde/sde_irq.c | 116 ++++ drivers/gpu/drm/msm/sde/sde_kms.c | 306 
+++++++++ drivers/gpu/drm/msm/sde/sde_kms.h | 116 ++++ drivers/gpu/drm/msm/sde/sde_mdp_formats.h | 213 +++++++ drivers/gpu/drm/msm/sde/sde_plane.c | 115 ++++ 36 files changed, 5694 insertions(+), 11 deletions(-) create mode 100644 Documentation/devicetree/bindings/display/msm/sde.txt create mode 100644 drivers/gpu/drm/msm/sde/sde_crtc.c create mode 100644 drivers/gpu/drm/msm/sde/sde_encoder.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_catalog.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_catalog.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_cdm.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_cdm.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_dspp.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_dspp.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_intf.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_intf.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_lm.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_lm.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdss.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_pingpong.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_pingpong.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_sspp.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_sspp.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_wb.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_wb.h create mode 100644 drivers/gpu/drm/msm/sde/sde_hwio.h create mode 100644 drivers/gpu/drm/msm/sde/sde_irq.c create mode 100644 drivers/gpu/drm/msm/sde/sde_kms.c create mode 100644 drivers/gpu/drm/msm/sde/sde_kms.h create mode 100644 
drivers/gpu/drm/msm/sde/sde_mdp_formats.h create mode 100644 drivers/gpu/drm/msm/sde/sde_plane.c diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt new file mode 100644 index 000000000000..8ec9f78346d8 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/sde.txt @@ -0,0 +1,55 @@ +Qualcomm Technologies, Inc. SDE KMS + +Snapdragon Display Engine implements Linux DRM/KMS APIs to drive user +interface to different panel interfaces. SDE driver is the core of +display subsystem which manage all data paths to different panel interfaces. + +Required properties +- compatible: Must be "qcom,sde-kms" +- reg: Offset and length of the register set for the device. +- reg-names : Names to refer to register sets related to this device +- clocks: List of Phandles for clock device nodes + needed by the device. +- clock-names: List of clock names needed by the device. +- mmagic-supply: Phandle for mmagic mdss supply regulator device node. +- vdd-supply: Phandle for vdd regulator device node. +- interrupt-parent: Must be core interrupt controller. +- interrupts: Interrupt associated with MDSS. +- interrupt-controller: Mark the device node as an interrupt controller. +- #interrupt-cells: Should be one. The first cell is interrupt number. +- iommus: Specifies the SID's used by this context bank. + +Please refer to ../../interrupt-controller/interrupts.txt for a general +description of interrupt bindings. 
+ +Example: + mdss_mdp: qcom,mdss_mdp@900000 { + compatible = "qcom,sde-kms"; + reg = <0x00900000 0x90000>, + <0x009b0000 0x1040>, + <0x009b8000 0x1040>; + reg-names = "mdp_phys", + "vbif_phys", + "vbif_nrt_phys"; + clocks = <&clock_mmss clk_mdss_ahb_clk>, + <&clock_mmss clk_mdss_axi_clk>, + <&clock_mmss clk_mdp_clk_src>, + <&clock_mmss clk_mdss_mdp_vote_clk>, + <&clock_mmss clk_smmu_mdp_axi_clk>, + <&clock_mmss clk_mmagic_mdss_axi_clk>, + <&clock_mmss clk_mdss_vsync_clk>; + clock-names = "iface_clk", + "bus_clk", + "core_clk_src", + "core_clk", + "iommu_clk", + "mmagic_clk", + "vsync_clk"; + mmagic-supply = <&gdsc_mmagic_mdss>; + vdd-supply = <&gdsc_mdss>; + interrupt-parent = <&intc>; + interrupts = <0 83 0>; + interrupt-controller; + #interrupt-cells = <1>; + iommus = <&mdp_smmu 0>; + }; diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 1c90290be716..f8984d673ff0 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -38,6 +38,11 @@ msm-y := \ mdp/mdp5/mdp5_kms.o \ mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_smp.o \ + sde/sde_crtc.o \ + sde/sde_encoder.o \ + sde/sde_irq.o \ + sde/sde_kms.o \ + sde/sde_plane.o \ msm_atomic.o \ msm_drv.o \ msm_fb.o \ @@ -69,3 +74,15 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o + +obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \ + sde/sde_hw_catalog_8996.o \ + sde/sde_hw_cdm.o \ + sde/sde_hw_dspp.o \ + sde/sde_hw_intf.o \ + sde/sde_hw_lm.o \ + sde/sde_hw_mdp_ctl.o \ + sde/sde_hw_mdp_util.o \ + sde/sde_hw_sspp.o \ + sde/sde_hw_wb.o \ + sde/sde_hw_pingpong.o diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b88ce514eb8e..67c4518e22e1 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -235,13 +235,20 @@ static int msm_unload(struct drm_device *dev) return 0; } +#define KMS_MDP4 0 +#define KMS_MDP5 1 +#define KMS_SDE 2 + static int get_mdp_ver(struct platform_device *pdev) 
{ #ifdef CONFIG_OF static const struct of_device_id match_types[] = { { .compatible = "qcom,mdss_mdp", - .data = (void *)5, - }, { + .data = (void *)KMS_MDP5, + }, + { + .compatible = "qcom,sde-kms", + .data = (void *)KMS_SDE, /* end node */ } }; struct device *dev = &pdev->dev; @@ -250,7 +257,7 @@ static int get_mdp_ver(struct platform_device *pdev) if (match) return (int)(unsigned long)match->data; #endif - return 4; + return KMS_MDP4; } #include @@ -369,12 +376,15 @@ static int msm_load(struct drm_device *dev, unsigned long flags) goto fail; switch (get_mdp_ver(pdev)) { - case 4: + case KMS_MDP4: kms = mdp4_kms_init(dev); break; - case 5: + case KMS_MDP5: kms = mdp5_kms_init(dev); break; + case KMS_SDE: + kms = sde_kms_init(dev); + break; default: kms = ERR_PTR(-ENODEV); break; @@ -1140,6 +1150,7 @@ static const struct platform_device_id msm_id[] = { static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp" }, /* mdp4 */ { .compatible = "qcom,mdss_mdp" }, /* mdp5 */ + { .compatible = "qcom,sde-kms" }, /* sde */ {} }; MODULE_DEVICE_TABLE(of, dt_match); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 3be7a56b14f1..e4ebc0fa2f51 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -55,7 +55,12 @@ struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; -#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ +#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ +#define MAX_CRTCS 8 +#define MAX_PLANES 12 +#define MAX_ENCODERS 8 +#define MAX_BRIDGES 8 +#define MAX_CONNECTORS 8 struct msm_file_private { /* currently we don't do anything useful with this.. 
but when @@ -128,19 +133,19 @@ struct msm_drm_private { struct msm_mmu *mmus[NUM_DOMAINS]; unsigned int num_planes; - struct drm_plane *planes[8]; + struct drm_plane *planes[MAX_PLANES]; unsigned int num_crtcs; - struct drm_crtc *crtcs[8]; + struct drm_crtc *crtcs[MAX_CRTCS]; unsigned int num_encoders; - struct drm_encoder *encoders[8]; + struct drm_encoder *encoders[MAX_ENCODERS]; unsigned int num_bridges; - struct drm_bridge *bridges[8]; + struct drm_bridge *bridges[MAX_BRIDGES]; unsigned int num_connectors; - struct drm_connector *connectors[8]; + struct drm_connector *connectors[MAX_CONNECTORS]; /* Properties */ struct drm_property *plane_property[PLANE_PROP_MAX_NUM]; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 9bcabaada179..f2e1a4fb9fae 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -76,5 +76,6 @@ static inline void msm_kms_init(struct msm_kms *kms, struct msm_kms *mdp4_kms_init(struct drm_device *dev); struct msm_kms *mdp5_kms_init(struct drm_device *dev); +struct msm_kms *sde_kms_init(struct drm_device *dev); #endif /* __MSM_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c new file mode 100644 index 000000000000..4812a5fa51b7 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -0,0 +1,148 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include "sde_kms.h" +#include "sde_hw_lm.h" +#include "sde_hw_mdss.h" + +struct sde_crtc { + struct drm_crtc base; + char name[8]; + struct drm_plane *plane; + struct drm_plane *planes[8]; + int id; + bool enabled; + enum sde_lm mixer; + enum sde_ctl ctl_path; +}; + +#define to_sde_crtc(x) container_of(x, struct sde_crtc, base) + +static void sde_crtc_destroy(struct drm_crtc *crtc) +{ + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(sde_crtc); +} + +static void sde_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static bool sde_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int sde_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + return 0; +} + +static void sde_crtc_prepare(struct drm_crtc *crtc) +{ +} + +static void sde_crtc_commit(struct drm_crtc *crtc) +{ +} + +static int sde_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return 0; +} + +static void sde_crtc_load_lut(struct drm_crtc *crtc) +{ +} + +static int sde_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *new_fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + return 0; +} + +static int sde_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val) +{ + return -EINVAL; +} + +static const struct drm_crtc_funcs sde_crtc_funcs = { + .set_config = drm_crtc_helper_set_config, + .destroy = sde_crtc_destroy, + .page_flip = sde_crtc_page_flip, + .set_property = sde_crtc_set_property, +}; + +static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = { + .dpms = sde_crtc_dpms, + .mode_fixup = sde_crtc_mode_fixup, + .mode_set = sde_crtc_mode_set, + .prepare = sde_crtc_prepare, + .commit 
= sde_crtc_commit, + .mode_set_base = sde_crtc_mode_set_base, + .load_lut = sde_crtc_load_lut, +}; + +uint32_t sde_crtc_vblank(struct drm_crtc *crtc) +{ + return 0; +} + +void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) +{ +} + +void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane) +{ +} + +void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane) +{ +} + +struct drm_crtc *sde_crtc_init(struct drm_device *dev, + struct drm_encoder *encoder, + struct drm_plane *plane, int id) +{ + struct drm_crtc *crtc = NULL; + struct sde_crtc *sde_crtc; + + sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL); + if (!sde_crtc) + return ERR_PTR(-ENOMEM); + + crtc = &sde_crtc->base; + + sde_crtc->id = id; + + /* find out if we need one or two lms */ + + drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs); + return crtc; +} diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c new file mode 100644 index 000000000000..3c28e319c580 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -0,0 +1,95 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_kms.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + +struct sde_encoder { + struct drm_encoder base; + int intf; +}; +#define to_sde_encoder(x) container_of(x, struct sde_encoder, base) + +static void sde_encoder_destroy(struct drm_encoder *encoder) +{ + struct sde_encoder *sde_encoder = to_sde_encoder(encoder); + + drm_encoder_cleanup(encoder); + kfree(sde_encoder); +} + +static const struct drm_encoder_funcs sde_encoder_funcs = { + .destroy = sde_encoder_destroy, +}; + +static void sde_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static bool sde_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void sde_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void sde_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void sde_encoder_commit(struct drm_encoder *encoder) +{ +} + +static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = { + .dpms = sde_encoder_dpms, + .mode_fixup = sde_encoder_mode_fixup, + .mode_set = sde_encoder_mode_set, + .prepare = sde_encoder_prepare, + .commit = sde_encoder_commit, +}; + +/* initialize encoder */ +struct drm_encoder *sde_encoder_init(struct drm_device *dev, int intf) +{ + struct drm_encoder *encoder = NULL; + struct sde_encoder *sde_encoder; + int ret; + + sde_encoder = kzalloc(sizeof(*sde_encoder), GFP_KERNEL); + if (!sde_encoder) { + ret = -ENOMEM; + goto fail; + } + + sde_encoder->intf = intf; + encoder = &sde_encoder->base; + + drm_encoder_init(dev, encoder, &sde_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &sde_encoder_helper_funcs); + + return encoder; + +fail: + if (encoder) + sde_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c 
new file mode 100644 index 000000000000..97faec3ed3c2 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -0,0 +1,37 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sde_hw_catalog.h" + +struct sde_mdss_hw_cfg_handler cfg_table[] = { + { .major = 1, .minor = 7, .cfg_init = sde_mdss_cfg_170_init}, +}; + +/** + * sde_hw_catalog_init: Returns the catalog information for the + * passed HW version + * @major: Major version of the MDSS HW + * @minor: Minor version + * @step: step version + */ +struct sde_mdss_cfg *sde_hw_catalog_init(u32 major, u32 minor, u32 step) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cfg_table); i++) { + if ((cfg_table[i].major == major) && + (cfg_table[i].minor == minor)) + return cfg_table[i].cfg_init(step); + } + + return ERR_PTR(-ENODEV); +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h new file mode 100644 index 000000000000..0d3c536cef26 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -0,0 +1,468 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_CATALOG_H +#define _SDE_HW_CATALOG_H + +#include +#include +#include +#include + +#define MAX_BLOCKS 8 +#define MAX_LAYERS 12 + +#define SDE_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\ + ((MINOR & 0xFFF) << 16) |\ + (STEP & 0xFFFF)) + +#define SDE_HW_MAJOR(rev) ((rev) >> 28) +#define SDE_HW_MINOR(rev) .(((rev) >> 16) & 0xFFF) +#define SDE_HW_STEP(rev) ((rev) & 0xFFFF) +#define SDE_HW_MAJOR_MINOR(rev) ((rev) >> 16) + +#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \ + (SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2))) + +#define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */ +#define SDE_HW_VER_171 SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */ +#define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */ +#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* cobalt v1.0 */ + +/** + * MDP TOP BLOCK features + * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe + * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats + * @SDE_MDP_BWC, MDSS HW supports Bandwidth compression. 
+ * @SDE_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth + * compression initial revision + * @SDE_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5 + * @SDE_MDP_CDP, Client driven prefetch + * @SDE_MDP_MAX Maximum value + + */ +enum { + SDE_MDP_PANIC_PER_PIPE = 0x1, + SDE_MDP_10BIT_SUPPORT, + SDE_MDP_BWC, + SDE_MDP_UBWC_1_0, + SDE_MDP_UBWC_1_5, + SDE_MDP_CDP, + SDE_MDP_MAX +}; + +/** + * SSPP sub-blocks/features + * @SDE_SSPP_SRC Src and fetch part of the pipes, + * @SDE_SSPP_SCALAR_QSEED2, QSEED2 algorithm support + * @SDE_SSPP_SCALAR_QSEED3, QSEED3 algorithm support + * @SDE_SSPP_SCALAR_RGB, RGB Scalar, supported by RGB pipes + * @SDE_SSPP_CSC, Support of Color space conversion + * @SDE_SSPP_PA_V1, Common op-mode register for PA blocks + * @SDE_SSPP_HIST_V1 Histogram programming method V1 + * @SDE_SSPP_IGC, Inverse gamma correction + * @SDE_SSPP_PCC, Color correction support + * @SDE_SSPP_CURSOR, SSPP can be used as a cursor layer + * @SDE_SSPP_MAX maximum value + */ +enum { + SDE_SSPP_SRC = 0x1, + SDE_SSPP_SCALAR_QSEED2, + SDE_SSPP_SCALAR_QSEED3, + SDE_SSPP_SCALAR_RGB, + SDE_SSPP_CSC, + SDE_SSPP_PA_V1, /* Common op-mode register for PA blocks */ + SDE_SSPP_HIST_V1, + SDE_SSPP_IGC, + SDE_SSPP_PCC, + SDE_SSPP_CURSOR, + SDE_SSPP_MAX +}; + +/* + * MIXER sub-blocks/features + * @SDE_MIXER_LAYER Layer mixer layer blend configuration, + * @SDE_MIXER_SOURCESPLIT Layer mixer supports source-split configuration + * @SDE_MIXER_GC Gamma correction block + * @SDE_MIXER_MAX maximum value + */ +enum { + SDE_MIXER_LAYER = 0x1, + SDE_MIXER_SOURCESPLIT, + SDE_MIXER_GC, + SDE_MIXER_MAX +}; + +/** + * DSPP sub-blocks + * @SDE_DSPP_IGC DSPP Inverse gamma correction block + * @SDE_DSPP_PCC Panel color correction block + * @SDE_DSPP_GC Gamma correction block + * @SDE_DSPP_PA Picture adjustment block + * @SDE_DSPP_GAMUT Gamut bloc + * @SDE_DSPP_DITHER Dither block + * @SDE_DSPP_HIST Histogram bloc + * @SDE_DSPP_MAX maximum value + */ +enum { + SDE_DSPP_IGC = 0x1, + 
SDE_DSPP_PCC, + SDE_DSPP_GC, + SDE_DSPP_PA, + SDE_DSPP_GAMUT, + SDE_DSPP_DITHER, + SDE_DSPP_HIST, + SDE_DSPP_MAX +}; + +/** + * PINGPONG sub-blocks + * @SDE_PINGPONG_TE Tear check block + * @SDE_PINGPONG_TE2 Additional tear check block for split pipes + * @SDE_PINGPONG_SPLIT PP block supports split fifo + * @SDE_PINGPONG_DSC, Display stream compression blocks + * @SDE_PINGPONG_MAX + */ +enum { + SDE_PINGPONG_TE = 0x1, + SDE_PINGPONG_TE2, + SDE_PINGPONG_SPLIT, + SDE_PINGPONG_DSC, + SDE_PINGPONG_MAX +}; + +/** + * WB sub-blocks and features + * @SDE_WB_LINE_MODE Writeback module supports line/linear mode + * @SDE_WB_BLOCK_MODE Writeback module supports block mode read + * @SDE_WB_ROTATE rotation support,this is available if writeback + * supports block mode read + * @SDE_WB_CSC Writeback color conversion block support + * @SDE_WB_CHROMA_DOWN, Writeback chroma down block, + * @SDE_WB_DOWNSCALE, Writeback integer downscaler, + * @SDE_WB_DITHER, Dither block + * @SDE_WB_TRAFFIC_SHAPER, Writeback traffic shaper bloc + * @SDE_WB_UBWC_1_0, Writeback Universal bandwidth compression 1.0 + * support + * @SDE_WB_WBWC_1_5 UBWC 1.5 support + * @SDE_WB_MAX maximum value + */ +enum { + SDE_WB_LINE_MODE = 0x1, + SDE_WB_BLOCK_MODE, + SDE_WB_ROTATE = SDE_WB_BLOCK_MODE, + SDE_WB_CSC, + SDE_WB_CHROMA_DOWN, + SDE_WB_DOWNSCALE, + SDE_WB_DITHER, + SDE_WB_TRAFFIC_SHAPER, + SDE_WB_UBWC_1_0, + SDE_WB_MAX +}; + +/** + * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE + * @id: enum identifying this block + * @base: register base offset to mdss + * @features bit mask identifying sub-blocks/features + */ +#define SDE_HW_BLK_INFO \ + u32 id; \ + u32 base; \ + unsigned long features + +/** + * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE + * @id: enum identifying this sub-block + * @base: offset of this sub-block relative to the block + * offset + * @len register block length of this sub-block + */ +#define SDE_HW_SUBBLK_INFO \ + u32 id; \ + u32 base; \ + u32 len + 
+/** + * struct sde_src_blk: SSPP part of the source pipes + * @info: HW register and features supported by this sub-blk + */ +struct sde_src_blk { + SDE_HW_SUBBLK_INFO; +}; + +/** + * struct sde_scalar_info: Scalar information + * @info: HW register and features supported by this sub-blk + */ +struct sde_scalar_blk { + SDE_HW_SUBBLK_INFO; +}; + +struct sde_csc_blk { + SDE_HW_SUBBLK_INFO; +}; + +/** + * struct sde_pp_blk : Pixel processing sub-blk information + * @info: HW register and features supported by this sub-blk + * @version: HW Algorithm version + */ +struct sde_pp_blk { + SDE_HW_SUBBLK_INFO; + u32 version; +}; + +/** + * struct sde_sspp_sub_blks : SSPP sub-blocks + * @maxdwnscale: max downscale ratio supported(without DECIMATION) + * @maxupscale: maxupscale ratio supported + * @maxwidth: max pixelwidth supported by this pipe + * @danger_lut: LUT to generate danger signals + * @safe_lut: LUT to generate safe signals + * @src_blk: + * @scalar_blk: + * @csc_blk: + * @pa_blk: + * @hist_lut: + * @pcc_blk: + */ +struct sde_sspp_sub_blks { + u32 maxlinewidth; + u32 danger_lut; + u32 safe_lut; + u32 maxdwnscale; + u32 maxupscale; + struct sde_src_blk src_blk; + struct sde_scalar_blk scalar_blk; + struct sde_pp_blk csc_blk; + struct sde_pp_blk pa_blk; + struct sde_pp_blk hist_lut; + struct sde_pp_blk pcc_blk; +}; + +/** + * struct sde_lm_sub_blks: information of mixer block + * @maxwidth: Max pixel width supported by this mixer + * @maxblendstages: Max number of blend-stages supported + * @blendstage_base: Blend-stage register base offset + */ +struct sde_lm_sub_blks { + u32 maxwidth; + u32 maxblendstages; + u32 blendstage_base[MAX_BLOCKS]; +}; + +struct sde_dspp_sub_blks { + struct sde_pp_blk igc; + struct sde_pp_blk pcc; + struct sde_pp_blk gc; + struct sde_pp_blk pa; + struct sde_pp_blk gamut; + struct sde_pp_blk dither; + struct sde_pp_blk hist; +}; + +struct sde_pingpong_sub_blks { + struct sde_pp_blk te; + struct sde_pp_blk te2; + struct sde_pp_blk dsc; +}; 
+ +struct sde_wb_sub_blocks { + u32 maxlinewidth; +}; + +/* struct sde_mdp_cfg : MDP TOP-BLK instance info + * @id: index identifying this block + * @base: register base offset to mdss + * @features bit mask identifying sub-blocks/features + * @highest_bank_bit: UBWC parameter + */ +struct sde_mdp_cfg { + SDE_HW_BLK_INFO; + u32 highest_bank_bit; +}; + +/* struct sde_mdp_cfg : MDP TOP-BLK instance info + * @id: index identifying this block + * @base: register base offset to mdss + * @features bit mask identifying sub-blocks/features + */ +struct sde_ctl_cfg { + SDE_HW_BLK_INFO; +}; + +/** + * struct sde_sspp_cfg - information of source pipes + * @id: index identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * @sblk: Sub-blocks of SSPP + */ +struct sde_sspp_cfg { + SDE_HW_BLK_INFO; + const struct sde_sspp_sub_blks *sblk; +}; + +/** + * struct sde_lm_cfg - information of layer mixer blocks + * @id: index identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * @sblk: Sub-blocks of SSPP + */ +struct sde_lm_cfg { + SDE_HW_BLK_INFO; + const struct sde_lm_sub_blks *sblk; +}; + +/** + * struct sde_dspp_cfg - information of DSPP blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * supported by this block + * @sblk sub-blocks information + */ +struct sde_dspp_cfg { + SDE_HW_BLK_INFO; + const struct sde_dspp_sub_blks *sblk; +}; + +/** + * struct sde_pingpong_cfg - information of PING-PONG blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * @sblk sub-blocks information + */ +struct sde_pingpong_cfg { + SDE_HW_BLK_INFO; + const struct sde_pingpong_sub_blks *sblk; +}; + +/** + * struct sde_cdm_cfg - information of chroma down blocks + * @id enum identifying this block + * 
@base register offset of this block + * @features bit mask identifying sub-blocks/features + * @intf_connect Connects to which interfaces + * @wb_connect: Connects to which writebacks + */ +struct sde_cdm_cfg { + SDE_HW_BLK_INFO; + u32 intf_connect[MAX_BLOCKS]; + u32 wb_connect[MAX_BLOCKS]; +}; + +/** + * struct sde_intf_cfg - information of timing engine blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * @type: Interface type(DSI, DP, HDMI) + */ +struct sde_intf_cfg { + SDE_HW_BLK_INFO; + u32 type; /* interface type*/ +}; + +/** + * struct sde_wb_cfg - information of writeback blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + */ +struct sde_wb_cfg { + SDE_HW_BLK_INFO; + struct sde_wb_sub_blocks *sblk; +}; + +/** + * struct sde_ad_cfg - information of Assertive Display blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + */ +struct sde_ad_cfg { + SDE_HW_BLK_INFO; +}; + +/** + * struct sde_mdss_cfg - information of MDSS HW + * This is the main catalog data structure representing + * this HW version. Contains number of instances, + * register offsets, capabilities of the all MDSS HW sub-blocks. 
+ */ +struct sde_mdss_cfg { + u32 hwversion; + + u32 mdp_count; + struct sde_mdp_cfg mdp[MAX_BLOCKS]; + + u32 ctl_count; + struct sde_ctl_cfg ctl[MAX_BLOCKS]; + + u32 sspp_count; + struct sde_sspp_cfg sspp[MAX_LAYERS]; + + u32 mixer_count; + struct sde_lm_cfg mixer[MAX_BLOCKS]; + + u32 dspp_count; + struct sde_dspp_cfg dspp[MAX_BLOCKS]; + + u32 pingpong_count; + struct sde_pingpong_cfg pingpong[MAX_BLOCKS]; + + u32 cdm_count; + struct sde_cdm_cfg cdm[MAX_BLOCKS]; + + u32 intf_count; + struct sde_intf_cfg intf[MAX_BLOCKS]; + + u32 wb_count; + struct sde_wb_cfg wb[MAX_BLOCKS]; + + u32 ad_count; + struct sde_ad_cfg ad[MAX_BLOCKS]; + /* Add additional block data structures here */ +}; + +struct sde_mdss_hw_cfg_handler { + u32 major; + u32 minor; + struct sde_mdss_cfg* (*cfg_init)(u32); +}; + +/* + * Access Macros + */ +#define BLK_MDP(s) ((s)->mdp) +#define BLK_CTL(s) ((s)->ctl) +#define BLK_VIG(s) ((s)->vig) +#define BLK_RGB(s) ((s)->rgb) +#define BLK_DMA(s) ((s)->dma) +#define BLK_CURSOR(s) ((s)->cursor) +#define BLK_MIXER(s) ((s)->mixer) +#define BLK_DSPP(s) ((s)->dspp) +#define BLK_PINGPONG(s) ((s)->pingpong) +#define BLK_CDM(s) ((s)->cdm) +#define BLK_INTF(s) ((s)->intf) +#define BLK_WB(s) ((s)->wb) +#define BLK_AD(s) ((s)->ad) + +struct sde_mdss_cfg *sde_mdss_cfg_170_init(u32 step); +struct sde_mdss_cfg *sde_hw_catalog_init(u32 major, u32 minor, u32 step); + +#endif /* _SDE_HW_CATALOG_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c new file mode 100644 index 000000000000..68782de943c1 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c @@ -0,0 +1,295 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sde_hw_catalog.h" +#include "sde_hw_mdss.h" +#include "sde_hwio.h" + +/* VIG layer capability */ +#define VIG_17X_MASK \ + (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALAR_QSEED2) |\ + BIT(SDE_SSPP_CSC) | BIT(SDE_SSPP_PA_V1) |\ + BIT(SDE_SSPP_HIST_V1) | BIT(SDE_SSPP_PCC) |\ + BIT(SDE_SSPP_IGC)) + +/* RGB layer capability */ +#define RGB_17X_MASK \ + (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALAR_RGB) |\ + BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC)) + +/* DMA layer capability */ +#define DMA_17X_MASK \ + (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_PA_V1) |\ + BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC)) + +/* Cursor layer capability */ +#define CURSOR_17X_MASK (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_CURSOR)) + +#define MIXER_17X_MASK (BIT(SDE_MIXER_SOURCESPLIT) |\ + BIT(SDE_MIXER_GC)) + +#define DSPP_17X_MASK \ + (BIT(SDE_DSPP_IGC) | BIT(SDE_DSPP_PCC) |\ + BIT(SDE_DSPP_GC) | BIT(SDE_DSPP_PA) | BIT(SDE_DSPP_GAMUT) |\ + BIT(SDE_DSPP_DITHER) | BIT(SDE_DSPP_HIST)) + +#define PINGPONG_17X_MASK \ + (BIT(SDE_PINGPONG_TE) | BIT(SDE_PINGPONG_DSC)) + +#define PINGPONG_17X_SPLIT_MASK \ + (PINGPONG_17X_MASK | BIT(SDE_PINGPONG_SPLIT) |\ + BIT(SDE_PINGPONG_TE2)) + +#define WB01_17X_MASK \ + (BIT(SDE_WB_LINE_MODE) | BIT(SDE_WB_BLOCK_MODE) |\ + BIT(SDE_WB_CSC) | BIT(SDE_WB_CHROMA_DOWN) | BIT(SDE_WB_DOWNSCALE) |\ + BIT(SDE_WB_DITHER) | BIT(SDE_WB_TRAFFIC_SHAPER) |\ + BIT(SDE_WB_UBWC_1_0)) + +#define WB2_17X_MASK \ + (BIT(SDE_WB_LINE_MODE) | BIT(SDE_WB_TRAFFIC_SHAPER)) + +/** + * set_cfg_1xx_init(): populate sde sub-blocks reg offsets and instance counts + */ +static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg) +{ + + /* Layer capability */ + static const struct sde_sspp_sub_blks layer = { + .maxlinewidth = 2560, + .danger_lut = 0xFFFF, + .safe_lut 
= 0xFF00, + .maxdwnscale = 4, .maxupscale = 20, + .src_blk = {.id = SDE_SSPP_SRC, + .base = 0x00, .len = 0x150,}, + .scalar_blk = {.id = SDE_SSPP_SCALAR_QSEED2, + .base = 0x200, .len = 0x70,}, + .csc_blk = {.id = SDE_SSPP_CSC, + .base = 0x320, .len = 0x44,}, + .pa_blk = {.id = SDE_SSPP_PA_V1, + .base = 0x200, .len = 0x0,}, + .hist_lut = {.id = SDE_SSPP_HIST_V1, + .base = 0xA00, .len = 0x400,}, + .pcc_blk = {.id = SDE_SSPP_PCC, + .base = 0x1780, .len = 0x64,}, + }; + + static const struct sde_sspp_sub_blks dma = { + .maxlinewidth = 2560, + .danger_lut = 0xFFFF, + .safe_lut = 0xFF00, + .maxdwnscale = 0, .maxupscale = 0, + .src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x0,}, + .scalar_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + .csc_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + .pa_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + .hist_lut = {.id = 0, .base = 0x00, .len = 0x0,}, + .pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x01780, .len = 0x64,}, + }; + + static const struct sde_sspp_sub_blks cursor = { + .maxlinewidth = 128, + .danger_lut = 0xFFFF, + .safe_lut = 0xFF00, + .maxdwnscale = 0, .maxupscale = 0, + .src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x0,}, + .scalar_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + .csc_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + .pa_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + .hist_lut = {.id = 0, .base = 0x00, .len = 0x0,}, + .pcc_blk = {.id = 0, .base = 0x00, .len = 0x0,}, + }; + + /* MIXER capability */ + static const struct sde_lm_sub_blks lm = { + .maxwidth = 2560, + .maxblendstages = 7, /* excluding base layer */ + .blendstage_base = { /* offsets relative to mixer base */ + 0x20, 0x50, 0x80, 0xB0, 0x230, 0x260, 0x290 } + }; + + /* DSPP capability */ + static const struct sde_dspp_sub_blks pp = { + .igc = {.id = SDE_DSPP_GC, .base = 0x17c0, .len = 0x0, + .version = 0x1}, + .pcc = {.id = SDE_DSPP_PCC, .base = 0x00, .len = 0x0, + .version = 0x1}, + .gamut = {.id = SDE_DSPP_GAMUT, .base = 0x01600, .len = 0x0, + 
.version = 0x1}, + .dither = {.id = SDE_DSPP_DITHER, .base = 0x00, .len = 0x0, + .version = 0x1}, + .pa = {.id = SDE_DSPP_PA, .base = 0x00, .len = 0x0, + .version = 0x1}, + .hist = {.id = SDE_DSPP_HIST, .base = 0x00, .len = 0x0, + .version = 0x1}, + }; + + /* PINGPONG capability */ + static const struct sde_pingpong_sub_blks p_p = { + .te = {.id = SDE_PINGPONG_TE, .base = 0x0000, .len = 0x0, + .version = 0x1}, + .te2 = {.id = SDE_PINGPONG_TE2, .base = 0x2000, .len = 0x0, + .version = 0x1}, + .dsc = {.id = SDE_PINGPONG_DSC, .base = 0x10000, .len = 0x0, + .version = 0x1}, + }; + + /* Setup Register maps and defaults */ + *cfg = (struct sde_mdss_cfg){ + .mdp_count = 1, + .mdp = { + {.id = MDP_TOP, .base = 0x00001000, .features = 0, + .highest_bank_bit = 0x2}, + }, + .ctl_count = 5, + .ctl = { + {.id = CTL_0, .base = 0x00002000}, + {.id = CTL_1, .base = 0x00002200}, + {.id = CTL_2, .base = 0x00002400}, + {.id = CTL_3, .base = 0x00002600}, + {.id = CTL_4, .base = 0x00002800}, + }, + /* 4 VIG, + 4 RGB + 2 DMA + 2 CURSOR */ + .sspp_count = 12, + .sspp = { + {.id = SSPP_VIG0, .base = 0x00005000, + .features = VIG_17X_MASK, .sblk = &layer}, + {.id = SSPP_VIG1, .base = 0x00007000, + .features = VIG_17X_MASK, .sblk = &layer}, + {.id = SSPP_VIG2, .base = 0x00009000, + .features = VIG_17X_MASK, .sblk = &layer}, + {.id = SSPP_VIG3, .base = 0x0000b000, + .features = VIG_17X_MASK, .sblk = &layer}, + + {.id = SSPP_RGB0, .base = 0x00001500, + .features = RGB_17X_MASK, .sblk = &layer}, + {.id = SSPP_RGB1, .base = 0x00001700, + .features = RGB_17X_MASK, .sblk = &layer}, + {.id = SSPP_RGB2, .base = 0x00001900, + .features = RGB_17X_MASK, .sblk = &layer}, + {.id = SSPP_RGB3, .base = 0x00001B00, + .features = RGB_17X_MASK, .sblk = &layer}, + + {.id = SSPP_DMA0, .base = 0x00025000, + .features = DMA_17X_MASK, .sblk = &dma}, + {.id = SSPP_DMA1, .base = 0x00027000, + .features = DMA_17X_MASK, .sblk = &dma}, + + {.id = SSPP_CURSOR0, .base = 0x00035000, + .features = CURSOR_17X_MASK, .sblk = 
&cursor}, + {.id = SSPP_CURSOR1, .base = 0x00037000, + .features = CURSOR_17X_MASK, .sblk = &cursor}, + }, + .mixer_count = 6, + .mixer = { + {.id = LM_0, .base = 0x00045000, + .features = MIXER_17X_MASK, + .sblk = &lm}, + {.id = LM_1, .base = 0x00046000, + .features = MIXER_17X_MASK, + .sblk = &lm}, + {.id = LM_2, .base = 0x00047000, + .features = MIXER_17X_MASK, + .sblk = &lm}, + {.id = LM_3, .base = 0x00048000, + .features = MIXER_17X_MASK, + .sblk = &lm}, + {.id = LM_4, .base = 0x00049000, + .features = MIXER_17X_MASK, + .sblk = &lm}, + {.id = LM_5, .base = 0x0004a000, + .features = MIXER_17X_MASK, + .sblk = &lm}, + }, + .dspp_count = 2, + .dspp = { + {.id = DSPP_0, .base = 0x00055000, + .features = DSPP_17X_MASK, + .sblk = &pp}, + {.id = DSPP_1, .base = 0x00057000, + .features = DSPP_17X_MASK, + .sblk = &pp}, + }, + .pingpong_count = 4, + .pingpong = { + {.id = PINGPONG_0, .base = 0x00071000, + .features = PINGPONG_17X_SPLIT_MASK, + .sblk = &p_p}, + {.id = PINGPONG_1, .base = 0x00071800, + .features = PINGPONG_17X_SPLIT_MASK, + .sblk = &p_p}, + {.id = PINGPONG_2, .base = 0x00072000, + .features = PINGPONG_17X_MASK, + .sblk = &p_p}, + {.id = PINGPONG_3, .base = 0x00072800, + .features = PINGPONG_17X_MASK, + .sblk = &p_p}, + }, + .cdm_count = 1, + .cdm = { + {.id = CDM_0, .base = 0x0007A200, .features = 0, + .intf_connect = { BIT(INTF_3)}, + .wb_connect = { BIT(WB_2)},} + }, + .intf_count = 4, + .intf = { + {.id = INTF_0, .base = 0x0006B000, + .type = INTF_NONE}, + {.id = INTF_1, .base = 0x0006B800, + .type = INTF_DSI}, + {.id = INTF_2, .base = 0x0006C000, + .type = INTF_DSI}, + {.id = INTF_3, .base = 0x0006C800, + .type = INTF_HDMI}, + }, + .wb_count = 3, + .wb = { + {.id = WB_0, .base = 0x00065000, + .features = WB01_17X_MASK}, + {.id = WB_1, .base = 0x00065800, + .features = WB01_17X_MASK}, + {.id = WB_2, .base = 0x00066000, + .features = WB2_17X_MASK}, + }, + .ad_count = 2, + .ad = { + {.id = AD_0, .base = 0x00079000}, + {.id = AD_1, .base = 0x00079800}, + 
}, + }; + return 0; +} + +/** + * sde_mdp_cfg_170_init(): Populate the sde sub-blocks catalog information + */ +struct sde_mdss_cfg *sde_mdss_cfg_170_init(u32 step) +{ + struct sde_mdss_cfg *m = NULL; + + /* + * This function, for each sub-block sets, + * instance count, IO regions, + * default capabilities and this version capabilities, + * Additional catalog items + */ + + m = kzalloc(sizeof(*m), GFP_KERNEL); + if (!m) + return NULL; + + set_cfg_1xx_init(m); + m->hwversion = SDE_HW_VER(1, 7, step); + + return m; +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c new file mode 100644 index 000000000000..9697553c6d51 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c @@ -0,0 +1,296 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_hw_mdss.h" +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_cdm.h" + +#define CDM_CSC_10_OPMODE 0x000 +#define CDM_CSC_10_BASE 0x004 + +#define CDM_CDWN2_OP_MODE 0x100 +#define CDM_CDWN2_CLAMP_OUT 0x104 +#define CDM_CDWN2_PARAMS_3D_0 0x108 +#define CDM_CDWN2_PARAMS_3D_1 0x10C +#define CDM_CDWN2_COEFF_COSITE_H_0 0x110 +#define CDM_CDWN2_COEFF_COSITE_H_1 0x114 +#define CDM_CDWN2_COEFF_COSITE_H_2 0x118 +#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C +#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120 +#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124 +#define CDM_CDWN2_COEFF_COSITE_V 0x128 +#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C +#define CDM_CDWN2_OUT_SIZE 0x130 + +#define CDM_HDMI_PACK_OP_MODE 0x200 +#define CDM_CSC_10_MATRIX_COEFF_0 0x204 + +/** + * Horizontal coeffiecients for cosite chroma downscale + * s13 repesentation of coefficients + */ +static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e}; + +/** + * Horizontal coefficients for offsite chroma downscale + */ +static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046}; + +/** + * Vertical coefficients for cosite chroma downscale + */ +static u32 cosite_v_coeff[] = {0x00080004}; +/** + * Vertical coefficients for offsite chroma downscale + */ +static u32 offsite_v_coeff[] = {0x00060002}; + +/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */ +static struct sde_csc_cfg rgb2yuv_cfg = { + { + 0x0083, 0x0102, 0x0032, + 0x1fb5, 0x1f6c, 0x00e1, + 0x00e1, 0x1f45, 0x1fdc + }, + { 0x00, 0x00, 0x00 }, + { 0x0040, 0x0200, 0x0200 }, + { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff }, + { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 }, +}; + +static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->cdm_count; i++) { + if (cdm == m->cdm[i].id) { + b->base_off = addr; + b->blk_off = m->cdm[i].base; + b->hwversion = m->hwversion; + return &m->cdm[i]; 
+ } + } + + return ERR_PTR(-EINVAL); +} + +static void sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx, + struct sde_csc_cfg *data) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + + sde_hw_csc_setup(c, CDM_CSC_10_MATRIX_COEFF_0, data); +} + +int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx, + struct sde_hw_cdm_cfg *cfg) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 opmode = 0; + u32 out_size = 0; + + if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT) + opmode &= ~BIT(7); + else + opmode |= BIT(7); + + /* ENABLE DWNS_H bit */ + opmode |= BIT(1); + + switch (cfg->h_cdwn_type) { + case CDM_CDWN_DISABLE: + /* CLEAR METHOD_H field */ + opmode &= ~(0x18); + /* CLEAR DWNS_H bit */ + opmode &= ~BIT(1); + break; + case CDM_CDWN_PIXEL_DROP: + /* Clear METHOD_H field (pixel drop is 0) */ + opmode &= ~(0x18); + break; + case CDM_CDWN_AVG: + /* Clear METHOD_H field (Average is 0x1) */ + opmode &= ~(0x18); + opmode |= (0x1 << 0x3); + break; + case CDM_CDWN_COSITE: + /* Clear METHOD_H field (Average is 0x2) */ + opmode &= ~(0x18); + opmode |= (0x2 << 0x3); + /* Co-site horizontal coefficients */ + SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0, + cosite_h_coeff[0]); + SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1, + cosite_h_coeff[1]); + SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2, + cosite_h_coeff[2]); + break; + case CDM_CDWN_OFFSITE: + /* Clear METHOD_H field (Average is 0x3) */ + opmode &= ~(0x18); + opmode |= (0x3 << 0x3); + + /* Off-site horizontal coefficients */ + SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0, + offsite_h_coeff[0]); + SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1, + offsite_h_coeff[1]); + SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2, + offsite_h_coeff[2]); + break; + default: + pr_err("%s invalid horz down sampling type\n", __func__); + return -EINVAL; + } + + /* ENABLE DWNS_V bit */ + opmode |= BIT(2); + + switch (cfg->v_cdwn_type) { + case CDM_CDWN_DISABLE: + /* CLEAR METHOD_V field */ + opmode &= ~(0x60); + /* CLEAR DWNS_V bit */ + opmode &= ~BIT(2); 
+ break; + case CDM_CDWN_PIXEL_DROP: + /* Clear METHOD_V field (pixel drop is 0) */ + opmode &= ~(0x60); + break; + case CDM_CDWN_AVG: + /* Clear METHOD_V field (Average is 0x1) */ + opmode &= ~(0x60); + opmode |= (0x1 << 0x5); + break; + case CDM_CDWN_COSITE: + /* Clear METHOD_V field (Average is 0x2) */ + opmode &= ~(0x60); + opmode |= (0x2 << 0x5); + /* Co-site vertical coefficients */ + SDE_REG_WRITE(c, + CDM_CDWN2_COEFF_COSITE_V, + cosite_v_coeff[0]); + break; + case CDM_CDWN_OFFSITE: + /* Clear METHOD_V field (Average is 0x3) */ + opmode &= ~(0x60); + opmode |= (0x3 << 0x5); + + /* Off-site vertical coefficients */ + SDE_REG_WRITE(c, + CDM_CDWN2_COEFF_OFFSITE_V, + offsite_v_coeff[0]); + break; + default: + return -EINVAL; + } + + if (cfg->v_cdwn_type || cfg->h_cdwn_type) + opmode |= BIT(0); /* EN CDWN module */ + else + opmode &= ~BIT(0); + + out_size = (cfg->output_width & 0xFFFF) | + ((cfg->output_height & 0xFFFF) << 16); + SDE_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size); + SDE_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode); + SDE_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT, + ((0x3FF << 16) | 0x0)); + + return 0; +} + +int sde_hw_cdm_enable(struct sde_hw_cdm *ctx, + struct sde_hw_cdm_cfg *cdm) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + struct sde_mdp_format_params *fmt = cdm->output_fmt; + u32 opmode = 0; + u32 cdm_enable = 0; + u32 csc = 0; + + if (!fmt->is_yuv) + return 0; + + if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { + if (fmt->chroma_sample != SDE_MDP_CHROMA_H1V2) + return -EINVAL; /*unsupported format */ + opmode = BIT(0); + opmode |= (fmt->chroma_sample << 1); + cdm_enable |= BIT(19); + } else { + opmode = 0; + cdm_enable = BIT(24); + } + + csc |= BIT(2); + csc &= ~BIT(1); + csc |= BIT(0); + + /* For this register we need to offset it to MDP TOP BLOCK */ + SDE_REG_WRITE(c, MDP_OUT_CTL_0, cdm_enable); + + SDE_REG_WRITE(c, CDM_CSC_10_OPMODE, csc); + SDE_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode); + return 0; +} + +void sde_hw_cdm_disable(struct sde_hw_cdm 
*ctx) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + + /* mdp top block */ + SDE_REG_WRITE(c, MDP_OUT_CTL_0, 0); /* bypass mode */ +} + +static void _setup_cdm_ops(struct sde_hw_cdm_ops *ops, + unsigned long features) +{ + ops->setup_csc_data = sde_hw_cdm_setup_csc_10bit; + ops->setup_cdwn = sde_hw_cdm_setup_cdwn; + ops->enable = sde_hw_cdm_enable; + ops->disable = sde_hw_cdm_disable; +} + +struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_cdm *c; + struct sde_cdm_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _cdm_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + c->idx = idx; + c->cdm_hw_cap = cfg; + _setup_cdm_ops(&c->ops, c->cdm_hw_cap->features); + + /* + * Perform any default initialization for the chroma down module + * @setup default csc coefficients + */ + sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg); + + return c; +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h new file mode 100644 index 000000000000..d606ef5e9aa4 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h @@ -0,0 +1,115 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SDE_HW_CDM_H +#define _SDE_HW_CDM_H + +#include "sde_hw_mdss.h" + +struct sde_hw_cdm; + +struct sde_hw_cdm_cfg { + u32 output_width; + u32 output_height; + u32 output_bit_depth; + u32 h_cdwn_type; + u32 v_cdwn_type; + struct sde_mdp_format_params *output_fmt; + u32 output_type; + int flags; +}; + +enum sde_hw_cdwn_type { + CDM_CDWN_DISABLE, + CDM_CDWN_PIXEL_DROP, + CDM_CDWN_AVG, + CDM_CDWN_COSITE, + CDM_CDWN_OFFSITE, +}; + +enum sde_hw_cdwn_output_type { + CDM_CDWN_OUTPUT_HDMI, + CDM_CDWN_OUTPUT_WB, +}; + +enum sde_hw_cdwn_output_bit_depth { + CDM_CDWN_OUTPUT_8BIT, + CDM_CDWN_OUTPUT_10BIT, +}; + +/** + * struct sde_hw_cdm_ops : Interface to the chroma down Hw driver functions + * Assumption is these functions will be called after + * clocks are enabled + * @setup_csc: Programs the csc matrix + * @setup_cdwn: Sets up the chroma down sub module + * @enable: Enables the output to interface and programs the + * output packer + * @disable: Puts the cdm in bypass mode + */ +struct sde_hw_cdm_ops { + /** + * Programs the CSC matrix for conversion from RGB space to YUV space, + * it is optinal to call this function as this matrix is automatically + * set during initialization, user should call this if it wants + * to program a different matrix than default matrix. + * @cdm: Pointer to the chroma down context structure + * @data Pointer to CSC configuration data + */ + void (*setup_csc_data)(struct sde_hw_cdm *cdm, + struct sde_csc_cfg *data); + + /** + * Programs the Chroma downsample part. 
+ * @cdm Pointer to chroma down context + */ + int (*setup_cdwn)(struct sde_hw_cdm *cdm, + struct sde_hw_cdm_cfg *cfg); + + /** + * Enable the CDM module + * @cdm Pointer to chroma down context + */ + int (*enable)(struct sde_hw_cdm *cdm, + struct sde_hw_cdm_cfg *cfg); + + /** + * Disable the CDM module + * @cdm Pointer to chroma down context + */ + void (*disable)(struct sde_hw_cdm *cdm); +}; + +struct sde_hw_cdm { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* chroma down */ + const struct sde_cdm_cfg *cdm_hw_cap; + enum sde_cdm idx; + + /* ops */ + struct sde_hw_cdm_ops ops; +}; + +/** + * sde_hw_cdm_init(): Initializes the cdm hw driver object. + * should be called once before accessing every cdm. + * @idx: cdm index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_CDM_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c new file mode 100644 index 000000000000..fe8917f9e71d --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c @@ -0,0 +1,105 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_hw_mdss.h" +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_dspp.h" + +static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->dspp_count; i++) { + if (dspp == m->dspp[i].id) { + b->base_off = addr; + b->blk_off = m->dspp[i].base; + b->hwversion = m->hwversion; + return &m->dspp[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +void sde_dspp_setup_histogram(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_read_histogram(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_update_igc(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_setup_pa(struct sde_hw_dspp *dspp, void *cfg) +{ +} + +void sde_dspp_setup_pcc(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_setup_pa_memcolor(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_setup_sixzone(struct sde_hw_dspp *dspp) +{ +} + +void sde_dspp_setup_danger_safe(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +void sde_dspp_setup_dither(struct sde_hw_dspp *ctx, void *cfg) +{ +} + +static void _setup_dspp_ops(struct sde_hw_dspp_ops *ops, + unsigned long features) +{ +} +struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_dspp *c; + struct sde_dspp_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _dspp_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + c->idx = idx; + c->cap = cfg; + _setup_dspp_ops(&c->ops, c->cap->features); + + return c; +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h new file mode 100644 index 000000000000..28c3cf10cb33 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h @@ -0,0 +1,127 @@ 
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_DSPP_H +#define _SDE_HW_DSPP_H + +struct sde_hw_dspp; + +/** + * struct sde_hw_dspp_ops - interface to the dspp hardware driver functions + * Caller must call the init function to get the dspp context for each dspp + * Assumption is these functions will be called after clocks are enabled + */ +struct sde_hw_dspp_ops { + /** + * setup_histogram - setup dspp histogram + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_histogram)(struct sde_hw_dspp *ctx, void *cfg); + + /** + * read_histogram - read dspp histogram + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg); + + /** + * update_igc - update dspp igc + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*update_igc)(struct sde_hw_dspp *ctx, void *cfg); + + /** + * setup_pa - setup dspp pa + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_pa)(struct sde_hw_dspp *dspp, void *cfg); + + /** + * setup_pcc - setup dspp pcc + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_pcc)(struct sde_hw_dspp *ctx, void *cfg); + + /** + * setup_sharpening - setup dspp sharpening + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_sharpening)(struct sde_hw_dspp *ctx, void *cfg); + + /** + * setup_pa_memcolor - setup dspp 
memcolor + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_pa_memcolor)(struct sde_hw_dspp *ctx, void *cfg); + + /** + * setup_sixzone - setup dspp six zone + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_sixzone)(struct sde_hw_dspp *dspp); + + /** + * setup_danger_safe - setup danger safe LUTS + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg); + /** + * setup_dither - setup dspp dither + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_dither)(struct sde_hw_dspp *ctx, void *cfg); +}; + +/** + * struct sde_hw_dspp - dspp description + * @base_off: MDP register mapped offset + * @blk_off: DSPP offset relative to mdss offset + * @length Length of register block offset + * @hwversion Mdss hw version number + * @idx: DSPP index + * @dspp_hw_cap: Pointer to layer_cfg + * @highest_bank_bit: + * @ops: Pointer to operations possible for this dspp + */ +struct sde_hw_dspp { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* dspp */ + enum sde_dspp idx; + const struct sde_dspp_cfg *cap; + + /* Ops */ + struct sde_hw_dspp_ops ops; +}; + +/** + * sde_hw_dspp_init - initializes the dspp hw driver object. + * should be called once before accessing every dspp. + * @idx: DSPP index for which driver object is required + * @addr: Mapped register io address of MDP + */ +struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_DSPP_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c new file mode 100644 index 000000000000..33d93e7a479b --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -0,0 +1,373 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_intf.h" + +#define INTF_TIMING_ENGINE_EN 0x000 +#define INTF_CONFIG 0x004 +#define INTF_HSYNC_CTL 0x008 +#define INTF_VSYNC_PERIOD_F0 0x00C +#define INTF_VSYNC_PERIOD_F1 0x010 +#define INTF_VSYNC_PULSE_WIDTH_F0 0x014 +#define INTF_VSYNC_PULSE_WIDTH_F1 0x018 +#define INTF_DISPLAY_V_START_F0 0x01C +#define INTF_DISPLAY_V_START_F1 0x020 +#define INTF_DISPLAY_V_END_F0 0x024 +#define INTF_DISPLAY_V_END_F1 0x028 +#define INTF_ACTIVE_V_START_F0 0x02C +#define INTF_ACTIVE_V_START_F1 0x030 +#define INTF_ACTIVE_V_END_F0 0x034 +#define INTF_ACTIVE_V_END_F1 0x038 +#define INTF_DISPLAY_HCTL 0x03C +#define INTF_ACTIVE_HCTL 0x040 +#define INTF_BORDER_COLOR 0x044 +#define INTF_UNDERFLOW_COLOR 0x048 +#define INTF_HSYNC_SKEW 0x04C +#define INTF_POLARITY_CTL 0x050 +#define INTF_TEST_CTL 0x054 +#define INTF_TP_COLOR0 0x058 +#define INTF_TP_COLOR1 0x05C +#define INTF_FRAME_LINE_COUNT_EN 0x0A8 +#define INTF_FRAME_COUNT 0x0AC +#define INTF_LINE_COUNT 0x0B0 + +#define INTF_DEFLICKER_CONFIG 0x0F0 +#define INTF_DEFLICKER_STRNG_COEFF 0x0F4 +#define INTF_DEFLICKER_WEAK_COEFF 0x0F8 + +#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084 +#define INTF_PANEL_FORMAT 0x090 +#define INTF_TPG_ENABLE 0x100 +#define INTF_TPG_MAIN_CONTROL 0x104 +#define INTF_TPG_VIDEO_CONFIG 0x108 +#define INTF_TPG_COMPONENT_LIMITS 0x10C +#define INTF_TPG_RECTANGLE 0x110 +#define INTF_TPG_INITIAL_VALUE 0x114 +#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118 +#define INTF_TPG_RGB_MAPPING 0x11C 
+#define INTF_PROG_FETCH_START 0x170 + +#define INTF_FRAME_LINE_COUNT_EN 0x0A8 +#define INTF_FRAME_COUNT 0x0AC +#define INTF_LINE_COUNT 0x0B0 + +static struct sde_intf_cfg *_intf_offset(enum sde_intf intf, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->intf_count; i++) { + if (intf == m->intf[i].id) { + b->base_off = addr; + b->blk_off = m->intf[i].base; + b->hwversion = m->hwversion; + return &m->intf[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx, + struct intf_timing_params *p, + struct sde_mdp_format_params *fmt) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 hsync_period, vsync_period; + u32 display_v_start, display_v_end; + u32 hsync_start_x, hsync_end_x; + u32 active_h_start, active_h_end; + u32 active_v_start, active_v_end; + u32 active_hctl, display_hctl, hsync_ctl; + u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity; + u32 panel_format; + u32 intf_cfg; + + /* read interface_cfg */ + intf_cfg = SDE_REG_READ(c, INTF_CONFIG); + hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width + + p->h_front_porch; + vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height + + p->v_front_porch; + + display_v_start = ((p->vsync_pulse_width + p->v_back_porch) * + hsync_period) + p->hsync_skew; + display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) + + p->hsync_skew - 1; + + if (ctx->cap->type == INTF_EDP) { + display_v_start += p->hsync_pulse_width + p->h_back_porch; + display_v_end -= p->h_front_porch; + } + + hsync_start_x = p->h_back_porch + p->hsync_pulse_width; + hsync_end_x = hsync_period - p->h_front_porch - 1; + + if (p->width != p->xres) { + active_h_start = hsync_start_x; + active_h_end = active_h_start + p->xres - 1; + } else { + active_h_start = 0; + active_h_end = 0; + } + + if (p->height != p->yres) { + active_v_start = display_v_start; + active_v_end = active_v_start + 
(p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	if (ctx->cap->type == INTF_HDMI) {
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0);	/* HSYNC Polarity */
+
+	if (!fmt->is_yuv)
+		panel_format = (fmt->bits[0] |
+				(fmt->bits[1] << 2) |
+				(fmt->bits[2] << 4) |
+				(0x21 << 8));
+	else
+		/* YUV: interface treats pixel data as RGB888 (was |= on uninit var) */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	SDE_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	SDE_REG_WRITE(c, INTF_VSYNC_PERIOD_F0,
+			vsync_period * hsync_period);
+	SDE_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	SDE_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_START_F0,
+			display_v_start);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_END_F0,
+			display_v_end);
+	SDE_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_START_F0,
+			active_v_start);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_END_F0,
+			active_v_end);
+
+	SDE_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	SDE_REG_WRITE(c, INTF_UNDERFLOW_COLOR,
+			p->underflow_clr);
+	SDE_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	SDE_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	SDE_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
+static 
void sde_hw_intf_enable_timing_engine( + struct sde_hw_intf *intf, + u8 enable) +{ + struct sde_hw_blk_reg_map *c = &intf->hw; + u32 intf_sel; + + /* Display interface select */ + if (enable) { + intf_sel = SDE_REG_READ(c, DISP_INTF_SEL); + + intf_sel |= (intf->cap->type << ((intf->idx) * 8)); + SDE_REG_WRITE(c, DISP_INTF_SEL, intf_sel); + } + + SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN, + enable & 0x1); +} + +static void sde_hw_intf_setup_prg_fetch( + struct sde_hw_intf *intf, + struct intf_prog_fetch *fetch) +{ + struct sde_hw_blk_reg_map *c = &intf->hw; + int fetch_enable; + + /* + * Fetch should always be outside the active lines. If the fetching + * is programmed within active region, hardware behavior is unknown. + */ + + fetch_enable = SDE_REG_READ(c, INTF_CONFIG); + if (fetch->enable) { + fetch_enable |= BIT(31); + SDE_REG_WRITE(c, INTF_PROG_FETCH_START, + fetch->fetch_start); + } else { + fetch_enable &= ~BIT(31); + } + + SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable); +} + +static void sde_hw_intf_get_timing_config( + struct sde_hw_intf *intf, + struct intf_timing_params *cfg) +{ + struct sde_hw_blk_reg_map *c = &intf->hw; + u32 vsync_period; + u32 display_v_start, display_v_end; + u32 hsync_start_x, hsync_end_x; + u32 active_v_start, active_v_end; + u32 active_hctl, display_hctl, hsync_ctl; + u32 polarity_ctl; + u32 pulse_width; + u32 htotal, vtotal; + u32 intf_cfg; + + hsync_ctl = SDE_REG_READ(c, INTF_HSYNC_CTL); + vsync_period = SDE_REG_READ(c, INTF_VSYNC_PERIOD_F0); + pulse_width = SDE_REG_READ(c, INTF_VSYNC_PULSE_WIDTH_F0); + display_hctl = SDE_REG_READ(c, INTF_DISPLAY_HCTL); + display_v_start = SDE_REG_READ(c, INTF_DISPLAY_V_START_F0); + display_v_end = SDE_REG_READ(c, INTF_DISPLAY_V_END_F0); + active_hctl = SDE_REG_READ(c, INTF_ACTIVE_HCTL); + active_v_start = SDE_REG_READ(c, INTF_ACTIVE_V_START_F0); + active_v_end = SDE_REG_READ(c, INTF_ACTIVE_V_END_F0); + intf_cfg = SDE_REG_READ(c, INTF_CONFIG); + cfg->border_clr = SDE_REG_READ(c, INTF_BORDER_COLOR); + 
cfg->underflow_clr = SDE_REG_READ(c, INTF_UNDERFLOW_COLOR); + cfg->hsync_skew = SDE_REG_READ(c, INTF_HSYNC_SKEW); + polarity_ctl = SDE_REG_READ(c, INTF_POLARITY_CTL); + + hsync_start_x = (display_hctl & 0xffff); + hsync_end_x = (display_hctl & 0xffff0000) >> 16; + cfg->hsync_pulse_width = (hsync_ctl & 0xffff); + htotal = (hsync_ctl & 0xffff0000) >> 16; + + if (htotal != 0) { + vtotal = vsync_period / htotal; + cfg->vsync_pulse_width = pulse_width/htotal; + + /* porches */ + cfg->h_front_porch = htotal - hsync_end_x - 1; + cfg->h_back_porch = hsync_start_x - cfg->hsync_pulse_width; + cfg->v_front_porch = vsync_period - display_v_end; + cfg->v_back_porch = display_v_start - cfg->vsync_pulse_width; + + /* active resolution */ + cfg->width = htotal - cfg->hsync_pulse_width - + cfg->h_back_porch - + cfg->h_front_porch; + cfg->height = vtotal - cfg->vsync_pulse_width - + cfg->v_back_porch - cfg->v_front_porch; + + /* display panel resolution */ + if (intf_cfg & BIT(29)) + cfg->xres = ((active_hctl & 0xffff0000) >> 16) - + (active_hctl & 0xffff) + 1; + else + cfg->xres = cfg->width; + + if (intf_cfg & BIT(30)) + cfg->yres = (active_v_end - active_v_start + 1 + )/htotal; + else + cfg->yres = cfg->height; + } else { + cfg->vsync_pulse_width = 0; + cfg->h_front_porch = 0; + cfg->h_back_porch = 0; + cfg->v_front_porch = 0; + cfg->v_back_porch = 0; + cfg->width = 0; + cfg->height = 0; + } + + cfg->hsync_polarity = polarity_ctl & 1; + cfg->vsync_polarity = (polarity_ctl & 2) >> 1; +} + +static void sde_hw_intf_get_status( + struct sde_hw_intf *intf, + struct intf_status *s) +{ + struct sde_hw_blk_reg_map *c = &intf->hw; + + s->is_en = SDE_REG_READ(c, INTF_TIMING_ENGINE_EN); + if (s->is_en) { + s->frame_count = SDE_REG_READ(c, INTF_FRAME_COUNT); + s->line_count = SDE_REG_READ(c, INTF_LINE_COUNT); + } else { + s->line_count = 0; + s->frame_count = 0; + } +} + +static void _setup_intf_ops(struct sde_hw_intf_ops *ops, + unsigned long cap) +{ + ops->setup_timing_gen = 
sde_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
+	ops->get_timing_gen = sde_hw_intf_get_timing_config;
+	ops->get_status = sde_hw_intf_get_status;
+	ops->enable_timing = sde_hw_intf_enable_timing_engine;
+}
+
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_intf *c;
+	struct sde_intf_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	/*
+	 * Perform any default initialization for the intf
+	 */
+	return c;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
new file mode 100644
index 000000000000..ce5190655dad
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details. 
+ */ + +#ifndef _SDE_HW_INTF_H +#define _SDE_HW_INTF_H + +#include "sde_hw_catalog.h" +#include "sde_hw_mdss.h" + +struct sde_hw_intf; + +/* intf timing settings */ +struct intf_timing_params { + u32 width; /* active width */ + u32 height; /* active height */ + u32 xres; /* Display panel width */ + u32 yres; /* Display panel height */ + + u32 h_back_porch; + u32 h_front_porch; + u32 v_back_porch; + u32 v_front_porch; + u32 hsync_pulse_width; + u32 vsync_pulse_width; + u32 hsync_polarity; + u32 vsync_polarity; + u32 border_clr; + u32 underflow_clr; + u32 hsync_skew; +}; + +struct intf_prog_fetch { + u8 enable; + /* vsync counter for the front porch pixel line */ + u32 fetch_start; +}; + +struct intf_status { + u8 is_en; /* interface timing engine is enabled or not */ + u32 frame_count; /* frame count since timing engine enabled */ + u32 line_count; /* current line count including blanking */ +}; + +/** + * struct sde_hw_intf_ops : Interface to the interface Hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @ setup_timing_gen : programs the timing engine + * @ setup_prog_fetch : enables/disables the programmable fetch logic + * @ enable_timing: enable/disable timing engine + * @ get_timing_gen: get timing generator programmed configuration + * @ get_status: returns if timing engine is enabled or not + */ +struct sde_hw_intf_ops { + void (*setup_timing_gen)(struct sde_hw_intf *intf, + struct intf_timing_params *p, + struct sde_mdp_format_params *fmt); + + void (*setup_prg_fetch)(struct sde_hw_intf *intf, + struct intf_prog_fetch *fetch); + + void (*enable_timing)(struct sde_hw_intf *intf, + u8 enable); + + void (*get_timing_gen)(struct sde_hw_intf *intf, + struct intf_timing_params *cfg); + + void (*get_status)(struct sde_hw_intf *intf, + struct intf_status *status); +}; + +struct sde_hw_intf { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* intf */ + enum sde_intf idx; + const struct sde_intf_cfg *cap; + + /* ops */ 
+ struct sde_hw_intf_ops ops; +}; + +/** + * sde_hw_intf_init(): Initializes the intf driver for the passed + * interface idx. + * @idx: interface index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_INTF_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c new file mode 100644 index 000000000000..e9aeab797a37 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -0,0 +1,192 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include "sde_hw_catalog.h"
+#include "sde_hwio.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_mdss.h"
+
+#define LM_OP_MODE	0x00
+#define LM_OUT_SIZE	0x04
+#define LM_BORDER_COLOR_0	0x08
+#define LM_BORDER_COLOR_1	0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP	0x00
+#define LM_BLEND0_FG_ALPHA	0x04
+#define LM_BLEND0_BG_ALPHA	0x08
+
+static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->hwversion = m->hwversion;
+			return &m->mixer[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @c:     mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+
+	if (WARN_ON(stage == SDE_STAGE_BASE))
+		return -EINVAL;
+
+	if ((stage - SDE_STAGE_0) <= sblk->maxblendstages)
+		return sblk->blendstage_base[stage];
+	else
+		return -EINVAL;
+}
+
+static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
+		struct sde_hw_mixer_cfg *mixer)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 outsize;
+	u32 opmode;
+
+	opmode = SDE_REG_READ(c, LM_OP_MODE);
+
+	outsize = mixer->out_height << 16 | mixer->out_width;
+	SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+	/* SPLIT_LEFT_RIGHT: was (right_mixer & 1 << 31), i.e. always 0 */
+	opmode = (opmode & ~BIT(31)) | ((mixer->right_mixer & 0x1) << 31);
+	SDE_REG_WRITE(c, LM_OP_MODE, opmode);
+}
+
+static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
+	struct sde_mdss_color *color,
+	u8 border_en)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	if (border_en) {
+		SDE_REG_WRITE(c, LM_BORDER_COLOR_0,
+			(color->color_0 & 0xFFF) |
+			((color->color_1 
& 0xFFF) << 0x10));
+		SDE_REG_WRITE(c, LM_BORDER_COLOR_1,
+			(color->color_2 & 0xFFF) |
+			((color->color_3 & 0xFFF) << 0x10));
+	}
+}
+
+static void sde_hw_lm_setup_blendcfg(struct sde_hw_mixer *ctx,
+	int stage,
+	struct sde_hw_blend_cfg *blend)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 blend_op;
+	struct sde_hw_alpha_cfg *fg, *bg;
+	int stage_off;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	fg = &(blend->fg);
+	bg = &(blend->bg);
+
+	/* fg */
+	blend_op = (fg->alpha_sel & 3);
+	blend_op |= (fg->inv_alpha_sel & 1) << 2;
+	blend_op |= (fg->mod_alpha & 1) << 3;
+	blend_op |= (fg->inv_mode_alpha & 1) << 4;
+
+	/* bg: bg fields live at bits 8-12, must not reuse fg shifts */
+	blend_op |= (bg->alpha_sel & 3) << 8;
+	blend_op |= (bg->inv_alpha_sel & 1) << 10;
+	blend_op |= (bg->mod_alpha & 1) << 11;
+	blend_op |= (bg->inv_mode_alpha & 1) << 12;
+
+	SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off,
+		fg->const_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off,
+		bg->const_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
+	struct sde_hw_color3_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int maxblendstages = ctx->cap->sblk->maxblendstages;
+	int i;
+	int op_mode;
+
+	/* read the existing op_mode configuration */
+	op_mode = SDE_REG_READ(c, LM_OP_MODE);
+
+	for (i = 0; i < maxblendstages; i++)
+		op_mode |= ((cfg->keep_fg[i] & 0x1) << i);
+
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void sde_hw_lm_gammacorrection(struct sde_hw_mixer *mixer,
+		void *cfg)
+{
+}
+
+static void _setup_mixer_ops(struct sde_hw_lm_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_mixer_out = sde_hw_lm_setup_out;
+	ops->setup_blend_config = sde_hw_lm_setup_blendcfg;
+	ops->setup_alpha_out = sde_hw_lm_setup_color3;
+	ops->setup_border_color = sde_hw_lm_setup_border_color;
+	ops->setup_gammcorrection = sde_hw_lm_gammacorrection;
+}
+
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+		void __iomem *addr,
+		
struct sde_mdss_cfg *m) +{ + struct sde_hw_mixer *c; + struct sde_lm_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _lm_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + c->idx = idx; + c->cap = cfg; + _setup_mixer_ops(&c->ops, c->cap->features); + + /* + * Perform any default initialization for the sspp blocks + */ + return c; +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h new file mode 100644 index 000000000000..8129b29c4932 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h @@ -0,0 +1,96 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SDE_HW_LM_H +#define _SDE_HW_LM_H + +#include "sde_hw_mdss.h" +#include "sde_hw_mdp_util.h" + +struct sde_hw_mixer; + +struct sde_hw_mixer_cfg { + u32 out_width; + u32 out_height; + bool right_mixer; + int flags; +}; + +struct sde_hw_color3_cfg { + u8 keep_fg[SDE_STAGE_MAX]; +}; + +/** + * + * struct sde_hw_lm_ops : Interface to the mixer Hw driver functions + * Assumption is these functions will be called after clocks are enabled + */ +struct sde_hw_lm_ops { + /* + * Sets up mixer output width and height + * and border color if enabled + */ + void (*setup_mixer_out)(struct sde_hw_mixer *ctx, + struct sde_hw_mixer_cfg *cfg); + + /* + * Alpha blending configuration + * for the specified stage + */ + void (*setup_blend_config)(struct sde_hw_mixer *ctx, + int stage, + struct sde_hw_blend_cfg *blend); + + /* + * Alpha color component selection from either fg or bg + */ + void (*setup_alpha_out)(struct sde_hw_mixer *ctx, + struct sde_hw_color3_cfg *cfg); + + /** + * setup_border_color : enable/disable border color + */ + void (*setup_border_color)(struct sde_hw_mixer *ctx, + struct sde_mdss_color *color, + u8 border_en); + + void (*setup_gammcorrection)(struct sde_hw_mixer *mixer, + void *cfg); + +}; + +struct sde_hw_mixer { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* lm */ + enum sde_lm idx; + const struct sde_lm_cfg *cap; + const struct sde_mdp_cfg *mdp; + const struct sde_ctl_cfg *ctl; + + /* ops */ + struct sde_hw_lm_ops ops; +}; + +/** + * sde_hw_lm_init(): Initializes the mixer hw driver object. + * should be called once before accessing every mixer. 
+ * @idx: mixer index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_LM_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c new file mode 100644 index 000000000000..56115153bf2e --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c @@ -0,0 +1,338 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "sde_hwio.h" +#include "sde_hw_mdp_ctl.h" + +#define CTL_LAYER(lm) \ + (((lm) == 5) ? 
(0x024) : ((lm) * 0x004)) +#define CTL_LAYER_EXT(lm) \ + (0x40 + ((lm) * 0x004)) +#define CTL_TOP 0x014 +#define CTL_FLUSH 0x018 +#define CTL_START 0x01C +#define CTL_PACK_3D 0x020 +#define CTL_SW_RESET 0x030 +#define CTL_LAYER_EXTN_OFFSET 0x40 + +#define SDE_REG_RESET_TIMEOUT_COUNT 20 + +static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->ctl_count; i++) { + if (ctl == m->ctl[i].id) { + b->base_off = addr; + b->blk_off = m->ctl[i].base; + b->hwversion = m->hwversion; + return &m->ctl[i]; + } + } + return ERR_PTR(-ENOMEM); +} + +static int _mixer_stages(const struct sde_lm_cfg *mixer, int count, + enum sde_lm lm) +{ + int i; + int stages = -EINVAL; + + for (i = 0; i < count; i++) { + if (lm == mixer[i].id) { + stages = mixer[i].sblk->maxblendstages; + break; + } + } + + return stages; +} + +static inline void sde_hw_ctl_setup_flush(struct sde_hw_ctl *ctx, u32 flushbits, + u8 force_start) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + + SDE_REG_WRITE(c, CTL_FLUSH, flushbits); + + if (force_start) + SDE_REG_WRITE(c, CTL_START, 0x1); +} + +static inline int sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx, + u32 *flushbits, enum sde_sspp sspp) +{ + switch (sspp) { + case SSPP_VIG0: + *flushbits |= BIT(0); + break; + case SSPP_VIG1: + *flushbits |= BIT(1); + break; + case SSPP_VIG2: + *flushbits |= BIT(2); + break; + case SSPP_VIG3: + *flushbits |= BIT(18); + break; + case SSPP_RGB0: + *flushbits |= BIT(3); + break; + case SSPP_RGB1: + *flushbits |= BIT(4); + break; + case SSPP_RGB2: + *flushbits |= BIT(5); + break; + case SSPP_RGB3: + *flushbits |= BIT(19); + break; + case SSPP_DMA0: + *flushbits |= BIT(11); + break; + case SSPP_DMA1: + *flushbits |= BIT(12); + break; + case SSPP_CURSOR0: + *flushbits |= BIT(22); + break; + case SSPP_CURSOR1: + *flushbits |= BIT(23); + break; + default: + return -EINVAL; + } + return 0; +} + +static inline int 
sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx, + u32 *flushbits, enum sde_lm lm) +{ + switch (lm) { + case LM_0: + *flushbits |= BIT(6); + break; + case LM_1: + *flushbits |= BIT(7); + break; + case LM_2: + *flushbits |= BIT(8); + break; + case LM_3: + *flushbits |= BIT(9); + break; + case LM_4: + *flushbits |= BIT(10); + break; + case LM_5: + *flushbits |= BIT(20); + break; + default: + return -EINVAL; + } + *flushbits |= BIT(17); /* CTL */ + return 0; +} + +static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx, + u32 *flushbits, enum sde_dspp dspp) +{ + switch (dspp) { + case DSPP_0: + *flushbits |= BIT(13); + break; + case DSPP_1: + *flushbits |= BIT(14); + break; + default: + return -EINVAL; + } + return 0; +} + +static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx, + u32 *flushbits, enum sde_intf intf) +{ + switch (intf) { + case INTF_0: + *flushbits |= BIT(31); + break; + case INTF_1: + *flushbits |= BIT(30); + break; + case INTF_2: + *flushbits |= BIT(29); + break; + case INTF_3: + *flushbits |= BIT(28); + break; + default: + return -EINVAL; + } + return 0; +} + +static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx, + u32 *flushbits, enum sde_cdm cdm) +{ + switch (cdm) { + case CDM_0: + *flushbits |= BIT(26); + break; + default: + return -EINVAL; + } + return 0; +} + +static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + int count = SDE_REG_RESET_TIMEOUT_COUNT; + int reset; + + SDE_REG_WRITE(c, CTL_SW_RESET, 0x1); + + for (; count > 0; count--) { + /* insert small delay to avoid spinning the cpu while waiting */ + usleep_range(20, 50); + reset = SDE_REG_READ(c, CTL_SW_RESET); + if (reset == 0) + return 0; + } + + return -EINVAL; +} + +static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx, + enum sde_lm lm, + struct sde_hw_stage_cfg *cfg) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 mixercfg, mixercfg_ext; + int i, j; + u8 stages; + int 
pipes_per_stage; + + stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm); + if (WARN_ON(stages < 0)) + return; + + if (test_bit(SDE_MIXER_SOURCESPLIT, + &ctx->mixer_hw_caps->features)) + pipes_per_stage = PIPES_PER_STAGE; + else + pipes_per_stage = 1; + + mixercfg = cfg->border_enable >> 24; /* BORDER_OUT */ +; + for (i = 0; i <= stages; i++) { + for (j = 0; j < pipes_per_stage; j++) { + switch (cfg->stage[i][j]) { + case SSPP_VIG0: + mixercfg |= (i + 1) << 0; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 0; + break; + case SSPP_VIG1: + mixercfg |= (i + 1) << 3; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 2; + break; + case SSPP_VIG2: + mixercfg |= (i + 1) << 6; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 4; + break; + case SSPP_VIG3: + mixercfg |= (i + 1) << 26; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 4; + break; + case SSPP_RGB0: + mixercfg |= (i + 1) << 9; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 8; + break; + case SSPP_RGB1: + mixercfg |= (i + 1) << 12; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 10; + break; + case SSPP_RGB2: + mixercfg |= (i + 1) << 15; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 12; + break; + case SSPP_RGB3: + mixercfg |= (i + 1) << 29; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 14; + break; + case SSPP_DMA0: + mixercfg |= (i + 1) << 0; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 0; + break; + case SSPP_DMA1: + mixercfg |= (i + 1) << 0; + mixercfg_ext |= ((i > SDE_STAGE_5) ? 
1:0) << 0; + break; + case SSPP_CURSOR0: + mixercfg_ext |= (i + 1) << 20; + break; + case SSPP_CURSOR1: + mixercfg_ext |= (i + 1) << 26; + break; + default: + break; + } + } + } + + SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg); + SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext); +} + +static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, + unsigned long cap) +{ + ops->setup_flush = sde_hw_ctl_setup_flush; + ops->reset = sde_hw_ctl_reset_control; + ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp; + ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer; + ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp; + ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf; + ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm; + ops->setup_blendstage = sde_hw_ctl_setup_blendstage; +}; + +struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_ctl *c; + struct sde_ctl_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _ctl_offset(idx, m, addr, &c->hw); + if (cfg) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + c->caps = cfg; + _setup_ctl_ops(&c->ops, c->caps->features); + c->idx = idx; + c->mixer_count = m->mixer_count; + c->mixer_hw_caps = m->mixer; + + return c; +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h new file mode 100644 index 000000000000..14a519f2a725 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h @@ -0,0 +1,99 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_MDP_CTL_H +#define _SDE_HW_MDP_CTL_H + +#include "sde_hw_mdss.h" +#include "sde_hw_catalog.h" + +struct sde_hw_ctl; +/** + * struct sde_hw_stage_cfg - blending stage cfg + * @stage + * @border_enable + */ +struct sde_hw_stage_cfg { + enum sde_sspp stage[SDE_STAGE_MAX][PIPES_PER_STAGE]; + u8 border_enable; +}; + +/** + * struct sde_hw_ctl_ops - Interface to the wb Hw driver functions + * Assumption is these functions will be called after clocks are enabled + */ +struct sde_hw_ctl_ops { + void (*setup_flush)(struct sde_hw_ctl *ctx, + u32 flushbits, + u8 force_start); + + int (*reset)(struct sde_hw_ctl *c); + + int (*get_bitmask_sspp)(struct sde_hw_ctl *ctx, + u32 *flushbits, + enum sde_sspp blk); + + int (*get_bitmask_mixer)(struct sde_hw_ctl *ctx, + u32 *flushbits, + enum sde_lm blk); + + int (*get_bitmask_dspp)(struct sde_hw_ctl *ctx, + u32 *flushbits, + enum sde_dspp blk); + + int (*get_bitmask_intf)(struct sde_hw_ctl *ctx, + u32 *flushbits, + enum sde_intf blk); + + int (*get_bitmask_cdm)(struct sde_hw_ctl *ctx, + u32 *flushbits, + enum sde_cdm blk); + + void (*setup_blendstage)(struct sde_hw_ctl *ctx, + enum sde_lm lm, + struct sde_hw_stage_cfg *cfg); +}; + +/** + * struct sde_hw_ctl : CTL PATH driver object + * @struct sde_hw_blk_reg_map *hw; + * @idx + * @ctl_hw_caps + * @mixer_hw_caps + * @ops + */ +struct sde_hw_ctl { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* ctl path */ + int idx; + const struct sde_ctl_cfg *caps; + int mixer_count; + const struct sde_lm_cfg *mixer_hw_caps; + + /* ops */ + struct sde_hw_ctl_ops ops; +}; + +/** + * sde_hw_ctl_init(): Initializes the ctl_path hw driver object. + * should be called before accessing every mixer. 
+ * @idx: ctl_path index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_MDP_CTL_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c new file mode 100644 index 000000000000..8162efc08099 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c @@ -0,0 +1,73 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include "sde_hw_mdp_util.h"
+
+/* raw register write: relaxed, no barrier -- callers order accesses */
+void sde_hw_reg_write(void __iomem *base, u32 blk_off, u32 reg_off, u32 val)
+{
+	writel_relaxed(val, base + blk_off + reg_off);
+}
+
+/* raw register read: relaxed, no barrier */
+u32 sde_hw_reg_read(void __iomem *base, u32 blk_off, u32 reg_off)
+{
+	return readl_relaxed(base + blk_off + reg_off);
+}
+
+/*
+ * sde_hw_csc_setup() - program the 3x3 CSC matrix, pre/post clamp and
+ * pre/post bias registers starting at @csc_reg_off.  Two 9-bit matrix
+ * coefficients are packed per 32-bit register: even-index coefficient
+ * in the low halfword, odd-index coefficient shifted into the high
+ * halfword.
+ */
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct sde_csc_cfg *data)
+{
+	u32 val;
+
+	/* Matrix coeff */
+	val = (data->csc_mv[0] & 0x1FF) |
+		((data->csc_mv[1] & 0x1FF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off, val);
+	val = (data->csc_mv[2] & 0x1FF) |
+		((data->csc_mv[3] & 0x1FF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
+	/*
+	 * coeff 5 must be shifted into the high halfword like the other
+	 * odd coefficients; the original ">> 16" discarded it entirely.
+	 */
+	val = (data->csc_mv[4] & 0x1FF) |
+		((data->csc_mv[5] & 0x1FF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
+	val = (data->csc_mv[6] & 0x1FF) |
+		((data->csc_mv[7] & 0x1FF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
+	val = data->csc_mv[8] & 0x1FF;
+	SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+	/* Pre clamp */
+	val = (data->csc_pre_lv[0] << 8) | data->csc_pre_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
+	val = (data->csc_pre_lv[2] << 8) | data->csc_pre_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
+	val = (data->csc_pre_lv[4] << 8) | data->csc_pre_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+	/* Post clamp */
+	val = (data->csc_post_lv[0] << 8) | data->csc_post_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
+	val = (data->csc_post_lv[2] << 8) | data->csc_post_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
+	val = (data->csc_post_lv[4] << 8) | data->csc_post_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+	/* Pre-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+	/* Post-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]); +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h new file mode 100644 index 000000000000..2a7af8374fb1 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_MDP_UTIL_H +#define _SDE_HW_MDP_UTIL_H + +#include +#include +#include "sde_hw_mdss.h" + +/* + * This is the common struct maintained by each sub block + * for mapping the register offsets in this block to the + * absoulute IO address + * @base_off: mdp register mapped offset + * @blk_off: pipe offset relative to mdss offset + * @length length of register block offset + * @hwversion mdss hw version number + */ +struct sde_hw_blk_reg_map { + void __iomem *base_off; + u32 blk_off; + u32 length; + u32 hwversion; +}; + +void sde_hw_reg_write(void __iomem *base, u32 blk_offset, u32 reg, u32 val); + +u32 sde_hw_reg_read(void __iomem *base, u32 blk_offset, u32 reg); + +static inline void SDE_REG_WRITE(struct sde_hw_blk_reg_map *c, u32 reg_off, + u32 val) +{ + sde_hw_reg_write(c->base_off, c->blk_off, reg_off, val); +} + +static inline int SDE_REG_READ(struct sde_hw_blk_reg_map *c, u32 reg_off) +{ + return sde_hw_reg_read(c->base_off, c->blk_off, reg_off); +} + +void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c, + u32 csc_reg_off, + struct sde_csc_cfg *data); + +#endif /* _SDE_HW_MDP_UTIL_H */ + diff --git 
a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h new file mode 100644 index 000000000000..ce5a90bc2f55 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -0,0 +1,320 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_MDSS_H +#define _SDE_HW_MDSS_H + +#include +#include + +#define SDE_CSC_MATRIX_COEFF_SIZE 9 +#define SDE_CSC_CLAMP_SIZE 6 +#define SDE_CSC_BIAS_SIZE 3 + +#define SDE_MAX_PLANES 4 +#define PIPES_PER_STAGE 2 +#define VALID_ROT_WB_FORMAT BIT(0) + +enum sde_mdp { + MDP_TOP = 0x1, + MDP_MAX, +}; + +enum sde_sspp { + SSPP_NONE, + SSPP_VIG0, + SSPP_VIG1, + SSPP_VIG2, + SSPP_VIG3, + SSPP_RGB0, + SSPP_RGB1, + SSPP_RGB2, + SSPP_RGB3, + SSPP_DMA0, + SSPP_DMA1, + SSPP_DMA2, + SSPP_DMA3, + SSPP_CURSOR0, + SSPP_CURSOR1, + SSPP_MAX +}; + +enum sde_sspp_type { + SSPP_TYPE_VIG, + SSPP_TYPE_RGB, + SSPP_TYPE_DMA, + SSPP_TYPE_CURSOR, + SSPP_TYPE_MAX +}; + +enum sde_lm { + LM_0 = 0, + LM_1, + LM_2, + LM_3, + LM_4, + LM_5, + LM_6, + LM_MAX +}; + +enum sde_stage { + SDE_STAGE_BASE = 0, + SDE_STAGE_0, + SDE_STAGE_1, + SDE_STAGE_2, + SDE_STAGE_3, + SDE_STAGE_4, + SDE_STAGE_5, + SDE_STAGE_6, + SDE_STAGE_MAX +}; +enum sde_dspp { + DSPP_0 = 0, + DSPP_1, + DSPP_2, + DSPP_3, + DSPP_MAX +}; + +enum sde_ctl { + CTL_0 = 0, + CTL_1, + CTL_2, + CTL_3, + CTL_4, + CTL_MAX +}; + +enum sde_cdm { + CDM_0 = 0, + CDM_1, + CDM_MAX +}; + +enum sde_pingpong { + PINGPONG_0 = 0, + PINGPONG_1, + PINGPONG_2, + PINGPONG_3, + PINGPONG_4, + PINGPONG_MAX +}; + 
+enum sde_intf { + INTF_0 = 0, + INTF_1, + INTF_2, + INTF_3, + INTF_4, + INTF_5, + INTF_6, + INTF_MAX +}; + +enum sde_intf_type { + INTF_NONE = 0x0, + INTF_DSI = 0x1, + INTF_HDMI = 0x3, + INTF_LCDC = 0x5, + INTF_EDP = 0x9, + INTF_TYPE_MAX +}; + +enum sde_intf_mode { + INTF_MODE_NONE = 0, + INTF_MODE_CMD, + INTF_MODE_VIDEO, + INTF_MODE_WB_BLOCK, + INTF_MODE_WB_LINE, + INTF_MODE_MAX +}; + +enum sde_wb { + WB_0 = 1, + WB_1, + WB_2, + WB_3, + WB_MAX +}; + +enum sde_ad { + AD_0 = 0x1, + AD_1, + AD_MAX +}; + +/** + * MDP HW,Component order color map + */ +enum { + C0_G_Y = 0, + C1_B_Cb = 1, + C2_R_Cr = 2, + C3_ALPHA = 3 +}; + +/** + * enum sde_mdp_plane_type - defines how the color component pixel packing + * @SDE_MDP_PLANE_INTERLEAVED : Color components in single plane + * @SDE_MDP_PLANE_PLANAR : Color component in separate planes + * @SDE_MDP_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate + * plane + */ +enum sde_mdp_plane_type { + SDE_MDP_PLANE_INTERLEAVED, + SDE_MDP_PLANE_PLANAR, + SDE_MDP_PLANE_PSEUDO_PLANAR, +}; + +/** + * enum sde_mdp_chroma_samp_type - chroma sub-samplng type + * @SDE_MDP_CHROMA_RGB : no chroma subsampling + * @SDE_MDP_CHROMA_H2V1 : chroma pixels are horizontally subsampled + * @SDE_MDP_CHROMA_H1V2 : chroma pixels are vertically subsampled + * @SDE_MDP_CHROMA_420 : 420 subsampling + */ +enum sde_mdp_chroma_samp_type { + SDE_MDP_CHROMA_RGB, + SDE_MDP_CHROMA_H2V1, + SDE_MDP_CHROMA_H1V2, + SDE_MDP_CHROMA_420 +}; + +/** + * enum sde_mdp_fetch_type - format id, used by drm-driver only to map drm forcc + * Defines How MDP HW fetches data + * @SDE_MDP_FETCH_LINEAR : fetch is line by line + * @SDE_MDP_FETCH_TILE : fetches data in Z order from a tile + * @SDE_MDP_FETCH_UBWC : fetch and decompress data + */ +enum sde_mdp_fetch_type { + SDE_MDP_FETCH_LINEAR, + SDE_MDP_FETCH_TILE, + SDE_MDP_FETCH_UBWC +}; + +/** + * Value of enum chosen to fit the number of bits + * expected by the HW programming. 
+ */ +enum { + COLOR_4BIT, + COLOR_5BIT, + COLOR_6BIT, + COLOR_8BIT, + COLOR_ALPHA_1BIT = 0, + COLOR_ALPHA_4BIT = 1, +}; + +enum sde_alpha_blend_type { + ALPHA_FG_CONST = 0, + ALPHA_BG_CONST, + ALPHA_FG_PIXEL, + ALPHA_BG_PIXEL, + ALPHA_MAX +}; + +struct addr_info { + u32 plane[SDE_MAX_PLANES]; +}; + +/** + * struct sde_mdp_format_params - defines the format configuration which + * allows MDP HW to correctly fetch and decode the format + * @format : format id, used by drm-driver only to map drm forcc + * @flag + * @chroma_sample + * @fetch_planes + * @unpack_align_msb + * @unpack_tight + * @unpack_count + * @bpp + * @alpha_enable + * @fetch_mode + * @bits + * @element + */ +struct sde_mdp_format_params { + u32 format; + enum sde_mdp_plane_type fetch_planes; + u8 element[SDE_MAX_PLANES]; + u8 bits[SDE_MAX_PLANES]; + enum sde_mdp_chroma_samp_type chroma_sample; + u8 unpack_align_msb; /* 0 to LSB, 1 to MSB */ + u8 unpack_tight; /* 0 for loose, 1 for tight */ + u8 unpack_count; /* 0 = 1 component, 1 = 2 component ... 
*/ + u8 bpp; /* Bytes per pixel */ + u8 alpha_enable; /* source has alpha */ + enum sde_mdp_fetch_type fetch_mode; + u8 is_yuv; + u32 flag; +}; + +/** + * struct sde_hw_source_info - format information of the source pixel data + * @format : pixel format parameters + * @width : image width @height: image height + * @num_planes : number of planes including the meta data planes for the + * compressed formats @plane: per plane information + */ +struct sde_hw_source_info { + struct sde_mdp_format_params *format; + u32 width; + u32 height; + u32 num_planes; + u32 ystride[SDE_MAX_PLANES]; +}; + +struct sde_rect { + u16 x; + u16 y; + u16 w; + u16 h; +}; + +struct sde_hw_alpha_cfg { + u32 const_alpha; + enum sde_alpha_blend_type alpha_sel; + u8 inv_alpha_sel; + u8 mod_alpha; + u8 inv_mode_alpha; +}; + +struct sde_hw_blend_cfg { + struct sde_hw_alpha_cfg fg; + struct sde_hw_alpha_cfg bg; +}; + +struct sde_csc_cfg { + uint32_t csc_mv[SDE_CSC_MATRIX_COEFF_SIZE]; + uint32_t csc_pre_bv[SDE_CSC_BIAS_SIZE]; + uint32_t csc_post_bv[SDE_CSC_BIAS_SIZE]; + uint32_t csc_pre_lv[SDE_CSC_CLAMP_SIZE]; + uint32_t csc_post_lv[SDE_CSC_CLAMP_SIZE]; +}; + +/** + * struct sde_mdss_color - mdss color description + * color 0 : green + * color 1 : blue + * color 2 : red + * color 3 : alpha + */ +struct sde_mdss_color { + u32 color_0; + u32 color_1; + u32 color_2; + u32 color_3; +}; + +#endif /* _SDE_HW_MDSS_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c new file mode 100644 index 000000000000..e6780033490d --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c @@ -0,0 +1,159 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sde_hw_mdss.h" +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_pingpong.h" + +#define PP_TEAR_CHECK_EN 0x000 +#define PP_SYNC_CONFIG_VSYNC 0x004 +#define PP_SYNC_CONFIG_HEIGHT 0x008 +#define PP_SYNC_WRCOUNT 0x00C +#define PP_VSYNC_INIT_VAL 0x010 +#define PP_INT_COUNT_VAL 0x014 +#define PP_SYNC_THRESH 0x018 +#define PP_START_POS 0x01C +#define PP_RD_PTR_IRQ 0x020 +#define PP_WR_PTR_IRQ 0x024 +#define PP_OUT_LINE_COUNT 0x028 +#define PP_LINE_COUNT 0x02C +#define PP_AUTOREFRESH_CONFIG 0x030 + +#define PP_FBC_MODE 0x034 +#define PP_FBC_BUDGET_CTL 0x038 +#define PP_FBC_LOSSY_MODE 0x03C +#define PP_DSC_MODE 0x0a0 +#define PP_DCE_DATA_IN_SWAP 0x0ac +#define PP_DCE_DATA_OUT_SWAP 0x0c8 + +static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->pingpong_count; i++) { + if (pp == m->pingpong[i].id) { + b->base_off = addr; + b->blk_off = m->pingpong[i].base; + b->hwversion = m->hwversion; + return &m->pingpong[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp, + struct sde_hw_tear_check *te) +{ + struct sde_hw_blk_reg_map *c = &pp->hw; + int cfg; + + cfg = BIT(19); /*VSYNC_COUNTER_EN */ + if (te->hw_vsync_mode) + cfg |= BIT(20); + + cfg |= te->vsync_count; + + SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg); + SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height); + SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val); + SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq); + SDE_REG_WRITE(c, PP_START_POS, te->start_pos); + SDE_REG_WRITE(c, PP_SYNC_THRESH, + ((te->sync_threshold_continue << 16) | + 
te->sync_threshold_start)); + SDE_REG_WRITE(c, PP_SYNC_WRCOUNT, + (te->start_pos + te->sync_threshold_start + 1)); + + return 0; +} + +int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp, + struct sde_hw_autorefresh *cfg) +{ + struct sde_hw_blk_reg_map *c = &pp->hw; + u32 refresh_cfg; + + if (cfg->enable) + refresh_cfg = BIT(31) | cfg->frame_count; + else + refresh_cfg = 0; + + SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG, + refresh_cfg); + + return 0; +} + +int sde_hw_pp_setup_dsc_compression(struct sde_hw_pingpong *pp, + struct sde_hw_dsc_cfg *cfg) +{ + return 0; +} +int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable) +{ + struct sde_hw_blk_reg_map *c = &pp->hw; + + SDE_REG_WRITE(c, PP_TEAR_CHECK_EN, enable); + return 0; +} + +int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp, + struct sde_hw_pp_vsync_info *info) +{ + struct sde_hw_blk_reg_map *c = &pp->hw; + + info->init_val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xffff; + info->vsync_count = SDE_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xffff; + info->line_count = SDE_REG_READ(c, PP_INT_COUNT_VAL) & 0xffff; + + return 0; +} + +static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops, + unsigned long cap) +{ + ops->setup_tearcheck = sde_hw_pp_setup_te_config; + ops->enable_tearcheck = sde_hw_pp_enable_te; + ops->get_vsync_info = sde_hw_pp_get_vsync_info; + ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config; + ops->setup_dsc = sde_hw_pp_setup_dsc_compression; +}; + +struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_pingpong *c; + struct sde_pingpong_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _pingpong_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + c->idx = idx; + c->pingpong_hw_cap = cfg; + _setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features); + + return c; +} + diff --git 
a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h new file mode 100644 index 000000000000..a2bf86fb98f7 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h @@ -0,0 +1,115 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_PINGPONG_H +#define _SDE_HW_PINGPONG_H + +struct sde_hw_pingpong; + +struct sde_hw_tear_check { + /* + * This is ratio of MDP VSYNC clk freq(Hz) to + * refresh rate divided by no of lines + */ + u32 vsync_count; + u32 sync_cfg_height; + u32 vsync_init_val; + u32 sync_threshold_start; + u32 sync_threshold_continue; + u32 start_pos; + u32 rd_ptr_irq; + u8 hw_vsync_mode; +}; + +struct sde_hw_autorefresh { + bool enable; + u32 frame_count; +}; + +struct sde_hw_pp_vsync_info { + u32 init_val; /* value of rd pointer at vsync edge */ + u32 vsync_count; /* mdp clocks to complete one line */ + u32 line_count; /* current line count */ +}; + +struct sde_hw_dsc_cfg { + u8 enable; +}; + +/** + * + * struct sde_hw_pingpong_ops : Interface to the pingpong Hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @setup_tearcheck : + * @enable_tearcheck : + * @get_vsync_info : + * @setup_autorefresh : + * #setup_dsc : + */ +struct sde_hw_pingpong_ops { + /** + * enables vysnc generation and sets up init value of + * read pointer and programs the tear check cofiguration + */ + int (*setup_tearcheck)(struct sde_hw_pingpong *pp, + struct sde_hw_tear_check *cfg); + + /** + * 
enables tear check block + */ + int (*enable_tearcheck)(struct sde_hw_pingpong *pp, + bool enable); + + /** + * provides the programmed and current + * line_count + */ + int (*get_vsync_info)(struct sde_hw_pingpong *pp, + struct sde_hw_pp_vsync_info *info); + + /** + * configure and enable the autorefresh config + */ + int (*setup_autorefresh)(struct sde_hw_pingpong *pp, + struct sde_hw_autorefresh *cfg); + + /** + * Program the dsc compression block + */ + int (*setup_dsc)(struct sde_hw_pingpong *pp, + struct sde_hw_dsc_cfg *cfg); +}; + +struct sde_hw_pingpong { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* pingpong */ + enum sde_pingpong idx; + const struct sde_pingpong_cfg *pingpong_hw_cap; + + /* ops */ + struct sde_hw_pingpong_ops ops; +}; + +/** + * sde_hw_pingpong_init(): Initializes the pingpong driver for the passed + * pingpong idx. + * @idx: pingpong index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_PINGPONG_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c new file mode 100644 index 000000000000..c54ad43f5001 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -0,0 +1,591 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_hw_sspp.h" +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_lm.h" + +#define SDE_MDP_FETCH_CONFIG_RESET_VALUE 0x00000087 + +/* SDE_SSPP_SRC */ +#define SSPP_SRC_SIZE 0x00 +#define SSPP_SRC_XY 0x08 +#define SSPP_OUT_SIZE 0x0c +#define SSPP_OUT_XY 0x10 +#define SSPP_SRC0_ADDR 0x14 +#define SSPP_SRC1_ADDR 0x18 +#define SSPP_SRC2_ADDR 0x1C +#define SSPP_SRC3_ADDR 0x20 +#define SSPP_SRC_YSTRIDE0 0x24 +#define SSPP_SRC_YSTRIDE1 0x28 +#define SSPP_SRC_FORMAT 0x30 +#define SSPP_SRC_UNPACK_PATTERN 0x34 +#define SSPP_SRC_OP_MODE 0x38 +#define MDSS_MDP_OP_DEINTERLACE BIT(22) + +#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23) +#define MDSS_MDP_OP_IGC_ROM_1 BIT(18) +#define MDSS_MDP_OP_IGC_ROM_0 BIT(17) +#define MDSS_MDP_OP_IGC_EN BIT(16) +#define MDSS_MDP_OP_FLIP_UD BIT(14) +#define MDSS_MDP_OP_FLIP_LR BIT(13) +#define MDSS_MDP_OP_BWC_EN BIT(0) +#define MDSS_MDP_OP_PE_OVERRIDE BIT(31) +#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1) +#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1) +#define MDSS_MDP_OP_BWC_Q_MED (2 << 1) + +#define SSPP_SRC_CONSTANT_COLOR 0x3c +#define SSPP_FETCH_CONFIG 0x048 +#define SSPP_DANGER_LUT 0x60 +#define SSPP_SAFE_LUT 0x64 +#define SSPP_CREQ_LUT 0x68 +#define SSPP_DECIMATION_CONFIG 0xB4 +#define SSPP_SRC_ADDR_SW_STATUS 0x70 +#define SSPP_SW_PIX_EXT_C0_LR 0x100 +#define SSPP_SW_PIX_EXT_C0_TB 0x104 +#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108 +#define SSPP_SW_PIX_EXT_C1C2_LR 0x110 +#define SSPP_SW_PIX_EXT_C1C2_TB 0x114 +#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118 +#define SSPP_SW_PIX_EXT_C3_LR 0x120 +#define SSPP_SW_PIX_EXT_C3_TB 0x124 +#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128 +#define SSPP_UBWC_ERROR_STATUS 0x138 +#define SSPP_VIG_OP_MODE 0x200 + +/* SDE_SSPP_SCALAR_QSEED2 */ +#define SCALE_CONFIG 0x04 +#define COMP0_3_PHASE_STEP_X 0x10 +#define COMP0_3_PHASE_STEP_Y 0x14 +#define COMP1_2_PHASE_STEP_X 0x18 +#define COMP1_2_PHASE_STEP_Y 0x1c +#define COMP0_3_INIT_PHASE_X 0x20 +#define COMP0_3_INIT_PHASE_Y 0x24 +#define 
COMP1_2_INIT_PHASE_X 0x28 +#define COMP1_2_INIT_PHASE_Y 0x2C +#define VIG_0_QSEED2_SHARP 0x30 + +#define VIG_0_CSC_1_MATRIX_COEFF_0 0x20 +#define VIG_0_CSC_1_COMP_0_PRE_CLAMP 0x34 +#define VIG_0_CSC_1_COMP_0_POST_CLAMP 0x40 +#define VIG_0_CSC_1_COMP_0_PRE_BIAS 0x4C +#define VIG_0_CSC_1_COMP_0_POST_BIAS 0x60 + +/* + * MDP Solid fill configuration + * argb8888 + */ +#define SSPP_SOLID_FILL 0x4037ff + +enum { + CSC = 0x1, + PA, + HIST, + SKIN_COL, + FOIL, + SKY_COL, + MEM_PROT_HUE, + MEM_PROT_SAT, + MEM_PROT_VAL, + MEM_PROT_CONT, + MEM_PROT_BLEND, + PA_SAT_ADJ +}; + +static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx, + int s_id, + u32 *idx) +{ + int rc = 0; + const struct sde_sspp_sub_blks *sblk = ctx->cap->sblk; + + switch (s_id) { + case SDE_SSPP_SRC: + *idx = sblk->src_blk.base; + break; + case SDE_SSPP_SCALAR_QSEED2: + case SDE_SSPP_SCALAR_QSEED3: + case SDE_SSPP_SCALAR_RGB: + *idx = sblk->scalar_blk.base; + break; + case SDE_SSPP_CSC: + *idx = sblk->csc_blk.base; + break; + case SDE_SSPP_PA_V1: + *idx = sblk->pa_blk.base; + break; + case SDE_SSPP_HIST_V1: + *idx = sblk->hist_lut.base; + break; + case SDE_SSPP_PCC: + *idx = sblk->pcc_blk.base; + break; + default: + rc = -EINVAL; + pr_err("Unsupported SSPP sub-blk for this hw\n"); + } + + return rc; +} + +static void _sspp_setup_opmode(struct sde_hw_pipe *ctx, + u32 op, u8 en) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 idx; + u32 opmode; + + if (ctx->cap->features == SDE_SSPP_PA_V1) { + + if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) + return; + + opmode = SDE_REG_READ(c, SSPP_VIG_OP_MODE + idx); + + /* ops */ + switch (op) { + case CSC: + if (en) + /* CSC_1_EN and CSC_SRC_DATA_FORMAT*/ + opmode |= BIT(18) | BIT(17); + else + opmode &= ~BIT(17); + break; + default: + pr_err(" Unsupported operation\n"); + } + SDE_REG_WRITE(c, SSPP_VIG_OP_MODE + idx, opmode); + } +} +/** + * Setup source pixel format, flip, + */ +static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx, + struct 
sde_hw_pipe_cfg *cfg,
+		u32 flags)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	struct sde_mdp_format_params *fmt;
+	u32 chroma_samp, unpack, src_format;
+	u32 secure = 0;
+	u32 opmode = 0;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) /* non-zero rc = lookup failed */
+		return;
+
+	opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
+
+	/* format info */
+	fmt = cfg->src.format;
+	if (WARN_ON(!fmt))
+		return;
+
+	if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION)
+		secure = 0xF;
+
+	if (flags & SDE_SSPP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (flags & SDE_SSPP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	chroma_samp = fmt->chroma_sample;
+	if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
+		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
+			chroma_samp = SDE_MDP_CHROMA_H1V2;
+		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
+			chroma_samp = SDE_MDP_CHROMA_H2V1;
+	}
+
+	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C0_G_Y] << 0);
+
+	if (flags & SDE_SSPP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable &&
+		fmt->fetch_planes != SDE_MDP_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+		(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+		(fmt->unpack_tight << 17) |
+		(fmt->unpack_align_msb << 18) |
+		((fmt->bpp - 1) << 9);
+
+	if (fmt->fetch_mode != SDE_MDP_FETCH_LINEAR) {
+		opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
+			SDE_MDP_FETCH_CONFIG_RESET_VALUE |
+			ctx->highest_bank_bit << 18);
+	}
+
+	/* if this is YUV pixel format, enable CSC */
+	if (fmt->is_yuv) {
+		_sspp_setup_opmode(ctx, CSC, 0x0); /* NOTE(review): CSC disabled for YUV but enabled below for RGB, contradicting this comment - verify polarity against HW docs */
+	} else {
+		src_format |= BIT(15);
+		_sspp_setup_opmode(ctx, CSC, 0x1);
+	}
+
+	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+	SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
+	
SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack); + SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode); + SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure); + + /* clear previous UBWC error */ + SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31)); +} + +static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx, + struct sde_hw_pipe_cfg *cfg, + struct sde_hw_pixel_ext *pe_ext) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u8 color; + u32 lr_pe[4], tb_pe[4], tot_req_pixels[4]; + const u32 bytemask = 0xffff; + const u8 shortmask = 0xff; + u32 idx; + + if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) + return; + + /* program SW pixel extension override for all pipes*/ + for (color = 0; color < 4; color++) { + /* color 2 has the same set of registers as color 1 */ + if (color == 2) + continue; + + lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)| + ((pe_ext->right_rpt[color] & bytemask) << 16)| + ((pe_ext->left_ftch[color] & bytemask) << 8)| + (pe_ext->left_rpt[color] & bytemask); + + tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)| + ((pe_ext->btm_rpt[color] & bytemask) << 16)| + ((pe_ext->top_ftch[color] & bytemask) << 8)| + (pe_ext->top_rpt[color] & bytemask); + + tot_req_pixels[color] = (((cfg->src.height + + pe_ext->num_ext_pxls_top[color] + + pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) | + ((pe_ext->roi_w[color] + + pe_ext->num_ext_pxls_left[color] + + pe_ext->num_ext_pxls_right[color]) & shortmask); + } + + /* color 0 */ + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]); + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]); + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx, + tot_req_pixels[0]); + + /* color 1 and color 2 */ + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]); + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]); + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx, + tot_req_pixels[1]); + + /* color 3 */ + SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, 
lr_pe[3]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]); /* TB register takes the top/bottom word, not lr_pe */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+			tot_req_pixels[3]);
+}
+
+static void sde_hw_sspp_setup_scalar(struct sde_hw_pipe *ctx,
+		struct sde_hw_pixel_ext *pe_ext)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int scale_config;
+	const u8 mask = 0x3;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALAR_QSEED2, &idx))
+		return;
+
+	scale_config = BIT(0) | BIT(1);
+	/* RGB/YUV config */
+	scale_config |= (pe_ext->horz_filter[0] & mask) << 8;
+	scale_config |= (pe_ext->vert_filter[0] & mask) << 10;
+	/* Alpha config */
+	scale_config |= (pe_ext->horz_filter[3] & mask) << 16;
+	scale_config |= (pe_ext->vert_filter[3] & mask) << 18;
+
+	SDE_REG_WRITE(c, SCALE_CONFIG + idx, scale_config);
+	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
+		pe_ext->init_phase_x[0]);
+	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
+		pe_ext->init_phase_y[0]);
+	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
+		pe_ext->phase_step_x[0]);
+	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
+		pe_ext->phase_step_y[0]);
+
+	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
+		pe_ext->init_phase_x[1]);
+	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
+		pe_ext->init_phase_y[1]);
+	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
+		pe_ext->phase_step_x[1]);
+	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
+		pe_ext->phase_step_y[1]); /* index 1 for the COMP1_2 plane, matching the three writes above */
+}
+
+/**
+ * sde_hw_sspp_setup_rects()
+ */
+static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *cfg,
+		struct sde_hw_pixel_ext *pe_ext)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 decimation = 0;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	/* program pixel extension override only when the caller supplied one */
+	if (pe_ext)
+		sde_hw_sspp_setup_pe_config(ctx, cfg, pe_ext);
+
+	/* src and dest rect programming */
+	src_xy = (cfg->src_rect.y << 16) |
+		(cfg->src_rect.x);
+	src_size = 
(cfg->src_rect.h << 16) |
+		(cfg->src_rect.w);
+	dst_xy = (cfg->dst_rect.y << 16) |
+		(cfg->dst_rect.x);
+	dst_size = (cfg->dst_rect.h << 16) |
+		(cfg->dst_rect.w);
+
+	ystride0 = (cfg->src.ystride[0]) |
+		(cfg->src.ystride[1] << 16);
+	ystride1 = (cfg->src.ystride[2]) |
+		(cfg->src.ystride[3] << 16);
+
+	/* program scalar, phase registers, if pipes supporting scaling */
+	if (src_size != dst_size) {
+		if (test_bit(SDE_SSPP_SCALAR_RGB, &ctx->cap->features) ||
+			test_bit(SDE_SSPP_SCALAR_QSEED2, &ctx->cap->features)) {
+			/* program decimation only when a non-zero factor is requested */
+			if (cfg->horz_decimation)
+				decimation = (cfg->horz_decimation - 1) << 8;
+			if (cfg->vert_decimation)
+				decimation |= (cfg->vert_decimation - 1);
+
+			sde_hw_sspp_setup_scalar(ctx, pe_ext);
+		}
+	}
+
+	/* Rectangle Register programming */
+	SDE_REG_WRITE(c, SSPP_SRC_SIZE + idx, src_size);
+	SDE_REG_WRITE(c, SSPP_SRC_XY + idx, src_xy);
+	SDE_REG_WRITE(c, SSPP_OUT_SIZE + idx, dst_size);
+	SDE_REG_WRITE(c, SSPP_OUT_XY + idx, dst_xy);
+
+	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+	SDE_REG_WRITE(c, SSPP_DECIMATION_CONFIG + idx, decimation);
+}
+
+static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	for (i = 0; i < cfg->src.num_planes; i++)
+		SDE_REG_WRITE(c, SSPP_SRC0_ADDR + idx + i*0x4,
+			cfg->addr.plane[i]);
+
+}
+
+static void sde_hw_sspp_setup_csc_8bit(struct sde_hw_pipe *ctx,
+		struct sde_csc_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	sde_hw_csc_setup(c, VIG_0_CSC_1_MATRIX_COEFF_0, data);
+}
+
+static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx,
+		struct sde_hw_sharp_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx, 
cfg->strength); + SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x4, cfg->edge_thr); + SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x8, cfg->smooth_thr); + SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr); +} + +static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, + u32 const_color, + u32 flags) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 secure = 0; + u32 unpack, src_format, opmode = 0; + u32 idx; + + if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) + return; + + /* format info */ + src_format = SSPP_SOLID_FILL; + unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) | + (C1_B_Cb << 8) | (C0_G_Y << 0); + secure = (flags & SDE_SSPP_SECURE_OVERLAY_SESSION) ? 0xF : 0x00; + opmode = MDSS_MDP_OP_PE_OVERRIDE; + + SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format); + SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack); + SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure); + SDE_REG_WRITE(c, SSPP_SRC_CONSTANT_COLOR + idx, const_color); + SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode); +} + +static void sde_hw_sspp_setup_histogram_v1(struct sde_hw_pipe *ctx, + void *cfg) +{ + +} + +static void sde_hw_sspp_setup_memcolor(struct sde_hw_pipe *ctx, + u32 memcolortype, u8 en) +{ +} + +static void sde_hw_sspp_setup_igc(struct sde_hw_pipe *ctx) +{ +} + +void sde_sspp_setup_pa(struct sde_hw_pipe *c) +{ +} + +static void sde_hw_sspp_setup_danger_safe(struct sde_hw_pipe *ctx, + u32 danger_lut, u32 safe_lut) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 idx; + + if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) + return; + + SDE_REG_WRITE(c, SSPP_DANGER_LUT + idx, danger_lut); + SDE_REG_WRITE(c, SSPP_SAFE_LUT + idx, safe_lut); +} + +static void sde_hw_sspp_qseed2_coeff(void *ctx) +{ +} + +static void _setup_layer_ops(struct sde_hw_sspp_ops *ops, + unsigned long features) +{ + if (test_bit(SDE_SSPP_SRC, &features)) { + ops->setup_sourceformat = sde_hw_sspp_setup_format; + ops->setup_rects = sde_hw_sspp_setup_rects; + ops->setup_sourceaddress 
= sde_hw_sspp_setup_sourceaddress; + ops->setup_solidfill = sde_hw_sspp_setup_solidfill; + ops->setup_danger_safe = sde_hw_sspp_setup_danger_safe; + } + if (test_bit(SDE_SSPP_CSC, &features)) + ops->setup_csc = sde_hw_sspp_setup_csc_8bit; + + if (test_bit(SDE_SSPP_PA_V1, &features)) { + ops->setup_sharpening = sde_hw_sspp_setup_sharpening; + ops->setup_pa_memcolor = sde_hw_sspp_setup_memcolor; + } + if (test_bit(SDE_SSPP_HIST_V1, &features)) + ops->setup_histogram = sde_hw_sspp_setup_histogram_v1; + + if (test_bit(SDE_SSPP_IGC, &features)) + ops->setup_igc = sde_hw_sspp_setup_igc; +} + +static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->sspp_count; i++) { + if (sspp == m->sspp[i].id) { + b->base_off = addr; + b->blk_off = m->sspp[i].base; + b->hwversion = m->hwversion; + return &m->sspp[i]; + } + } + + return ERR_PTR(-ENOMEM); +} + +struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_pipe *c; + struct sde_sspp_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _sspp_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + c->idx = idx; + c->cap = cfg; + _setup_layer_ops(&c->ops, c->cap->features); + c->highest_bank_bit = m->mdp[0].highest_bank_bit; + + /* + * Perform any default initialization for the sspp blocks + */ + if (test_bit(SDE_SSPP_SCALAR_QSEED2, &cfg->features)) + sde_hw_sspp_qseed2_coeff(c); + + if (test_bit(SDE_MDP_PANIC_PER_PIPE, &m->mdp[0].features)) + sde_hw_sspp_setup_danger_safe(c, + cfg->sblk->danger_lut, + cfg->sblk->safe_lut); + + return c; +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h new file mode 100644 index 000000000000..0c3873b6945e --- /dev/null +++ 
b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h @@ -0,0 +1,266 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_SSPP_H +#define _SDE_HW_SSPP_H + +#include "sde_hw_catalog.h" +#include "sde_hw_mdss.h" +#include "sde_mdp_formats.h" +#include "sde_hw_mdp_util.h" + +struct sde_hw_pipe; + +/** + * Flags + */ +#define SDE_SSPP_SECURE_OVERLAY_SESSION 0x1 +#define SDE_SSPP_FLIP_LR 0x2 +#define SDE_SSPP_FLIP_UD 0x4 +#define SDE_SSPP_SOURCE_ROTATED_90 0x8 +#define SDE_SSPP_ROT_90 0x10 + +enum { + SDE_MDP_FRAME_LINEAR, + SDE_MDP_FRAME_TILE_A4X, + SDE_MDP_FRAME_TILE_A5X, +}; + +enum sde_hw_filter { + SDE_MDP_SCALE_FILTER_NEAREST = 0, + SDE_MDP_SCALE_FILTER_BIL, + SDE_MDP_SCALE_FILTER_PCMN, + SDE_MDP_SCALE_FILTER_CA, + SDE_MDP_SCALE_FILTER_MAX +}; + +struct sde_hw_sharp_cfg { + u32 strength; + u32 edge_thr; + u32 smooth_thr; + u32 noise_thr; +}; + +struct sde_hw_pixel_ext { + /* scaling factors are enabled for this input layer */ + uint8_t enable_pxl_ext; + + int init_phase_x[SDE_MAX_PLANES]; + int phase_step_x[SDE_MAX_PLANES]; + int init_phase_y[SDE_MAX_PLANES]; + int phase_step_y[SDE_MAX_PLANES]; + + /* + * Number of pixels extension in left, right, top and bottom direction + * for all color components. This pixel value for each color component + * should be sum of fetch + repeat pixels. 
+ */ + int num_ext_pxls_left[SDE_MAX_PLANES]; + int num_ext_pxls_right[SDE_MAX_PLANES]; + int num_ext_pxls_top[SDE_MAX_PLANES]; + int num_ext_pxls_btm[SDE_MAX_PLANES]; + + /* + * Number of pixels needs to be overfetched in left, right, top and + * bottom directions from source image for scaling. + */ + int left_ftch[SDE_MAX_PLANES]; + int right_ftch[SDE_MAX_PLANES]; + int top_ftch[SDE_MAX_PLANES]; + int btm_ftch[SDE_MAX_PLANES]; + + /* + * Number of pixels needs to be repeated in left, right, top and + * bottom directions for scaling. + */ + int left_rpt[SDE_MAX_PLANES]; + int right_rpt[SDE_MAX_PLANES]; + int top_rpt[SDE_MAX_PLANES]; + int btm_rpt[SDE_MAX_PLANES]; + + uint32_t roi_w[SDE_MAX_PLANES]; + + /* + * Filter type to be used for scaling in horizontal and vertical + * directions + */ + enum sde_hw_filter horz_filter[SDE_MAX_PLANES]; + enum sde_hw_filter vert_filter[SDE_MAX_PLANES]; + +}; + +/** + * struct sde_hw_pipe_cfg : Pipe description + * @src: source surface information + * @src_rect: src ROI, caller takes into account the different operations + * such as decimation, flip etc to program this field + * @dest_rect: destination ROI. 
+ * @ horz_decimation : horizontal decimation factor( 0, 2, 4, 8, 16) + * @ vert_decimation : vertical decimation factor( 0, 2, 4, 8, 16) + * 2: Read 1 line/pixel drop 1 line/pixel + * 4: Read 1 line/pixel drop 3 lines/pixels + * 8: Read 1 line/pixel drop 7 lines/pixels + * 16: Read 1 line/pixel drop 15 line/pixels + * @addr: source surface address + */ +struct sde_hw_pipe_cfg { + struct sde_hw_source_info src; + struct sde_rect src_rect; + struct sde_rect dst_rect; + u8 horz_decimation; + u8 vert_decimation; + struct addr_info addr; +}; + +/** + * struct danger_safe_cfg: + * @danger_lut: + * @safe_lut: + */ +struct danger_safe_cfg { + u32 danger_lut; + u32 safe_lut; +}; + +/** + * struct sde_hw_sspp_ops - interface to the SSPP Hw driver functions + * Caller must call the init function to get the pipe context for each pipe + * Assumption is these functions will be called after clocks are enabled + */ +struct sde_hw_sspp_ops { + /** + * setup_sourceformat - setup pixel format cropping rectangle, flip + * @ctx: Pointer to pipe context + * @cfg: Pointer to pipe config structure + * @flags: Format flags + */ + void (*setup_sourceformat)(struct sde_hw_pipe *ctx, + struct sde_hw_pipe_cfg *cfg, + u32 flags); + + /** + * setup_rects - setup pipe ROI rectangles + * @ctx: Pointer to pipe context + * @cfg: Pointer to pipe config structure + * @pe_ext: Pointer to pixel ext settings + */ + void (*setup_rects)(struct sde_hw_pipe *ctx, + struct sde_hw_pipe_cfg *cfg, + struct sde_hw_pixel_ext *pe_ext); + + /** + * setup_sourceaddress - setup pipe source addresses + * @ctx: Pointer to pipe context + * @cfg: Pointer to pipe config structure + */ + void (*setup_sourceaddress)(struct sde_hw_pipe *ctx, + struct sde_hw_pipe_cfg *cfg); + + /** + * setup_csc - setup color space coversion + * @ctx: Pointer to pipe context + * @data: Pointer to config structure + */ + void (*setup_csc)(struct sde_hw_pipe *ctx, + struct sde_csc_cfg *data); + + /** + * setup_solidfill - enable/disable 
colorfill + * @ctx: Pointer to pipe context + * @const_color: Fill color value + * @flags: Pipe flags + */ + void (*setup_solidfill)(struct sde_hw_pipe *ctx, + u32 const_color, + u32 flags); + + /** + * setup_sharpening - setup sharpening + * @ctx: Pointer to pipe context + * @cfg: Pointer to config structure + */ + void (*setup_sharpening)(struct sde_hw_pipe *ctx, + struct sde_hw_sharp_cfg *cfg); + + /** + * setup_pa_memcolor - setup source color processing + * @ctx: Pointer to pipe context + * @memcolortype: Memcolor type + * @en: PA enable + */ + void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx, + u32 memcolortype, u8 en); + + /** + * setup_igc - setup inverse gamma correction + * @ctx: Pointer to pipe context + */ + void (*setup_igc)(struct sde_hw_pipe *ctx); + + /** + * setup_danger_safe - setup danger safe LUTS + * @ctx: Pointer to pipe context + * @danger_lut: Danger LUT setting + * @safe_lut: Safe LUT setting + */ + void (*setup_danger_safe)(struct sde_hw_pipe *ctx, + u32 danger_lut, + u32 safe_lut); + + /** + * setup_histogram - setup histograms + * @ctx: Pointer to pipe context + * @cfg: Pointer to histogram configuration + */ + void (*setup_histogram)(struct sde_hw_pipe *ctx, + void *cfg); +}; + +/** + * struct sde_hw_pipe - pipe description + * @base_off: mdp register mapped offset + * @blk_off: pipe offset relative to mdss offset + * @length length of register block offset + * @hwversion mdss hw version number + * @idx: pipe index + * @type : pipe type, VIG/DMA/RGB/CURSOR, certain operations are not + * supported for each pipe type + * @pipe_hw_cap: pointer to layer_cfg + * @highest_bank_bit: + * @ops: pointer to operations possible for this pipe + */ +struct sde_hw_pipe { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* Pipe */ + enum sde_sspp idx; + const struct sde_sspp_cfg *cap; + u32 highest_bank_bit; + + /* Ops */ + struct sde_hw_sspp_ops ops; +}; + +/** + * sde_hw_sspp_init - initializes the sspp hw driver object. 
+ * Should be called once before accessing every pipe. + * @idx: Pipe index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: pointer to mdss catalog data @ops: + */ +struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_SSPP_H */ + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c new file mode 100644 index 000000000000..1eaad185be65 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c @@ -0,0 +1,120 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_hw_mdss.h" +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_wb.h" + +static struct sde_wb_cfg *_wb_offset(enum sde_wb wb, + struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->wb_count; i++) { + if (wb == m->wb[i].id) { + b->base_off = addr; + b->blk_off = m->wb[i].base; + b->hwversion = m->hwversion; + return &m->wb[i]; + } + } + return ERR_PTR(-EINVAL); +} + +static void sde_hw_wb_setup_csc_8bit(struct sde_hw_wb *ctx, + struct sde_csc_cfg *data) +{ +} + +static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *data) +{ +} + +static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *data) +{ +} + +static void sde_hw_wb_setup_rotator(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *data) +{ +} + +static void sde_hw_setup_dither(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *data) +{ +} + +static void sde_hw_wb_setup_cdwn(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *data) +{ +} +static void sde_hw_wb_traffic_shaper(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *data) +{ +} + +static void _setup_wb_ops(struct sde_hw_wb_ops *ops, + unsigned long features) +{ + if (test_bit(SDE_WB_CSC, &features)) + ops->setup_csc_data = sde_hw_wb_setup_csc_8bit; + + ops->setup_outaddress = sde_hw_wb_setup_outaddress; + ops->setup_outformat = sde_hw_wb_setup_format; + + if (test_bit(SDE_WB_BLOCK_MODE, &features)) + ops->setup_rotator = sde_hw_wb_setup_rotator; + + if (test_bit(SDE_WB_DITHER, &features)) + ops->setup_dither = sde_hw_setup_dither; + + if (test_bit(SDE_WB_CHROMA_DOWN, &features)) + ops->setup_cdwn = sde_hw_wb_setup_cdwn; + + if (test_bit(SDE_WB_TRAFFIC_SHAPER, &features)) + ops->setup_trafficshaper = sde_hw_wb_traffic_shaper; +} + +struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_wb *c; + struct sde_wb_cfg *cfg; + + c = kzalloc(sizeof(*c), 
GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _wb_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) { /* _wb_offset() fails with ERR_PTR(), never NULL */
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_wb_ops(&c->ops, c->caps->features);
+
+	/*
+	 * Perform any default initialization for the chroma down module
+	 */
+
+	return c;
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
new file mode 100644
index 000000000000..623af6e963dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details. 
+ */ + +#ifndef _SDE_HW_WB_H +#define _SDE_HW_WB_H + +#include "sde_hw_catalog.h" +#include "sde_hw_mdss.h" +#include "sde_hw_mdp_util.h" + +struct sde_hw_wb; + +struct sde_hw_wb_cfg { + struct sde_hw_source_info dest; +}; + +/** + * + * struct sde_hw_wb_ops : Interface to the wb Hw driver functions + * Assumption is these functions will be called after clocks are enabled + */ +struct sde_hw_wb_ops { + void (*setup_csc_data)(struct sde_hw_wb *ctx, + struct sde_csc_cfg *data); + + void (*setup_outaddress)(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *wb); + + void (*setup_outformat)(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *wb); + + void (*setup_rotator)(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *wb); + + void (*setup_dither)(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *wb); + + void (*setup_cdwn)(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *wb); + + void (*setup_trafficshaper)(struct sde_hw_wb *ctx, + struct sde_hw_wb_cfg *wb); +}; + +/** + * struct sde_hw_wb : WB driver object + * @struct sde_hw_blk_reg_map *hw; + * @idx + * @wb_hw_caps + * @mixer_hw_caps + * @ops + */ +struct sde_hw_wb { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* wb path */ + int idx; + const struct sde_wb_cfg *caps; + + /* ops */ + struct sde_hw_wb_ops ops; +}; + +/** + * sde_hw_wb_init(): Initializes the wb_path hw driver object. + * should be called before accessing every mixer. + * @idx: wb_path index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx, + void __iomem *addr, + struct sde_mdss_cfg *m); + +#endif /*_SDE_HW_WB_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h new file mode 100644 index 000000000000..c8d98a4a485e --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hwio.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HWIO_H +#define _SDE_HWIO_H + +#include "sde_hw_mdp_util.h" + +/** + * MDP TOP block Register and bit fields and defines + */ +#define DISP_INTF_SEL 0x004 +#define INTR_EN 0x010 +#define INTR_STATUS 0x014 +#define INTR_CLEAR 0x018 +#define INTR2_EN 0x008 +#define INTR2_STATUS 0x00c +#define INTR2_CLEAR 0x02c +#define HIST_INTR_EN 0x01c +#define HIST_INTR_STATUS 0x020 +#define HIST_INTR_CLEAR 0x024 +#define SPLIT_DISPLAY_EN 0x2F4 +#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8 +#define DSPP_IGC_COLOR0_RAM_LUTN 0x300 +#define DSPP_IGC_COLOR1_RAM_LUTN 0x304 +#define DSPP_IGC_COLOR2_RAM_LUTN 0x308 +#define PPB0_CNTL 0x330 +#define PPB0_CONFIG 0x334 +#define PPB1_CNTL 0x338 +#define PPB1_CONFIG 0x33C +#define HW_EVENTS_CTL 0x37C +#define CLK_CTRL3 0x3A8 +#define CLK_STATUS3 0x3AC +#define CLK_CTRL4 0x3B0 +#define CLK_STATUS4 0x3B4 +#define CLK_CTRL5 0x3B8 +#define CLK_STATUS5 0x3BC +#define CLK_CTRL7 0x3D0 +#define CLK_STATUS7 0x3D4 +#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0 +#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4 +#define INTF_SW_RESET_MASK 0x3FC +#define MDP_OUT_CTL_0 0x410 +#define MDP_VSYNC_SEL 0x414 +#define DCE_SEL 0x450 + +#endif /*_SDE_HWIO_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c new file mode 100644 index 000000000000..73c4d7cb9298 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_irq.c @@ -0,0 +1,116 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "msm_drv.h" +#include "sde_kms.h" + +void sde_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) +{ +} + +void sde_irq_preinstall(struct msm_kms *kms) +{ +} + +int sde_irq_postinstall(struct msm_kms *kms) +{ + return 0; +} + +void sde_irq_uninstall(struct msm_kms *kms) +{ +} + +irqreturn_t sde_irq(struct msm_kms *kms) +{ + return IRQ_HANDLED; +} + +int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + return 0; +} + +void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ +} + +static void sde_hw_irq_mask(struct irq_data *irqd) +{ + struct sde_kms *sde_kms = irq_data_get_irq_chip_data(irqd); + + smp_mb__before_atomic(); + clear_bit(irqd->hwirq, &sde_kms->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static void sde_hw_irq_unmask(struct irq_data *irqd) +{ + struct sde_kms *sde_kms = irq_data_get_irq_chip_data(irqd); + + smp_mb__before_atomic(); + set_bit(irqd->hwirq, &sde_kms->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static struct irq_chip sde_hw_irq_chip = { + .name = "sde", + .irq_mask = sde_hw_irq_mask, + .irq_unmask = sde_hw_irq_unmask, +}; + +static int sde_hw_irqdomain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) +{ + struct sde_kms *sde_kms = d->host_data; + + irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq); + irq_set_chip_data(irq, sde_kms); + + return 0; +} + +static struct irq_domain_ops sde_hw_irqdomain_ops = { + .map = 
sde_hw_irqdomain_map, + .xlate = irq_domain_xlate_onecell, +}; + +int sde_irq_domain_init(struct sde_kms *sde_kms) +{ + struct device *dev = sde_kms->dev->dev; + struct irq_domain *d; + + d = irq_domain_add_linear(dev->of_node, 32, + &sde_hw_irqdomain_ops, sde_kms); + + if (!d) + return -ENXIO; + + sde_kms->irqcontroller.enabled_mask = 0; + sde_kms->irqcontroller.domain = d; + + return 0; +} + +int sde_irq_domain_fini(struct sde_kms *sde_kms) +{ + if (sde_kms->irqcontroller.domain) { + irq_domain_remove(sde_kms->irqcontroller.domain); + sde_kms->irqcontroller.domain = NULL; + } + return 0; +} + diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c new file mode 100644 index 000000000000..740b9c066467 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -0,0 +1,306 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "msm_drv.h" +#include "sde_kms.h" +#include "sde_hw_mdss.h" + +static int modeset_init_intf(struct sde_kms *sde_kms, int intf_num) +{ + struct sde_mdss_cfg *catalog = sde_kms->catalog; + u32 intf_type = catalog->intf[intf_num].type; + + switch (intf_type) { + case INTF_NONE: + break; + case INTF_DSI: + break; + case INTF_LCDC: + break; + case INTF_HDMI: + break; + case INTF_EDP: + default: + break; + } + + return 0; +} + +static int modeset_init(struct sde_kms *sde_kms) +{ + struct msm_drm_private *priv = sde_kms->dev->dev_private; + int i; + int ret; + struct sde_mdss_cfg *catalog = sde_kms->catalog; + struct drm_device *dev = sde_kms->dev; + struct drm_plane *primary_planes[MAX_PLANES]; + int primary_planes_idx = 0; + + int num_private_planes = catalog->mixer_count; + + ret = sde_irq_domain_init(sde_kms); + if (ret) + goto fail; + + /* Create the planes */ + for (i = 0; i < catalog->sspp_count; i++) { + struct drm_plane *plane; + bool primary = true; + + if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR) + || !num_private_planes) + primary = false; + + plane = sde_plane_init(dev, primary); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + goto fail; + } + priv->planes[priv->num_planes++] = plane; + + if (primary) + primary_planes[primary_planes_idx++] = plane; + if (num_private_planes) + num_private_planes--; + } + + /* Need enough primary planes to assign one per mixer (CRTC) */ + if (primary_planes_idx < catalog->mixer_count) { + ret = -EINVAL; + goto fail; + } + + /* Create one CRTC per mixer */ + for (i = 0; i < catalog->mixer_count; i++) { + /* + * Each mixer receives a private plane. We start + * with first RGB, and then DMA and then VIG. 
+ */ + struct drm_crtc *crtc; + + crtc = sde_crtc_init(dev, NULL, primary_planes[i], i); + if (IS_ERR(crtc)) { + ret = PTR_ERR(crtc); + goto fail; + } + priv->crtcs[priv->num_crtcs++] = crtc; + } + + for (i = 0; i < catalog->intf_count; i++) { + ret = modeset_init_intf(sde_kms, i); + if (ret) + goto fail; + } + return 0; +fail: + return ret; +} + +static int sde_hw_init(struct msm_kms *kms) +{ + return 0; +} + +static long sde_round_pixclk(struct msm_kms *kms, unsigned long rate, + struct drm_encoder *encoder) +{ + return rate; +} + +static void sde_preclose(struct msm_kms *kms, struct drm_file *file) +{ +} + +static void sde_destroy(struct msm_kms *kms) +{ + struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms)); + + sde_irq_domain_fini(sde_kms); + kfree(sde_kms); +} + +static const struct mdp_kms_funcs kms_funcs = { + .base = { + .hw_init = sde_hw_init, + .irq_preinstall = sde_irq_preinstall, + .irq_postinstall = sde_irq_postinstall, + .irq_uninstall = sde_irq_uninstall, + .irq = sde_irq, + .enable_vblank = sde_enable_vblank, + .disable_vblank = sde_disable_vblank, + .get_format = mdp_get_format, + .round_pixclk = sde_round_pixclk, + .preclose = sde_preclose, + .destroy = sde_destroy, + }, + .set_irqmask = sde_set_irqmask, +}; + +static int get_clk(struct platform_device *pdev, struct clk **clkp, + const char *name, bool mandatory) +{ + struct device *dev = &pdev->dev; + struct clk *clk = devm_clk_get(dev, name); + + if (IS_ERR(clk) && mandatory) { + dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + if (IS_ERR(clk)) + DBG("skipping %s", name); + else + *clkp = clk; + + return 0; +} + +struct sde_kms *sde_hw_setup(struct platform_device *pdev) +{ + struct sde_kms *sde_kms; + struct msm_kms *kms = NULL; + int ret; + + sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL); + if (!sde_kms) + return NULL; + + mdp_kms_init(&sde_kms->base, &kms_funcs); + + kms = &sde_kms->base.base; + + sde_kms->mmio = msm_ioremap(pdev, "mdp_phys", 
"SDE"); + if (IS_ERR(sde_kms->mmio)) { + ret = PTR_ERR(sde_kms->mmio); + goto fail; + } + + sde_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); + if (IS_ERR(sde_kms->vbif)) { + ret = PTR_ERR(sde_kms->vbif); + goto fail; + } + + sde_kms->venus = devm_regulator_get_optional(&pdev->dev, "gdsc-venus"); + if (IS_ERR(sde_kms->venus)) { + ret = PTR_ERR(sde_kms->venus); + DBG("failed to get Venus GDSC regulator: %d\n", ret); + sde_kms->venus = NULL; + } + + if (sde_kms->venus) { + ret = regulator_enable(sde_kms->venus); + if (ret) { + DBG("failed to enable venus GDSC: %d\n", ret); + goto fail; + } + } + + sde_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(sde_kms->vdd)) { + ret = PTR_ERR(sde_kms->vdd); + goto fail; + } + + ret = regulator_enable(sde_kms->vdd); + if (ret) { + DBG("failed to enable regulator vdd: %d\n", ret); + goto fail; + } + + sde_kms->mmagic = devm_regulator_get_optional(&pdev->dev, "mmagic"); + if (IS_ERR(sde_kms->mmagic)) { + ret = PTR_ERR(sde_kms->mmagic); + DBG("failed to get mmagic GDSC regulator: %d\n", ret); + sde_kms->mmagic = NULL; + } + + /* mandatory clocks: */ + ret = get_clk(pdev, &sde_kms->axi_clk, "bus_clk", true); + if (ret) + goto fail; + ret = get_clk(pdev, &sde_kms->ahb_clk, "iface_clk", true); + if (ret) + goto fail; + ret = get_clk(pdev, &sde_kms->src_clk, "core_clk_src", true); + if (ret) + goto fail; + ret = get_clk(pdev, &sde_kms->core_clk, "core_clk", true); + if (ret) + goto fail; + ret = get_clk(pdev, &sde_kms->vsync_clk, "vsync_clk", true); + if (ret) + goto fail; + + /* optional clocks: */ + get_clk(pdev, &sde_kms->lut_clk, "lut_clk", false); + get_clk(pdev, &sde_kms->mmagic_clk, "mmagic_clk", false); + get_clk(pdev, &sde_kms->iommu_clk, "iommu_clk", false); + + return sde_kms; + +fail: + if (kms) + sde_destroy(kms); + + return ERR_PTR(ret); +} + +struct msm_kms *sde_kms_init(struct drm_device *dev) +{ + struct platform_device *pdev = dev->platformdev; + struct sde_mdss_cfg *catalog; + struct sde_kms 
*sde_kms; + struct msm_kms *msm_kms; + int ret = 0; + + sde_kms = sde_hw_setup(pdev); + if (IS_ERR(sde_kms)) { + ret = PTR_ERR(sde_kms); + goto fail; + } + + sde_kms->dev = dev; + msm_kms = &sde_kms->base.base; + + /* + * Currently hardcoding to MDSS version 1.7.0 (8996) + */ + catalog = sde_hw_catalog_init(1, 7, 0); + if (!catalog) + goto fail; + + sde_kms->catalog = catalog; + + /* + * Now we need to read the HW catalog and initialize resources such as + * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc + */ + + /* + * modeset_init should create the DRM related objects i.e. CRTCs, + * planes, encoders, connectors and so forth + */ + modeset_init(sde_kms); + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + + return msm_kms; + +fail: + if (msm_kms) + sde_destroy(msm_kms); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h new file mode 100644 index 000000000000..1afe1bb03c7b --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -0,0 +1,116 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __SDE_KMS_H__ +#define __SDE_KMS_H__ + +#include "msm_drv.h" +#include "msm_kms.h" +#include "mdp/mdp_kms.h" +#include "sde_hw_catalog.h" +#include "sde_hw_mdss.h" + +struct sde_kms { + struct mdp_kms base; + struct drm_device *dev; + int rev; + struct sde_mdss_cfg *catalog; + + struct msm_mmu *mmu; + + /* io/register spaces: */ + void __iomem *mmio, *vbif; + + struct regulator *vdd; + struct regulator *mmagic; + struct regulator *venus; + + struct clk *axi_clk; + struct clk *ahb_clk; + struct clk *src_clk; + struct clk *core_clk; + struct clk *lut_clk; + struct clk *mmagic_clk; + struct clk *iommu_clk; + struct clk *vsync_clk; + + struct { + unsigned long enabled_mask; + struct irq_domain *domain; + } irqcontroller; +}; + +#define to_sde_kms(x) container_of(x, struct sde_kms, base) + +struct sde_plane_state { + struct drm_plane_state base; + + /* aligned with property */ + uint8_t premultiplied; + uint8_t zpos; + uint8_t alpha; + + /* assigned by crtc blender */ + enum sde_stage stage; + + /* some additional transactional status to help us know in the + * apply path whether we need to update SMP allocation, and + * whether current update is still pending: + */ + bool mode_changed : 1; + bool pending : 1; +}; + +#define to_sde_plane_state(x) \ + container_of(x, struct sde_plane_state, base) + +int sde_disable(struct sde_kms *sde_kms); +int sde_enable(struct sde_kms *sde_kms); + +void sde_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +void sde_irq_preinstall(struct msm_kms *kms); +int sde_irq_postinstall(struct msm_kms *kms); +void sde_irq_uninstall(struct msm_kms *kms); +irqreturn_t sde_irq(struct msm_kms *kms); +int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); + +enum sde_sspp sde_plane_pipe(struct drm_plane *plane); +void sde_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj); +void 
sde_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb); +int sde_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +void sde_plane_complete_flip(struct drm_plane *plane); +struct drm_plane *sde_plane_init(struct drm_device *dev, bool private_plane); + +uint32_t sde_crtc_vblank(struct drm_crtc *crtc); + +void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); +void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane); +void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane); +struct drm_crtc *sde_crtc_init(struct drm_device *dev, + struct drm_encoder *encoder, + struct drm_plane *plane, int id); + +struct drm_encoder *sde_encoder_init(struct drm_device *dev, int intf); + +int sde_irq_domain_init(struct sde_kms *sde_kms); +int sde_irq_domain_fini(struct sde_kms *sde_kms); + +#endif /* __sde_kms_H__ */ diff --git a/drivers/gpu/drm/msm/sde/sde_mdp_formats.h b/drivers/gpu/drm/msm/sde/sde_mdp_formats.h new file mode 100644 index 000000000000..4ad3ad3c744e --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_mdp_formats.h @@ -0,0 +1,213 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SDE_MDP_FORMATS_H +#define _SDE_MDP_FORMATS_H + +#include +#include "sde_hw_mdss.h" + +/** + * MDP supported format packing, bpp, and other format + * information. + * MDP currently only supports interleaved RGB formats + * UBWC support for a pixel format is indicated by the flag, + * there is additional meta data plane for such formats + */ + +#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, alpha, bp, flg) \ +{ \ + .format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3) }, \ + .bits = { a, r, g, b}, \ + .chroma_sample = SDE_MDP_CHROMA_RGB, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = (alpha == true) ? 4:3, \ + .bpp = bp, \ + .fetch_mode = SDE_MDP_FETCH_LINEAR, \ + .is_yuv = false, \ + .flag = flg \ +} + +#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \ +alpha, chroma, count, bp, flg) \ +{ \ + .format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3)}, \ + .bits = { a, r, g, b}, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = count, \ + .bpp = bp, \ + .fetch_mode = SDE_MDP_FETCH_LINEAR, \ + .is_yuv = true, \ + .flag = flg \ +} +#define PSEDUO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg) \ +{ \ + .format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { a, r, g, b}, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = SDE_MDP_FETCH_LINEAR, \ + .is_yuv = true, \ + .flag = flg \ +} + +#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, flg)\ +{ \ + .format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), 
(e2), 0 }, \ + .bits = { a, r, g, b}, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = 0, \ + .bpp = bp, \ + .fetch_mode = SDE_MDP_FETCH_LINEAR, \ + .is_yuv = true, \ + .flag = flg \ +} + +static struct sde_mdp_format_params sde_mdp_format_map[] = { + INTERLEAVED_RGB_FMT(ARGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + true, 4, 0), + + INTERLEAVED_RGB_FMT(ABGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, + true, 4, 0), + + INTERLEAVED_RGB_FMT(RGBA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, + true, 4, 0), + + INTERLEAVED_RGB_FMT(BGRA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, + true, 4, 0), + + INTERLEAVED_RGB_FMT(XRGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + true, 4, 0), + + INTERLEAVED_RGB_FMT(RGB888, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, + false, 3, 0), + + INTERLEAVED_RGB_FMT(BGR888, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, + false, 3, 0), + + INTERLEAVED_RGB_FMT(RGB565, + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, + false, 2, 0), + + INTERLEAVED_RGB_FMT(BGR565, + 0, 5, 6, 5, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, + false, 2, 0), + + PSEDUO_YUV_FMT(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_MDP_CHROMA_420, 0), + + PSEDUO_YUV_FMT(NV21, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + SDE_MDP_CHROMA_420, 0), + + PSEDUO_YUV_FMT(NV16, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_MDP_CHROMA_H2V1, 0), + + PSEDUO_YUV_FMT(NV61, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + SDE_MDP_CHROMA_H2V1, 0), + + INTERLEAVED_YUV_FMT(VYUY, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y, + 
false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + INTERLEAVED_YUV_FMT(UYVY, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + INTERLEAVED_YUV_FMT(YUYV, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + INTERLEAVED_YUV_FMT(YVYU, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + PLANAR_YUV_FMT(YUV420, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, C0_G_Y, + false, SDE_MDP_CHROMA_420, 2, + 0), + + PLANAR_YUV_FMT(YVU420, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, C0_G_Y, + false, SDE_MDP_CHROMA_420, 2, + 0), +}; + +struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format, + u32 fmt_modifier) +{ + u32 i = 0; + struct sde_mdp_format_params *fmt = NULL; + + for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++) + if (format == sde_mdp_format_map[i].format) { + fmt = &sde_mdp_format_map[i]; + break; + } + + return fmt; +} + +#endif /*_SDE_MDP_FORMATS_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c new file mode 100644 index 000000000000..17b7303557ef --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -0,0 +1,115 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_kms.h" + +struct sde_plane { + struct drm_plane base; + const char *name; + uint32_t nformats; + uint32_t formats[32]; +}; +#define to_sde_plane(x) container_of(x, struct sde_plane, base) + +static int sde_plane_update(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + return 0; +} + +static int sde_plane_disable(struct drm_plane *plane) +{ + return 0; +} + +static void sde_plane_destroy(struct drm_plane *plane) +{ + struct sde_plane *sde_plane = to_sde_plane(plane); + struct msm_drm_private *priv = plane->dev->dev_private; + + if (priv->kms) + sde_plane_disable(plane); + + drm_plane_cleanup(plane); + + kfree(sde_plane); +} + +/* helper to install properties which are common to planes and crtcs */ +void sde_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj) +{ +} + +int sde_plane_set_property(struct drm_plane *plane, + struct drm_property *property, uint64_t val) +{ + return -EINVAL; +} + +static const struct drm_plane_funcs sde_plane_funcs = { + .update_plane = sde_plane_update, + .disable_plane = sde_plane_disable, + .destroy = sde_plane_destroy, + .set_property = sde_plane_set_property, +}; + +void sde_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ +} + +int sde_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + return 0; +} + +/* initialize plane */ +struct drm_plane *sde_plane_init(struct drm_device *dev, bool private_plane) +{ + struct drm_plane *plane = NULL; + struct sde_plane *sde_plane; + int ret; + enum drm_plane_type type; + + sde_plane = kzalloc(sizeof(*sde_plane), GFP_KERNEL); + if (!sde_plane) { + ret = -ENOMEM; + goto 
fail; + } + + plane = &sde_plane->base; + + type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs, + sde_plane->formats, sde_plane->nformats, + type); + + sde_plane_install_properties(plane, &plane->base); + + return plane; + +fail: + if (plane) + sde_plane_destroy(plane); + + return ERR_PTR(ret); +} -- GitLab From e56f8713ba53d4899fc1b225dc9bcc7cf489a16d Mon Sep 17 00:00:00 2001 From: Adrian Salido-Moreno Date: Fri, 2 Oct 2015 15:54:46 -0700 Subject: [PATCH 007/310] drm/msm: add smmu handler Add msm_smmu driver to support mapping buffers to arm smmu memory. msm_smmu adds the hooks to support drm hooks. Current change only supports the unsecure domain memory. msm_gem object is also updated to attach the new msm_smmu driver. Change-Id: I4899bd74d8b41b864ed5e0dec2da11e929c7fa95 Signed-off-by: Adrian Salido-Moreno --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 5 +- drivers/gpu/drm/msm/msm_gem.c | 24 +- drivers/gpu/drm/msm/msm_gem.h | 3 +- drivers/gpu/drm/msm/msm_mmu.h | 18 + drivers/gpu/drm/msm/msm_smmu.c | 439 ++++++++++++++++++++++++ 6 files changed, 479 insertions(+), 11 deletions(-) create mode 100644 drivers/gpu/drm/msm/msm_smmu.c diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index f8984d673ff0..7b613a55fdc2 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -51,6 +51,7 @@ msm-y := \ msm_gem_submit.o \ msm_gpu.o \ msm_iommu.o \ + msm_smmu.o \ msm_perf.o \ msm_rd.o \ msm_ringbuffer.o diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index b532faa8026d..f7aebf5516ce 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -595,7 +595,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdelay(16); if (config->platform.iommu) { - mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); + mmu = msm_smmu_new(&pdev->dev, + MSM_SMMU_DOMAIN_UNSECURE); if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); dev_err(dev->dev, "failed to init iommu: %d\n", ret); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index c76cc853b08a..6fa56abf0c78 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -295,16 +295,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, if (iommu_present(&platform_bus_type)) { struct msm_mmu *mmu = priv->mmus[id]; - uint32_t offset; if (WARN_ON(!mmu)) return -EINVAL; - offset = (uint32_t)mmap_offset(obj); - ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, - obj->size, IOMMU_READ | IOMMU_WRITE); - msm_obj->domain[id].iova = offset; + if (obj->import_attach && mmu->funcs->map_dma_buf) { + ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt, + obj->import_attach->dmabuf, + DMA_BIDIRECTIONAL); + if (ret) { + DRM_ERROR("Unable to map dma buf\n"); + return ret; + } + } + msm_obj->domain[id].iova = + sg_dma_address(msm_obj->sgt->sgl); } else { + WARN_ONCE(1, "physical address being used\n"); msm_obj->domain[id].iova = physaddr(obj); } } @@ -524,8 +531,11 @@ void msm_gem_free_object(struct drm_gem_object *obj) for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { struct msm_mmu *mmu = priv->mmus[id]; if (mmu && msm_obj->domain[id].iova) { - uint32_t offset = msm_obj->domain[id].iova; - mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); + if (obj->import_attach && mmu->funcs->unmap_dma_buf) { + mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt, + obj->import_attach->dmabuf, + DMA_BIDIRECTIONAL); + } } } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 6fc59bfeedeb..2e4ae6b1c5d0 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ 
b/drivers/gpu/drm/msm/msm_gem.h @@ -53,8 +53,7 @@ struct msm_gem_object { void *vaddr; struct { - // XXX - uint32_t iova; + dma_addr_t iova; } domain[NUM_DOMAINS]; /* normally (resv == &_resv) except for imported bo's */ diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 7cd88d9dc155..6d2f5627bfae 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -20,6 +20,14 @@ #include +struct msm_mmu; +struct msm_gpu; + +enum msm_mmu_domain_type { + MSM_SMMU_DOMAIN_UNSECURE, + MSM_SMMU_DOMAIN_MAX, +}; + struct msm_mmu_funcs { int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); void (*detach)(struct msm_mmu *mmu, const char **names, int cnt); @@ -27,6 +35,14 @@ struct msm_mmu_funcs { unsigned len, int prot); int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, unsigned len); + int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt, + enum dma_data_direction dir); + void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt, + enum dma_data_direction dir); + int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt, + struct dma_buf *dma_buf, int dir); + void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt, + struct dma_buf *dma_buf, int dir); void (*destroy)(struct msm_mmu *mmu); }; @@ -44,5 +60,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); +struct msm_mmu *msm_smmu_new(struct device *dev, + enum msm_mmu_domain_type domain); #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c new file mode 100644 index 000000000000..29b3ad106b15 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -0,0 +1,439 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include +#include + +#include "msm_drv.h" +#include "msm_mmu.h" + +struct msm_smmu_client { + struct device *dev; + struct dma_iommu_mapping *mmu_mapping; + bool domain_attached; +}; + +struct msm_smmu { + struct msm_mmu base; + struct device *client_dev; + struct msm_smmu_client client; +}; + +struct msm_smmu_domain { + const char *label; + size_t va_start; + size_t va_size; + bool secure; +}; + +#define to_msm_smmu(x) container_of(x, struct msm_smmu, base) +#define msm_smmu_to_client(smmu) (&smmu->client) + +static int _msm_smmu_create_mapping(struct msm_smmu_client *client, + const struct msm_smmu_domain *domain); + +static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + int rc = 0; + + /* domain attach only once */ + if (client->domain_attached) + return 0; + + rc = arm_iommu_attach_device(client->dev, + client->mmu_mapping); + if (rc) { + dev_err(client->dev, "iommu attach dev failed (%d)\n", + rc); + return rc; + } + + client->domain_attached = true; + + dev_dbg(client->dev, "iommu domain attached\n"); + + return 0; +} + +static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt) +{ + DBG("detaching"); +} + +static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova, + struct sg_table *sgt, unsigned len, int prot) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = 
msm_smmu_to_client(smmu); + struct iommu_domain *domain; + struct scatterlist *sg; + unsigned int da = iova; + unsigned int i, j; + int ret; + + if (!client) + return -ENODEV; + + domain = client->mmu_mapping->domain; + if (!domain || !sgt) + return -EINVAL; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + u32 pa = sg_phys(sg) - sg->offset; + size_t bytes = sg->length + sg->offset; + + VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + + ret = iommu_map(domain, da, pa, bytes, prot); + if (ret) + goto fail; + + da += bytes; + } + + return 0; + +fail: + da = iova; + + for_each_sg(sgt->sgl, sg, i, j) { + size_t bytes = sg->length + sg->offset; + + iommu_unmap(domain, da, bytes); + da += bytes; + } + return ret; +} + +static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt, + enum dma_data_direction dir) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + int ret; + + ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir); + if (ret != sgt->nents) + return -ENOMEM; + + return 0; +} + +static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt, + enum dma_data_direction dir) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + + dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir); +} + +static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova, + struct sg_table *sgt, unsigned len) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + struct iommu_domain *domain; + struct scatterlist *sg; + unsigned int da = iova; + int i; + + if (!client) + return -ENODEV; + + domain = client->mmu_mapping->domain; + if (!domain || !sgt) + return -EINVAL; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = sg->length + sg->offset; + size_t unmapped; + + unmapped = iommu_unmap(domain, da, bytes); + if (unmapped < bytes) + return unmapped; + + VERB("unmap[%d]: %08x(%zx)", 
i, iova, bytes); + + WARN_ON(!PAGE_ALIGNED(bytes)); + + da += bytes; + } + + return 0; +} + +static void msm_smmu_destroy(struct msm_mmu *mmu) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct platform_device *pdev = to_platform_device(smmu->client_dev); + + platform_device_unregister(pdev); + kfree(smmu); +} + +static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt, + struct dma_buf *dma_buf, int dir) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + int ret; + + ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir, + dma_buf); + if (ret != sgt->nents) { + DRM_ERROR("dma map sg failed\n"); + return -ENOMEM; + } + + return 0; +} + + +static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt, + struct dma_buf *dma_buf, int dir) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + + msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf); +} + +static const struct msm_mmu_funcs funcs = { + .attach = msm_smmu_attach, + .detach = msm_smmu_detach, + .map = msm_smmu_map, + .map_sg = msm_smmu_map_sg, + .unmap_sg = msm_smmu_unmap_sg, + .unmap = msm_smmu_unmap, + .map_dma_buf = msm_smmu_map_dma_buf, + .unmap_dma_buf = msm_smmu_unmap_dma_buf, + .destroy = msm_smmu_destroy, +}; + +static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { + [MSM_SMMU_DOMAIN_UNSECURE] = { + .label = "mdp_ns", + .va_start = SZ_1M, + .va_size = SZ_2G, + }, +}; + +static const struct of_device_id msm_smmu_dt_match[] = { + { .compatible = "qcom,smmu_mdp_unsec", + .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] }, + {} +}; +MODULE_DEVICE_TABLE(of, msm_smmu_dt_match); + +static struct device *msm_smmu_device_create(struct device *dev, + enum msm_mmu_domain_type domain, + struct msm_smmu *smmu) +{ + struct device_node *child; + struct platform_device *pdev; + int i; + const char *compat = NULL; + + for 
(i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) { + if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) { + compat = msm_smmu_dt_match[i].compatible; + break; + } + } + + if (!compat) { + DRM_ERROR("unable to find matching domain for %d\n", domain); + return ERR_PTR(-ENOENT); + } + DRM_INFO("found domain %d compat: %s\n", domain, compat); + + if (domain == MSM_SMMU_DOMAIN_UNSECURE) { + int rc; + + smmu->client.dev = dev; + rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu), + msm_smmu_dt_match[i].data); + if (rc) + return ERR_PTR(rc); + + return NULL; + } + + child = of_find_compatible_node(dev->of_node, NULL, compat); + if (!child) { + DRM_ERROR("unable to find compatible node for %s\n", compat); + return ERR_PTR(-ENODEV); + } + + pdev = of_platform_device_create(child, NULL, dev); + if (!pdev) { + DRM_ERROR("unable to create smmu platform dev for domain %d\n", + domain); + return ERR_PTR(-ENODEV); + } + + return &pdev->dev; +} + +struct msm_mmu *msm_smmu_new(struct device *dev, + enum msm_mmu_domain_type domain) +{ + struct msm_smmu *smmu; + struct device *client_dev; + + smmu = kzalloc(sizeof(*smmu), GFP_KERNEL); + if (!smmu) + return ERR_PTR(-ENOMEM); + + client_dev = msm_smmu_device_create(dev, domain, smmu); + if (IS_ERR(client_dev)) + return (void *)client_dev ? 
: ERR_PTR(-ENODEV); + + smmu->client_dev = client_dev; + msm_mmu_init(&smmu->base, dev, &funcs); + + return &smmu->base; +} + +static int _msm_smmu_create_mapping(struct msm_smmu_client *client, + const struct msm_smmu_domain *domain) +{ + int disable_htw = 1; + int rc; + + client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type, + domain->va_start, domain->va_size); + if (IS_ERR(client->mmu_mapping)) { + dev_err(client->dev, + "iommu create mapping failed for domain=%s\n", + domain->label); + return PTR_ERR(client->mmu_mapping); + } + + rc = iommu_domain_set_attr(client->mmu_mapping->domain, + DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); + if (rc) { + dev_err(client->dev, "couldn't disable coherent HTW\n"); + goto error; + } + + if (domain->secure) { + int secure_vmid = VMID_CP_PIXEL; + + rc = iommu_domain_set_attr(client->mmu_mapping->domain, + DOMAIN_ATTR_SECURE_VMID, &secure_vmid); + if (rc) { + dev_err(client->dev, "couldn't set secure pix vmid\n"); + goto error; + } + } + + return 0; + +error: + arm_iommu_release_mapping(client->mmu_mapping); + return rc; +} + +/** + * msm_smmu_probe() + * @pdev: platform device + * + * Each smmu context acts as a separate device and the context banks are + * configured with a VA range. + * Registers the clks as each context bank has its own clks, for which voting + * has to be done everytime before using that context bank. 
+ */ +static int msm_smmu_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct msm_smmu_client *client; + const struct msm_smmu_domain *domain; + int rc; + + match = of_match_device(msm_smmu_dt_match, &pdev->dev); + if (!match || !match->data) { + dev_err(&pdev->dev, "probe failed as match data is invalid\n"); + return -EINVAL; + } + + domain = match->data; + if (!domain) { + dev_err(&pdev->dev, "no matching device found\n"); + return -EINVAL; + } + + DRM_INFO("probing device %s\n", match->compatible); + + client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->dev = &pdev->dev; + + rc = _msm_smmu_create_mapping(client, domain); + platform_set_drvdata(pdev, client); + + return rc; +} + +static int msm_smmu_remove(struct platform_device *pdev) +{ + struct msm_smmu_client *client; + + client = platform_get_drvdata(pdev); + if (client->domain_attached) { + arm_iommu_detach_device(client->dev); + client->domain_attached = false; + } + arm_iommu_release_mapping(client->mmu_mapping); + + return 0; +} + +static struct platform_driver msm_smmu_driver = { + .probe = msm_smmu_probe, + .remove = msm_smmu_remove, + .driver = { + .name = "msmdrm_smmu", + .of_match_table = msm_smmu_dt_match, + }, +}; + +static int __init msm_smmu_driver_init(void) +{ + int ret; + + ret = platform_driver_register(&msm_smmu_driver); + if (ret) + pr_err("mdss_smmu_register_driver() failed!\n"); + + return ret; +} +module_init(msm_smmu_driver_init); + +static void __exit msm_smmu_driver_cleanup(void) +{ + platform_driver_unregister(&msm_smmu_driver); +} +module_exit(msm_smmu_driver_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM SMMU driver"); -- GitLab From 729595c8f23ae8f472db01e8031aed5d8d5a3167 Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Fri, 24 Jun 2016 17:56:13 -0400 Subject: [PATCH 008/310] drm/msm/sde: add basic sde object enumeration Implement basic crtc, encoder, and plane support 
that implements DRM atomic call sequence, queries hardware, and enumerates resources as DRM objects. This includes basic color format support, as well as querying of displays to create appropriate encoders, crtcs. Also includes supporting clock control logic. Change-Id: I25a9c74b92262d81986b3441c89d51bff2d14dbb Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/Makefile | 3 +- drivers/gpu/drm/msm/sde/sde_crtc.c | 535 ++++++++++++- drivers/gpu/drm/msm/sde/sde_encoder.c | 552 +++++++++++++- drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c | 8 +- drivers/gpu/drm/msm/sde/sde_hw_intf.c | 22 +- drivers/gpu/drm/msm/sde/sde_hw_intf.h | 3 + drivers/gpu/drm/msm/sde/sde_hw_lm.c | 4 +- drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c | 54 +- drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h | 44 +- drivers/gpu/drm/msm/sde/sde_hw_mdss.h | 43 +- drivers/gpu/drm/msm/sde/sde_hw_sspp.c | 56 +- drivers/gpu/drm/msm/sde/sde_hw_sspp.h | 18 +- drivers/gpu/drm/msm/sde/sde_kms.c | 243 +++++- drivers/gpu/drm/msm/sde/sde_kms.h | 26 +- drivers/gpu/drm/msm/sde/sde_mdp_formats.c | 134 ++++ drivers/gpu/drm/msm/sde/sde_mdp_formats.h | 123 +-- drivers/gpu/drm/msm/sde/sde_plane.c | 709 +++++++++++++++++- 17 files changed, 2244 insertions(+), 333 deletions(-) create mode 100644 drivers/gpu/drm/msm/sde/sde_mdp_formats.c diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 7b613a55fdc2..549b534a3f4d 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -86,4 +86,5 @@ obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \ sde/sde_hw_mdp_util.o \ sde/sde_hw_sspp.o \ sde/sde_hw_wb.o \ - sde/sde_hw_pingpong.o + sde/sde_hw_pingpong.o \ + sde/sde_mdp_formats.o diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 4812a5fa51b7..b6ec66954e67 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -10,6 +10,7 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include @@ -17,73 +18,433 @@ #include "sde_kms.h" #include "sde_hw_lm.h" -#include "sde_hw_mdss.h" +#include "sde_hw_mdp_ctl.h" + +#define CRTC_DUAL_MIXERS 2 +#define PENDING_FLIP 2 + +#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages) + +struct sde_crtc_mixer { + struct sde_hw_dspp *hw_dspp; + struct sde_hw_mixer *hw_lm; + struct sde_hw_ctl *hw_ctl; + u32 flush_mask; +}; struct sde_crtc { struct drm_crtc base; char name[8]; struct drm_plane *plane; struct drm_plane *planes[8]; + struct drm_encoder *encoder; int id; bool enabled; - enum sde_lm mixer; - enum sde_ctl ctl_path; + + spinlock_t lm_lock; /* protect registers */ + + /* HW Resources reserved for the crtc */ + u32 num_ctls; + u32 num_mixers; + struct sde_crtc_mixer mixer[CRTC_DUAL_MIXERS]; + + /*if there is a pending flip, these will be non-null */ + struct drm_pending_vblank_event *event; }; #define to_sde_crtc(x) container_of(x, struct sde_crtc, base) -static void sde_crtc_destroy(struct drm_crtc *crtc) +static struct sde_kms *get_kms(struct drm_crtc *crtc) +{ + struct msm_drm_private *priv = crtc->dev->dev_private; + + return to_sde_kms(to_mdp_kms(priv->kms)); +} + +static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx, + void __iomem *addr, + struct sde_mdss_cfg *m) +{ + /* + * This module keeps track of the requested hw resources state, + * if the requested resource is being used it returns NULL, + * otherwise it returns the hw driver struct + */ + return sde_hw_ctl_init(idx, addr, m); +} + +static inline struct sde_hw_mixer *sde_crtc_rm_get_mixer(enum sde_lm idx, + void __iomem *addr, + struct sde_mdss_cfg *m) { + /* + * This module keeps track of the requested hw resources state, + * if the requested resource is being used it returns NULL, + * otherwise it returns the hw driver struct + */ + return sde_hw_lm_init(idx, addr, m); +} + +static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc, + struct drm_encoder 
*encoder) +{ + /* + * Assign CRTC resources + * num_ctls; + * num_mixers; + * sde_lm mixer[CRTC_MAX_PIPES]; + * sde_ctl ctl[CRTC_MAX_PIPES]; + */ struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct sde_kms *kms = get_kms(crtc); + enum sde_lm lm_id[CRTC_DUAL_MIXERS]; + enum sde_ctl ctl_id[CRTC_DUAL_MIXERS]; + int i; - drm_crtc_cleanup(crtc); - kfree(sde_crtc); + if (!kms) { + DBG("[%s] invalid kms\n", __func__); + return -EINVAL; + } + + if (!kms->mmio) + return -EINVAL; + + /* + * simple check validate against catalog + */ + sde_crtc->num_ctls = 1; + sde_crtc->num_mixers = 1; + ctl_id[0] = CTL_0; + lm_id[0] = LM_0; + + /* + * need to also enable MDP core clock and AHB CLK + * before touching HW driver + */ + DBG("%s Enable clocks\n", __func__); + sde_enable(kms); + for (i = 0; i < sde_crtc->num_ctls; i++) { + sde_crtc->mixer[i].hw_ctl = sde_crtc_rm_get_ctl_path(ctl_id[i], + kms->mmio, kms->catalog); + if (!sde_crtc->mixer[i].hw_ctl) { + DBG("[%s], Invalid ctl_path", __func__); + return -EACCES; + } + } + + for (i = 0; i < sde_crtc->num_mixers; i++) { + sde_crtc->mixer[i].hw_lm = sde_crtc_rm_get_mixer(lm_id[i], + kms->mmio, kms->catalog); + if (!sde_crtc->mixer[i].hw_lm) { + DBG("[%s], Invalid ctl_path", __func__); + return -EACCES; + } + } + /* + * need to disable MDP core clock and AHB CLK + */ + sde_disable(kms); + return 0; } -static void sde_crtc_dpms(struct drm_crtc *crtc, int mode) +static void sde_crtc_destroy(struct drm_crtc *crtc) { + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + + DBG(""); + drm_crtc_cleanup(crtc); + kfree(sde_crtc); } static bool sde_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + DBG(""); return true; } -static int sde_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) +static void sde_crtc_mode_set_nofb(struct drm_crtc *crtc) { - return 0; + struct 
sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct sde_crtc_mixer *mixer = sde_crtc->mixer; + struct drm_device *dev = crtc->dev; + struct sde_hw_mixer *lm; + unsigned long flags; + struct drm_display_mode *mode; + struct sde_hw_mixer_cfg cfg; + u32 mixer_width; + int i; + int rc; + + DBG(""); + if (WARN_ON(!crtc->state)) + return; + + mode = &crtc->state->adjusted_mode; + + DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + sde_crtc->name, mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + /* + * reserve mixer(s) if not already avaialable + * if dual mode, mixer_width = half mode width + * program mode configuration on mixer(s) + */ + if ((sde_crtc->num_ctls == 0) || + (sde_crtc->num_mixers == 0)) { + rc = sde_crtc_reserve_hw_resources(crtc, sde_crtc->encoder); + if (rc) { + dev_err(dev->dev, " error reserving HW resource for this CRTC\n"); + return; + } + } + + if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) + mixer_width = mode->hdisplay >> 1; + else + mixer_width = mode->hdisplay; + + spin_lock_irqsave(&sde_crtc->lm_lock, flags); + + for (i = 0; i < sde_crtc->num_mixers; i++) { + lm = mixer[i].hw_lm; + cfg.out_width = mixer_width; + cfg.out_height = mode->vdisplay; + cfg.right_mixer = (i == 0) ? 
false : true; + cfg.flags = 0; + lm->ops.setup_mixer_out(lm, &cfg); + } + + spin_unlock_irqrestore(&sde_crtc->lm_lock, flags); } -static void sde_crtc_prepare(struct drm_crtc *crtc) +static void sde_crtc_get_blend_cfg(struct sde_hw_blend_cfg *cfg, + struct sde_plane_state *pstate) { + const struct mdp_format *format; + struct drm_plane *plane; + + format = to_mdp_format( + msm_framebuffer_format(pstate->base.fb)); + plane = pstate->base.plane; + + cfg->fg.alpha_sel = ALPHA_FG_CONST; + cfg->bg.alpha_sel = ALPHA_BG_CONST; + cfg->fg.const_alpha = pstate->alpha; + cfg->bg.const_alpha = 0xFF - pstate->alpha; + + if (format->alpha_enable && pstate->premultiplied) { + cfg->fg.alpha_sel = ALPHA_FG_CONST; + cfg->bg.alpha_sel = ALPHA_FG_PIXEL; + if (pstate->alpha != 0xff) { + cfg->bg.const_alpha = pstate->alpha; + cfg->bg.inv_alpha_sel = 1; + cfg->bg.mod_alpha = 1; + } else { + cfg->bg.inv_mode_alpha = 1; + } + } else if (format->alpha_enable) { + cfg->fg.alpha_sel = ALPHA_FG_PIXEL; + cfg->bg.alpha_sel = ALPHA_FG_PIXEL; + if (pstate->alpha != 0xff) { + cfg->bg.const_alpha = pstate->alpha; + cfg->fg.mod_alpha = 1; + cfg->bg.inv_alpha_sel = 1; + cfg->bg.mod_alpha = 1; + cfg->bg.inv_mode_alpha = 1; + } else { + cfg->bg.inv_mode_alpha = 1; + } + } } -static void sde_crtc_commit(struct drm_crtc *crtc) +static void blend_setup(struct drm_crtc *crtc) { + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct sde_crtc_mixer *mixer = sde_crtc->mixer; + struct drm_plane *plane; + struct sde_plane_state *pstate, *pstates[SDE_STAGE_MAX] = {0}; + struct sde_hw_stage_cfg stage_cfg; + struct sde_hw_blend_cfg blend; + struct sde_hw_ctl *ctl; + struct sde_hw_mixer *lm; + u32 flush_mask = 0; + unsigned long flags; + int i, j, plane_cnt = 0; + + spin_lock_irqsave(&sde_crtc->lm_lock, flags); + + /* ctl could be reserved already */ + if (!sde_crtc->num_ctls) + goto out; + + /* initialize stage cfg */ + memset(&stage_cfg, 0, sizeof(stage_cfg)); + memset(&blend, 0, sizeof(blend)); + + /* Collect 
all plane information */ + drm_atomic_crtc_for_each_plane(plane, crtc) { + pstate = to_sde_plane_state(plane->state); + pstates[pstate->stage] = pstate; + plane_cnt++; + for (i = 0; i < sde_crtc->num_mixers; i++) { + stage_cfg.stage[pstate->stage][i] = + sde_plane_pipe(plane); + + /* Cache the flushmask for this layer + * sourcesplit is always enabled, so this layer will + * be staged on both the mixers + */ + ctl = mixer[i].hw_ctl; + ctl->ops.get_bitmask_sspp(ctl, &flush_mask, + sde_plane_pipe(plane)); + } + } + + /* + * If there is no base layer, enable border color. + * currently border color is always black + */ + if ((stage_cfg.stage[SDE_STAGE_BASE][0] == SSPP_NONE) && + plane_cnt) { + stage_cfg.border_enable = 1; + DBG("Border Color is enabled\n"); + } + + /* Program hw */ + for (i = 0; i < sde_crtc->num_mixers; i++) { + if (!mixer[i].hw_lm) + continue; + + if (!mixer[i].hw_ctl) + continue; + + ctl = mixer[i].hw_ctl; + lm = mixer[i].hw_lm; + + /* stage config */ + ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, + &stage_cfg); + /* stage config flush mask */ + mixer[i].flush_mask = flush_mask; + /* get the flush mask for mixer */ + ctl->ops.get_bitmask_mixer(ctl, &mixer[i].flush_mask, + mixer[i].hw_lm->idx); + + /* blend config */ + for (j = SDE_STAGE_0; j < SDE_STAGE_MAX; j++) { + if (!pstates[j]) + continue; + sde_crtc_get_blend_cfg(&blend, pstates[j]); + blend.fg.alpha_sel = ALPHA_FG_CONST; + blend.bg.alpha_sel = ALPHA_BG_CONST; + blend.fg.const_alpha = pstate->alpha; + blend.bg.const_alpha = 0xFF - pstate->alpha; + lm->ops.setup_blend_config(lm, j, &blend); + } + } +out: + spin_unlock_irqrestore(&sde_crtc->lm_lock, flags); } -static int sde_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static void request_pending(struct drm_crtc *crtc, u32 pending) +{ + DBG(""); +} +/** + * Flush the CTL PATH + */ +static u32 crtc_flush_all(struct drm_crtc *crtc) { + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct 
sde_hw_ctl *ctl; + int i; + + DBG(""); + + for (i = 0; i < sde_crtc->num_ctls; i++) { + /* + * Query flush_mask from encoder + * and append to the ctl_path flush_mask + */ + ctl = sde_crtc->mixer[i].hw_ctl; + ctl->ops.get_bitmask_intf(ctl, + &(sde_crtc->mixer[i].flush_mask), + INTF_1); + ctl->ops.setup_flush(ctl, + sde_crtc->mixer[i].flush_mask); + } + return 0; } -static void sde_crtc_load_lut(struct drm_crtc *crtc) +static void sde_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + DBG(""); + + WARN_ON(sde_crtc->event); + + spin_lock_irqsave(&dev->event_lock, flags); + sde_crtc->event = crtc->state->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* + * If no CTL has been allocated in sde_crtc_atomic_check(), + * it means we are trying to flush a CRTC whose state is disabled: + * nothing else needs to be done. + */ + if (unlikely(!sde_crtc->num_ctls)) + return; + + blend_setup(crtc); + + /* + * PP_DONE irq is only used by command mode for now. + * It is better to request pending before FLUSH and START trigger + * to make sure no pp_done irq missed. + * This is safe because no pp_done will happen before SW trigger + * in command mode. 
+ */ } -static int sde_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *new_fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static void sde_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { - return 0; + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + DBG(""); + + WARN_ON(sde_crtc->event); + + spin_lock_irqsave(&dev->event_lock, flags); + sde_crtc->event = crtc->state->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* + * If no CTL has been allocated in sde_crtc_atomic_check(), + * it means we are trying to flush a CRTC whose state is disabled: + * nothing else needs to be done. + */ + if (unlikely(!sde_crtc->num_ctls)) + return; + + crtc_flush_all(crtc); + + request_pending(crtc, PENDING_FLIP); } static int sde_crtc_set_property(struct drm_crtc *crtc, @@ -92,21 +453,111 @@ static int sde_crtc_set_property(struct drm_crtc *crtc, return -EINVAL; } +static int sde_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file, uint32_t handle, + uint32_t width, uint32_t height) +{ + return 0; +} + +static int sde_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + return 0; +} + +static void sde_crtc_disable(struct drm_crtc *crtc) +{ + DBG(""); +} + +static void sde_crtc_enable(struct drm_crtc *crtc) +{ + DBG(""); +} + +struct plane_state { + struct drm_plane *plane; + struct sde_plane_state *state; +}; + +static int pstate_cmp(const void *a, const void *b) +{ + struct plane_state *pa = (struct plane_state *)a; + struct plane_state *pb = (struct plane_state *)b; + + return pa->state->zpos - pb->state->zpos; +} + +static int sde_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct sde_kms *sde_kms = get_kms(crtc); + struct drm_plane *plane; + struct drm_device *dev = crtc->dev; + struct plane_state pstates[SDE_STAGE_MAX]; + int 
max_stages = CRTC_HW_MIXER_MAXSTAGES(sde_kms->catalog, 0); + int cnt = 0, i; + + DBG("%s: check", sde_crtc->name); + + /* verify that there are not too many planes attached to crtc + * and that we don't have conflicting mixer stages: + */ + drm_atomic_crtc_state_for_each_plane(plane, state) { + struct drm_plane_state *pstate; + + if (cnt >= (max_stages)) { + dev_err(dev->dev, "too many planes!\n"); + return -EINVAL; + } + + pstate = state->state->plane_states[drm_plane_index(plane)]; + + /* plane might not have changed, in which case take + * current state: + */ + if (!pstate) + pstate = plane->state; + pstates[cnt].plane = plane; + pstates[cnt].state = to_sde_plane_state(pstate); + + cnt++; + } + + /* assign a stage based on sorted zpos property */ + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + + for (i = 0; i < cnt; i++) { + pstates[i].state->stage = SDE_STAGE_0 + i; + DBG("%s: assign pipe %d on stage=%d", sde_crtc->name, + sde_plane_pipe(pstates[i].plane), + pstates[i].state->stage); + } + + return 0; +} + static const struct drm_crtc_funcs sde_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, + .set_config = drm_atomic_helper_set_config, .destroy = sde_crtc_destroy, - .page_flip = sde_crtc_page_flip, + .page_flip = drm_atomic_helper_page_flip, .set_property = sde_crtc_set_property, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .cursor_set = sde_crtc_cursor_set, + .cursor_move = sde_crtc_cursor_move, }; static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = { - .dpms = sde_crtc_dpms, .mode_fixup = sde_crtc_mode_fixup, - .mode_set = sde_crtc_mode_set, - .prepare = sde_crtc_prepare, - .commit = sde_crtc_commit, - .mode_set_base = sde_crtc_mode_set_base, - .load_lut = sde_crtc_load_lut, + .mode_set_nofb = sde_crtc_mode_set_nofb, + .disable = sde_crtc_disable, + .enable = sde_crtc_enable, + .atomic_check 
= sde_crtc_atomic_check, + .atomic_begin = sde_crtc_atomic_begin, + .atomic_flush = sde_crtc_atomic_flush, }; uint32_t sde_crtc_vblank(struct drm_crtc *crtc) @@ -118,20 +569,20 @@ void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) { } -void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane) +static void sde_crtc_install_properties(struct drm_crtc *crtc, + struct drm_mode_object *obj) { } -void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane) -{ -} +/* initialize crtc */ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_encoder *encoder, struct drm_plane *plane, int id) { struct drm_crtc *crtc = NULL; struct sde_crtc *sde_crtc; + int rc; sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL); if (!sde_crtc) @@ -140,9 +591,21 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, crtc = &sde_crtc->base; sde_crtc->id = id; + sde_crtc->encoder = encoder; - /* find out if we need one or two lms */ + sde_crtc_install_properties(crtc, &crtc->base); + + drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs); drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs); + plane->crtc = crtc; + + rc = sde_crtc_reserve_hw_resources(crtc, encoder); + if (rc) { + dev_err(dev->dev, " error reserving HW resource for this CRTC\n"); + return ERR_PTR(-EINVAL); + } + + DBG("%s: Successfully initialized crtc\n", __func__); return crtc; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 3c28e319c580..283d33b70b13 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -14,82 +14,566 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_intf.h" +#include "sde_hw_mdp_ctl.h" +#include "sde_mdp_formats.h" + +#include "../dsi-staging/dsi_display.h" + +#define DBG(fmt, ...) 
DRM_DEBUG(fmt"\n", ##__VA_ARGS__) + struct sde_encoder { struct drm_encoder base; - int intf; + spinlock_t intf_lock; + bool enabled; + uint32_t bus_scaling_client; + struct sde_hw_intf *hw_intf; + struct sde_hw_ctl *hw_ctl; + int drm_mode_enc; + + void (*vblank_callback)(void *); + void *vblank_callback_data; + + struct mdp_irq vblank_irq; }; #define to_sde_encoder(x) container_of(x, struct sde_encoder, base) -static void sde_encoder_destroy(struct drm_encoder *encoder) +static struct sde_kms *get_kms(struct drm_encoder *drm_enc) { - struct sde_encoder *sde_encoder = to_sde_encoder(encoder); + struct msm_drm_private *priv = drm_enc->dev->dev_private; - drm_encoder_cleanup(encoder); - kfree(sde_encoder); + return to_sde_kms(to_mdp_kms(priv->kms)); } -static const struct drm_encoder_funcs sde_encoder_funcs = { - .destroy = sde_encoder_destroy, +#ifdef CONFIG_QCOM_BUS_SCALING +#include +#include +#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ + { \ + .src = MSM_BUS_MASTER_MDP_PORT0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = (ab_val), \ + .ib = (ib_val), \ + } + +static struct msm_bus_vectors mdp_bus_vectors[] = { + MDP_BUS_VECTOR_ENTRY(0, 0), + MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), +}; +static struct msm_bus_paths mdp_bus_usecases[] = { { + .num_paths = 1, + .vectors = + &mdp_bus_vectors[0], + }, { + .num_paths = 1, + .vectors = + &mdp_bus_vectors[1], + } +}; + +static struct msm_bus_scale_pdata mdp_bus_scale_table = { + .usecase = mdp_bus_usecases, + .num_usecases = ARRAY_SIZE(mdp_bus_usecases), + .name = "mdss_mdp", }; -static void sde_encoder_dpms(struct drm_encoder *encoder, int mode) +static void bs_init(struct sde_encoder *sde_enc) { + sde_enc->bus_scaling_client = + msm_bus_scale_register_client(&mdp_bus_scale_table); + DBG("bus scale client: %08x", sde_enc->bus_scaling_client); +} + +static void bs_fini(struct sde_encoder *sde_enc) +{ + if (sde_enc->bus_scaling_client) { + msm_bus_scale_unregister_client(sde_enc->bus_scaling_client); + 
sde_enc->bus_scaling_client = 0; + } } -static bool sde_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void bs_set(struct sde_encoder *sde_enc, int idx) { + if (sde_enc->bus_scaling_client) { + DBG("set bus scaling: %d", idx); + idx = 1; + msm_bus_scale_client_update_request(sde_enc->bus_scaling_client, + idx); + } +} +#else +static void bs_init(struct sde_encoder *sde_enc) +{ +} + +static void bs_fini(struct sde_encoder *sde_enc) +{ +} + +static void bs_set(struct sde_encoder *sde_enc, int idx) +{ +} +#endif + +static bool sde_encoder_mode_fixup(struct drm_encoder *drm_enc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + DBG(""); return true; } -static void sde_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void sde_encoder_mode_set(struct drm_encoder *drm_enc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { + + struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + struct intf_timing_params p = {0}; + uint32_t hsync_polarity = 0, vsync_polarity = 0; + struct sde_mdp_format_params *sde_fmt_params = NULL; + u32 fmt_fourcc = DRM_FORMAT_RGB888, fmt_mod = 0; + unsigned long lock_flags; + struct sde_hw_intf_cfg intf_cfg = {0}; + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + /* DSI controller cannot handle active-low sync signals. 
*/ + if (sde_enc->hw_intf->cap->type != INTF_DSI) { + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + hsync_polarity = 1; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + vsync_polarity = 1; + } + + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + /* + * if (sde_enc->hw->cap->type == INTF_EDP) { + * display_v_start += mode->htotal - mode->hsync_start; + * display_v_end -= mode->hsync_start - mode->hdisplay; + * } + */ + + /* + * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html + * Active Region Front Porch Sync Back Porch + * <---------------------><----------------><---------><--------------> + * <--- [hv]display -----> + * <----------- [hv]sync_start ------------> + * <------------------- [hv]sync_end -----------------> + * <------------------------------ [hv]total -------------------------> + */ + + sde_fmt_params = sde_mdp_get_format_params(fmt_fourcc, fmt_mod); + + p.width = mode->hdisplay; /* active width */ + p.height = mode->vdisplay; /* active height */ + p.xres = p.width; /* Display panel width */ + p.yres = p.height; /* Display panel height */ + p.h_back_porch = mode->htotal - mode->hsync_end; + p.h_front_porch = mode->hsync_start - mode->hdisplay; + p.v_back_porch = mode->vtotal - mode->vsync_end; + p.v_front_porch = mode->vsync_start - mode->vdisplay; + p.hsync_pulse_width = mode->hsync_end - mode->hsync_start; + p.vsync_pulse_width = mode->vsync_end - mode->vsync_start; + p.hsync_polarity = hsync_polarity; + p.vsync_polarity = vsync_polarity; + p.border_clr = 0; + p.underflow_clr = 0xff; + p.hsync_skew = mode->hskew; + + intf_cfg.intf = sde_enc->hw_intf->idx; + intf_cfg.wb = SDE_NONE; + + spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); + sde_enc->hw_intf->ops.setup_timing_gen(sde_enc->hw_intf, &p, + sde_fmt_params); + sde_enc->hw_ctl->ops.setup_intf_cfg(sde_enc->hw_ctl, &intf_cfg); + spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); +} + +static void 
sde_encoder_wait_for_vblank(struct sde_encoder *sde_enc) +{ + struct sde_kms *sde_kms = get_kms(&sde_enc->base); + struct mdp_kms *mdp_kms = &sde_kms->base; + + DBG(""); + mdp_irq_wait(mdp_kms, sde_enc->vblank_irq.irqmask); +} + +static void sde_encoder_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct sde_encoder *sde_enc = container_of(irq, struct sde_encoder, + vblank_irq); + struct intf_status status = { 0 }; + unsigned long lock_flags; + + spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); + if (sde_enc->vblank_callback) + sde_enc->vblank_callback(sde_enc->vblank_callback_data); + spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); + + sde_enc->hw_intf->ops.get_status(sde_enc->hw_intf, &status); +} + +static void sde_encoder_disable(struct drm_encoder *drm_enc) +{ + struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + struct sde_kms *sde_kms = get_kms(drm_enc); + struct mdp_kms *mdp_kms = &(sde_kms->base); + unsigned long lock_flags; + + DBG(""); + + if (WARN_ON(!sde_enc->enabled)) + return; + + spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); + sde_enc->hw_intf->ops.enable_timing(sde_enc->hw_intf, 0); + spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + sde_encoder_wait_for_vblank(sde_enc); + + mdp_irq_unregister(mdp_kms, &sde_enc->vblank_irq); + bs_set(sde_enc, 0); + sde_enc->enabled = false; } -static void sde_encoder_prepare(struct drm_encoder *encoder) +static void sde_encoder_enable(struct drm_encoder *drm_enc) { + struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + struct mdp_kms *mdp_kms = &(get_kms(drm_enc)->base); + unsigned long lock_flags; + + DBG(""); + + if (WARN_ON(sde_enc->enabled)) + return; + + bs_set(sde_enc, 1); + spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); + sde_enc->hw_intf->ops.enable_timing(sde_enc->hw_intf, 1); + spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); + sde_enc->enabled = true; + + mdp_irq_register(mdp_kms, &sde_enc->vblank_irq); + DBG("Registered IRQ for intf %d mask 0x%X", sde_enc->hw_intf->idx, + sde_enc->vblank_irq.irqmask); } -static void sde_encoder_commit(struct drm_encoder *encoder) +void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc, + struct sde_encoder_hw_resources *hw_res) { + struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + + DBG(""); + + if (WARN_ON(!hw_res)) + return; + + memset(hw_res, 0, sizeof(*hw_res)); + hw_res->intfs[sde_enc->hw_intf->idx] = true; +} + +static void sde_encoder_destroy(struct drm_encoder *drm_enc) +{ + struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + + DBG(""); + drm_encoder_cleanup(drm_enc); + bs_fini(sde_enc); + kfree(sde_enc->hw_intf); + kfree(sde_enc); } static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = { - .dpms = sde_encoder_dpms, .mode_fixup = sde_encoder_mode_fixup, .mode_set = sde_encoder_mode_set, - .prepare = sde_encoder_prepare, - .commit = sde_encoder_commit, + .disable = sde_encoder_disable, + .enable = sde_encoder_enable, +}; + +static const struct drm_encoder_funcs sde_encoder_funcs = {.destroy = + sde_encoder_destroy, +}; + +static int sde_encoder_setup_hw(struct sde_encoder *sde_enc, + struct sde_kms *sde_kms, + enum sde_intf intf_idx, + enum sde_ctl 
ctl_idx) +{ + int ret = 0; + + DBG(""); + + sde_enc->hw_intf = sde_hw_intf_init(intf_idx, sde_kms->mmio, + sde_kms->catalog); + if (!sde_enc->hw_intf) + return -EINVAL; + + sde_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio, + sde_kms->catalog); + if (!sde_enc->hw_ctl) + return -EINVAL; + + return ret; +} + +static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder *sde_enc, + struct sde_kms *sde_kms, + enum sde_intf intf_idx, + enum sde_ctl ctl_idx) +{ + int ret = 0; + + DBG(""); + + ret = sde_encoder_setup_hw(sde_enc, sde_kms, intf_idx, ctl_idx); + if (!ret) { + sde_enc->vblank_irq.irq = sde_encoder_vblank_irq; + sde_enc->vblank_irq.irqmask = 0x8000000; + } + return ret; +} + +static int sde_encoder_setup_hdmi(struct sde_encoder *sde_enc, + struct sde_kms *sde_kms, int *hdmi_info) +{ + int ret = 0; + enum sde_intf intf_idx = INTF_MAX; + + DBG(""); + + sde_enc->drm_mode_enc = DRM_MODE_ENCODER_TMDS; + + intf_idx = INTF_3; + if (intf_idx == INTF_MAX) + ret = -EINVAL; + + if (!ret) + ret = + sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, + intf_idx, + CTL_2); + + return ret; +} + +static int sde_encoder_setup_dsi(struct sde_encoder *sde_enc, + struct sde_kms *sde_kms, + struct dsi_display_info *dsi_info) +{ + int ret = 0; + int i = 0; + + DBG(""); + + sde_enc->drm_mode_enc = DRM_MODE_ENCODER_DSI; + + if (WARN_ON(dsi_info->num_of_h_tiles > 1)) { + DBG("Dual DSI mode not yet supported"); + ret = -EINVAL; + } + + WARN_ON(dsi_info->num_of_h_tiles != 1); + dsi_info->num_of_h_tiles = 1; + + DBG("dsi_info->num_of_h_tiles %d h_tiled %d dsi_info->h_tile_ids %d ", + dsi_info->num_of_h_tiles, dsi_info->h_tiled, + dsi_info->h_tile_ids[0]); + + for (i = 0; i < !ret && dsi_info->num_of_h_tiles; i++) { + enum sde_intf intf_idx = INTF_1; + enum sde_ctl ctl_idx = CTL_0; + + if (intf_idx == INTF_MAX) { + DBG("Error: could not get the interface id"); + ret = -EINVAL; + } + + /* Get DSI modes, create both VID & CMD Phys Encoders */ + if (!ret) + ret = + 
sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, + intf_idx, + ctl_idx); + } + + return ret; +} + +struct display_probe_info { + enum sde_intf_type type; + struct dsi_display_info dsi_info; + int hdmi_info; }; -/* initialize encoder */ -struct drm_encoder *sde_encoder_init(struct drm_device *dev, int intf) +static struct drm_encoder *sde_encoder_virt_init(struct drm_device *dev, + struct display_probe_info + *display) { - struct drm_encoder *encoder = NULL; - struct sde_encoder *sde_encoder; - int ret; + struct msm_drm_private *priv = dev->dev_private; + struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(priv->kms)); + struct drm_encoder *drm_enc = NULL; + struct sde_encoder *sde_enc = NULL; + int ret = 0; - sde_encoder = kzalloc(sizeof(*sde_encoder), GFP_KERNEL); - if (!sde_encoder) { + DBG(""); + + sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL); + if (!sde_enc) { ret = -ENOMEM; goto fail; } - sde_encoder->intf = intf; - encoder = &sde_encoder->base; + if (display->type == INTF_DSI) { + ret = + sde_encoder_setup_dsi(sde_enc, sde_kms, &display->dsi_info); + } else if (display->type == INTF_HDMI) { + ret = + sde_encoder_setup_hdmi(sde_enc, sde_kms, + &display->hdmi_info); + } else { + DBG("No valid displays found"); + ret = -EINVAL; + } + if (ret) + goto fail; - drm_encoder_init(dev, encoder, &sde_encoder_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &sde_encoder_helper_funcs); + spin_lock_init(&sde_enc->intf_lock); + drm_enc = &sde_enc->base; + drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, + sde_enc->drm_mode_enc); + drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs); + bs_init(sde_enc); - return encoder; + DBG("Created sde_encoder for intf %d", sde_enc->hw_intf->idx); + + return drm_enc; fail: - if (encoder) - sde_encoder_destroy(encoder); + if (drm_enc) + sde_encoder_destroy(drm_enc); return ERR_PTR(ret); } + +static int sde_encoder_probe_hdmi(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct 
drm_encoder *enc = NULL; + struct display_probe_info probe_info = { 0 }; + int ret = 0; + + DBG(""); + + probe_info.type = INTF_HDMI; + + enc = sde_encoder_virt_init(dev, &probe_info); + if (IS_ERR(enc)) + ret = PTR_ERR(enc); + else { + /* Register new encoder with the upper layer */ + priv->encoders[priv->num_encoders++] = enc; + } + return ret; +} + +static int sde_encoder_probe_dsi(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + u32 ret = 0; + u32 i = 0; + u32 num_displays = 0; + + DBG(""); + + num_displays = dsi_display_get_num_of_displays(); + DBG("num_displays %d", num_displays); + for (i = 0; i < num_displays; i++) { + struct dsi_display *dsi = dsi_display_get_display_by_index(i); + + if (dsi_display_is_active(dsi)) { + struct display_probe_info probe_info = { 0 }; + + DBG("display %d/%d is active", i, num_displays); + probe_info.type = INTF_DSI; + + ret = dsi_display_get_info(dsi, &probe_info.dsi_info); + if (WARN_ON(ret)) + DBG("Failed to retrieve dsi panel info"); + else { + struct drm_encoder *enc = + sde_encoder_virt_init(dev, + &probe_info); + if (IS_ERR(enc)) + return PTR_ERR(enc); + + ret = dsi_display_drm_init(dsi, enc); + if (ret) + return ret; + + /* Register new encoder with the upper layer */ + priv->encoders[priv->num_encoders++] = enc; + } + } else + DBG("display %d/%d is not active", i, num_displays); + } + + return ret; +} + +void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, + void (*cb)(void *), void *data) { + struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + unsigned long lock_flags; + + DBG(""); + + spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); + sde_enc->vblank_callback = cb; + sde_enc->vblank_callback_data = data; + spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); +} + +/* encoders init, + * initialize encoder based on displays + */ +void sde_encoders_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + int ret = 0; + + DBG(""); + 
+ /* Start num_encoders at 0, probe functions will increment */ + priv->num_encoders = 0; + ret = sde_encoder_probe_dsi(dev); + if (ret) + DBG("Error probing DSI, %d", ret); + else { + ret = sde_encoder_probe_hdmi(dev); + if (ret) + DBG("Error probing HDMI, %d", ret); + } +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c index 68782de943c1..86673e07d3db 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c @@ -171,13 +171,13 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg) {.id = SSPP_VIG3, .base = 0x0000b000, .features = VIG_17X_MASK, .sblk = &layer}, - {.id = SSPP_RGB0, .base = 0x00001500, + {.id = SSPP_RGB0, .base = 0x00015000, .features = RGB_17X_MASK, .sblk = &layer}, - {.id = SSPP_RGB1, .base = 0x00001700, + {.id = SSPP_RGB1, .base = 0x00017000, .features = RGB_17X_MASK, .sblk = &layer}, - {.id = SSPP_RGB2, .base = 0x00001900, + {.id = SSPP_RGB2, .base = 0x00019000, .features = RGB_17X_MASK, .sblk = &layer}, - {.id = SSPP_RGB3, .base = 0x00001B00, + {.id = SSPP_RGB3, .base = 0x0001B000, .features = RGB_17X_MASK, .sblk = &layer}, {.id = SSPP_DMA0, .base = 0x00025000, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 33d93e7a479b..7a1c37c65366 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -70,7 +70,8 @@ static struct sde_intf_cfg *_intf_offset(enum sde_intf intf, int i; for (i = 0; i < m->intf_count; i++) { - if (intf == m->intf[i].id) { + if ((intf == m->intf[i].id) && + (m->intf[i].type != INTF_NONE)) { b->base_off = addr; b->blk_off = m->intf[i].base; b->hwversion = m->hwversion; @@ -158,13 +159,13 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx, (hsync_polarity << 0); /* HSYNC Polarity */ if (!fmt->is_yuv) - panel_format = (fmt->bits[0] | - (fmt->bits[1] << 2) | - (fmt->bits[2] << 4) | + panel_format = 
(fmt->bits[C0_G_Y] | + (fmt->bits[C1_B_Cb] << 2) | + (fmt->bits[C2_R_Cr] << 4) | (0x21 << 8)); else - /* Interface treats all the pixel data in RGB888 format */ - panel_format |= (COLOR_8BIT | + /* Interface treats all the pixel data in RGB888 format */ + panel_format = (COLOR_8BIT | (COLOR_8BIT << 2) | (COLOR_8BIT << 4) | (0x21 << 8)); @@ -354,8 +355,9 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, return ERR_PTR(-ENOMEM); cfg = _intf_offset(idx, m, addr, &c->hw); - if (!cfg) { + if (IS_ERR_OR_NULL(cfg)) { kfree(c); + pr_err("Error Panic\n"); return ERR_PTR(-EINVAL); } @@ -371,3 +373,9 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, */ return c; } + +void sde_hw_intf_deinit(struct sde_hw_intf *intf) +{ + kfree(intf); +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h index ce5190655dad..28ff5c71163d 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h @@ -15,6 +15,7 @@ #include "sde_hw_catalog.h" #include "sde_hw_mdss.h" +#include "sde_hw_mdp_util.h" struct sde_hw_intf; @@ -100,4 +101,6 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, void __iomem *addr, struct sde_mdss_cfg *m); +void sde_hw_intf_deinit(struct sde_hw_intf *intf); + #endif /*_SDE_HW_INTF_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c index e9aeab797a37..0f055faad4b2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -58,7 +58,7 @@ static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage) return -EINVAL; if ((stage - SDE_STAGE_0) <= sblk->maxblendstages) - return sblk->blendstage_base[stage]; + return sblk->blendstage_base[stage - 1]; else return -EINVAL; } @@ -126,7 +126,7 @@ static void sde_hw_lm_setup_blendcfg(struct sde_hw_mixer *ctx, SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg->const_alpha); - SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, + SDE_REG_WRITE(c, 
LM_BLEND0_BG_ALPHA + stage_off, bg->const_alpha); SDE_REG_WRITE(c, LM_OP_MODE, blend_op); } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c index 56115153bf2e..ab2e5a33c499 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c @@ -15,9 +15,9 @@ #include "sde_hw_mdp_ctl.h" #define CTL_LAYER(lm) \ - (((lm) == 5) ? (0x024) : ((lm) * 0x004)) + (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004)) #define CTL_LAYER_EXT(lm) \ - (0x40 + ((lm) * 0x004)) + (0x40 + (((lm) - LM_0) * 0x004)) #define CTL_TOP 0x014 #define CTL_FLUSH 0x018 #define CTL_START 0x01C @@ -61,15 +61,14 @@ static int _mixer_stages(const struct sde_lm_cfg *mixer, int count, return stages; } -static inline void sde_hw_ctl_setup_flush(struct sde_hw_ctl *ctx, u32 flushbits, - u8 force_start) +static inline void sde_hw_ctl_force_start(struct sde_hw_ctl *ctx) { - struct sde_hw_blk_reg_map *c = &ctx->hw; - - SDE_REG_WRITE(c, CTL_FLUSH, flushbits); + SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1); +} - if (force_start) - SDE_REG_WRITE(c, CTL_START, 0x1); +static inline void sde_hw_ctl_setup_flush(struct sde_hw_ctl *ctx, u32 flushbits) +{ + SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, flushbits); } static inline int sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx, @@ -222,7 +221,7 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx, struct sde_hw_stage_cfg *cfg) { struct sde_hw_blk_reg_map *c = &ctx->hw; - u32 mixercfg, mixercfg_ext; + u32 mixercfg, mixercfg_ext = 0; int i, j; u8 stages; int pipes_per_stage; @@ -237,8 +236,8 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx, else pipes_per_stage = 1; - mixercfg = cfg->border_enable >> 24; /* BORDER_OUT */ -; + mixercfg = cfg->border_enable << 24; /* BORDER_OUT */ + for (i = 0; i <= stages; i++) { for (j = 0; j < pipes_per_stage; j++) { switch (cfg->stage[i][j]) { @@ -298,17 +297,38 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx, 
SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext); } +static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg *cfg) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 intf_cfg = 0; + + intf_cfg |= (cfg->intf & 0xF) << 4; + + if (cfg->wb) + intf_cfg |= (cfg->wb & 0x3) + 2; + + if (cfg->mode_3d) { + intf_cfg |= BIT(19); + intf_cfg |= (cfg->mode_3d - 1) << 20; + } + + SDE_REG_WRITE(c, CTL_TOP, intf_cfg); +} + static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, unsigned long cap) { ops->setup_flush = sde_hw_ctl_setup_flush; + ops->setup_start = sde_hw_ctl_force_start; + ops->setup_intf_cfg = sde_hw_ctl_intf_cfg; ops->reset = sde_hw_ctl_reset_control; + ops->setup_blendstage = sde_hw_ctl_setup_blendstage; ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp; ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer; ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp; ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf; ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm; - ops->setup_blendstage = sde_hw_ctl_setup_blendstage; }; struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, @@ -323,8 +343,9 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, return ERR_PTR(-ENOMEM); cfg = _ctl_offset(idx, m, addr, &c->hw); - if (cfg) { + if (IS_ERR_OR_NULL(cfg)) { kfree(c); + pr_err("Error Panic\n"); return ERR_PTR(-EINVAL); } @@ -336,3 +357,8 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, return c; } + +void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx) +{ + kfree(ctx); +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h index 14a519f2a725..00f1ee4ff468 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h @@ -27,14 +27,46 @@ struct sde_hw_stage_cfg { u8 border_enable; }; +/** + * struct sde_hw_intf_cfg :Desbribes how the mdp writes data to + * output interface + * @intf : Interface id + * @wb: writeback id + * @mode_3d: 3d mux configuration + */ +struct 
sde_hw_intf_cfg { + enum sde_intf intf; + enum sde_wb wb; + enum sde_3d_blend_mode mode_3d; +}; + /** * struct sde_hw_ctl_ops - Interface to the wb Hw driver functions * Assumption is these functions will be called after clocks are enabled */ struct sde_hw_ctl_ops { + /** + * kickoff hw operation for Sw controlled interfaces + * DSI cmd mode and WB interface are SW controlled + * @ctx : ctl path ctx pointer + */ + void (*setup_start)(struct sde_hw_ctl *ctx); + + /** + * FLUSH the modules for this control path + * @ctx : ctl path ctx pointer + * @flushbits : module flushmask + */ void (*setup_flush)(struct sde_hw_ctl *ctx, - u32 flushbits, - u8 force_start); + u32 flushbits); + + /** + * Setup ctl_path interface config + * @ctx + * @cfg : interface config structure pointer + */ + void (*setup_intf_cfg)(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg *cfg); int (*reset)(struct sde_hw_ctl *c); @@ -87,7 +119,7 @@ struct sde_hw_ctl { /** * sde_hw_ctl_init(): Initializes the ctl_path hw driver object. - * should be called before accessing every mixer. + * should be called before accessing every ctl path registers. 
* @idx: ctl_path index for which driver object is required * @addr: mapped register io address of MDP * @m : pointer to mdss catalog data @@ -96,4 +128,10 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, void __iomem *addr, struct sde_mdss_cfg *m); +/** + * sde_hw_ctl_destroy(): Destroys ctl driver context + * should be called to free the context + */ +void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx); + #endif /*_SDE_HW_MDP_CTL_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index ce5a90bc2f55..da9efe55bd1a 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -16,6 +16,7 @@ #include #include +#define SDE_NONE 0 #define SDE_CSC_MATRIX_COEFF_SIZE 9 #define SDE_CSC_CLAMP_SIZE 6 #define SDE_CSC_BIAS_SIZE 3 @@ -57,7 +58,7 @@ enum sde_sspp_type { }; enum sde_lm { - LM_0 = 0, + LM_0 = 1, LM_1, LM_2, LM_3, @@ -79,7 +80,7 @@ enum sde_stage { SDE_STAGE_MAX }; enum sde_dspp { - DSPP_0 = 0, + DSPP_0 = 1, DSPP_1, DSPP_2, DSPP_3, @@ -87,7 +88,7 @@ enum sde_dspp { }; enum sde_ctl { - CTL_0 = 0, + CTL_0 = 1, CTL_1, CTL_2, CTL_3, @@ -96,13 +97,13 @@ enum sde_ctl { }; enum sde_cdm { - CDM_0 = 0, + CDM_0 = 1, CDM_1, CDM_MAX }; enum sde_pingpong { - PINGPONG_0 = 0, + PINGPONG_0 = 1, PINGPONG_1, PINGPONG_2, PINGPONG_3, @@ -111,7 +112,7 @@ enum sde_pingpong { }; enum sde_intf { - INTF_0 = 0, + INTF_0 = 1, INTF_1, INTF_2, INTF_3, @@ -208,12 +209,10 @@ enum sde_mdp_fetch_type { * expected by the HW programming. 
*/ enum { - COLOR_4BIT, - COLOR_5BIT, - COLOR_6BIT, - COLOR_8BIT, - COLOR_ALPHA_1BIT = 0, - COLOR_ALPHA_4BIT = 1, + COLOR_1BIT = 0, + COLOR_5BIT = 1, + COLOR_6BIT = 2, + COLOR_8BIT = 3, }; enum sde_alpha_blend_type { @@ -224,6 +223,26 @@ enum sde_alpha_blend_type { ALPHA_MAX }; + +/** + * enum sde_3d_blend_mode + * Desribes how the 3d data is blended + * @BLEND_3D_NONE : 3d blending not enabled + * @BLEND_3D_FRAME_INT : Frame interleaving + * @BLEND_3D_H_ROW_INT : Horizontal row interleaving + * @BLEND_3D_V_ROW_INT : vertical row interleaving + * @BLEND_3D_COL_INT : column interleaving + * @BLEND_3D_MAX : + */ +enum sde_3d_blend_mode { + BLEND_3D_NONE = 0, + BLEND_3D_FRAME_INT, + BLEND_3D_H_ROW_INT, + BLEND_3D_V_ROW_INT, + BLEND_3D_COL_INT, + BLEND_3D_MAX +}; + struct addr_info { u32 plane[SDE_MAX_PLANES]; }; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index c54ad43f5001..e5f673fffbff 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -182,7 +182,7 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx, u32 opmode = 0; u32 idx; - if (!_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) + if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) return; opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx); @@ -210,7 +210,7 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx, src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) | (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) | - (fmt->bits[C0_G_Y] << 0); + (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0); if (flags & SDE_SSPP_ROT_90) src_format |= BIT(11); /* ROT90 */ @@ -235,12 +235,9 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx, } /* if this is YUV pixel format, enable CSC */ - if (fmt->is_yuv) { - _sspp_setup_opmode(ctx, CSC, 0x0); - } else { + if (fmt->is_yuv) src_format |= BIT(15); - _sspp_setup_opmode(ctx, CSC, 0x1); - } + _sspp_setup_opmode(ctx, CSC, fmt->is_yuv); opmode |= 
MDSS_MDP_OP_PE_OVERRIDE; @@ -260,8 +257,8 @@ static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx, struct sde_hw_blk_reg_map *c = &ctx->hw; u8 color; u32 lr_pe[4], tb_pe[4], tot_req_pixels[4]; - const u32 bytemask = 0xffff; - const u8 shortmask = 0xff; + const u32 bytemask = 0xff; + const u32 shortmask = 0xffff; u32 idx; if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) @@ -283,7 +280,7 @@ static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx, ((pe_ext->top_ftch[color] & bytemask) << 8)| (pe_ext->top_rpt[color] & bytemask); - tot_req_pixels[color] = (((cfg->src.height + + tot_req_pixels[color] = (((pe_ext->roi_h[color] + pe_ext->num_ext_pxls_top[color] + pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) | ((pe_ext->roi_w[color] + @@ -323,30 +320,30 @@ static void sde_hw_sspp_setup_scalar(struct sde_hw_pipe *ctx, scale_config = BIT(0) | BIT(1); /* RGB/YUV config */ - scale_config |= (pe_ext->horz_filter[0] & mask) << 8; - scale_config |= (pe_ext->vert_filter[0] & mask) << 10; + scale_config |= (pe_ext->horz_filter[SDE_SSPP_COMP_LUMA] & mask) << 8; + scale_config |= (pe_ext->vert_filter[SDE_SSPP_COMP_LUMA] & mask) << 10; /* Aplha config*/ - scale_config |= (pe_ext->horz_filter[3] & mask) << 16; - scale_config |= (pe_ext->vert_filter[3] & mask) << 18; + scale_config |= (pe_ext->horz_filter[SDE_SSPP_COMP_ALPHA] & mask) << 16; + scale_config |= (pe_ext->vert_filter[SDE_SSPP_COMP_ALPHA] & mask) << 18; SDE_REG_WRITE(c, SCALE_CONFIG + idx, scale_config); SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx, - pe_ext->init_phase_x[0]); + pe_ext->init_phase_x[SDE_SSPP_COMP_LUMA]); SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx, - pe_ext->init_phase_y[0]); + pe_ext->init_phase_y[SDE_SSPP_COMP_LUMA]); SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx, - pe_ext->phase_step_x[0]); + pe_ext->phase_step_x[SDE_SSPP_COMP_LUMA]); SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx, - pe_ext->phase_step_y[0]); + pe_ext->phase_step_y[SDE_SSPP_COMP_LUMA]); SDE_REG_WRITE(c, 
COMP1_2_INIT_PHASE_X + idx, - pe_ext->init_phase_x[1]); + pe_ext->init_phase_x[SDE_SSPP_COMP_CHROMA]); SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx, - pe_ext->init_phase_y[1]); + pe_ext->init_phase_y[SDE_SSPP_COMP_CHROMA]); SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx, - pe_ext->phase_step_x[1]); + pe_ext->phase_step_x[SDE_SSPP_COMP_CHROMA]); SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx, - pe_ext->phase_step_y[0]); + pe_ext->phase_step_y[SDE_SSPP_COMP_CHROMA]); } /** @@ -365,7 +362,7 @@ static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx, return; /* program pixel extension override */ - if (!pe_ext) + if (pe_ext) sde_hw_sspp_setup_pe_config(ctx, cfg, pe_ext); /* src and dest rect programming */ @@ -388,10 +385,8 @@ static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx, if (test_bit(SDE_SSPP_SCALAR_RGB, &ctx->cap->features) || test_bit(SDE_SSPP_SCALAR_QSEED2, &ctx->cap->features)) { /* program decimation */ - if (!cfg->horz_decimation) - decimation = (cfg->horz_decimation - 1) << 8; - if (!cfg->vert_decimation) - decimation |= (cfg->vert_decimation - 1); + decimation = ((1 << cfg->horz_decimation) - 1) << 8; + decimation |= ((1 << cfg->vert_decimation) - 1); sde_hw_sspp_setup_scalar(ctx, pe_ext); } @@ -421,7 +416,6 @@ static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx, for (i = 0; i < cfg->src.num_planes; i++) SDE_REG_WRITE(c, SSPP_SRC0_ADDR + idx + i*0x4, cfg->addr.plane[i]); - } static void sde_hw_sspp_setup_csc_8bit(struct sde_hw_pipe *ctx, @@ -476,7 +470,6 @@ static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, static void sde_hw_sspp_setup_histogram_v1(struct sde_hw_pipe *ctx, void *cfg) { - } static void sde_hw_sspp_setup_memcolor(struct sde_hw_pipe *ctx, @@ -589,3 +582,8 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, return c; } +void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx) +{ + kfree(ctx); +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h index 
0c3873b6945e..feb5e85e6cc1 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h @@ -15,7 +15,6 @@ #include "sde_hw_catalog.h" #include "sde_hw_mdss.h" -#include "sde_mdp_formats.h" #include "sde_hw_mdp_util.h" struct sde_hw_pipe; @@ -29,6 +28,15 @@ struct sde_hw_pipe; #define SDE_SSPP_SOURCE_ROTATED_90 0x8 #define SDE_SSPP_ROT_90 0x10 +/** + * Component indices + */ +enum { + SDE_SSPP_COMP_LUMA = 0, + SDE_SSPP_COMP_CHROMA = 1, + SDE_SSPP_COMP_ALPHA = 3 +}; + enum { SDE_MDP_FRAME_LINEAR, SDE_MDP_FRAME_TILE_A4X, @@ -88,6 +96,7 @@ struct sde_hw_pixel_ext { int btm_rpt[SDE_MAX_PLANES]; uint32_t roi_w[SDE_MAX_PLANES]; + uint32_t roi_h[SDE_MAX_PLANES]; /* * Filter type to be used for scaling in horizontal and vertical @@ -262,5 +271,12 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, void __iomem *addr, struct sde_mdss_cfg *m); +/** + * sde_hw_sspp_destroy(): Destroys SSPP driver context + * should be called during Hw pipe cleanup. + * @ctx: Pointer to SSPP driver context returned by sde_hw_sspp_init + */ +void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx); + #endif /*_SDE_HW_SSPP_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 740b9c066467..bbe1e98a022d 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -12,31 +12,63 @@ #include #include "msm_drv.h" +#include "msm_mmu.h" #include "sde_kms.h" #include "sde_hw_mdss.h" +#include "sde_hw_intf.h" -static int modeset_init_intf(struct sde_kms *sde_kms, int intf_num) +static const char * const iommu_ports[] = { + "mdp_0", +}; + +#define DEFAULT_MDP_SRC_CLK 200000000 + +int sde_disable(struct sde_kms *sde_kms) { - struct sde_mdss_cfg *catalog = sde_kms->catalog; - u32 intf_type = catalog->intf[intf_num].type; - - switch (intf_type) { - case INTF_NONE: - break; - case INTF_DSI: - break; - case INTF_LCDC: - break; - case INTF_HDMI: - break; - case INTF_EDP: - default: - break; - } + DBG(""); + + 
clk_disable_unprepare(sde_kms->ahb_clk); + clk_disable_unprepare(sde_kms->axi_clk); + clk_disable_unprepare(sde_kms->core_clk); + if (sde_kms->lut_clk) + clk_disable_unprepare(sde_kms->lut_clk); + + return 0; +} + +int sde_enable(struct sde_kms *sde_kms) +{ + DBG(""); + + clk_prepare_enable(sde_kms->ahb_clk); + clk_prepare_enable(sde_kms->axi_clk); + clk_prepare_enable(sde_kms->core_clk); + if (sde_kms->lut_clk) + clk_prepare_enable(sde_kms->lut_clk); return 0; } +static void sde_prepare_commit(struct msm_kms *kms, + struct drm_atomic_state *state) +{ + struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms)); + + sde_enable(sde_kms); +} + +static void sde_complete_commit(struct msm_kms *kms, + struct drm_atomic_state *state) +{ + struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms)); + + sde_disable(sde_kms); +} + +static void sde_wait_for_crtc_commit_done(struct msm_kms *kms, + struct drm_crtc *crtc) +{ +} static int modeset_init(struct sde_kms *sde_kms) { struct msm_drm_private *priv = sde_kms->dev->dev_private; @@ -62,8 +94,9 @@ static int modeset_init(struct sde_kms *sde_kms) || !num_private_planes) primary = false; - plane = sde_plane_init(dev, primary); + plane = sde_plane_init(dev, catalog->sspp[i].id, primary); if (IS_ERR(plane)) { + pr_err("%s: sde_plane_init failed", __func__); ret = PTR_ERR(plane); goto fail; } @@ -71,7 +104,7 @@ static int modeset_init(struct sde_kms *sde_kms) if (primary) primary_planes[primary_planes_idx++] = plane; - if (num_private_planes) + if (primary && num_private_planes) num_private_planes--; } @@ -81,15 +114,21 @@ static int modeset_init(struct sde_kms *sde_kms) goto fail; } - /* Create one CRTC per mixer */ - for (i = 0; i < catalog->mixer_count; i++) { + /* + * Enumerate displays supported + */ + sde_encoders_init(dev); + + /* Create one CRTC per display */ + for (i = 0; i < priv->num_encoders; i++) { /* - * Each mixer receives a private plane. We start + * Each CRTC receives a private plane. 
We start * with first RGB, and then DMA and then VIG. */ struct drm_crtc *crtc; - crtc = sde_crtc_init(dev, NULL, primary_planes[i], i); + crtc = sde_crtc_init(dev, priv->encoders[i], + primary_planes[i], i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); goto fail; @@ -97,11 +136,13 @@ static int modeset_init(struct sde_kms *sde_kms) priv->crtcs[priv->num_crtcs++] = crtc; } - for (i = 0; i < catalog->intf_count; i++) { - ret = modeset_init_intf(sde_kms, i); - if (ret) - goto fail; - } + /* + * Iterate through the list of encoders and + * set the possible CRTCs + */ + for (i = 0; i < priv->num_encoders; i++) + priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1; + return 0; fail: return ret; @@ -137,6 +178,9 @@ static const struct mdp_kms_funcs kms_funcs = { .irq_postinstall = sde_irq_postinstall, .irq_uninstall = sde_irq_uninstall, .irq = sde_irq, + .prepare_commit = sde_prepare_commit, + .complete_commit = sde_complete_commit, + .wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done, .enable_vblank = sde_enable_vblank, .disable_vblank = sde_disable_vblank, .get_format = mdp_get_format, @@ -184,6 +228,7 @@ struct sde_kms *sde_hw_setup(struct platform_device *pdev) ret = PTR_ERR(sde_kms->mmio); goto fail; } + pr_err("Mapped Mdp address space @%pK", sde_kms->mmio); sde_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); if (IS_ERR(sde_kms->vbif)) { @@ -247,8 +292,27 @@ struct sde_kms *sde_hw_setup(struct platform_device *pdev) get_clk(pdev, &sde_kms->mmagic_clk, "mmagic_clk", false); get_clk(pdev, &sde_kms->iommu_clk, "iommu_clk", false); + if (sde_kms->mmagic) { + ret = regulator_enable(sde_kms->mmagic); + if (ret) { + dev_err(sde_kms->dev->dev, + "failed to enable mmagic GDSC: %d\n", ret); + goto fail; + } + } + if (sde_kms->mmagic_clk) { + clk_prepare_enable(sde_kms->mmagic_clk); + if (ret) { + dev_err(sde_kms->dev->dev, "failed to enable mmagic_clk\n"); + goto undo_gdsc; + } + } + return sde_kms; +undo_gdsc: + if (sde_kms->mmagic) + 
regulator_disable(sde_kms->mmagic); fail: if (kms) sde_destroy(kms); @@ -256,6 +320,111 @@ fail: return ERR_PTR(ret); } +static int sde_translation_ctrl_pwr(struct sde_kms *sde_kms, bool on) +{ + struct device *dev = sde_kms->dev->dev; + int ret; + + if (on) { + if (sde_kms->iommu_clk) { + ret = clk_prepare_enable(sde_kms->iommu_clk); + if (ret) { + dev_err(dev, "failed to enable iommu_clk\n"); + goto undo_mmagic_clk; + } + } + } else { + if (sde_kms->iommu_clk) + clk_disable_unprepare(sde_kms->iommu_clk); + if (sde_kms->mmagic_clk) + clk_disable_unprepare(sde_kms->mmagic_clk); + if (sde_kms->mmagic) + regulator_disable(sde_kms->mmagic); + } + + return 0; + +undo_mmagic_clk: + if (sde_kms->mmagic_clk) + clk_disable_unprepare(sde_kms->mmagic_clk); + + return ret; +} +int sde_mmu_init(struct sde_kms *sde_kms) +{ + struct sde_mdss_cfg *catalog = sde_kms->catalog; + struct sde_hw_intf *intf = NULL; + struct iommu_domain *iommu; + struct msm_mmu *mmu; + int i, ret; + + /* + * Make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + sde_enable(sde_kms); + for (i = 0; i < catalog->intf_count; i++) { + intf = sde_hw_intf_init(catalog->intf[i].id, + sde_kms->mmio, + catalog); + if (!IS_ERR_OR_NULL(intf)) { + intf->ops.enable_timing(intf, 0x0); + sde_hw_intf_deinit(intf); + } + } + sde_disable(sde_kms); + msleep(20); + + iommu = iommu_domain_alloc(&platform_bus_type); + + if (!IS_ERR_OR_NULL(iommu)) { + mmu = msm_smmu_new(sde_kms->dev->dev, MSM_SMMU_DOMAIN_UNSECURE); + if (IS_ERR(mmu)) { + ret = PTR_ERR(mmu); + dev_err(sde_kms->dev->dev, + "failed to init iommu: %d\n", ret); + iommu_domain_free(iommu); + goto fail; + } + + ret = sde_translation_ctrl_pwr(sde_kms, true); + if (ret) { + dev_err(sde_kms->dev->dev, + "failed to power iommu: %d\n", ret); + mmu->funcs->destroy(mmu); + goto fail; + } + + ret = mmu->funcs->attach(mmu, (const char **)iommu_ports, + 
ARRAY_SIZE(iommu_ports)); + if (ret) { + dev_err(sde_kms->dev->dev, + "failed to attach iommu: %d\n", ret); + mmu->funcs->destroy(mmu); + goto fail; + } + } else { + dev_info(sde_kms->dev->dev, + "no iommu, fallback to phys contig buffers for scanout\n"); + mmu = NULL; + } + sde_kms->mmu = mmu; + + sde_kms->mmu_id = msm_register_mmu(sde_kms->dev, mmu); + if (sde_kms->mmu_id < 0) { + ret = sde_kms->mmu_id; + dev_err(sde_kms->dev->dev, + "failed to register sde iommu: %d\n", ret); + goto fail; + } + + return 0; +fail: + return ret; + +} + struct msm_kms *sde_kms_init(struct drm_device *dev) { struct platform_device *pdev = dev->platformdev; @@ -282,10 +451,20 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) sde_kms->catalog = catalog; + /* we need to set a default rate before enabling. + * Set a safe rate first, before initializing catalog + * later set more optimal rate based on bandwdith/clock + * requirements + */ + + clk_set_rate(sde_kms->src_clk, DEFAULT_MDP_SRC_CLK); + sde_enable(sde_kms); + /* * Now we need to read the HW catalog and initialize resources such as * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc */ + sde_mmu_init(sde_kms); /* * modeset_init should create the DRM related objects i.e. 
CRTCs, @@ -296,6 +475,14 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; + /* + * we can assume the max crtc width is equal to the max supported + * by LM_0 + * Also fixing the max height to 4k + */ + dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth; + dev->mode_config.max_height = 4096; + return msm_kms; fail: diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 1afe1bb03c7b..441398b7e824 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -26,6 +26,7 @@ struct sde_kms { struct sde_mdss_cfg *catalog; struct msm_mmu *mmu; + int mmu_id; /* io/register spaces: */ void __iomem *mmio, *vbif; @@ -86,18 +87,8 @@ int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); enum sde_sspp sde_plane_pipe(struct drm_plane *plane); -void sde_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj); -void sde_plane_set_scanout(struct drm_plane *plane, - struct drm_framebuffer *fb); -int sde_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); -void sde_plane_complete_flip(struct drm_plane *plane); -struct drm_plane *sde_plane_init(struct drm_device *dev, bool private_plane); +struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, + bool private_plane); uint32_t sde_crtc_vblank(struct drm_crtc *crtc); @@ -108,7 +99,16 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_encoder *encoder, struct drm_plane *plane, int id); -struct drm_encoder *sde_encoder_init(struct drm_device *dev, int intf); +struct sde_encoder_hw_resources { + bool intfs[INTF_MAX]; + bool pingpongs[PINGPONG_MAX]; +}; +void 
sde_encoder_get_hw_resources(struct drm_encoder *encoder, + struct sde_encoder_hw_resources *hw_res); +void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, + void (*cb)(void *), void *data); +void sde_encoders_init(struct drm_device *dev); + int sde_irq_domain_init(struct sde_kms *sde_kms); int sde_irq_domain_fini(struct sde_kms *sde_kms); diff --git a/drivers/gpu/drm/msm/sde/sde_mdp_formats.c b/drivers/gpu/drm/msm/sde/sde_mdp_formats.c new file mode 100644 index 000000000000..56b65d4bd45e --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_mdp_formats.c @@ -0,0 +1,134 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "sde_mdp_formats.h" + +static struct sde_mdp_format_params sde_mdp_format_map[] = { + INTERLEAVED_RGB_FMT(ARGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + true, 4, 0), + + INTERLEAVED_RGB_FMT(ABGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, + true, 4, 0), + + INTERLEAVED_RGB_FMT(RGBA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, + true, 4, 0), + + INTERLEAVED_RGB_FMT(BGRA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, + true, 4, 0), + + INTERLEAVED_RGB_FMT(XRGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + true, 4, 0), + + INTERLEAVED_RGB_FMT(RGB888, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, + false, 3, 0), + + INTERLEAVED_RGB_FMT(BGR888, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, + false, 3, 0), + + INTERLEAVED_RGB_FMT(RGB565, + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, + false, 2, 0), + + INTERLEAVED_RGB_FMT(BGR565, + 0, 5, 6, 5, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, + false, 2, 0), + + PSEDUO_YUV_FMT(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_MDP_CHROMA_420, 0), + + PSEDUO_YUV_FMT(NV21, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + SDE_MDP_CHROMA_420, 0), + + PSEDUO_YUV_FMT(NV16, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_MDP_CHROMA_H2V1, 0), + + PSEDUO_YUV_FMT(NV61, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + SDE_MDP_CHROMA_H2V1, 0), + + INTERLEAVED_YUV_FMT(VYUY, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + INTERLEAVED_YUV_FMT(UYVY, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + 
INTERLEAVED_YUV_FMT(YUYV, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + INTERLEAVED_YUV_FMT(YVYU, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb, + false, SDE_MDP_CHROMA_H2V1, 4, 2, + 0), + + PLANAR_YUV_FMT(YUV420, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, C0_G_Y, + false, SDE_MDP_CHROMA_420, 2, + 0), + + PLANAR_YUV_FMT(YVU420, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, C0_G_Y, + false, SDE_MDP_CHROMA_420, 2, + 0), +}; + +struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format, + u32 fmt_modifier) +{ + u32 i = 0; + struct sde_mdp_format_params *fmt = NULL; + + for (i = 0; i < sizeof(sde_mdp_format_map)/sizeof(*sde_mdp_format_map); + i++) + if (format == sde_mdp_format_map[i].format) { + fmt = &sde_mdp_format_map[i]; + break; + } + + return fmt; +} + diff --git a/drivers/gpu/drm/msm/sde/sde_mdp_formats.h b/drivers/gpu/drm/msm/sde/sde_mdp_formats.h index 4ad3ad3c744e..67f445b8900d 100644 --- a/drivers/gpu/drm/msm/sde/sde_mdp_formats.h +++ b/drivers/gpu/drm/msm/sde/sde_mdp_formats.h @@ -58,6 +58,7 @@ alpha, chroma, count, bp, flg) \ .is_yuv = true, \ .flag = flg \ } + #define PSEDUO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg) \ { \ .format = DRM_FORMAT_ ## fmt, \ @@ -92,122 +93,12 @@ alpha, chroma, count, bp, flg) \ .flag = flg \ } -static struct sde_mdp_format_params sde_mdp_format_map[] = { - INTERLEAVED_RGB_FMT(ARGB8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, - true, 4, 0), - - INTERLEAVED_RGB_FMT(ABGR8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, - true, 4, 0), - - INTERLEAVED_RGB_FMT(RGBA8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, - true, 4, 0), - - INTERLEAVED_RGB_FMT(BGRA8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, - 
true, 4, 0), - - INTERLEAVED_RGB_FMT(XRGB8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, - true, 4, 0), - - INTERLEAVED_RGB_FMT(RGB888, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, - false, 3, 0), - - INTERLEAVED_RGB_FMT(BGR888, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, - false, 3, 0), - - INTERLEAVED_RGB_FMT(RGB565, - 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, - false, 2, 0), - - INTERLEAVED_RGB_FMT(BGR565, - 0, 5, 6, 5, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, - false, 2, 0), - - PSEDUO_YUV_FMT(NV12, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - SDE_MDP_CHROMA_420, 0), - - PSEDUO_YUV_FMT(NV21, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C1_B_Cb, - SDE_MDP_CHROMA_420, 0), - - PSEDUO_YUV_FMT(NV16, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - SDE_MDP_CHROMA_H2V1, 0), - - PSEDUO_YUV_FMT(NV61, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C1_B_Cb, - SDE_MDP_CHROMA_H2V1, 0), - - INTERLEAVED_YUV_FMT(VYUY, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y, - false, SDE_MDP_CHROMA_H2V1, 4, 2, - 0), - - INTERLEAVED_YUV_FMT(UYVY, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y, - false, SDE_MDP_CHROMA_H2V1, 4, 2, - 0), - - INTERLEAVED_YUV_FMT(YUYV, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr, - false, SDE_MDP_CHROMA_H2V1, 4, 2, - 0), - - INTERLEAVED_YUV_FMT(YVYU, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb, - false, SDE_MDP_CHROMA_H2V1, 4, 2, - 0), - - PLANAR_YUV_FMT(YUV420, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C1_B_Cb, C0_G_Y, - false, SDE_MDP_CHROMA_420, 2, - 0), - - PLANAR_YUV_FMT(YVU420, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, C0_G_Y, - false, SDE_MDP_CHROMA_420, 2, - 0), -}; - +/** + * sde_mdp_get_format_params(): Returns sde format structure pointer. 
+ * @format: DRM format + * @fmt_modifier: DRM format modifier + */ struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format, - u32 fmt_modifier) -{ - u32 i = 0; - struct sde_mdp_format_params *fmt = NULL; - - for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++) - if (format == sde_mdp_format_map[i].format) { - fmt = &sde_mdp_format_map[i]; - break; - } - - return fmt; -} + u32 fmt_modifier); #endif /*_SDE_MDP_FORMATS_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 17b7303557ef..fc27a7ede026 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -11,103 +11,746 @@ */ #include "sde_kms.h" +#include "sde_hwio.h" +#include "sde_hw_mdp_ctl.h" +#include "sde_mdp_formats.h" +#include "sde_hw_sspp.h" + +#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci)) +#define PHASE_STEP_SHIFT 21 +#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT)) +#define PHASE_RESIDUAL 15 + +#define SDE_PLANE_FEATURE_SCALER \ + (BIT(SDE_SSPP_SCALAR_QSEED2)| \ + BIT(SDE_SSPP_SCALAR_QSEED3)| \ + BIT(SDE_SSPP_SCALAR_RGB)) + +#ifndef SDE_PLANE_DEBUG_START +#define SDE_PLANE_DEBUG_START() +#endif + +#ifndef SDE_PLANE_DEBUG_END +#define SDE_PLANE_DEBUG_END() +#endif struct sde_plane { struct drm_plane base; const char *name; + + int mmu_id; + + enum sde_sspp pipe; + uint32_t features; /* capabilities from catalog */ + uint32_t flush_mask; /* used to commit pipe registers */ uint32_t nformats; uint32_t formats[32]; + + struct sde_hw_pipe *pipe_hw; + struct sde_hw_pipe_cfg pipe_cfg; + struct sde_hw_pixel_ext pixel_ext; }; #define to_sde_plane(x) container_of(x, struct sde_plane, base) -static int sde_plane_update(struct drm_plane *plane, +static bool sde_plane_enabled(struct drm_plane_state *state) +{ + return state->fb && state->crtc; +} + +static void sde_plane_set_scanout(struct drm_plane *plane, + struct sde_hw_pipe_cfg *pipe_cfg, struct drm_framebuffer *fb) +{ + 
struct sde_plane *psde = to_sde_plane(plane); + int i; + + if (pipe_cfg && fb && psde->pipe_hw->ops.setup_sourceaddress) { + /* stride */ + i = min_t(int, ARRAY_SIZE(fb->pitches), SDE_MAX_PLANES); + while (i) { + --i; + pipe_cfg->src.ystride[i] = fb->pitches[i]; + } + + /* address */ + for (i = 0; i < ARRAY_SIZE(pipe_cfg->addr.plane); ++i) + pipe_cfg->addr.plane[i] = msm_framebuffer_iova(fb, + psde->mmu_id, i); + + /* hw driver */ + psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg); + } +} + +static void sde_plane_scale_helper(struct drm_plane *plane, + uint32_t src, uint32_t dst, uint32_t *phase_steps, + enum sde_hw_filter *filter, struct sde_mdp_format_params *fmt, + uint32_t chroma_subsampling) +{ + /* calcualte phase steps, leave init phase as zero */ + phase_steps[SDE_SSPP_COMP_LUMA] = + mult_frac(1 << PHASE_STEP_SHIFT, src, dst); + phase_steps[SDE_SSPP_COMP_CHROMA] = + phase_steps[SDE_SSPP_COMP_LUMA] / chroma_subsampling; + + /* calculate scaler config, if necessary */ + if (src != dst) { + filter[SDE_SSPP_COMP_ALPHA] = (src < dst) ? 
+ SDE_MDP_SCALE_FILTER_BIL : + SDE_MDP_SCALE_FILTER_PCMN; + + if (fmt->is_yuv) + filter[SDE_SSPP_COMP_LUMA] = SDE_MDP_SCALE_FILTER_CA; + else + filter[SDE_SSPP_COMP_LUMA] = + filter[SDE_SSPP_COMP_ALPHA]; + } +} + +/* CIFIX: clean up fmt/subsampling params once we're using fourcc formats */ +static void _sde_plane_pixel_ext_helper(struct drm_plane *plane, + uint32_t src, uint32_t dst, uint32_t decimated_src, + uint32_t *phase_steps, uint32_t *out_src, int *out_edge1, + int *out_edge2, struct sde_mdp_format_params *fmt, + uint32_t chroma_subsampling, bool post_compare) +{ + /* CIFIX: adapted from mdss_mdp_pipe_calc_pixel_extn() */ + int64_t edge1, edge2, caf; + uint32_t src_work; + int i, tmp; + + if (plane && phase_steps && out_src && out_edge1 && out_edge2 && fmt) { + /* enable CAF for YUV formats */ + if (fmt->is_yuv) + caf = PHASE_STEP_UNIT_SCALE; + else + caf = 0; + + for (i = 0; i < SDE_MAX_PLANES; i++) { + src_work = decimated_src; + if (i == 1 || i == 2) + src_work /= chroma_subsampling; + if (post_compare) + src = src_work; + if (!(fmt->is_yuv) && (src == dst)) { + /* unity */ + edge1 = 0; + edge2 = 0; + } else if (dst >= src) { + /* upscale */ + edge1 = (1 << PHASE_RESIDUAL); + edge1 -= caf; + edge2 = (1 << PHASE_RESIDUAL); + edge2 += (dst - 1) * *(phase_steps + i); + edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE; + edge2 += caf; + edge2 = -(edge2); + } else { + /* downscale */ + edge1 = 0; + edge2 = (dst - 1) * *(phase_steps + i); + edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE; + edge2 += *(phase_steps + i); + edge2 = -(edge2); + } + + /* only enable CAF for luma plane */ + caf = 0; + + /* populate output arrays */ + *(out_src + i) = src_work; + + /* edge updates taken from __pxl_extn_helper */ + /* CIFIX: why are we casting first to uint32_t? 
*/ + if (edge1 >= 0) { + tmp = (uint32_t)edge1; + tmp >>= PHASE_STEP_SHIFT; + *(out_edge1 + i) = -tmp; + } else { + tmp = (uint32_t)(-edge1); + *(out_edge1 + i) = (tmp + PHASE_STEP_UNIT_SCALE + - 1) >> PHASE_STEP_SHIFT; + } + if (edge2 >= 0) { + tmp = (uint32_t)edge2; + tmp >>= PHASE_STEP_SHIFT; + *(out_edge2 + i) = -tmp; + } else { + tmp = (uint32_t)(-edge2); + *(out_edge2 + i) = (tmp + PHASE_STEP_UNIT_SCALE + - 1) >> PHASE_STEP_SHIFT; + } + } + } +} + +static int sde_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - return 0; + struct sde_plane *psde = to_sde_plane(plane); + struct sde_plane_state *pstate; + const struct mdp_format *format; + uint32_t nplanes, pix_format, tmp; + int i; + struct sde_mdp_format_params *fmt; + struct sde_hw_pixel_ext *pe; + int ret = 0; + + SDE_PLANE_DEBUG_START(); + nplanes = drm_format_num_planes(fb->pixel_format); + + pstate = to_sde_plane_state(plane->state); + + format = to_mdp_format(msm_framebuffer_format(fb)); + pix_format = format->base.pixel_format; + + /* src values are in Q16 fixed point, convert to integer */ + src_x = src_x >> 16; + src_y = src_y >> 16; + src_w = src_w >> 16; + src_h = src_h >> 16; + + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", psde->name, + fb->base.id, src_x, src_y, src_w, src_h, + crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + + /* update format configuration */ + memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg)); + + psde->pipe_cfg.src.format = sde_mdp_get_format_params(pix_format, + 0/* CIFIX: fmt_modifier */); + psde->pipe_cfg.src.width = fb->width; + psde->pipe_cfg.src.height = fb->height; + psde->pipe_cfg.src.num_planes = nplanes; + + sde_plane_set_scanout(plane, &psde->pipe_cfg, fb); + + psde->pipe_cfg.src_rect.x = src_x; + psde->pipe_cfg.src_rect.y = src_y; + psde->pipe_cfg.src_rect.w = src_w; + 
psde->pipe_cfg.src_rect.h = src_h; + + psde->pipe_cfg.dst_rect.x = crtc_x; + psde->pipe_cfg.dst_rect.y = crtc_y; + psde->pipe_cfg.dst_rect.w = crtc_w; + psde->pipe_cfg.dst_rect.h = crtc_h; + + psde->pipe_cfg.horz_decimation = 0; + psde->pipe_cfg.vert_decimation = 0; + + /* get sde pixel format definition */ + fmt = psde->pipe_cfg.src.format; + + /* update pixel extensions */ + pe = &(psde->pixel_ext); + if (!pe->enable_pxl_ext) { + uint32_t chroma_subsample_h, chroma_subsample_v; + + chroma_subsample_h = psde->pipe_cfg.horz_decimation ? 1 : + drm_format_horz_chroma_subsampling(pix_format); + chroma_subsample_v = psde->pipe_cfg.vert_decimation ? 1 : + drm_format_vert_chroma_subsampling(pix_format); + + memset(pe, 0, sizeof(struct sde_hw_pixel_ext)); + + /* calculate phase steps */ + sde_plane_scale_helper(plane, src_w, crtc_w, + pe->phase_step_x, + pe->horz_filter, fmt, chroma_subsample_h); + sde_plane_scale_helper(plane, src_h, crtc_h, + pe->phase_step_y, + pe->vert_filter, fmt, chroma_subsample_v); + + /* calculate left/right/top/bottom pixel extentions */ + tmp = DECIMATED_DIMENSION(src_w, + psde->pipe_cfg.horz_decimation); + if (fmt->is_yuv) + tmp &= ~0x1; + _sde_plane_pixel_ext_helper(plane, src_w, crtc_w, tmp, + pe->phase_step_x, + pe->roi_w, + pe->num_ext_pxls_left, + pe->num_ext_pxls_right, fmt, + chroma_subsample_h, 0); + + tmp = DECIMATED_DIMENSION(src_h, + psde->pipe_cfg.vert_decimation); + _sde_plane_pixel_ext_helper(plane, src_h, crtc_h, tmp, + pe->phase_step_y, + pe->roi_h, + pe->num_ext_pxls_top, + pe->num_ext_pxls_btm, fmt, + chroma_subsample_v, 1); + + /* CIFIX: port "Single pixel rgb scale adjustment"? 
*/ + + for (i = 0; i < SDE_MAX_PLANES; i++) { + if (pe->num_ext_pxls_left[i] >= 0) + pe->left_rpt[i] = + pe->num_ext_pxls_left[i]; + else + pe->left_ftch[i] = + pe->num_ext_pxls_left[i]; + + if (pe->num_ext_pxls_right[i] >= 0) + pe->right_rpt[i] = + pe->num_ext_pxls_right[i]; + else + pe->right_ftch[i] = + pe->num_ext_pxls_right[i]; + + if (pe->num_ext_pxls_top[i] >= 0) + pe->top_rpt[i] = + pe->num_ext_pxls_top[i]; + else + pe->top_ftch[i] = + pe->num_ext_pxls_top[i]; + + if (pe->num_ext_pxls_btm[i] >= 0) + pe->btm_rpt[i] = + pe->num_ext_pxls_btm[i]; + else + pe->btm_ftch[i] = + pe->num_ext_pxls_btm[i]; + } + } + + if (psde->pipe_hw->ops.setup_sourceformat) + psde->pipe_hw->ops.setup_sourceformat(psde->pipe_hw, + &psde->pipe_cfg, 0 /* CIFIX: flags */); + if (psde->pipe_hw->ops.setup_rects) + psde->pipe_hw->ops.setup_rects(psde->pipe_hw, + &psde->pipe_cfg, &psde->pixel_ext); + + /* update csc */ + + SDE_PLANE_DEBUG_END(); + return ret; } -static int sde_plane_disable(struct drm_plane *plane) +static int sde_plane_prepare_fb(struct drm_plane *plane, + const struct drm_plane_state *new_state) { + struct drm_framebuffer *fb = new_state->fb; + struct sde_plane *psde = to_sde_plane(plane); + + if (!new_state->fb) + return 0; + + SDE_PLANE_DEBUG_START(); + SDE_PLANE_DEBUG_END(); + DBG("%s: prepare: FB[%u]", psde->name, fb->base.id); + return msm_framebuffer_prepare(fb, psde->mmu_id); +} + +static void sde_plane_cleanup_fb(struct drm_plane *plane, + const struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb = old_state->fb; + struct sde_plane *psde = to_sde_plane(plane); + + if (!fb) + return; + + SDE_PLANE_DEBUG_START(); + SDE_PLANE_DEBUG_END(); + DBG("%s: cleanup: FB[%u]", psde->name, fb->base.id); + msm_framebuffer_cleanup(fb, psde->mmu_id); +} + +static int sde_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct sde_plane *psde = to_sde_plane(plane); + struct drm_plane_state *old_state = plane->state; + const struct 
mdp_format *format; + + SDE_PLANE_DEBUG_START(); + SDE_PLANE_DEBUG_END(); + DBG("%s: check (%d -> %d)", psde->name, + sde_plane_enabled(old_state), sde_plane_enabled(state)); + + if (sde_plane_enabled(state)) { + /* CIFIX: don't use mdp format? */ + format = to_mdp_format(msm_framebuffer_format(state->fb)); + if (MDP_FORMAT_IS_YUV(format) && + (!(psde->features & SDE_PLANE_FEATURE_SCALER) || + !(psde->features & BIT(SDE_SSPP_CSC)))) { + dev_err(plane->dev->dev, + "Pipe doesn't support YUV\n"); + + return -EINVAL; + } + + if (!(psde->features & SDE_PLANE_FEATURE_SCALER) && + (((state->src_w >> 16) != state->crtc_w) || + ((state->src_h >> 16) != state->crtc_h))) { + dev_err(plane->dev->dev, + "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", + state->src_w >> 16, state->src_h >> 16, + state->crtc_w, state->crtc_h); + + return -EINVAL; + } + } + + if (sde_plane_enabled(state) && sde_plane_enabled(old_state)) { + /* we cannot change SMP block configuration during scanout: */ + bool full_modeset = false; + + if (state->fb->pixel_format != old_state->fb->pixel_format) { + DBG("%s: pixel_format change!", psde->name); + full_modeset = true; + } + if (state->src_w != old_state->src_w) { + DBG("%s: src_w change!", psde->name); + full_modeset = true; + } + if (to_sde_plane_state(old_state)->pending) { + DBG("%s: still pending!", psde->name); + full_modeset = true; + } + if (full_modeset) { + struct drm_crtc_state *crtc_state = + drm_atomic_get_crtc_state(state->state, + state->crtc); + crtc_state->mode_changed = true; + to_sde_plane_state(state)->mode_changed = true; + } + } else { + to_sde_plane_state(state)->mode_changed = true; + } + return 0; } -static void sde_plane_destroy(struct drm_plane *plane) +static void sde_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct sde_plane *sde_plane = to_sde_plane(plane); - struct msm_drm_private *priv = plane->dev->dev_private; + struct drm_plane_state *state = plane->state; - if (priv->kms) - 
sde_plane_disable(plane); + DBG("%s: update", sde_plane->name); - drm_plane_cleanup(plane); + SDE_PLANE_DEBUG_START(); + if (!sde_plane_enabled(state)) { + to_sde_plane_state(state)->pending = true; + } else if (to_sde_plane_state(state)->mode_changed) { + int ret; - kfree(sde_plane); + to_sde_plane_state(state)->pending = true; + ret = sde_plane_mode_set(plane, + state->crtc, state->fb, + state->crtc_x, state->crtc_y, + state->crtc_w, state->crtc_h, + state->src_x, state->src_y, + state->src_w, state->src_h); + /* atomic_check should have ensured that this doesn't fail */ + WARN_ON(ret < 0); + } else { + sde_plane_set_scanout(plane, &sde_plane->pipe_cfg, state->fb); + } + SDE_PLANE_DEBUG_END(); } /* helper to install properties which are common to planes and crtcs */ -void sde_plane_install_properties(struct drm_plane *plane, +static void sde_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) { + struct drm_device *dev = plane->dev; + struct msm_drm_private *dev_priv = dev->dev_private; + struct drm_property *prop; + + SDE_PLANE_DEBUG_START(); +#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) 
do { \ + prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \ + if (!prop) { \ + prop = drm_property_##fnc(dev, 0, #name, \ + ##__VA_ARGS__); \ + if (!prop) { \ + dev_warn(dev->dev, \ + "Create property %s failed\n", \ + #name); \ + return; \ + } \ + dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \ + } \ + drm_object_attach_property(&plane->base, prop, init_val); \ + } while (0) + +#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_range, min, max) + +#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_enum, name##_prop_enum_list, \ + ARRAY_SIZE(name##_prop_enum_list)) + + INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1); + +#undef INSTALL_RANGE_PROPERTY +#undef INSTALL_ENUM_PROPERTY +#undef INSTALL_PROPERTY + SDE_PLANE_DEBUG_END(); +} + +static int sde_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct sde_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + SDE_PLANE_DEBUG_START(); + + pstate = to_sde_plane_state(state); + +#define SET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + pstate->name = (type)val; \ + DBG("Set property %s %d", #name, (type)val); \ + goto done; \ + } \ + } while (0) + + SET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + SDE_PLANE_DEBUG_END(); + return ret; +#undef SET_PROPERTY } -int sde_plane_set_property(struct drm_plane *plane, +static int sde_plane_set_property(struct drm_plane *plane, struct drm_property *property, uint64_t val) { - return -EINVAL; + int rc; + + SDE_PLANE_DEBUG_START(); + rc = sde_plane_atomic_set_property(plane, plane->state, property, + val); + SDE_PLANE_DEBUG_END(); + return rc; +} + +static int 
sde_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = plane->dev; + struct sde_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + SDE_PLANE_DEBUG_START(); + pstate = to_sde_plane_state(state); + +#define GET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + *val = pstate->name; \ + DBG("Get property %s %lld", #name, *val); \ + goto done; \ + } \ + } while (0) + + GET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + SDE_PLANE_DEBUG_END(); + return ret; +#undef SET_PROPERTY +} + +static void sde_plane_destroy(struct drm_plane *plane) +{ + struct sde_plane *psde = to_sde_plane(plane); + + SDE_PLANE_DEBUG_START(); + + if (psde->pipe_hw) + sde_hw_sspp_destroy(psde->pipe_hw); + + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); + + kfree(psde); + + SDE_PLANE_DEBUG_END(); +} + +static void sde_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + SDE_PLANE_DEBUG_START(); + if (state->fb) + drm_framebuffer_unreference(state->fb); + + kfree(to_sde_plane_state(state)); + SDE_PLANE_DEBUG_END(); +} + +static struct drm_plane_state * +sde_plane_duplicate_state(struct drm_plane *plane) +{ + struct sde_plane_state *pstate; + + if (WARN_ON(!plane->state)) + return NULL; + + SDE_PLANE_DEBUG_START(); + pstate = kmemdup(to_sde_plane_state(plane->state), + sizeof(*pstate), GFP_KERNEL); + + if (pstate && pstate->base.fb) + drm_framebuffer_reference(pstate->base.fb); + + pstate->mode_changed = false; + pstate->pending = false; + SDE_PLANE_DEBUG_END(); + + return &pstate->base; +} + +static void sde_plane_reset(struct drm_plane *plane) +{ + struct sde_plane_state *pstate; + + SDE_PLANE_DEBUG_START(); + if (plane->state && plane->state->fb) + 
drm_framebuffer_unreference(plane->state->fb); + + kfree(to_sde_plane_state(plane->state)); + pstate = kzalloc(sizeof(*pstate), GFP_KERNEL); + + memset(pstate, 0, sizeof(struct sde_plane_state)); + + /* assign default blend parameters */ + pstate->alpha = 255; + pstate->premultiplied = 0; + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + pstate->zpos = STAGE_BASE; + else + pstate->zpos = STAGE0 + drm_plane_index(plane); + + pstate->base.plane = plane; + + plane->state = &pstate->base; + SDE_PLANE_DEBUG_END(); } static const struct drm_plane_funcs sde_plane_funcs = { - .update_plane = sde_plane_update, - .disable_plane = sde_plane_disable, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = sde_plane_destroy, .set_property = sde_plane_set_property, + .atomic_set_property = sde_plane_atomic_set_property, + .atomic_get_property = sde_plane_atomic_get_property, + .reset = sde_plane_reset, + .atomic_duplicate_state = sde_plane_duplicate_state, + .atomic_destroy_state = sde_plane_destroy_state, }; -void sde_plane_set_scanout(struct drm_plane *plane, - struct drm_framebuffer *fb) -{ -} +static const struct drm_plane_helper_funcs sde_plane_helper_funcs = { + .prepare_fb = sde_plane_prepare_fb, + .cleanup_fb = sde_plane_cleanup_fb, + .atomic_check = sde_plane_atomic_check, + .atomic_update = sde_plane_atomic_update, +}; -int sde_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +enum sde_sspp sde_plane_pipe(struct drm_plane *plane) { - return 0; + struct sde_plane *sde_plane = to_sde_plane(plane); + + return sde_plane->pipe; } /* initialize plane */ -struct drm_plane *sde_plane_init(struct drm_device *dev, bool private_plane) +struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, + bool private_plane) { + static const 
char tmp_name[] = "---"; struct drm_plane *plane = NULL; - struct sde_plane *sde_plane; + struct sde_plane *psde; + struct sde_hw_ctl *sde_ctl; + struct msm_drm_private *priv; + struct sde_kms *kms; + struct sde_mdss_cfg *sde_cat; int ret; enum drm_plane_type type; - sde_plane = kzalloc(sizeof(*sde_plane), GFP_KERNEL); - if (!sde_plane) { + priv = dev->dev_private; + kms = to_sde_kms(to_mdp_kms(priv->kms)); + + psde = kzalloc(sizeof(*psde), GFP_KERNEL); + if (!psde) { ret = -ENOMEM; goto fail; } - plane = &sde_plane->base; + memset(psde, 0, sizeof(*psde)); + + plane = &psde->base; + + psde->pipe = pipe; + psde->name = tmp_name; + + if (kms) { + /* mmu id for buffer mapping */ + psde->mmu_id = kms->mmu_id; + + /* check catalog for features mask */ + sde_cat = kms->catalog; + if (sde_cat) + psde->features = sde_cat->sspp[pipe].features; + } + psde->nformats = mdp_get_formats(psde->formats, + ARRAY_SIZE(psde->formats), + !(psde->features & BIT(SDE_SSPP_CSC)) || + !(psde->features & SDE_PLANE_FEATURE_SCALER)); type = private_plane ? 
DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; - drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs, - sde_plane->formats, sde_plane->nformats, - type); + ret = drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs, + psde->formats, psde->nformats, + type); + if (ret) + goto fail; + + drm_plane_helper_add(plane, &sde_plane_helper_funcs); sde_plane_install_properties(plane, &plane->base); + psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, sde_cat); + if (IS_ERR(psde->pipe_hw)) { + ret = PTR_ERR(psde->pipe_hw); + psde->pipe_hw = NULL; + goto fail; + } + + /* cache flush mask for later */ + sde_ctl = sde_hw_ctl_init(CTL_0, kms->mmio, sde_cat); + if (!IS_ERR(sde_ctl)) { + if (sde_ctl->ops.get_bitmask_sspp) + sde_ctl->ops.get_bitmask_sspp(sde_ctl, + &psde->flush_mask, pipe); + sde_hw_ctl_destroy(sde_ctl); + } + + pr_err("%s: Successfully created plane\n", __func__); return plane; fail: + pr_err("%s: Plane creation failed\n", __func__); if (plane) sde_plane_destroy(plane); -- GitLab From 24047f8d3ffde4425c34823d1025da130f02ce98 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Thu, 5 Nov 2015 15:52:07 -0500 Subject: [PATCH 009/310] drm/msm/sde: add controller instance to INTF block in catalog Controller Instance ID differentiates between INTFs of the same type. E.g. which DSI INTF is primary, which is secondary. 
Change-Id: Icc47df59a24faa2a019ab190d6c835a0ac93024d Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 2 ++ drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 0d3c536cef26..9b06aca45241 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -371,10 +371,12 @@ struct sde_cdm_cfg { * @base register offset of this block * @features bit mask identifying sub-blocks/features * @type: Interface type(DSI, DP, HDMI) + * @controller_id: Controller Instance ID in case of multiple of intf type */ struct sde_intf_cfg { SDE_HW_BLK_INFO; u32 type; /* interface type*/ + u32 controller_id; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c index 86673e07d3db..a756c515f45e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c @@ -244,13 +244,13 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg) .intf_count = 4, .intf = { {.id = INTF_0, .base = 0x0006B000, - .type = INTF_NONE}, + .type = INTF_NONE, .controller_id = 0}, {.id = INTF_1, .base = 0x0006B800, - .type = INTF_DSI}, + .type = INTF_DSI, .controller_id = 0}, {.id = INTF_2, .base = 0x0006C000, - .type = INTF_DSI}, + .type = INTF_DSI, .controller_id = 1}, {.id = INTF_3, .base = 0x0006C800, - .type = INTF_HDMI}, + .type = INTF_HDMI, .controller_id = 0}, }, .wb_count = 3, .wb = { -- GitLab From 91cc41aba80dd81e0b060014b32968ca36d69e3f Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Fri, 24 Jun 2016 18:14:13 -0400 Subject: [PATCH 010/310] drm/msm/sde: sde encoder virtualization Split SDE encoder into virtual and physical encoders. Virtual encoders are containers, one per logical display that contain one or more physical encoders. 
Physical encoders manage the INTF hardware. Change-Id: I6342511c59568c76278a519b84f93338157e59fa Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/Makefile | 2 + drivers/gpu/drm/msm/sde/sde_encoder.c | 468 ++++++++---------- drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 80 +++ .../gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 25 + .../gpu/drm/msm/sde/sde_encoder_phys_vid.c | 280 +++++++++++ 5 files changed, 596 insertions(+), 259 deletions(-) create mode 100644 drivers/gpu/drm/msm/sde/sde_encoder_phys.h create mode 100644 drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c create mode 100644 drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 549b534a3f4d..61163578c06c 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -40,6 +40,8 @@ msm-y := \ mdp/mdp5/mdp5_smp.o \ sde/sde_crtc.o \ sde/sde_encoder.o \ + sde/sde_encoder_phys_vid.o \ + sde/sde_encoder_phys_cmd.o \ sde/sde_irq.o \ sde/sde_kms.o \ sde/sde_plane.o \ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 283d33b70b13..6c42cb0e82f3 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -10,6 +10,7 @@ * GNU General Public License for more details. */ +#include "msm_drv.h" #include "sde_kms.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" @@ -20,32 +21,11 @@ #include "sde_hw_mdp_ctl.h" #include "sde_mdp_formats.h" -#include "../dsi-staging/dsi_display.h" - -#define DBG(fmt, ...) 
DRM_DEBUG(fmt"\n", ##__VA_ARGS__) - -struct sde_encoder { - struct drm_encoder base; - spinlock_t intf_lock; - bool enabled; - uint32_t bus_scaling_client; - struct sde_hw_intf *hw_intf; - struct sde_hw_ctl *hw_ctl; - int drm_mode_enc; - - void (*vblank_callback)(void *); - void *vblank_callback_data; +#include "sde_encoder_phys.h" - struct mdp_irq vblank_irq; -}; -#define to_sde_encoder(x) container_of(x, struct sde_encoder, base) - -static struct sde_kms *get_kms(struct drm_encoder *drm_enc) -{ - struct msm_drm_private *priv = drm_enc->dev->dev_private; +#include "../dsi-staging/dsi_display.h" - return to_sde_kms(to_mdp_kms(priv->kms)); -} +#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base) #ifdef CONFIG_QCOM_BUS_SCALING #include @@ -62,6 +42,7 @@ static struct msm_bus_vectors mdp_bus_vectors[] = { MDP_BUS_VECTOR_ENTRY(0, 0), MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), }; + static struct msm_bus_paths mdp_bus_usecases[] = { { .num_paths = 1, .vectors = @@ -79,14 +60,14 @@ static struct msm_bus_scale_pdata mdp_bus_scale_table = { .name = "mdss_mdp", }; -static void bs_init(struct sde_encoder *sde_enc) +static void bs_init(struct sde_encoder_virt *sde_enc) { sde_enc->bus_scaling_client = msm_bus_scale_register_client(&mdp_bus_scale_table); DBG("bus scale client: %08x", sde_enc->bus_scaling_client); } -static void bs_fini(struct sde_encoder *sde_enc) +static void bs_fini(struct sde_encoder_virt *sde_enc) { if (sde_enc->bus_scaling_client) { msm_bus_scale_unregister_client(sde_enc->bus_scaling_client); @@ -94,7 +75,7 @@ static void bs_fini(struct sde_encoder *sde_enc) } } -static void bs_set(struct sde_encoder *sde_enc, int idx) +static void bs_set(struct sde_encoder_virt *sde_enc, int idx) { if (sde_enc->bus_scaling_client) { DBG("set bus scaling: %d", idx); @@ -104,242 +85,189 @@ static void bs_set(struct sde_encoder *sde_enc, int idx) } } #else -static void bs_init(struct sde_encoder *sde_enc) +static void bs_init(struct 
sde_encoder_virt *sde_enc) { } -static void bs_fini(struct sde_encoder *sde_enc) +static void bs_fini(struct sde_encoder_virt *sde_enc) { } -static void bs_set(struct sde_encoder *sde_enc, int idx) +static void bs_set(struct sde_encoder_virt *sde_enc, int idx) { } #endif -static bool sde_encoder_mode_fixup(struct drm_encoder *drm_enc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc, + struct sde_encoder_hw_resources *hw_res) { - DBG(""); - return true; -} + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + int i = 0; -static void sde_encoder_mode_set(struct drm_encoder *drm_enc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ + DBG(""); - struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); - struct intf_timing_params p = {0}; - uint32_t hsync_polarity = 0, vsync_polarity = 0; - struct sde_mdp_format_params *sde_fmt_params = NULL; - u32 fmt_fourcc = DRM_FORMAT_RGB888, fmt_mod = 0; - unsigned long lock_flags; - struct sde_hw_intf_cfg intf_cfg = {0}; - - mode = adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - /* DSI controller cannot handle active-low sync signals. 
*/ - if (sde_enc->hw_intf->cap->type != INTF_DSI) { - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - hsync_polarity = 1; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - vsync_polarity = 1; + if (!hw_res) { + DRM_ERROR("Invalid pointer"); + return; } - /* - * For edp only: - * DISPLAY_V_START = (VBP * HCYCLE) + HBP - * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP - */ - /* - * if (sde_enc->hw->cap->type == INTF_EDP) { - * display_v_start += mode->htotal - mode->hsync_start; - * display_v_end -= mode->hsync_start - mode->hdisplay; - * } - */ - - /* - * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html - * Active Region Front Porch Sync Back Porch - * <---------------------><----------------><---------><--------------> - * <--- [hv]display -----> - * <----------- [hv]sync_start ------------> - * <------------------- [hv]sync_end -----------------> - * <------------------------------ [hv]total -------------------------> - */ - - sde_fmt_params = sde_mdp_get_format_params(fmt_fourcc, fmt_mod); - - p.width = mode->hdisplay; /* active width */ - p.height = mode->vdisplay; /* active height */ - p.xres = p.width; /* Display panel width */ - p.yres = p.height; /* Display panel height */ - p.h_back_porch = mode->htotal - mode->hsync_end; - p.h_front_porch = mode->hsync_start - mode->hdisplay; - p.v_back_porch = mode->vtotal - mode->vsync_end; - p.v_front_porch = mode->vsync_start - mode->vdisplay; - p.hsync_pulse_width = mode->hsync_end - mode->hsync_start; - p.vsync_pulse_width = mode->vsync_end - mode->vsync_start; - p.hsync_polarity = hsync_polarity; - p.vsync_polarity = vsync_polarity; - p.border_clr = 0; - p.underflow_clr = 0xff; - p.hsync_skew = mode->hskew; - - intf_cfg.intf = sde_enc->hw_intf->idx; - intf_cfg.wb = SDE_NONE; - - spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); - sde_enc->hw_intf->ops.setup_timing_gen(sde_enc->hw_intf, &p, - sde_fmt_params); - sde_enc->hw_ctl->ops.setup_intf_cfg(sde_enc->hw_ctl, &intf_cfg); - 
spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); + /* Query resources used by phys encs, expected to be without overlap */ + memset(hw_res, 0, sizeof(*hw_res)); + for (i = 0; i < sde_enc->num_phys_encs; i++) { + struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; + + if (phys) + phys->phys_ops.get_hw_resources(phys, hw_res); + } } -static void sde_encoder_wait_for_vblank(struct sde_encoder *sde_enc) +static void sde_encoder_destroy(struct drm_encoder *drm_enc) { - struct sde_kms *sde_kms = get_kms(&sde_enc->base); - struct mdp_kms *mdp_kms = &sde_kms->base; + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + int i = 0; DBG(""); - mdp_irq_wait(mdp_kms, sde_enc->vblank_irq.irqmask); -} -static void sde_encoder_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct sde_encoder *sde_enc = container_of(irq, struct sde_encoder, - vblank_irq); - struct intf_status status = { 0 }; - unsigned long lock_flags; + for (i = 0; i < ARRAY_SIZE(sde_enc->phys_encs); i++) { + struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); - if (sde_enc->vblank_callback) - sde_enc->vblank_callback(sde_enc->vblank_callback_data); - spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); + if (phys) { + phys->phys_ops.destroy(phys); + --sde_enc->num_phys_encs; + sde_enc->phys_encs[i] = NULL; + } + } - sde_enc->hw_intf->ops.get_status(sde_enc->hw_intf, &status); + if (sde_enc->num_phys_encs) { + DRM_ERROR("Expected num_phys_encs to be 0 not %d\n", + sde_enc->num_phys_encs); + } + + drm_encoder_cleanup(drm_enc); + bs_fini(sde_enc); + kfree(sde_enc); } -static void sde_encoder_disable(struct drm_encoder *drm_enc) +static bool sde_encoder_virt_mode_fixup(struct drm_encoder *drm_enc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); - struct sde_kms *sde_kms = get_kms(drm_enc); - struct mdp_kms *mdp_kms = 
&(sde_kms->base); - unsigned long lock_flags; + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + int i = 0; DBG(""); - if (WARN_ON(!sde_enc->enabled)) - return; + for (i = 0; i < sde_enc->num_phys_encs; i++) { + struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); - sde_enc->hw_intf->ops.enable_timing(sde_enc->hw_intf, 0); - spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - sde_encoder_wait_for_vblank(sde_enc); - - mdp_irq_unregister(mdp_kms, &sde_enc->vblank_irq); - bs_set(sde_enc, 0); - sde_enc->enabled = false; + if (phys) { + phys->phys_ops.mode_fixup(phys, mode, adjusted_mode); + if (memcmp(mode, adjusted_mode, sizeof(*mode)) != 0) { + DRM_ERROR("adjusted modes not supported\n"); + return false; + } + } + } + + return true; } -static void sde_encoder_enable(struct drm_encoder *drm_enc) +static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); - struct mdp_kms *mdp_kms = &(get_kms(drm_enc)->base); - unsigned long lock_flags; + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + int i = 0; DBG(""); - if (WARN_ON(sde_enc->enabled)) - return; + for (i = 0; i < sde_enc->num_phys_encs; i++) { + struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - bs_set(sde_enc, 1); - spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); - sde_enc->hw_intf->ops.enable_timing(sde_enc->hw_intf, 1); - spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); - sde_enc->enabled = true; - - 
mdp_irq_register(mdp_kms, &sde_enc->vblank_irq); - DBG("Registered IRQ for intf %d mask 0x%X", sde_enc->hw_intf->idx, - sde_enc->vblank_irq.irqmask); + if (phys) { + phys->phys_ops.mode_set(phys, mode, adjusted_mode); + if (memcmp(mode, adjusted_mode, sizeof(*mode)) != 0) + DRM_ERROR("adjusted modes not supported\n"); + } + } } -void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc, - struct sde_encoder_hw_resources *hw_res) +static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) { - struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + int i = 0; DBG(""); - if (WARN_ON(!hw_res)) - return; + bs_set(sde_enc, 1); - memset(hw_res, 0, sizeof(*hw_res)); - hw_res->intfs[sde_enc->hw_intf->idx] = true; + for (i = 0; i < sde_enc->num_phys_encs; i++) { + struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; + + if (phys) + phys->phys_ops.enable(phys); + } } -static void sde_encoder_destroy(struct drm_encoder *drm_enc) +static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) { - struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + int i = 0; DBG(""); - drm_encoder_cleanup(drm_enc); - bs_fini(sde_enc); - kfree(sde_enc->hw_intf); - kfree(sde_enc); + + for (i = 0; i < sde_enc->num_phys_encs; i++) { + struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; + + if (phys && phys->phys_ops.disable) + phys->phys_ops.disable(phys); + } + + bs_set(sde_enc, 0); } static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = { - .mode_fixup = sde_encoder_mode_fixup, - .mode_set = sde_encoder_mode_set, - .disable = sde_encoder_disable, - .enable = sde_encoder_enable, + .mode_fixup = sde_encoder_virt_mode_fixup, + .mode_set = sde_encoder_virt_mode_set, + .disable = sde_encoder_virt_disable, + .enable = sde_encoder_virt_enable, }; -static const struct drm_encoder_funcs sde_encoder_funcs = {.destroy = - 
sde_encoder_destroy, +static const struct drm_encoder_funcs sde_encoder_funcs = { + .destroy = sde_encoder_destroy, }; -static int sde_encoder_setup_hw(struct sde_encoder *sde_enc, - struct sde_kms *sde_kms, - enum sde_intf intf_idx, - enum sde_ctl ctl_idx) +static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog, + enum sde_intf_type type, u32 instance) { - int ret = 0; + int i = 0; DBG(""); - sde_enc->hw_intf = sde_hw_intf_init(intf_idx, sde_kms->mmio, - sde_kms->catalog); - if (!sde_enc->hw_intf) - return -EINVAL; + for (i = 0; i < catalog->intf_count; i++) { + if (catalog->intf[i].type == type + && catalog->intf[i].controller_id == instance) { + return catalog->intf[i].id; + } + } - sde_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio, - sde_kms->catalog); - if (!sde_enc->hw_ctl) - return -EINVAL; + return INTF_MAX; +} - return ret; +static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + unsigned long lock_flags; + + DBG(""); + + spin_lock_irqsave(&sde_enc->spin_lock, lock_flags); + if (sde_enc->kms_vblank_callback) + sde_enc->kms_vblank_callback(sde_enc->kms_vblank_callback_data); + spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags); } -static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder *sde_enc, +static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms, enum sde_intf intf_idx, enum sde_ctl ctl_idx) @@ -348,15 +276,30 @@ static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder *sde_enc, DBG(""); - ret = sde_encoder_setup_hw(sde_enc, sde_kms, intf_idx, ctl_idx); - if (!ret) { - sde_enc->vblank_irq.irq = sde_encoder_vblank_irq; - sde_enc->vblank_irq.irqmask = 0x8000000; + if (sde_enc->num_phys_encs >= ARRAY_SIZE(sde_enc->phys_encs)) { + DRM_ERROR("Too many video encoders %d, unable to add\n", + sde_enc->num_phys_encs); + ret = -EINVAL; + } else { + struct sde_encoder_virt_ops parent_ops = 
{ + sde_encoder_vblank_callback + }; + struct sde_encoder_phys *enc = + sde_encoder_phys_vid_init(sde_kms, intf_idx, ctl_idx, + &sde_enc->base, parent_ops); + if (IS_ERR(enc)) + ret = PTR_ERR(enc); + + if (!ret) { + sde_enc->phys_encs[sde_enc->num_phys_encs] = enc; + ++sde_enc->num_phys_encs; + } } + return ret; } -static int sde_encoder_setup_hdmi(struct sde_encoder *sde_enc, +static int sde_encoder_setup_hdmi(struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms, int *hdmi_info) { int ret = 0; @@ -364,9 +307,7 @@ static int sde_encoder_setup_hdmi(struct sde_encoder *sde_enc, DBG(""); - sde_enc->drm_mode_enc = DRM_MODE_ENCODER_TMDS; - - intf_idx = INTF_3; + intf_idx = sde_encoder_get_intf(sde_kms->catalog, INTF_HDMI, 0); if (intf_idx == INTF_MAX) ret = -EINVAL; @@ -379,7 +320,7 @@ static int sde_encoder_setup_hdmi(struct sde_encoder *sde_enc, return ret; } -static int sde_encoder_setup_dsi(struct sde_encoder *sde_enc, +static int sde_encoder_setup_dsi(struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms, struct dsi_display_info *dsi_info) { @@ -388,30 +329,27 @@ static int sde_encoder_setup_dsi(struct sde_encoder *sde_enc, DBG(""); - sde_enc->drm_mode_enc = DRM_MODE_ENCODER_DSI; - - if (WARN_ON(dsi_info->num_of_h_tiles > 1)) { - DBG("Dual DSI mode not yet supported"); - ret = -EINVAL; - } + WARN_ON(dsi_info->num_of_h_tiles < 1); - WARN_ON(dsi_info->num_of_h_tiles != 1); - dsi_info->num_of_h_tiles = 1; + if (dsi_info->num_of_h_tiles == 0) + dsi_info->num_of_h_tiles = 1; DBG("dsi_info->num_of_h_tiles %d h_tiled %d dsi_info->h_tile_ids %d ", - dsi_info->num_of_h_tiles, dsi_info->h_tiled, - dsi_info->h_tile_ids[0]); + dsi_info->num_of_h_tiles, dsi_info->h_tiled, + dsi_info->h_tile_ids[0]); - for (i = 0; i < !ret && dsi_info->num_of_h_tiles; i++) { - enum sde_intf intf_idx = INTF_1; + for (i = 0; i < dsi_info->num_of_h_tiles && !ret; i++) { + enum sde_intf intf_idx = INTF_MAX; enum sde_ctl ctl_idx = CTL_0; + intf_idx = sde_encoder_get_intf(sde_kms->catalog, + 
INTF_DSI, dsi_info->h_tile_ids[i]); if (intf_idx == INTF_MAX) { DBG("Error: could not get the interface id"); ret = -EINVAL; } - /* Get DSI modes, create both VID & CMD Phys Encoders */ + /* Create both VID and CMD Phys Encoders here */ if (!ret) ret = sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, @@ -429,13 +367,13 @@ struct display_probe_info { }; static struct drm_encoder *sde_encoder_virt_init(struct drm_device *dev, - struct display_probe_info - *display) + struct display_probe_info *display) { struct msm_drm_private *priv = dev->dev_private; struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(priv->kms)); struct drm_encoder *drm_enc = NULL; - struct sde_encoder *sde_enc = NULL; + struct sde_encoder_virt *sde_enc = NULL; + int drm_encoder_mode = DRM_MODE_ENCODER_NONE; int ret = 0; DBG(""); @@ -447,31 +385,35 @@ static struct drm_encoder *sde_encoder_virt_init(struct drm_device *dev, } if (display->type == INTF_DSI) { + drm_encoder_mode = DRM_MODE_ENCODER_DSI; ret = sde_encoder_setup_dsi(sde_enc, sde_kms, &display->dsi_info); + } else if (display->type == INTF_HDMI) { + drm_encoder_mode = DRM_MODE_ENCODER_TMDS; ret = sde_encoder_setup_hdmi(sde_enc, sde_kms, &display->hdmi_info); } else { - DBG("No valid displays found"); + DRM_ERROR("No valid displays found\n"); ret = -EINVAL; } + if (ret) goto fail; - spin_lock_init(&sde_enc->intf_lock); + spin_lock_init(&sde_enc->spin_lock); drm_enc = &sde_enc->base; - drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, - sde_enc->drm_mode_enc); + drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_encoder_mode); drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs); bs_init(sde_enc); - DBG("Created sde_encoder for intf %d", sde_enc->hw_intf->idx); + DBG("Created encoder"); return drm_enc; fail: + DRM_ERROR("Failed to create encoder\n"); if (drm_enc) sde_encoder_destroy(drm_enc); @@ -492,10 +434,12 @@ static int sde_encoder_probe_hdmi(struct drm_device *dev) enc = sde_encoder_virt_init(dev, &probe_info); if 
(IS_ERR(enc)) ret = PTR_ERR(enc); - else { - /* Register new encoder with the upper layer */ + + if (!ret) { + /* Register new encoder with the upper layer */ priv->encoders[priv->num_encoders++] = enc; } + return ret; } @@ -510,50 +454,56 @@ static int sde_encoder_probe_dsi(struct drm_device *dev) num_displays = dsi_display_get_num_of_displays(); DBG("num_displays %d", num_displays); + + if (priv->num_encoders + num_displays > ARRAY_SIZE(priv->encoders)) { + DBG("Too many displays found in probe"); + return -EINVAL; + } + for (i = 0; i < num_displays; i++) { + struct dsi_display *dsi = dsi_display_get_display_by_index(i); if (dsi_display_is_active(dsi)) { + struct drm_encoder *enc = NULL; struct display_probe_info probe_info = { 0 }; - DBG("display %d/%d is active", i, num_displays); probe_info.type = INTF_DSI; + DBG("display %d is active", i); + ret = dsi_display_get_info(dsi, &probe_info.dsi_info); - if (WARN_ON(ret)) - DBG("Failed to retrieve dsi panel info"); - else { - struct drm_encoder *enc = - sde_encoder_virt_init(dev, - &probe_info); - if (IS_ERR(enc)) - return PTR_ERR(enc); - - ret = dsi_display_drm_init(dsi, enc); - if (ret) - return ret; - - /* Register new encoder with the upper layer */ - priv->encoders[priv->num_encoders++] = enc; - } - } else - DBG("display %d/%d is not active", i, num_displays); + if (ret) + return ret; + + enc = sde_encoder_virt_init(dev, &probe_info); + if (IS_ERR(enc)) + return PTR_ERR(enc); + + ret = dsi_display_drm_init(dsi, enc); + if (ret) + return ret; + + /* Register new encoder with the upper layer */ + priv->encoders[priv->num_encoders++] = enc; + } } return ret; } void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, - void (*cb)(void *), void *data) { - struct sde_encoder *sde_enc = to_sde_encoder(drm_enc); + void (*cb)(void *), void *data) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); unsigned long lock_flags; DBG(""); - spin_lock_irqsave(&sde_enc->intf_lock, lock_flags); - 
sde_enc->vblank_callback = cb; - sde_enc->vblank_callback_data = data; - spin_unlock_irqrestore(&sde_enc->intf_lock, lock_flags); + spin_lock_irqsave(&sde_enc->spin_lock, lock_flags); + sde_enc->kms_vblank_callback = cb; + sde_enc->kms_vblank_callback_data = data; + spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags); } /* encoders init, @@ -566,14 +516,14 @@ void sde_encoders_init(struct drm_device *dev) DBG(""); - /* Start num_encoders at 0, probe functions will increment */ + /* Start num_encoders at 0, probe functions will increment */ priv->num_encoders = 0; ret = sde_encoder_probe_dsi(dev); if (ret) - DBG("Error probing DSI, %d", ret); + DRM_ERROR("Error probing DSI, %d\n", ret); else { ret = sde_encoder_probe_hdmi(dev); if (ret) - DBG("Error probing HDMI, %d", ret); + DRM_ERROR("Error probing HDMI, %d\n", ret); } } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h new file mode 100644 index 000000000000..427a6d94322e --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __SDE_ENCODER_PHYS_H__ +#define __SDE_ENCODER_PHYS_H__ + +#include "sde_kms.h" +#include "sde_hw_intf.h" +#include "sde_hw_mdp_ctl.h" + +#define MAX_PHYS_ENCODERS_PER_VIRTUAL 4 + +struct sde_encoder_phys; + +struct sde_encoder_virt_ops { + void (*handle_vblank_virt)(struct drm_encoder *); +}; + +struct sde_encoder_phys_ops { + void (*mode_set)(struct sde_encoder_phys *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + bool (*mode_fixup)(struct sde_encoder_phys *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + void (*enable)(struct sde_encoder_phys *encoder); + void (*disable)(struct sde_encoder_phys *encoder); + void (*destroy)(struct sde_encoder_phys *encoder); + void (*get_hw_resources)(struct sde_encoder_phys *encoder, + struct sde_encoder_hw_resources *hw_res); +}; + +struct sde_encoder_phys { + struct drm_encoder *parent; + struct sde_encoder_virt_ops parent_ops; + struct sde_encoder_phys_ops phys_ops; + struct sde_hw_intf *hw_intf; + struct sde_hw_ctl *hw_ctl; + struct mdp_kms *mdp_kms; + struct drm_display_mode cached_mode; + bool enabled; + spinlock_t spin_lock; +}; + +struct sde_encoder_phys_vid { + struct sde_encoder_phys base; + struct mdp_irq vblank_irq; +}; + +struct sde_encoder_virt { + struct drm_encoder base; + spinlock_t spin_lock; + uint32_t bus_scaling_client; + + int num_phys_encs; + struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; + + void (*kms_vblank_callback)(void *); + void *kms_vblank_callback_data; +}; + +struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms, + enum sde_intf intf_idx, + enum sde_ctl ctl_idx, + struct drm_encoder *parent, + struct sde_encoder_virt_ops + parent_ops); + +#endif /* __sde_encoder_phys_H__ */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c new file mode 100644 index 000000000000..693e1f33e7d8 --- /dev/null +++ 
b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "msm_drv.h" +#include "sde_kms.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_intf.h" +#include "sde_mdp_formats.h" + +#include "sde_encoder_phys.h" diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c new file mode 100644 index 000000000000..55fad67cbf12 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "msm_drv.h" +#include "sde_kms.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + +#include "sde_encoder_phys.h" +#include "sde_mdp_formats.h" + + +#define to_sde_encoder_phys_vid(x) \ + container_of(x, struct sde_encoder_phys_vid, base) + +static bool sde_encoder_phys_vid_mode_fixup(struct sde_encoder_phys *drm_enc, + const struct drm_display_mode *mode, + struct drm_display_mode + *adjusted_mode) +{ + DBG(""); + return true; +} + +static void sde_encoder_phys_vid_mode_set(struct sde_encoder_phys *phys_enc, + struct drm_display_mode *mode, + struct drm_display_mode + *adjusted_mode) +{ + mode = adjusted_mode; + phys_enc->cached_mode = *adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, + mode->type, mode->flags); +} + +static void sde_encoder_phys_vid_setup_timing_engine(struct sde_encoder_phys + *phys_enc) +{ + struct drm_display_mode *mode = &phys_enc->cached_mode; + struct intf_timing_params p = { 0 }; + uint32_t hsync_polarity = 0; + uint32_t vsync_polarity = 0; + struct sde_mdp_format_params *sde_fmt_params = NULL; + u32 fmt_fourcc = DRM_FORMAT_RGB888; + u32 fmt_mod = 0; + unsigned long lock_flags; + struct sde_hw_intf_cfg intf_cfg = {0}; + + DBG("enable mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + /* DSI controller cannot handle active-low sync signals. 
*/ + if (phys_enc->hw_intf->cap->type != INTF_DSI) { + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + hsync_polarity = 1; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + vsync_polarity = 1; + } + + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + /* + * if (vid_enc->hw->cap->type == INTF_EDP) { + * display_v_start += mode->htotal - mode->hsync_start; + * display_v_end -= mode->hsync_start - mode->hdisplay; + * } + */ + + /* + * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html + * Active Region Front Porch Sync Back Porch + * <---------------------><----------------><---------><--------------> + * <--- [hv]display -----> + * <----------- [hv]sync_start ------------> + * <------------------- [hv]sync_end -----------------> + * <------------------------------ [hv]total -------------------------> + */ + + sde_fmt_params = sde_mdp_get_format_params(fmt_fourcc, fmt_mod); + + p.width = mode->hdisplay; /* active width */ + p.height = mode->vdisplay; /* active height */ + p.xres = p.width; /* Display panel width */ + p.yres = p.height; /* Display panel height */ + p.h_back_porch = mode->htotal - mode->hsync_end; + p.h_front_porch = mode->hsync_start - mode->hdisplay; + p.v_back_porch = mode->vtotal - mode->vsync_end; + p.v_front_porch = mode->vsync_start - mode->vdisplay; + p.hsync_pulse_width = mode->hsync_end - mode->hsync_start; + p.vsync_pulse_width = mode->vsync_end - mode->vsync_start; + p.hsync_polarity = hsync_polarity; + p.vsync_polarity = vsync_polarity; + p.border_clr = 0; + p.underflow_clr = 0xff; + p.hsync_skew = mode->hskew; + + intf_cfg.intf = phys_enc->hw_intf->idx; + intf_cfg.wb = SDE_NONE; + + spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); + phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, &p, + sde_fmt_params); + phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); + spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); +} + +static void 
sde_encoder_phys_vid_wait_for_vblank(struct sde_encoder_phys_vid + *vid_enc) +{ + DBG(""); + mdp_irq_wait(vid_enc->base.mdp_kms, vid_enc->vblank_irq.irqmask); +} + +static void sde_encoder_phys_vid_vblank_irq(struct mdp_irq *irq, + uint32_t irqstatus) +{ + struct sde_encoder_phys_vid *vid_enc = + container_of(irq, struct sde_encoder_phys_vid, + vblank_irq); + struct sde_encoder_phys *phys_enc = &vid_enc->base; + struct intf_status status = { 0 }; + + phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &status); + phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent); +} + +static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_vid *vid_enc = + to_sde_encoder_phys_vid(phys_enc); + unsigned long lock_flags; + + DBG(""); + + if (WARN_ON(phys_enc->enabled)) + return; + + sde_encoder_phys_vid_setup_timing_engine(phys_enc); + + spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1); + spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); + + phys_enc->enabled = true; + + mdp_irq_register(phys_enc->mdp_kms, &vid_enc->vblank_irq); + DBG("Registered IRQ for intf %d mask 0x%X", phys_enc->hw_intf->idx, + vid_enc->vblank_irq.irqmask); +} + +static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_vid *vid_enc = + to_sde_encoder_phys_vid(phys_enc); + unsigned long lock_flags; + + DBG(""); + + if (WARN_ON(!phys_enc->enabled)) + return; + + spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0); + spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout 
buffer) don't latch properly.. + */ + sde_encoder_phys_vid_wait_for_vblank(vid_enc); + mdp_irq_unregister(phys_enc->mdp_kms, &vid_enc->vblank_irq); + phys_enc->enabled = false; +} + +static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_vid *vid_enc = + to_sde_encoder_phys_vid(phys_enc); + DBG(""); + kfree(phys_enc->hw_intf); + kfree(vid_enc); +} + +static void sde_encoder_phys_vid_get_hw_resources(struct sde_encoder_phys + *phys_enc, struct + sde_encoder_hw_resources + *hw_res) +{ + DBG(""); + hw_res->intfs[phys_enc->hw_intf->idx] = true; +} + +static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops) +{ + ops->mode_set = sde_encoder_phys_vid_mode_set; + ops->mode_fixup = sde_encoder_phys_vid_mode_fixup; + ops->enable = sde_encoder_phys_vid_enable; + ops->disable = sde_encoder_phys_vid_disable; + ops->destroy = sde_encoder_phys_vid_destroy; + ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources; +} + +struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms, + enum sde_intf intf_idx, + enum sde_ctl ctl_idx, + struct drm_encoder *parent, + struct sde_encoder_virt_ops + parent_ops) +{ + struct sde_encoder_phys *phys_enc = NULL; + struct sde_encoder_phys_vid *vid_enc = NULL; + int ret = 0; + + DBG(""); + + vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL); + if (!vid_enc) { + ret = -ENOMEM; + goto fail; + } + phys_enc = &vid_enc->base; + + phys_enc->hw_intf = + sde_hw_intf_init(intf_idx, sde_kms->mmio, sde_kms->catalog); + if (!phys_enc->hw_intf) { + ret = -ENOMEM; + goto fail; + } + + phys_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio, + sde_kms->catalog); + if (!phys_enc->hw_ctl) { + ret = -ENOMEM; + goto fail; + } + + sde_encoder_phys_vid_init_cbs(&phys_enc->phys_ops); + phys_enc->parent = parent; + phys_enc->parent_ops = parent_ops; + phys_enc->mdp_kms = &sde_kms->base; + vid_enc->vblank_irq.irq = sde_encoder_phys_vid_vblank_irq; + vid_enc->vblank_irq.irqmask = 
0x8000000; + spin_lock_init(&phys_enc->spin_lock); + + DBG("Created sde_encoder_phys_vid for intf %d", phys_enc->hw_intf->idx); + + return phys_enc; + +fail: + DRM_ERROR("Failed to create encoder\n"); + if (vid_enc) + sde_encoder_phys_vid_destroy(phys_enc); + + return ERR_PTR(ret); +} -- GitLab From 77134e9ea252d5a95fffbfaa28b15274e9c7482d Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Sun, 26 Jun 2016 10:08:25 -0400 Subject: [PATCH 011/310] drm/msm/sde: programmable pre-fetch support for video encoders Add support in encoder for programming early fetch in the vertical front porch. Change-Id: I60fcf4a4e6aea80292b590ee14506579123f372d Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_encoder.c | 117 ++++++-- .../gpu/drm/msm/sde/sde_encoder_phys_vid.c | 280 +++++++++++++----- drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 2 + drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c | 12 +- 4 files changed, 303 insertions(+), 108 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 6c42cb0e82f3..fbca699e9ada 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -101,37 +101,46 @@ static void bs_set(struct sde_encoder_virt *sde_enc, int idx) void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc, struct sde_encoder_hw_resources *hw_res) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; int i = 0; DBG(""); - if (!hw_res) { + if (!hw_res || !drm_enc) { DRM_ERROR("Invalid pointer"); return; } + sde_enc = to_sde_encoder_virt(drm_enc); + /* Query resources used by phys encs, expected to be without overlap */ memset(hw_res, 0, sizeof(*hw_res)); for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - if (phys) + if (phys && phys->phys_ops.get_hw_resources) phys->phys_ops.get_hw_resources(phys, hw_res); } } static void sde_encoder_destroy(struct drm_encoder 
*drm_enc) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; int i = 0; DBG(""); + if (!drm_enc) { + DRM_ERROR("Invalid pointer"); + return; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + for (i = 0; i < ARRAY_SIZE(sde_enc->phys_encs); i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - if (phys) { + if (phys && phys->phys_ops.destroy) { phys->phys_ops.destroy(phys); --sde_enc->num_phys_encs; sde_enc->phys_encs[i] = NULL; @@ -152,70 +161,103 @@ static bool sde_encoder_virt_mode_fixup(struct drm_encoder *drm_enc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; int i = 0; + bool ret = true; DBG(""); + if (!drm_enc) { + DRM_ERROR("Invalid pointer"); + return false; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - if (phys) { - phys->phys_ops.mode_fixup(phys, mode, adjusted_mode); - if (memcmp(mode, adjusted_mode, sizeof(*mode)) != 0) { - DRM_ERROR("adjusted modes not supported\n"); - return false; + if (phys && phys->phys_ops.mode_fixup) { + ret = + phys->phys_ops.mode_fixup(phys, mode, + adjusted_mode); + if (!ret) { + DBG("Mode unsupported by phys_enc %d", i); + break; + } + + if (sde_enc->num_phys_encs > 1) { + DBG("ModeFix only checking 1 phys_enc"); + break; } } } - return true; + return ret; } static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; int i = 0; DBG(""); + if (!drm_enc) { + DRM_ERROR("Invalid pointer"); + return; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = 
sde_enc->phys_encs[i]; - if (phys) { + if (phys && phys->phys_ops.mode_set) phys->phys_ops.mode_set(phys, mode, adjusted_mode); - if (memcmp(mode, adjusted_mode, sizeof(*mode)) != 0) - DRM_ERROR("adjusted modes not supported\n"); - } } } static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; int i = 0; DBG(""); + if (!drm_enc) { + DRM_ERROR("Invalid pointer"); + return; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + bs_set(sde_enc, 1); for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - if (phys) + if (phys && phys->phys_ops.enable) phys->phys_ops.enable(phys); } } static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; int i = 0; DBG(""); + if (!drm_enc) { + DRM_ERROR("Invalid pointer"); + return; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; @@ -256,11 +298,18 @@ static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog, static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc) { - struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_virt *sde_enc = NULL; unsigned long lock_flags; DBG(""); + if (!drm_enc) { + DRM_ERROR("Invalid pointer"); + return; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + spin_lock_irqsave(&sde_enc->spin_lock, lock_flags); if (sde_enc->kms_vblank_callback) sde_enc->kms_vblank_callback(sde_enc->kms_vblank_callback_data); @@ -286,7 +335,8 @@ static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder_virt *sde_enc, }; struct sde_encoder_phys *enc = sde_encoder_phys_vid_init(sde_kms, intf_idx, ctl_idx, - &sde_enc->base, parent_ops); + &sde_enc->base, + parent_ops); if (IS_ERR(enc)) 
ret = PTR_ERR(enc); @@ -304,6 +354,7 @@ static int sde_encoder_setup_hdmi(struct sde_encoder_virt *sde_enc, { int ret = 0; enum sde_intf intf_idx = INTF_MAX; + enum sde_ctl ctl_idx = CTL_2; DBG(""); @@ -314,8 +365,7 @@ static int sde_encoder_setup_hdmi(struct sde_encoder_virt *sde_enc, if (!ret) ret = sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, - intf_idx, - CTL_2); + intf_idx, ctl_idx); return ret; } @@ -343,13 +393,14 @@ static int sde_encoder_setup_dsi(struct sde_encoder_virt *sde_enc, enum sde_ctl ctl_idx = CTL_0; intf_idx = sde_encoder_get_intf(sde_kms->catalog, - INTF_DSI, dsi_info->h_tile_ids[i]); + INTF_DSI, + dsi_info->h_tile_ids[i]); if (intf_idx == INTF_MAX) { DBG("Error: could not get the interface id"); ret = -EINVAL; } - /* Create both VID and CMD Phys Encoders here */ + /* Create both VID and CMD Phys Encoders here */ if (!ret) ret = sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, @@ -511,12 +562,22 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, */ void sde_encoders_init(struct drm_device *dev) { - struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_private *priv = NULL; int ret = 0; DBG(""); - /* Start num_encoders at 0, probe functions will increment */ + if (!dev || !dev->dev_private) { + DRM_ERROR("Invalid pointer"); + return; + } + + priv = dev->dev_private; + if (!priv->kms) { + DRM_ERROR("Invalid pointer"); + return; + } + /* Start num_encoders at 0, probe functions will increment */ priv->num_encoders = 0; ret = sde_encoder_probe_dsi(dev); if (ret) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 55fad67cbf12..0528c3d1ff8d 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -20,23 +20,185 @@ #include "sde_encoder_phys.h" #include "sde_mdp_formats.h" - #define to_sde_encoder_phys_vid(x) \ container_of(x, struct sde_encoder_phys_vid, base) -static bool 
sde_encoder_phys_vid_mode_fixup(struct sde_encoder_phys *drm_enc, - const struct drm_display_mode *mode, - struct drm_display_mode - *adjusted_mode) +static void drm_mode_to_intf_timing_params( + const struct sde_encoder_phys *phys_enc, + const struct drm_display_mode *mode, + struct intf_timing_params *timing) +{ + memset(timing, 0, sizeof(*timing)); + /* + * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html + * Active Region Front Porch Sync Back Porch + * <-----------------><------------><-----><-----------> + * <- [hv]display ---> + * <--------- [hv]sync_start ------> + * <----------------- [hv]sync_end -------> + * <---------------------------- [hv]total -------------> + */ + timing->width = mode->hdisplay; /* active width */ + timing->height = mode->vdisplay; /* active height */ + timing->xres = timing->width; + timing->yres = timing->height; + timing->h_back_porch = mode->htotal - mode->hsync_end; + timing->h_front_porch = mode->hsync_start - mode->hdisplay; + timing->v_back_porch = mode->vtotal - mode->vsync_end; + timing->v_front_porch = mode->vsync_start - mode->vdisplay; + timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start; + timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start; + timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0; + timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; + timing->border_clr = 0; + timing->underflow_clr = 0xff; + timing->hsync_skew = mode->hskew; + + /* DSI controller cannot handle active-low sync signals. 
*/ + if (phys_enc->hw_intf->cap->type == INTF_DSI) { + timing->hsync_polarity = 0; + timing->vsync_polarity = 0; + } + + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + /* + * if (vid_enc->hw->cap->type == INTF_EDP) { + * display_v_start += mode->htotal - mode->hsync_start; + * display_v_end -= mode->hsync_start - mode->hdisplay; + * } + */ +} + +static inline u32 get_horizontal_total(const struct intf_timing_params *timing) +{ + u32 active = timing->xres; + u32 inactive = + timing->h_back_porch + timing->h_front_porch + + timing->hsync_pulse_width; + return active + inactive; +} + +static inline u32 get_vertical_total(const struct intf_timing_params *timing) +{ + u32 active = timing->yres; + u32 inactive = + timing->v_back_porch + timing->v_front_porch + + timing->vsync_pulse_width; + return active + inactive; +} + +/* + * programmable_fetch_get_num_lines: + * Number of fetch lines in vertical front porch + * @timing: Pointer to the intf timing information for the requested mode + * + * Returns the number of fetch lines in vertical front porch at which mdp + * can start fetching the next frame. + * + * Number of needed prefetch lines is anything that cannot be absorbed in the + * start of frame time (back porch + vsync pulse width). + * + * Some panels have very large VFP, however we only need a total number of + * lines based on the chip worst case latencies. + */ +static u32 programmable_fetch_get_num_lines( + struct sde_encoder_phys *phys_enc, + const struct intf_timing_params *timing) +{ + u32 worst_case_needed_lines = + phys_enc->hw_intf->cap->prog_fetch_lines_worst_case; + u32 start_of_frame_lines = + timing->v_back_porch + timing->vsync_pulse_width; + u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines; + u32 actual_vfp_lines = 0; + + /* Fetch must be outside active lines, otherwise undefined. 
*/ + + if (start_of_frame_lines >= worst_case_needed_lines) { + DBG("Programmable fetch is not needed due to large vbp+vsw"); + actual_vfp_lines = 0; + } else if (timing->v_front_porch < needed_vfp_lines) { + /* Warn fetch needed, but not enough porch in panel config */ + pr_warn_once + ("low vbp+vfp may lead to perf issues in some cases\n"); + DBG("Less vfp than fetch requires, using entire vfp"); + actual_vfp_lines = timing->v_front_porch; + } else { + DBG("Room in vfp for needed prefetch"); + actual_vfp_lines = needed_vfp_lines; + } + + DBG("v_front_porch %u v_back_porch %u vsync_pulse_width %u", + timing->v_front_porch, timing->v_back_porch, + timing->vsync_pulse_width); + DBG("wc_lines %u needed_vfp_lines %u actual_vfp_lines %u", + worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines); + + return actual_vfp_lines; +} + +/* + * programmable_fetch_config: Programs HW to prefetch lines by offsetting + * the start of fetch into the vertical front porch for cases where the + * vsync pulse width and vertical back porch time is insufficient + * + * Gets # of lines to pre-fetch, then calculate VSYNC counter value. + * HW layer requires VSYNC counter of first pixel of tgt VFP line. 
+ * + * @timing: Pointer to the intf timing information for the requested mode + */ +static void programmable_fetch_config(struct sde_encoder_phys *phys_enc, + const struct intf_timing_params *timing) +{ + struct intf_prog_fetch f = { 0 }; + u32 vfp_fetch_lines = 0; + u32 horiz_total = 0; + u32 vert_total = 0; + u32 vfp_fetch_start_vsync_counter = 0; + unsigned long lock_flags; + + if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch)) + return; + + vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing); + if (vfp_fetch_lines) { + vert_total = get_vertical_total(timing); + horiz_total = get_horizontal_total(timing); + vfp_fetch_start_vsync_counter = + (vert_total - vfp_fetch_lines) * horiz_total + 1; + f.enable = 1; + f.fetch_start = vfp_fetch_start_vsync_counter; + } + + DBG("vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u", + vfp_fetch_lines, vfp_fetch_start_vsync_counter); + + spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); + phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f); + spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); +} + +static bool sde_encoder_phys_vid_mode_fixup( + struct sde_encoder_phys *phys_enc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { DBG(""); + + /* + * Modifying mode has consequences when the mode comes back to us + */ return true; } -static void sde_encoder_phys_vid_mode_set(struct sde_encoder_phys *phys_enc, - struct drm_display_mode *mode, - struct drm_display_mode - *adjusted_mode) +static void sde_encoder_phys_vid_mode_set( + struct sde_encoder_phys *phys_enc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { mode = adjusted_mode; phys_enc->cached_mode = *adjusted_mode; @@ -48,18 +210,22 @@ static void sde_encoder_phys_vid_mode_set(struct sde_encoder_phys *phys_enc, mode->type, mode->flags); } -static void sde_encoder_phys_vid_setup_timing_engine(struct sde_encoder_phys - *phys_enc) +static void 
sde_encoder_phys_vid_setup_timing_engine( + struct sde_encoder_phys *phys_enc) { struct drm_display_mode *mode = &phys_enc->cached_mode; struct intf_timing_params p = { 0 }; - uint32_t hsync_polarity = 0; - uint32_t vsync_polarity = 0; struct sde_mdp_format_params *sde_fmt_params = NULL; u32 fmt_fourcc = DRM_FORMAT_RGB888; u32 fmt_mod = 0; unsigned long lock_flags; - struct sde_hw_intf_cfg intf_cfg = {0}; + struct sde_hw_intf_cfg intf_cfg = { 0 }; + + if (WARN_ON(!phys_enc->hw_intf->ops.setup_timing_gen)) + return; + + if (WARN_ON(!phys_enc->hw_ctl->ops.setup_intf_cfg)) + return; DBG("enable mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", mode->base.id, mode->name, mode->vrefresh, mode->clock, @@ -67,66 +233,24 @@ static void sde_encoder_phys_vid_setup_timing_engine(struct sde_encoder_phys mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - /* DSI controller cannot handle active-low sync signals. */ - if (phys_enc->hw_intf->cap->type != INTF_DSI) { - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - hsync_polarity = 1; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - vsync_polarity = 1; - } - - /* - * For edp only: - * DISPLAY_V_START = (VBP * HCYCLE) + HBP - * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP - */ - /* - * if (vid_enc->hw->cap->type == INTF_EDP) { - * display_v_start += mode->htotal - mode->hsync_start; - * display_v_end -= mode->hsync_start - mode->hdisplay; - * } - */ - - /* - * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html - * Active Region Front Porch Sync Back Porch - * <---------------------><----------------><---------><--------------> - * <--- [hv]display -----> - * <----------- [hv]sync_start ------------> - * <------------------- [hv]sync_end -----------------> - * <------------------------------ [hv]total -------------------------> - */ + drm_mode_to_intf_timing_params(phys_enc, mode, &p); sde_fmt_params = sde_mdp_get_format_params(fmt_fourcc, fmt_mod); - p.width = mode->hdisplay; /* active 
width */ - p.height = mode->vdisplay; /* active height */ - p.xres = p.width; /* Display panel width */ - p.yres = p.height; /* Display panel height */ - p.h_back_porch = mode->htotal - mode->hsync_end; - p.h_front_porch = mode->hsync_start - mode->hdisplay; - p.v_back_porch = mode->vtotal - mode->vsync_end; - p.v_front_porch = mode->vsync_start - mode->vdisplay; - p.hsync_pulse_width = mode->hsync_end - mode->hsync_start; - p.vsync_pulse_width = mode->vsync_end - mode->vsync_start; - p.hsync_polarity = hsync_polarity; - p.vsync_polarity = vsync_polarity; - p.border_clr = 0; - p.underflow_clr = 0xff; - p.hsync_skew = mode->hskew; - intf_cfg.intf = phys_enc->hw_intf->idx; intf_cfg.wb = SDE_NONE; spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, &p, - sde_fmt_params); + sde_fmt_params); phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); + + programmable_fetch_config(phys_enc, &p); } -static void sde_encoder_phys_vid_wait_for_vblank(struct sde_encoder_phys_vid - *vid_enc) +static void sde_encoder_phys_vid_wait_for_vblank( + struct sde_encoder_phys_vid *vid_enc) { DBG(""); mdp_irq_wait(vid_enc->base.mdp_kms, vid_enc->vblank_irq.irqmask); @@ -139,9 +263,7 @@ static void sde_encoder_phys_vid_vblank_irq(struct mdp_irq *irq, container_of(irq, struct sde_encoder_phys_vid, vblank_irq); struct sde_encoder_phys *phys_enc = &vid_enc->base; - struct intf_status status = { 0 }; - phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &status); phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent); } @@ -156,6 +278,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) if (WARN_ON(phys_enc->enabled)) return; + if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) + return; + sde_encoder_phys_vid_setup_timing_engine(phys_enc); spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); @@ -180,6 +305,9 @@ static void 
sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) if (WARN_ON(!phys_enc->enabled)) return; + if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) + return; + spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0); spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); @@ -206,10 +334,9 @@ static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc) kfree(vid_enc); } -static void sde_encoder_phys_vid_get_hw_resources(struct sde_encoder_phys - *phys_enc, struct - sde_encoder_hw_resources - *hw_res) +static void sde_encoder_phys_vid_get_hw_resources( + struct sde_encoder_phys *phys_enc, + struct sde_encoder_hw_resources *hw_res) { DBG(""); hw_res->intfs[phys_enc->hw_intf->idx] = true; @@ -225,15 +352,16 @@ static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops) ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources; } -struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms, - enum sde_intf intf_idx, - enum sde_ctl ctl_idx, - struct drm_encoder *parent, - struct sde_encoder_virt_ops - parent_ops) +struct sde_encoder_phys *sde_encoder_phys_vid_init( + struct sde_kms *sde_kms, + enum sde_intf intf_idx, + enum sde_ctl ctl_idx, + struct drm_encoder *parent, + struct sde_encoder_virt_ops parent_ops) { struct sde_encoder_phys *phys_enc = NULL; struct sde_encoder_phys_vid *vid_enc = NULL; + u32 irq_mask = 0x8000000; int ret = 0; DBG(""); @@ -253,7 +381,7 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms, } phys_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio, - sde_kms->catalog); + sde_kms->catalog); if (!phys_enc->hw_ctl) { ret = -ENOMEM; goto fail; @@ -264,7 +392,7 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms, phys_enc->parent_ops = parent_ops; phys_enc->mdp_kms = &sde_kms->base; vid_enc->vblank_irq.irq = sde_encoder_phys_vid_vblank_irq; - vid_enc->vblank_irq.irqmask = 
0x8000000; + vid_enc->vblank_irq.irqmask = irq_mask; spin_lock_init(&phys_enc->spin_lock); DBG("Created sde_encoder_phys_vid for intf %d", phys_enc->hw_intf->idx); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 9b06aca45241..6aee5467739a 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -372,11 +372,13 @@ struct sde_cdm_cfg { * @features bit mask identifying sub-blocks/features * @type: Interface type(DSI, DP, HDMI) * @controller_id: Controller Instance ID in case of multiple of intf type + * @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch */ struct sde_intf_cfg { SDE_HW_BLK_INFO; u32 type; /* interface type*/ u32 controller_id; + u32 prog_fetch_lines_worst_case; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c index a756c515f45e..d8831af35bb7 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c @@ -244,13 +244,17 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg) .intf_count = 4, .intf = { {.id = INTF_0, .base = 0x0006B000, - .type = INTF_NONE, .controller_id = 0}, + .type = INTF_NONE, .controller_id = 0, + .prog_fetch_lines_worst_case = 21}, {.id = INTF_1, .base = 0x0006B800, - .type = INTF_DSI, .controller_id = 0}, + .type = INTF_DSI, .controller_id = 0, + .prog_fetch_lines_worst_case = 21}, {.id = INTF_2, .base = 0x0006C000, - .type = INTF_DSI, .controller_id = 1}, + .type = INTF_DSI, .controller_id = 1, + .prog_fetch_lines_worst_case = 21}, {.id = INTF_3, .base = 0x0006C800, - .type = INTF_HDMI, .controller_id = 0}, + .type = INTF_HDMI, .controller_id = 0, + .prog_fetch_lines_worst_case = 21}, }, .wb_count = 3, .wb = { -- GitLab From fd4d7865e24b962c4a412f901f39000361e09d4e Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Sun, 26 Jun 2016 10:09:34 -0400 Subject: [PATCH 012/310] drm/msm/sde: 
move sde encoder to new display interface Call panels via common display-manager interface rather than directly. Change-Id: I4fe86b6b206929217c0cf807a93287140d507e6c Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_encoder.c | 72 +++++++++++++-------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index fbca699e9ada..d1d9936431ef 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -22,8 +22,7 @@ #include "sde_mdp_formats.h" #include "sde_encoder_phys.h" - -#include "../dsi-staging/dsi_display.h" +#include "display_manager.h" #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base) @@ -372,29 +371,29 @@ static int sde_encoder_setup_hdmi(struct sde_encoder_virt *sde_enc, static int sde_encoder_setup_dsi(struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms, - struct dsi_display_info *dsi_info) + struct display_info *disp_info) { int ret = 0; int i = 0; DBG(""); - WARN_ON(dsi_info->num_of_h_tiles < 1); + WARN_ON(disp_info->num_of_h_tiles < 1); - if (dsi_info->num_of_h_tiles == 0) - dsi_info->num_of_h_tiles = 1; + if (disp_info->num_of_h_tiles == 0) + disp_info->num_of_h_tiles = 1; - DBG("dsi_info->num_of_h_tiles %d h_tiled %d dsi_info->h_tile_ids %d ", - dsi_info->num_of_h_tiles, dsi_info->h_tiled, - dsi_info->h_tile_ids[0]); + DBG("num_of_h_tiles %d h_tile_instance_0 %d h_tile_instance_1 %d\n", + disp_info->num_of_h_tiles, disp_info->h_tile_instance[0], + disp_info->h_tile_instance[1]); - for (i = 0; i < dsi_info->num_of_h_tiles && !ret; i++) { + for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { enum sde_intf intf_idx = INTF_MAX; enum sde_ctl ctl_idx = CTL_0; intf_idx = sde_encoder_get_intf(sde_kms->catalog, INTF_DSI, - dsi_info->h_tile_ids[i]); + disp_info->h_tile_instance[i]); if (intf_idx == INTF_MAX) { DBG("Error: could not get the interface id"); ret = -EINVAL; @@ -413,7 
+412,7 @@ static int sde_encoder_setup_dsi(struct sde_encoder_virt *sde_enc, struct display_probe_info { enum sde_intf_type type; - struct dsi_display_info dsi_info; + struct display_info disp_info; int hdmi_info; }; @@ -437,8 +436,9 @@ static struct drm_encoder *sde_encoder_virt_init(struct drm_device *dev, if (display->type == INTF_DSI) { drm_encoder_mode = DRM_MODE_ENCODER_DSI; - ret = - sde_encoder_setup_dsi(sde_enc, sde_kms, &display->dsi_info); + ret = sde_encoder_setup_dsi(sde_enc, + sde_kms, + &display->disp_info); } else if (display->type == INTF_HDMI) { drm_encoder_mode = DRM_MODE_ENCODER_TMDS; @@ -500,10 +500,12 @@ static int sde_encoder_probe_dsi(struct drm_device *dev) u32 ret = 0; u32 i = 0; u32 num_displays = 0; + struct display_manager *dm = priv->dm; + struct display_probe_info probe_info = { 0 }; DBG(""); - num_displays = dsi_display_get_num_of_displays(); + num_displays = display_manager_get_count(dm); DBG("num_displays %d", num_displays); if (priv->num_encoders + num_displays > ARRAY_SIZE(priv->encoders)) { @@ -512,32 +514,28 @@ static int sde_encoder_probe_dsi(struct drm_device *dev) } for (i = 0; i < num_displays; i++) { + struct drm_encoder *enc; - struct dsi_display *dsi = dsi_display_get_display_by_index(i); - - if (dsi_display_is_active(dsi)) { - struct drm_encoder *enc = NULL; - struct display_probe_info probe_info = { 0 }; - - probe_info.type = INTF_DSI; - - DBG("display %d is active", i); - - ret = dsi_display_get_info(dsi, &probe_info.dsi_info); - if (ret) - return ret; - - enc = sde_encoder_virt_init(dev, &probe_info); - if (IS_ERR(enc)) - return PTR_ERR(enc); + ret = display_manager_get_info_by_index(dm, i, + &probe_info.disp_info); + if (ret) { + pr_err("Failed to get display info, %d\n", ret); + return ret; + } - ret = dsi_display_drm_init(dsi, enc); - if (ret) - return ret; + enc = sde_encoder_virt_init(dev, &probe_info); + if (IS_ERR_OR_NULL(enc)) { + pr_err("encoder virt init failed\n"); + return PTR_ERR(enc); + } - /* Register 
new encoder with the upper layer */ - priv->encoders[priv->num_encoders++] = enc; + ret = display_manager_drm_init_by_index(dm, i, enc); + if (ret) { + pr_err("display drm init failed\n"); + return ret; } + + priv->encoders[priv->num_encoders++] = enc; } return ret; -- GitLab From c426d6674a9c821709318dfcbe935a787619b833 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Sun, 26 Jun 2016 10:11:08 -0400 Subject: [PATCH 013/310] drm/msm/sde: simplify encoder display probe logic Cleanup the display probing logic now that the common display interface is available. Change-Id: I3a6f815d8e7ab7f22e719eaf7ef4c8150470d54f Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_encoder.c | 223 +++++++++----------------- 1 file changed, 79 insertions(+), 144 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index d1d9936431ef..2937170b7c67 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -279,7 +279,7 @@ static const struct drm_encoder_funcs sde_encoder_funcs = { }; static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog, - enum sde_intf_type type, u32 instance) + enum sde_intf_type type, u32 controller_id) { int i = 0; @@ -287,7 +287,7 @@ static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog, for (i = 0; i < catalog->intf_count; i++) { if (catalog->intf[i].type == type - && catalog->intf[i].controller_id == instance) { + && catalog->intf[i].controller_id == controller_id) { return catalog->intf[i].id; } } @@ -348,52 +348,49 @@ static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder_virt *sde_enc, return ret; } -static int sde_encoder_setup_hdmi(struct sde_encoder_virt *sde_enc, - struct sde_kms *sde_kms, int *hdmi_info) -{ - int ret = 0; - enum sde_intf intf_idx = INTF_MAX; - enum sde_ctl ctl_idx = CTL_2; - - DBG(""); - - intf_idx = sde_encoder_get_intf(sde_kms->catalog, INTF_HDMI, 0); - if (intf_idx == INTF_MAX) - ret = 
-EINVAL; - - if (!ret) - ret = - sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, - intf_idx, ctl_idx); - - return ret; -} - -static int sde_encoder_setup_dsi(struct sde_encoder_virt *sde_enc, +static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms, - struct display_info *disp_info) + struct display_info *disp_info, + int *drm_enc_mode) { int ret = 0; int i = 0; + enum sde_intf_type intf_type = INTF_NONE; DBG(""); - WARN_ON(disp_info->num_of_h_tiles < 1); + if (disp_info->intf == DISPLAY_INTF_DSI) { + *drm_enc_mode = DRM_MODE_ENCODER_DSI; + intf_type = INTF_DSI; + } else if (disp_info->intf == DISPLAY_INTF_HDMI) { + *drm_enc_mode = DRM_MODE_ENCODER_TMDS; + intf_type = INTF_HDMI; + } else { + DRM_ERROR("Unsupported display interface type"); + return -EINVAL; + } - if (disp_info->num_of_h_tiles == 0) - disp_info->num_of_h_tiles = 1; + WARN_ON(disp_info->num_of_h_tiles < 1); - DBG("num_of_h_tiles %d h_tile_instance_0 %d h_tile_instance_1 %d\n", - disp_info->num_of_h_tiles, disp_info->h_tile_instance[0], - disp_info->h_tile_instance[1]); + DBG("dsi_info->num_of_h_tiles %d", disp_info->num_of_h_tiles); for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { + /* + * Left-most tile is at index 0, content is controller id + * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right + * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right + */ enum sde_intf intf_idx = INTF_MAX; enum sde_ctl ctl_idx = CTL_0; + u32 controller_id = disp_info->h_tile_instance[i]; + + if (intf_type == INTF_HDMI) + ctl_idx = CTL_2; + + DBG("h_tile_instance %d = %d", i, controller_id); intf_idx = sde_encoder_get_intf(sde_kms->catalog, - INTF_DSI, - disp_info->h_tile_instance[i]); + intf_type, controller_id); if (intf_idx == INTF_MAX) { DBG("Error: could not get the interface id"); ret = -EINVAL; @@ -401,29 +398,22 @@ static int sde_encoder_setup_dsi(struct sde_encoder_virt *sde_enc, /* Create both VID and CMD Phys Encoders here */ if (!ret) - 
ret = - sde_encoder_virt_add_phys_vid_enc(sde_enc, sde_kms, - intf_idx, - ctl_idx); + ret = sde_encoder_virt_add_phys_vid_enc( + sde_enc, sde_kms, intf_idx, ctl_idx); } + return ret; } -struct display_probe_info { - enum sde_intf_type type; - struct display_info disp_info; - int hdmi_info; -}; - -static struct drm_encoder *sde_encoder_virt_init(struct drm_device *dev, - struct display_probe_info *display) +static struct drm_encoder *sde_encoder_virt_init( + struct drm_device *dev, struct display_info *disp_info) { struct msm_drm_private *priv = dev->dev_private; struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(priv->kms)); struct drm_encoder *drm_enc = NULL; struct sde_encoder_virt *sde_enc = NULL; - int drm_encoder_mode = DRM_MODE_ENCODER_NONE; + int drm_enc_mode = DRM_MODE_ENCODER_NONE; int ret = 0; DBG(""); @@ -434,28 +424,14 @@ static struct drm_encoder *sde_encoder_virt_init(struct drm_device *dev, goto fail; } - if (display->type == INTF_DSI) { - drm_encoder_mode = DRM_MODE_ENCODER_DSI; - ret = sde_encoder_setup_dsi(sde_enc, - sde_kms, - &display->disp_info); - - } else if (display->type == INTF_HDMI) { - drm_encoder_mode = DRM_MODE_ENCODER_TMDS; - ret = - sde_encoder_setup_hdmi(sde_enc, sde_kms, - &display->hdmi_info); - } else { - DRM_ERROR("No valid displays found\n"); - ret = -EINVAL; - } - + ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info, + &drm_enc_mode); if (ret) goto fail; spin_lock_init(&sde_enc->spin_lock); drm_enc = &sde_enc->base; - drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_encoder_mode); + drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode); drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs); bs_init(sde_enc); @@ -471,76 +447,6 @@ fail: return ERR_PTR(ret); } -static int sde_encoder_probe_hdmi(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - struct drm_encoder *enc = NULL; - struct display_probe_info probe_info = { 0 }; - int ret = 0; - - DBG(""); - - probe_info.type = 
INTF_HDMI; - - enc = sde_encoder_virt_init(dev, &probe_info); - if (IS_ERR(enc)) - ret = PTR_ERR(enc); - - if (!ret) { - /* Register new encoder with the upper layer */ - priv->encoders[priv->num_encoders++] = enc; - } - - return ret; -} - -static int sde_encoder_probe_dsi(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - u32 ret = 0; - u32 i = 0; - u32 num_displays = 0; - struct display_manager *dm = priv->dm; - struct display_probe_info probe_info = { 0 }; - - DBG(""); - - num_displays = display_manager_get_count(dm); - DBG("num_displays %d", num_displays); - - if (priv->num_encoders + num_displays > ARRAY_SIZE(priv->encoders)) { - DBG("Too many displays found in probe"); - return -EINVAL; - } - - for (i = 0; i < num_displays; i++) { - struct drm_encoder *enc; - - ret = display_manager_get_info_by_index(dm, i, - &probe_info.disp_info); - if (ret) { - pr_err("Failed to get display info, %d\n", ret); - return ret; - } - - enc = sde_encoder_virt_init(dev, &probe_info); - if (IS_ERR_OR_NULL(enc)) { - pr_err("encoder virt init failed\n"); - return PTR_ERR(enc); - } - - ret = display_manager_drm_init_by_index(dm, i, enc); - if (ret) { - pr_err("display drm init failed\n"); - return ret; - } - - priv->encoders[priv->num_encoders++] = enc; - } - - return ret; -} - void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, void (*cb)(void *), void *data) { @@ -561,7 +467,9 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, void sde_encoders_init(struct drm_device *dev) { struct msm_drm_private *priv = NULL; - int ret = 0; + struct display_manager *disp_man = NULL; + u32 i = 0; + u32 num_displays = 0; DBG(""); @@ -571,18 +479,45 @@ void sde_encoders_init(struct drm_device *dev) } priv = dev->dev_private; - if (!priv->kms) { + priv->num_encoders = 0; + if (!priv->kms || !priv->dm) { DRM_ERROR("Invalid pointer"); return; } - /* Start num_encoders at 0, probe functions will increment */ - priv->num_encoders = 0; 
- ret = sde_encoder_probe_dsi(dev); - if (ret) - DRM_ERROR("Error probing DSI, %d\n", ret); - else { - ret = sde_encoder_probe_hdmi(dev); - if (ret) - DRM_ERROR("Error probing HDMI, %d\n", ret); + disp_man = priv->dm; + + num_displays = display_manager_get_count(disp_man); + DBG("num_displays %d", num_displays); + + if (num_displays > ARRAY_SIZE(priv->encoders)) { + num_displays = ARRAY_SIZE(priv->encoders); + DRM_ERROR("Too many displays found, capping to %d", + num_displays); + } + + for (i = 0; i < num_displays; i++) { + struct display_info info = { 0 }; + struct drm_encoder *enc = NULL; + u32 ret = 0; + + ret = display_manager_get_info_by_index(disp_man, i, &info); + if (ret) { + DRM_ERROR("Failed to get display info, %d", ret); + return; + } + + enc = sde_encoder_virt_init(dev, &info); + if (IS_ERR_OR_NULL(enc)) { + DRM_ERROR("Encoder initialization failed"); + return; + } + + ret = display_manager_drm_init_by_index(disp_man, i, enc); + if (ret) { + DRM_ERROR("Display drm_init failed, %d", ret); + return; + } + + priv->encoders[priv->num_encoders++] = enc; } } -- GitLab From d18be1adef96167ad2a41533554c21dda0239932 Mon Sep 17 00:00:00 2001 From: Ben Chan Date: Sun, 26 Jun 2016 22:02:47 -0400 Subject: [PATCH 014/310] drm/msm/sde: sde hw interrupt handling Existing SDE HW interrupt was based on mdp/kms and is not sufficient for supporting the SDE HW interrupt manipulation. Changes are for enabling full SDE interrupt support and hiding HAL interface implementation details from crtc/encoder. 
Change-Id: I917a153d12bbb6b84758591ba69fe15181af7791 Signed-off-by: Ben Chan --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/sde/sde_crtc.c | 3 +- drivers/gpu/drm/msm/sde/sde_encoder.c | 2 +- drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 14 +- .../gpu/drm/msm/sde/sde_encoder_phys_vid.c | 168 ++- drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 7 + drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c | 4 + drivers/gpu/drm/msm/sde/sde_hw_interrupts.c | 969 ++++++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_interrupts.h | 245 +++++ drivers/gpu/drm/msm/sde/sde_hw_mdss.h | 19 + drivers/gpu/drm/msm/sde/sde_hwio.h | 3 + drivers/gpu/drm/msm/sde/sde_irq.c | 240 ++++- drivers/gpu/drm/msm/sde/sde_kms.c | 55 +- drivers/gpu/drm/msm/sde/sde_kms.h | 120 ++- drivers/gpu/drm/msm/sde/sde_plane.c | 13 +- 15 files changed, 1784 insertions(+), 79 deletions(-) create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_interrupts.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_interrupts.h diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 61163578c06c..7c73657b399e 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -89,4 +89,5 @@ obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \ sde/sde_hw_sspp.o \ sde/sde_hw_wb.o \ sde/sde_hw_pingpong.o \ + sde/sde_hw_interrupts.o \ sde/sde_mdp_formats.o diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index b6ec66954e67..d5bdf0c71658 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -57,8 +57,7 @@ struct sde_crtc { static struct sde_kms *get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv = crtc->dev->dev_private; - - return to_sde_kms(to_mdp_kms(priv->kms)); + return to_sde_kms(priv->kms); } static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx, diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 2937170b7c67..43be77e26f2d 100644 --- 
a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -410,7 +410,7 @@ static struct drm_encoder *sde_encoder_virt_init( struct drm_device *dev, struct display_info *disp_info) { struct msm_drm_private *priv = dev->dev_private; - struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(priv->kms)); + struct sde_kms *sde_kms = to_sde_kms(priv->kms); struct drm_encoder *drm_enc = NULL; struct sde_encoder_virt *sde_enc = NULL; int drm_enc_mode = DRM_MODE_ENCODER_NONE; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 427a6d94322e..27fc11175c19 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -47,15 +47,23 @@ struct sde_encoder_phys { struct sde_encoder_phys_ops phys_ops; struct sde_hw_intf *hw_intf; struct sde_hw_ctl *hw_ctl; - struct mdp_kms *mdp_kms; + struct sde_kms *sde_kms; struct drm_display_mode cached_mode; bool enabled; spinlock_t spin_lock; }; +/** + * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video + * mode specific operations + * @base: Baseclass physical encoder structure + * @irq_idx: IRQ interface lookup index + * @vblank_complete: for vblank irq synchronization + */ struct sde_encoder_phys_vid { struct sde_encoder_phys base; - struct mdp_irq vblank_irq; + int irq_idx; + struct completion vblank_complete; }; struct sde_encoder_virt { diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 0528c3d1ff8d..33d1a8eef7a5 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -20,9 
+20,31 @@ #include "sde_encoder_phys.h" #include "sde_mdp_formats.h" +#define VBLANK_TIMEOUT msecs_to_jiffies(100) + #define to_sde_encoder_phys_vid(x) \ container_of(x, struct sde_encoder_phys_vid, base) +static bool sde_encoder_phys_vid_is_master( + struct sde_encoder_phys *phys_enc) +{ + bool ret = true; + + return ret; +} + +static void sde_encoder_phys_vid_wait_for_vblank( + struct sde_encoder_phys_vid *vid_enc) +{ + int rc = 0; + + DBG(""); + rc = wait_for_completion_timeout(&vid_enc->vblank_complete, + VBLANK_TIMEOUT); + if (rc == 0) + DRM_ERROR("Timed out waiting for vblank irq\n"); +} + static void drm_mode_to_intf_timing_params( const struct sde_encoder_phys *phys_enc, const struct drm_display_mode *mode, @@ -195,19 +217,29 @@ static bool sde_encoder_phys_vid_mode_fixup( return true; } +static void sde_encoder_phys_vid_flush_intf(struct sde_encoder_phys *phys_enc) +{ + struct sde_hw_intf *intf = phys_enc->hw_intf; + struct sde_hw_ctl *ctl = phys_enc->hw_ctl; + u32 flush_mask = 0; + + DBG(""); + + ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx); + ctl->ops.setup_flush(ctl, flush_mask); + + DBG("Flushing CTL_ID %d, flush_mask %x, INTF %d", + ctl->idx, flush_mask, intf->idx); +} + static void sde_encoder_phys_vid_mode_set( struct sde_encoder_phys *phys_enc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + struct drm_display_mode *adj_mode) { - mode = adjusted_mode; - phys_enc->cached_mode = *adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, - mode->type, mode->flags); + phys_enc->cached_mode = *adj_mode; + DBG("intf %d, caching mode:", phys_enc->hw_intf->idx); + drm_mode_debug_printmodeline(adj_mode); } static void sde_encoder_phys_vid_setup_timing_engine( @@ -249,56 +281,113 @@ static void 
sde_encoder_phys_vid_setup_timing_engine( programmable_fetch_config(phys_enc, &p); } -static void sde_encoder_phys_vid_wait_for_vblank( - struct sde_encoder_phys_vid *vid_enc) +static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) { - DBG(""); - mdp_irq_wait(vid_enc->base.mdp_kms, vid_enc->vblank_irq.irqmask); + struct sde_encoder_phys_vid *vid_enc = arg; + struct sde_encoder_phys *phys_enc = &vid_enc->base; + + phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent); + + /* signal VBLANK completion */ + complete_all(&vid_enc->vblank_complete); } -static void sde_encoder_phys_vid_vblank_irq(struct mdp_irq *irq, - uint32_t irqstatus) +static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc) { struct sde_encoder_phys_vid *vid_enc = - container_of(irq, struct sde_encoder_phys_vid, - vblank_irq); - struct sde_encoder_phys *phys_enc = &vid_enc->base; + to_sde_encoder_phys_vid(phys_enc); + struct sde_irq_callback irq_cb; + int ret = 0; - phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent); + vid_enc->irq_idx = sde_irq_idx_lookup(phys_enc->sde_kms, + SDE_IRQ_TYPE_INTF_VSYNC, phys_enc->hw_intf->idx); + if (vid_enc->irq_idx < 0) { + DRM_ERROR( + "Failed to lookup IRQ index for INTF_VSYNC with intf=%d\n", + phys_enc->hw_intf->idx); + return -EINVAL; + } + + irq_cb.func = sde_encoder_phys_vid_vblank_irq; + irq_cb.arg = vid_enc; + ret = sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx, + &irq_cb); + if (ret) { + DRM_ERROR("Failed to register IRQ callback INTF_VSYNC\n"); + return ret; + } + + ret = sde_enable_irq(phys_enc->sde_kms, &vid_enc->irq_idx, 1); + if (ret) { + DRM_ERROR( + "Failed to enable IRQ for INTF_VSYNC, intf %d, irq_idx=%d\n", + phys_enc->hw_intf->idx, + vid_enc->irq_idx); + vid_enc->irq_idx = -EINVAL; + + /* Unregister callback on IRQ enable failure */ + sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx, + NULL); + return ret; + } + + DBG("Registered IRQ for intf %d, irq_idx=%d\n", + 
phys_enc->hw_intf->idx, + vid_enc->irq_idx); + + return ret; } -static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) +static int sde_encoder_phys_vid_unregister_irq( + struct sde_encoder_phys *phys_enc) { struct sde_encoder_phys_vid *vid_enc = - to_sde_encoder_phys_vid(phys_enc); - unsigned long lock_flags; + to_sde_encoder_phys_vid(phys_enc); - DBG(""); + sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx, NULL); + sde_disable_irq(phys_enc->sde_kms, &vid_enc->irq_idx, 1); - if (WARN_ON(phys_enc->enabled)) - return; + DBG("Un-Register IRQ for intf %d, irq_idx=%d\n", + phys_enc->hw_intf->idx, + vid_enc->irq_idx); + + return 0; +} + +static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) +{ + int ret = 0; + + DBG(""); if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) return; sde_encoder_phys_vid_setup_timing_engine(phys_enc); - spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); - phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1); - spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); + sde_encoder_phys_vid_flush_intf(phys_enc); + + /* Register for interrupt unless we're the slave encoder */ + if (sde_encoder_phys_vid_is_master(phys_enc)) + ret = sde_encoder_phys_vid_register_irq(phys_enc); - phys_enc->enabled = true; + if (!ret && !phys_enc->enabled) { + unsigned long lock_flags = 0; - mdp_irq_register(phys_enc->mdp_kms, &vid_enc->vblank_irq); - DBG("Registered IRQ for intf %d mask 0x%X", phys_enc->hw_intf->idx, - vid_enc->vblank_irq.irqmask); + /* Now enable timing engine */ + spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1); + spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); + + phys_enc->enabled = true; + } } static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) { - struct sde_encoder_phys_vid *vid_enc = - to_sde_encoder_phys_vid(phys_enc); unsigned long lock_flags; + struct sde_encoder_phys_vid 
*vid_enc = + to_sde_encoder_phys_vid(phys_enc); DBG(""); @@ -310,6 +399,7 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) spin_lock_irqsave(&phys_enc->spin_lock, lock_flags); phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0); + reinit_completion(&vid_enc->vblank_complete); spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags); /* @@ -321,7 +411,7 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) * scanout buffer) don't latch properly.. */ sde_encoder_phys_vid_wait_for_vblank(vid_enc); - mdp_irq_unregister(phys_enc->mdp_kms, &vid_enc->vblank_irq); + sde_encoder_phys_vid_unregister_irq(phys_enc); phys_enc->enabled = false; } @@ -361,7 +451,6 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init( { struct sde_encoder_phys *phys_enc = NULL; struct sde_encoder_phys_vid *vid_enc = NULL; - u32 irq_mask = 0x8000000; int ret = 0; DBG(""); @@ -371,6 +460,9 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init( ret = -ENOMEM; goto fail; } + vid_enc->irq_idx = -EINVAL; + init_completion(&vid_enc->vblank_complete); + phys_enc = &vid_enc->base; phys_enc->hw_intf = @@ -390,9 +482,7 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init( sde_encoder_phys_vid_init_cbs(&phys_enc->phys_ops); phys_enc->parent = parent; phys_enc->parent_ops = parent_ops; - phys_enc->mdp_kms = &sde_kms->base; - vid_enc->vblank_irq.irq = sde_encoder_phys_vid_vblank_irq; - vid_enc->vblank_irq.irqmask = irq_mask; + phys_enc->sde_kms = sde_kms; spin_lock_init(&phys_enc->spin_lock); DBG("Created sde_encoder_phys_vid for intf %d", phys_enc->hw_intf->idx); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 6aee5467739a..9e543bbfd2a2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -282,6 +282,10 @@ struct sde_wb_sub_blocks { u32 maxlinewidth; }; +struct sde_mdss_base_cfg { + SDE_HW_BLK_INFO; +}; + /* struct sde_mdp_cfg : MDP TOP-BLK 
instance info * @id: index identifying this block * @base: register base offset to mdss @@ -411,6 +415,9 @@ struct sde_ad_cfg { struct sde_mdss_cfg { u32 hwversion; + u32 mdss_count; + struct sde_mdss_base_cfg mdss[MAX_BLOCKS]; + u32 mdp_count; struct sde_mdp_cfg mdp[MAX_BLOCKS]; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c index d8831af35bb7..7238890b1978 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c @@ -146,6 +146,10 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg) /* Setup Register maps and defaults */ *cfg = (struct sde_mdss_cfg){ + .mdss_count = 1, + .mdss = { + {.id = MDP_TOP, .base = 0x00000000, .features = 0} + }, .mdp_count = 1, .mdp = { {.id = MDP_TOP, .base = 0x00001000, .features = 0, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c new file mode 100644 index 000000000000..99aa2e59dd85 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c @@ -0,0 +1,969 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "sde_kms.h" +#include "sde_hw_interrupts.h" +#include "sde_hw_mdp_util.h" +#include "sde_hw_mdss.h" + +/** + * Register offsets in MDSS register file for the interrupt registers + * w.r.t. 
to the MDSS base + */ +#define HW_INTR_STATUS 0x0010 +#define MDP_SSPP_TOP0_OFF 0x1000 +#define MDP_INTF_0_OFF 0x6B000 +#define MDP_INTF_1_OFF 0x6B800 +#define MDP_INTF_2_OFF 0x6C000 +#define MDP_INTF_3_OFF 0x6C800 +#define MDP_INTF_4_OFF 0x6D000 + +/** + * WB interrupt status bit definitions + */ +#define SDE_INTR_WB_0_DONE BIT(0) +#define SDE_INTR_WB_1_DONE BIT(1) +#define SDE_INTR_WB_2_DONE BIT(4) + +/** + * WDOG timer interrupt status bit definitions + */ +#define SDE_INTR_WD_TIMER_0_DONE BIT(2) +#define SDE_INTR_WD_TIMER_1_DONE BIT(3) +#define SDE_INTR_WD_TIMER_2_DONE BIT(5) +#define SDE_INTR_WD_TIMER_3_DONE BIT(6) +#define SDE_INTR_WD_TIMER_4_DONE BIT(7) + +/** + * Pingpong interrupt status bit definitions + */ +#define SDE_INTR_PING_PONG_0_DONE BIT(8) +#define SDE_INTR_PING_PONG_1_DONE BIT(9) +#define SDE_INTR_PING_PONG_2_DONE BIT(10) +#define SDE_INTR_PING_PONG_3_DONE BIT(11) +#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12) +#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13) +#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14) +#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15) +#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16) +#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17) +#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18) +#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19) +#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20) +#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21) +#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22) +#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23) + +/** + * Interface interrupt status bit definitions + */ +#define SDE_INTR_INTF_0_UNDERRUN BIT(24) +#define SDE_INTR_INTF_1_UNDERRUN BIT(26) +#define SDE_INTR_INTF_2_UNDERRUN BIT(28) +#define SDE_INTR_INTF_3_UNDERRUN BIT(30) +#define SDE_INTR_INTF_0_VSYNC BIT(25) +#define SDE_INTR_INTF_1_VSYNC BIT(27) +#define SDE_INTR_INTF_2_VSYNC BIT(29) +#define SDE_INTR_INTF_3_VSYNC BIT(31) + +/** + * Pingpong Secondary interrupt status bit definitions + */ +#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0) +#define 
SDE_INTR_PING_PONG_S0_WR_PTR BIT(4) +#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8) +#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22) +#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28) + +/** + * Pingpong TEAR detection interrupt status bit definitions + */ +#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16) +#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17) +#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18) +#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19) + +/** + * Pingpong TE detection interrupt status bit definitions + */ +#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24) +#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25) +#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26) +#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27) + +/** + * Concurrent WB overflow interrupt status bit definitions + */ +#define SDE_INTR_CWB_2_OVERFLOW BIT(14) +#define SDE_INTR_CWB_3_OVERFLOW BIT(15) + +/** + * Histogram VIG done interrupt status bit definitions + */ +#define SDE_INTR_HIST_VIG_0_DONE BIT(0) +#define SDE_INTR_HIST_VIG_1_DONE BIT(4) +#define SDE_INTR_HIST_VIG_2_DONE BIT(8) +#define SDE_INTR_HIST_VIG_3_DONE BIT(10) + +/** + * Histogram VIG reset Sequence done interrupt status bit definitions + */ +#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1) +#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5) +#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9) +#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11) + +/** + * Histogram DSPP done interrupt status bit definitions + */ +#define SDE_INTR_HIST_DSPP_0_DONE BIT(12) +#define SDE_INTR_HIST_DSPP_1_DONE BIT(16) +#define SDE_INTR_HIST_DSPP_2_DONE BIT(20) +#define SDE_INTR_HIST_DSPP_3_DONE BIT(22) + +/** + * Histogram DSPP reset Sequence done interrupt status bit definitions + */ +#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13) +#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17) +#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21) +#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23) + +/** + * INTF interrupt status bit definitions + */ +#define 
SDE_INTR_VIDEO_INTO_STATIC BIT(0) +#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1) +#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2) +#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3) +#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4) +#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5) +#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6) +#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7) +#define SDE_INTR_PROG_LINE BIT(8) + +/** + * struct sde_intr_reg - array of SDE register sets + * @clr_off: offset to CLEAR reg + * @en_off: offset to ENABLE reg + * @status_off: offset to STATUS reg + */ +struct sde_intr_reg { + u32 clr_off; + u32 en_off; + u32 status_off; +}; + +/** + * struct sde_irq_type - maps each irq with i/f + * @intr_type: type of interrupt listed in sde_intr_type + * @instance_idx: instance index of the associated HW block in SDE + * @irq_mask: corresponding bit in the interrupt status reg + * @reg_idx: which reg set to use + */ +struct sde_irq_type { + u32 intr_type; + u32 instance_idx; + u32 irq_mask; + u32 reg_idx; +}; + +/** + * List of SDE interrupt registers + */ +static const struct sde_intr_reg sde_intr_set[] = { + { + MDP_SSPP_TOP0_OFF+INTR_CLEAR, + MDP_SSPP_TOP0_OFF+INTR_EN, + MDP_SSPP_TOP0_OFF+INTR_STATUS + }, + { + MDP_SSPP_TOP0_OFF+INTR2_CLEAR, + MDP_SSPP_TOP0_OFF+INTR2_EN, + MDP_SSPP_TOP0_OFF+INTR2_STATUS + }, + { + MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR, + MDP_SSPP_TOP0_OFF+HIST_INTR_EN, + MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS + }, + { + MDP_INTF_0_OFF+INTF_INTR_CLEAR, + MDP_INTF_0_OFF+INTF_INTR_EN, + MDP_INTF_0_OFF+INTF_INTR_STATUS + }, + { + MDP_INTF_1_OFF+INTF_INTR_CLEAR, + MDP_INTF_1_OFF+INTF_INTR_EN, + MDP_INTF_1_OFF+INTF_INTR_STATUS + }, + { + MDP_INTF_2_OFF+INTF_INTR_CLEAR, + MDP_INTF_2_OFF+INTF_INTR_EN, + MDP_INTF_2_OFF+INTF_INTR_STATUS + }, + { + MDP_INTF_3_OFF+INTF_INTR_CLEAR, + MDP_INTF_3_OFF+INTF_INTR_EN, + MDP_INTF_3_OFF+INTF_INTR_STATUS + }, + { + MDP_INTF_4_OFF+INTF_INTR_CLEAR, + MDP_INTF_4_OFF+INTF_INTR_EN, + MDP_INTF_4_OFF+INTF_INTR_STATUS + } +}; + +/** + * IRQ 
mapping table - use for lookup an irq_idx in this table that have + * a matching interface type and instance index. + */ +static const struct sde_irq_type sde_irq_map[] = { + /* BEGIN MAP_RANGE: 0-31, INTR */ + /* irq_idx: 0-3 */ + { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0}, + { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0}, + { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0}, + { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0}, + /* irq_idx: 4-7 */ + { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0}, + { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0}, + { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0}, + { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0}, + /* irq_idx: 8-11 */ + { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0, + SDE_INTR_PING_PONG_0_DONE, 0}, + { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1, + SDE_INTR_PING_PONG_1_DONE, 0}, + { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2, + SDE_INTR_PING_PONG_2_DONE, 0}, + { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3, + SDE_INTR_PING_PONG_3_DONE, 0}, + /* irq_idx: 12-15 */ + { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0, + SDE_INTR_PING_PONG_0_RD_PTR, 0}, + { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1, + SDE_INTR_PING_PONG_1_RD_PTR, 0}, + { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2, + SDE_INTR_PING_PONG_2_RD_PTR, 0}, + { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3, + SDE_INTR_PING_PONG_3_RD_PTR, 0}, + /* irq_idx: 16-19 */ + { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0, + SDE_INTR_PING_PONG_0_WR_PTR, 0}, + { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1, + SDE_INTR_PING_PONG_1_WR_PTR, 0}, + { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2, + SDE_INTR_PING_PONG_2_WR_PTR, 0}, + { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3, + SDE_INTR_PING_PONG_3_WR_PTR, 0}, + /* irq_idx: 20-23 */ + { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0, + SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0}, + { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1, + 
SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0}, + { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2, + SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0}, + { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3, + SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0}, + /* irq_idx: 24-27 */ + { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0}, + { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0}, + { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0}, + { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0}, + /* irq_idx: 28-31 */ + { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0}, + { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0}, + { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0}, + { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0}, + + /* BEGIN MAP_RANGE: 32-64, INTR2 */ + /* irq_idx: 32-35 */ + { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0, + SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + /* irq_idx: 36-39 */ + { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0, + SDE_INTR_PING_PONG_S0_WR_PTR, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + /* irq_idx: 40-43 */ + { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0, + SDE_INTR_PING_PONG_S0_RD_PTR, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + /* irq_idx: 44-47 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1}, + { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1}, + /* irq_idx: 48-51 */ + { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0, + SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1}, + { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1, + SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1}, + { 
SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2, + SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1}, + { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3, + SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1}, + /* irq_idx: 52-55 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0, + SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + /* irq_idx: 56-59 */ + { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0, + SDE_INTR_PING_PONG_0_TE_DETECTED, 1}, + { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1, + SDE_INTR_PING_PONG_1_TE_DETECTED, 1}, + { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2, + SDE_INTR_PING_PONG_2_TE_DETECTED, 1}, + { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3, + SDE_INTR_PING_PONG_3_TE_DETECTED, 1}, + /* irq_idx: 60-63 */ + { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0, + SDE_INTR_PING_PONG_S0_TE_DETECTED, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + + /* BEGIN MAP_RANGE: 64-95 HIST */ + /* irq_idx: 64-67 */ + { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2}, + { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0, + SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + /* irq_idx: 68-71 */ + { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2}, + { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1, + SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + /* irq_idx: 68-71 */ + { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2}, + { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2, + SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2}, + { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2}, + { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3, + SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2}, + /* irq_idx: 72-75 */ + { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2}, + { 
SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0, + SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + /* irq_idx: 76-79 */ + { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2}, + { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1, + SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + /* irq_idx: 80-83 */ + { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2}, + { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2, + SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2}, + { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2}, + { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3, + SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2}, + /* irq_idx: 84-87 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + /* irq_idx: 88-91 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + /* irq_idx: 92-95 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, + + /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */ + /* irq_idx: 96-99 */ + { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0, + SDE_INTR_VIDEO_INTO_STATIC, 3}, + { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0, + SDE_INTR_VIDEO_OUTOF_STATIC, 3}, + { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0, + SDE_INTR_DSICMD_0_INTO_STATIC, 3}, + { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0, + SDE_INTR_DSICMD_0_OUTOF_STATIC, 3}, + /* irq_idx: 100-103 */ + { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0, + SDE_INTR_DSICMD_1_INTO_STATIC, 3}, + { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0, + SDE_INTR_DSICMD_1_OUTOF_STATIC, 3}, + { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0, + SDE_INTR_DSICMD_2_INTO_STATIC, 3}, + { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0, + SDE_INTR_DSICMD_2_OUTOF_STATIC, 3}, + /* irq_idx: 104-107 */ + { 
SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + /* irq_idx: 108-111 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + /* irq_idx: 112-115 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + /* irq_idx: 116-119 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + /* irq_idx: 120-123 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + /* irq_idx: 124-127 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 3}, + + /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */ + /* irq_idx: 128-131 */ + { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1, + SDE_INTR_VIDEO_INTO_STATIC, 4}, + { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1, + SDE_INTR_VIDEO_OUTOF_STATIC, 4}, + { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1, + SDE_INTR_DSICMD_0_INTO_STATIC, 4}, + { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1, + SDE_INTR_DSICMD_0_OUTOF_STATIC, 4}, + /* irq_idx: 132-135 */ + { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1, + SDE_INTR_DSICMD_1_INTO_STATIC, 4}, + { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1, + SDE_INTR_DSICMD_1_OUTOF_STATIC, 4}, + { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1, + SDE_INTR_DSICMD_2_INTO_STATIC, 4}, + { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1, + SDE_INTR_DSICMD_2_OUTOF_STATIC, 4}, + /* irq_idx: 136-139 */ + { SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + /* irq_idx: 140-143 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + 
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + /* irq_idx: 144-147 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + /* irq_idx: 148-151 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + /* irq_idx: 152-155 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + /* irq_idx: 156-159 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 4}, + + /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */ + /* irq_idx: 160-163 */ + { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2, + SDE_INTR_VIDEO_INTO_STATIC, 5}, + { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2, + SDE_INTR_VIDEO_OUTOF_STATIC, 5}, + { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2, + SDE_INTR_DSICMD_0_INTO_STATIC, 5}, + { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2, + SDE_INTR_DSICMD_0_OUTOF_STATIC, 5}, + /* irq_idx: 164-167 */ + { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2, + SDE_INTR_DSICMD_1_INTO_STATIC, 5}, + { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2, + SDE_INTR_DSICMD_1_OUTOF_STATIC, 5}, + { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2, + SDE_INTR_DSICMD_2_INTO_STATIC, 5}, + { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2, + SDE_INTR_DSICMD_2_OUTOF_STATIC, 5}, + /* irq_idx: 168-171 */ + { SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + /* irq_idx: 172-175 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + /* irq_idx: 176-179 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { 
SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + /* irq_idx: 180-183 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + /* irq_idx: 184-187 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + /* irq_idx: 188-191 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 5}, + + /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */ + /* irq_idx: 192-195 */ + { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3, + SDE_INTR_VIDEO_INTO_STATIC, 6}, + { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3, + SDE_INTR_VIDEO_OUTOF_STATIC, 6}, + { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3, + SDE_INTR_DSICMD_0_INTO_STATIC, 6}, + { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3, + SDE_INTR_DSICMD_0_OUTOF_STATIC, 6}, + /* irq_idx: 196-199 */ + { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3, + SDE_INTR_DSICMD_1_INTO_STATIC, 6}, + { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3, + SDE_INTR_DSICMD_1_OUTOF_STATIC, 6}, + { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3, + SDE_INTR_DSICMD_2_INTO_STATIC, 6}, + { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3, + SDE_INTR_DSICMD_2_OUTOF_STATIC, 6}, + /* irq_idx: 200-203 */ + { SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + /* irq_idx: 204-207 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + /* irq_idx: 208-211 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + /* irq_idx: 212-215 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { 
SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + /* irq_idx: 216-219 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + /* irq_idx: 220-223 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 6}, + + /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */ + /* irq_idx: 224-227 */ + { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4, + SDE_INTR_VIDEO_INTO_STATIC, 7}, + { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4, + SDE_INTR_VIDEO_OUTOF_STATIC, 7}, + { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4, + SDE_INTR_DSICMD_0_INTO_STATIC, 7}, + { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4, + SDE_INTR_DSICMD_0_OUTOF_STATIC, 7}, + /* irq_idx: 228-231 */ + { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4, + SDE_INTR_DSICMD_1_INTO_STATIC, 7}, + { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4, + SDE_INTR_DSICMD_1_OUTOF_STATIC, 7}, + { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4, + SDE_INTR_DSICMD_2_INTO_STATIC, 7}, + { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4, + SDE_INTR_DSICMD_2_OUTOF_STATIC, 7}, + /* irq_idx: 232-235 */ + { SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + /* irq_idx: 236-239 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + /* irq_idx: 240-243 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + /* irq_idx: 244-247 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + /* irq_idx: 248-251 */ + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + /* irq_idx: 252-255 */ 
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, + { SDE_IRQ_TYPE_RESERVED, 0, 0, 7}, +}; + +static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type, + u32 instance_idx) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++) { + if (intr_type == sde_irq_map[i].intr_type && + instance_idx == sde_irq_map[i].instance_idx) + return i; + } + + pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n", + intr_type, instance_idx); + return -EINVAL; +} + +static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off, + uint32_t mask) +{ + SDE_REG_WRITE(&intr->hw, reg_off, mask); +} + +static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr, + void (*cbfunc)(void *, int), + void *arg) +{ + int reg_idx; + int irq_idx; + int start_idx; + int end_idx; + u32 irq_status; + unsigned long irq_flags; + + /* + * The dispatcher will save the IRQ status before calling here. + * Now need to go through each IRQ status and find matching + * irq lookup index. + */ + spin_lock_irqsave(&intr->status_lock, irq_flags); + for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) { + irq_status = intr->save_irq_status[reg_idx]; + + /* + * Each Interrupt register has a range of 32 indexes, and + * that is static for sde_irq_map. + */ + start_idx = reg_idx * 32; + end_idx = start_idx + 32; + + /* + * Search through matching intr status from irq map. + * start_idx and end_idx defined the search range in + * the sde_irq_map. + */ + for (irq_idx = start_idx; + (irq_idx < end_idx) && irq_status; + irq_idx++) + if ((irq_status & sde_irq_map[irq_idx].irq_mask) && + (sde_irq_map[irq_idx].reg_idx == reg_idx)) { + /* + * Once a match on irq mask, perform a callback + * to the given cbfunc. cbfunc will take care + * the interrupt status clearing. If cbfunc is + * not provided, then the interrupt clearing + * is here. 
+ */ + if (cbfunc) + cbfunc(arg, irq_idx); + else + intr->ops.clear_interrupt_status( + intr, irq_idx); + + /* + * When callback finish, clear the irq_status + * with the matching mask. Once irq_status + * is all cleared, the search can be stopped. + */ + irq_status &= ~sde_irq_map[irq_idx].irq_mask; + } + } + spin_unlock_irqrestore(&intr->status_lock, irq_flags); +} + +static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx) +{ + int reg_idx; + unsigned long irq_flags; + const struct sde_intr_reg *reg; + const struct sde_irq_type *irq; + const char *dbgstr = NULL; + uint32_t cache_irq_mask; + + if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) { + pr_err("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + irq = &sde_irq_map[irq_idx]; + reg_idx = irq->reg_idx; + reg = &sde_intr_set[reg_idx]; + + spin_lock_irqsave(&intr->mask_lock, irq_flags); + cache_irq_mask = intr->cache_irq_mask[reg_idx]; + if (cache_irq_mask & irq->irq_mask) { + dbgstr = "SDE IRQ already set:"; + } else { + dbgstr = "SDE IRQ enabled:"; + + cache_irq_mask |= irq->irq_mask; + /* Cleaning any pending interrupt */ + SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask); + /* Enabling interrupts with the new mask */ + SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask); + + intr->cache_irq_mask[reg_idx] = cache_irq_mask; + } + spin_unlock_irqrestore(&intr->mask_lock, irq_flags); + + pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr, + irq->irq_mask, cache_irq_mask); + + return 0; +} + +static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx) +{ + int reg_idx; + unsigned long irq_flags; + const struct sde_intr_reg *reg; + const struct sde_irq_type *irq; + const char *dbgstr = NULL; + uint32_t cache_irq_mask; + + if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) { + pr_err("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + irq = &sde_irq_map[irq_idx]; + reg_idx = irq->reg_idx; + reg = &sde_intr_set[reg_idx]; + + 
spin_lock_irqsave(&intr->mask_lock, irq_flags); + cache_irq_mask = intr->cache_irq_mask[reg_idx]; + if ((cache_irq_mask & irq->irq_mask) == 0) { + dbgstr = "SDE IRQ is already cleared:"; + } else { + dbgstr = "SDE IRQ mask disable:"; + + cache_irq_mask &= ~irq->irq_mask; + /* Disable interrupts based on the new mask */ + SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask); + /* Cleaning any pending interrupt */ + SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask); + + intr->cache_irq_mask[reg_idx] = cache_irq_mask; + } + spin_unlock_irqrestore(&intr->mask_lock, irq_flags); + + pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr, + irq->irq_mask, cache_irq_mask); + + return 0; +} + +static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) + SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff); + + return 0; +} + +static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) + SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000); + + return 0; +} + +static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr, + uint32_t *mask) +{ + *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1 + | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP; + return 0; +} + +static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr, + uint32_t *sources) +{ + *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS); + return 0; +} + +static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr) +{ + int i; + u32 enable_mask; + unsigned long irq_flags; + + spin_lock_irqsave(&intr->status_lock, irq_flags); + for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) { + /* Read interrupt status */ + intr->save_irq_status[i] = SDE_REG_READ(&intr->hw, + sde_intr_set[i].status_off); + + /* Read enable mask */ + enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off); + + /* and clear the interrupt */ + if (intr->save_irq_status[i]) 
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, + intr->save_irq_status[i]); + + /* Finally update IRQ status based on enable mask */ + intr->save_irq_status[i] &= enable_mask; + } + spin_unlock_irqrestore(&intr->status_lock, irq_flags); +} + +static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr, + int irq_idx) +{ + int reg_idx; + unsigned long irq_flags; + + spin_lock_irqsave(&intr->mask_lock, irq_flags); + + reg_idx = sde_irq_map[irq_idx].reg_idx; + SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off, + sde_irq_map[irq_idx].irq_mask); + + spin_unlock_irqrestore(&intr->mask_lock, irq_flags); +} + + +static void __setup_intr_ops(struct sde_hw_intr_ops *ops) +{ + ops->set_mask = sde_hw_intr_set_mask; + ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup; + ops->enable_irq = sde_hw_intr_enable_irq; + ops->disable_irq = sde_hw_intr_disable_irq; + ops->dispatch_irqs = sde_hw_intr_dispatch_irq; + ops->clear_all_irqs = sde_hw_intr_clear_irqs; + ops->disable_all_irqs = sde_hw_intr_disable_irqs; + ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts; + ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources; + ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses; + ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status; +} + +static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m, + void __iomem *addr, struct sde_hw_blk_reg_map *hw) +{ + if (m->mdp_count == 0) + return NULL; + + hw->base_off = addr; + hw->blk_off = m->mdss[0].base; + hw->hwversion = m->hwversion; + return &m->mdss[0]; +} + +struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr, + struct sde_mdss_cfg *m) +{ + struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL); + struct sde_mdss_base_cfg *cfg; + + if (!intr) + return ERR_PTR(-ENOMEM); + + cfg = __intr_offset(m, addr, &intr->hw); + if (!cfg) { + kfree(intr); + return ERR_PTR(-EINVAL); + } + __setup_intr_ops(&intr->ops); + + intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map); 
+ + intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32), + GFP_KERNEL); + if (intr->cache_irq_mask == NULL) { + kfree(intr); + return ERR_PTR(-ENOMEM); + } + + intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32), + GFP_KERNEL); + if (intr->save_irq_status == NULL) { + kfree(intr->cache_irq_mask); + kfree(intr); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&intr->mask_lock); + spin_lock_init(&intr->status_lock); + + return intr; +} + +void sde_hw_intr_destroy(struct sde_hw_intr *intr) +{ + if (intr) { + kfree(intr->cache_irq_mask); + kfree(intr->save_irq_status); + kfree(intr); + } +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h new file mode 100644 index 000000000000..0ddb1e78a953 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h @@ -0,0 +1,245 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SDE_HW_INTERRUPTS_H +#define _SDE_HW_INTERRUPTS_H + +#include + +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_mdp_util.h" +#include "sde_hw_mdss.h" + +#define IRQ_SOURCE_MDP BIT(0) +#define IRQ_SOURCE_DSI0 BIT(4) +#define IRQ_SOURCE_DSI1 BIT(5) +#define IRQ_SOURCE_HDMI BIT(8) +#define IRQ_SOURCE_EDP BIT(12) +#define IRQ_SOURCE_MHL BIT(16) + +/** + * sde_intr_type - HW Interrupt Type + * @SDE_IRQ_TYPE_WB_ROT_COMP: WB rotator done + * @SDE_IRQ_TYPE_WB_WFD_COMP: WB WFD done + * @SDE_IRQ_TYPE_PING_PONG_COMP: PingPong done + * @SDE_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer + * @SDE_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer + * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh + * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check + * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection + * @SDE_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun + * @SDE_IRQ_TYPE_INTF_VSYNC: INTF VSYNC + * @SDE_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow + * @SDE_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done + * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset + * @SDE_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done + * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset + * @SDE_IRQ_TYPE_WD_TIMER: Watchdog timer + * @SDE_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static + * @SDE_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static + * @SDE_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static + * @SDE_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static + * @SDE_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static + * @SDE_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static + * @SDE_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static + * @SDE_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static + * @SDE_IRQ_TYPE_PROG_LINE: Programmable Line interrupt + * @SDE_IRQ_TYPE_RESERVED: Reserved for expansion + */ +enum sde_intr_type { + 
SDE_IRQ_TYPE_WB_ROT_COMP, + SDE_IRQ_TYPE_WB_WFD_COMP, + SDE_IRQ_TYPE_PING_PONG_COMP, + SDE_IRQ_TYPE_PING_PONG_RD_PTR, + SDE_IRQ_TYPE_PING_PONG_WR_PTR, + SDE_IRQ_TYPE_PING_PONG_AUTO_REF, + SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, + SDE_IRQ_TYPE_PING_PONG_TE_CHECK, + SDE_IRQ_TYPE_INTF_UNDER_RUN, + SDE_IRQ_TYPE_INTF_VSYNC, + SDE_IRQ_TYPE_CWB_OVERFLOW, + SDE_IRQ_TYPE_HIST_VIG_DONE, + SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, + SDE_IRQ_TYPE_HIST_DSPP_DONE, + SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, + SDE_IRQ_TYPE_WD_TIMER, + SDE_IRQ_TYPE_SFI_VIDEO_IN, + SDE_IRQ_TYPE_SFI_VIDEO_OUT, + SDE_IRQ_TYPE_SFI_CMD_0_IN, + SDE_IRQ_TYPE_SFI_CMD_0_OUT, + SDE_IRQ_TYPE_SFI_CMD_1_IN, + SDE_IRQ_TYPE_SFI_CMD_1_OUT, + SDE_IRQ_TYPE_SFI_CMD_2_IN, + SDE_IRQ_TYPE_SFI_CMD_2_OUT, + SDE_IRQ_TYPE_PROG_LINE, + SDE_IRQ_TYPE_RESERVED, +}; + +struct sde_hw_intr; + +/** + * Interrupt operations. + */ +struct sde_hw_intr_ops { + /** + * set_mask - Programs the given interrupt register with the + * given interrupt mask. Register value will get overwritten. 
+ * @intr: HW interrupt handle + * @reg_off: MDSS HW register offset + * @irqmask: IRQ mask value + */ + void (*set_mask)( + struct sde_hw_intr *intr, + uint32_t reg, + uint32_t irqmask); + + /** + * irq_idx_lookup - Lookup IRQ index on the HW interrupt type + * Used for all irq related ops + * @intr_type: Interrupt type defined in sde_intr_type + * @instance_idx: HW interrupt block instance + * @return: irq_idx or -EINVAL for lookup fail + */ + int (*irq_idx_lookup)( + enum sde_intr_type intr_type, + u32 instance_idx); + + /** + * enable_irq - Enable IRQ based on lookup IRQ index + * @intr: HW interrupt handle + * @irq_idx: Lookup irq index return from irq_idx_lookup + * @return: 0 for success, otherwise failure + */ + int (*enable_irq)( + struct sde_hw_intr *intr, + int irq_idx); + + /** + * disable_irq - Disable IRQ based on lookup IRQ index + * @intr: HW interrupt handle + * @irq_idx: Lookup irq index return from irq_idx_lookup + * @return: 0 for success, otherwise failure + */ + int (*disable_irq)( + struct sde_hw_intr *intr, + int irq_idx); + + /** + * clear_all_irqs - Clears all the interrupts (i.e. acknowledges + * any asserted IRQs). Useful during reset. + * @intr: HW interrupt handle + * @return: 0 for success, otherwise failure + */ + int (*clear_all_irqs)( + struct sde_hw_intr *intr); + + /** + * disable_all_irqs - Disables all the interrupts. Useful during reset. + * @intr: HW interrupt handle + * @return: 0 for success, otherwise failure + */ + int (*disable_all_irqs)( + struct sde_hw_intr *intr); + + /** + * dispatch_irqs - IRQ dispatcher will call the given callback + * function when a matching interrupt status bit is + * found in the irq mapping table. 
+ * @intr: HW interrupt handle + * @cbfunc: Callback function pointer + * @arg: Argument to pass back during callback + */ + void (*dispatch_irqs)( + struct sde_hw_intr *intr, + void (*cbfunc)(void *arg, int irq_idx), + void *arg); + + /** + * get_interrupt_statuses - Gets and store value from all interrupt + * status registers that are currently fired. + * @intr: HW interrupt handle + */ + void (*get_interrupt_statuses)( + struct sde_hw_intr *intr); + + /** + * clear_interrupt_status - Clears HW interrupt status based on given + * lookup IRQ index. + * @intr: HW interrupt handle + * @irq_idx: Lookup irq index return from irq_idx_lookup + */ + void (*clear_interrupt_status)( + struct sde_hw_intr *intr, + int irq_idx); + + /** + * get_valid_interrupts - Gets a mask of all valid interrupt sources + * within SDE. These are actually status bits + * within interrupt registers that specify the + * source of the interrupt in IRQs. For example, + * valid interrupt sources can be MDP, DSI, + * HDMI etc. + * @intr: HW interrupt handle + * @mask: Returning the interrupt source MASK + * @return: 0 for success, otherwise failure + */ + int (*get_valid_interrupts)( + struct sde_hw_intr *intr, + uint32_t *mask); + + /** + * get_interrupt_sources - Gets the bitmask of the SDE interrupt + * source that are currently fired. 
+ * @intr: HW interrupt handle + * @sources: Returning the SDE interrupt source status bit mask + * @return: 0 for success, otherwise failure + */ + int (*get_interrupt_sources)( + struct sde_hw_intr *intr, + uint32_t *sources); +}; + +/** + * struct sde_hw_intr: hw interrupts handling data structure + * @hw: virtual address mapping + * @ops: function pointer mapping for IRQ handling + * @cache_irq_mask: array of IRQ enable masks reg storage created during init + * @save_irq_status: array of IRQ status reg storage created during init + * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts + * @mask_lock: spinlock for accessing IRQ mask + * @status_lock: spinlock for accessing IRQ status + */ +struct sde_hw_intr { + struct sde_hw_blk_reg_map hw; + struct sde_hw_intr_ops ops; + u32 *cache_irq_mask; + u32 *save_irq_status; + u32 irq_idx_tbl_size; + spinlock_t mask_lock; + spinlock_t status_lock; +}; + +/** + * sde_hw_intr_init(): Initializes the interrupts hw object + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr, + struct sde_mdss_cfg *m); + +/** + * sde_hw_intr_destroy(): Cleanup interrutps hw object + * @intr: pointer to interrupts hw object + */ +void sde_hw_intr_destroy(struct sde_hw_intr *intr); +#endif diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index da9efe55bd1a..55f09b522f52 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -108,6 +108,7 @@ enum sde_pingpong { PINGPONG_2, PINGPONG_3, PINGPONG_4, + PINGPONG_S0, PINGPONG_MAX }; @@ -154,6 +155,24 @@ enum sde_ad { AD_MAX }; +enum sde_cwb { + CWB_0 = 0x1, + CWB_1, + CWB_2, + CWB_3, + CWB_MAX +}; + +enum sde_wd_timer { + WD_TIMER_0 = 0x1, + WD_TIMER_1, + WD_TIMER_2, + WD_TIMER_3, + WD_TIMER_4, + WD_TIMER_5, + WD_TIMER_MAX +}; + /** * MDP HW,Component order color map */ diff --git 
a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h index c8d98a4a485e..38d3fa2fc011 100644 --- a/drivers/gpu/drm/msm/sde/sde_hwio.h +++ b/drivers/gpu/drm/msm/sde/sde_hwio.h @@ -28,6 +28,9 @@ #define HIST_INTR_EN 0x01c #define HIST_INTR_STATUS 0x020 #define HIST_INTR_CLEAR 0x024 +#define INTF_INTR_EN 0x1C0 +#define INTF_INTR_STATUS 0x1C4 +#define INTF_INTR_CLEAR 0x1C8 #define SPLIT_DISPLAY_EN 0x2F4 #define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8 #define DSPP_IGC_COLOR0_RAM_LUTN 0x300 diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c index 73c4d7cb9298..305f51c8b2f3 100644 --- a/drivers/gpu/drm/msm/sde/sde_irq.c +++ b/drivers/gpu/drm/msm/sde/sde_irq.c @@ -12,36 +12,263 @@ #include #include +#include #include "msm_drv.h" #include "sde_kms.h" -void sde_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, - uint32_t old_irqmask) +static void sde_irq_callback_handler(void *arg, int irq_idx) { + struct sde_kms *sde_kms = arg; + struct sde_irq *irq_obj = &sde_kms->irq_obj; + + /* + * Perform registered function callback + */ + if (irq_obj->irq_cb_tbl && irq_obj->irq_cb_tbl[irq_idx].func) + irq_obj->irq_cb_tbl[irq_idx].func( + irq_obj->irq_cb_tbl[irq_idx].arg, + irq_idx); + + /* + * Clear pending interrupt status in HW. + * NOTE: sde_irq_callback_handler is protected by top-level + * spinlock, so it is safe to clear any interrupt status here. 
+ */ + sde_kms->hw_intr->ops.clear_interrupt_status( + sde_kms->hw_intr, + irq_idx); +} + +static void sde_irq_intf_error_handler(void *arg, int irq_idx) +{ + DRM_ERROR("INTF underrun detected, irq_idx=%d\n", irq_idx); +} + +void sde_set_irqmask(struct sde_kms *sde_kms, uint32_t reg, uint32_t irqmask) +{ + if (!sde_kms || !sde_kms->hw_intr || + !sde_kms->hw_intr->ops.set_mask) + return; + + sde_kms->hw_intr->ops.set_mask(sde_kms->hw_intr, reg, irqmask); +} + +int sde_irq_idx_lookup(struct sde_kms *sde_kms, enum sde_intr_type intr_type, + u32 instance_idx) +{ + if (!sde_kms || !sde_kms->hw_intr || + !sde_kms->hw_intr->ops.irq_idx_lookup) + return -EINVAL; + + return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type, + instance_idx); +} + +int sde_enable_irq(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count) +{ + int i; + int ret = 0; + + if (!sde_kms || !irq_idxs || !sde_kms->hw_intr || + !sde_kms->hw_intr->ops.enable_irq) + return -EINVAL; + + for (i = 0; i < irq_count; i++) { + ret = sde_kms->hw_intr->ops.enable_irq( + sde_kms->hw_intr, + irq_idxs[i]); + if (ret) { + DRM_ERROR("Fail to enable IRQ for irq_idx:%d\n", + irq_idxs[i]); + return ret; + } + } + + return ret; +} + +int sde_disable_irq(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count) +{ + int i; + int ret = 0; + + if (!sde_kms || !irq_idxs || !sde_kms->hw_intr || + !sde_kms->hw_intr->ops.disable_irq) + return -EINVAL; + + for (i = 0; i < irq_count; i++) { + ret = sde_kms->hw_intr->ops.disable_irq( + sde_kms->hw_intr, + irq_idxs[i]); + if (ret) { + DRM_ERROR("Fail to disable IRQ for irq_idx:%d\n", + irq_idxs[i]); + return ret; + } + } + + return ret; +} + +int sde_register_irq_callback(struct sde_kms *sde_kms, int irq_idx, + struct sde_irq_callback *register_irq_cb) +{ + struct sde_irq_callback *irq_cb_tbl; + unsigned long irq_flags; + + /* + * We allow NULL register_irq_cb as input for callback registration + */ + if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) + return -EINVAL; + + if (irq_idx < 0 || 
irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) { + DRM_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + irq_cb_tbl = sde_kms->irq_obj.irq_cb_tbl; + spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags); + irq_cb_tbl[irq_idx].func = register_irq_cb ? + register_irq_cb->func : NULL; + irq_cb_tbl[irq_idx].arg = register_irq_cb ? + register_irq_cb->arg : NULL; + spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags); + + return 0; +} + +void sde_clear_all_irqs(struct sde_kms *sde_kms) +{ + if (!sde_kms || !sde_kms->hw_intr || + !sde_kms->hw_intr->ops.clear_all_irqs) + return; + + sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr); +} + +void sde_disable_all_irqs(struct sde_kms *sde_kms) +{ + if (!sde_kms || !sde_kms->hw_intr || + !sde_kms->hw_intr->ops.disable_all_irqs) + return; + + sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr); } void sde_irq_preinstall(struct msm_kms *kms) { + struct sde_kms *sde_kms = to_sde_kms(kms); + + sde_enable(sde_kms); + sde_clear_all_irqs(sde_kms); + sde_disable_all_irqs(sde_kms); + sde_disable(sde_kms); + + spin_lock_init(&sde_kms->irq_obj.cb_lock); + + /* Create irq callbacks for all possible irq_idx */ + sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size; + sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs, + sizeof(struct sde_irq_callback), GFP_KERNEL); + if (!sde_kms->irq_obj.irq_cb_tbl) + DRM_ERROR("Fail to allocate memory of IRQ callback list\n"); } int sde_irq_postinstall(struct msm_kms *kms) { + struct sde_kms *sde_kms = to_sde_kms(kms); + struct sde_irq_callback irq_cb; + int irq_idx; + int i; + + irq_cb.func = sde_irq_intf_error_handler; + irq_cb.arg = sde_kms; + + /* Register interface underrun callback */ + sde_enable(sde_kms); + for (i = 0; i < sde_kms->catalog->intf_count; i++) { + irq_idx = sde_irq_idx_lookup(sde_kms, + SDE_IRQ_TYPE_INTF_UNDER_RUN, i+INTF_0); + sde_register_irq_callback(sde_kms, irq_idx, &irq_cb); + sde_enable_irq(sde_kms, &irq_idx, 
1); + } + sde_disable(sde_kms); + return 0; } void sde_irq_uninstall(struct msm_kms *kms) { + struct sde_kms *sde_kms = to_sde_kms(kms); + + sde_enable(sde_kms); + sde_clear_all_irqs(sde_kms); + sde_disable_all_irqs(sde_kms); + sde_disable(sde_kms); + + kfree(sde_kms->irq_obj.irq_cb_tbl); +} + +static void _sde_irq_mdp_done(struct sde_kms *sde_kms) +{ + /* + * Read interrupt status from all sources. Interrupt status are + * stored within hw_intr. + * Function will also clear the interrupt status after reading. + * Individual interrupt status bit will only get stored if it + * is enabled. + */ + sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr); + + /* + * Dispatch to HW driver to handle interrupt lookup that is being + * fired. When matching interrupt is located, HW driver will call to + * sde_irq_callback_handler with the irq_idx from the lookup table. + * sde_irq_callback_handler will perform the registered function + * callback, and do the interrupt status clearing once the registered + * callback is finished. 
+ */ + sde_kms->hw_intr->ops.dispatch_irqs( + sde_kms->hw_intr, + sde_irq_callback_handler, + sde_kms); } irqreturn_t sde_irq(struct msm_kms *kms) { + struct sde_kms *sde_kms = to_sde_kms(kms); + u32 interrupts; + + sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr, + &interrupts); + + /* + * Taking care of MDP interrupt + */ + if (interrupts & IRQ_SOURCE_MDP) { + interrupts &= ~IRQ_SOURCE_MDP; + _sde_irq_mdp_done(sde_kms); + } + + /* + * Routing all other interrupts to external drivers + */ + while (interrupts) { + irq_hw_number_t hwirq = fls(interrupts) - 1; + + generic_handle_irq(irq_find_mapping( + sde_kms->irqcontroller.domain, hwirq)); + interrupts &= ~(1 << hwirq); + } + return IRQ_HANDLED; } int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { - return 0; + return sde_crtc_vblank(crtc); } void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) @@ -76,6 +303,13 @@ static int sde_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct sde_kms *sde_kms = d->host_data; + uint32_t valid_irqs; + + sde_kms->hw_intr->ops.get_valid_interrupts(sde_kms->hw_intr, + &valid_irqs); + + if (!(valid_irqs & (1 << hwirq))) + return -EPERM; irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq); irq_set_chip_data(irq, sde_kms); diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index bbe1e98a022d..789b870f9da9 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -52,16 +52,14 @@ int sde_enable(struct sde_kms *sde_kms) static void sde_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms)); - + struct sde_kms *sde_kms = to_sde_kms(kms); sde_enable(sde_kms); } static void sde_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms)); - + struct sde_kms *sde_kms = to_sde_kms(kms); 
sde_disable(sde_kms); } @@ -165,30 +163,28 @@ static void sde_preclose(struct msm_kms *kms, struct drm_file *file) static void sde_destroy(struct msm_kms *kms) { - struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms)); + struct sde_kms *sde_kms = to_sde_kms(kms); sde_irq_domain_fini(sde_kms); + sde_hw_intr_destroy(sde_kms->hw_intr); kfree(sde_kms); } -static const struct mdp_kms_funcs kms_funcs = { - .base = { - .hw_init = sde_hw_init, - .irq_preinstall = sde_irq_preinstall, - .irq_postinstall = sde_irq_postinstall, - .irq_uninstall = sde_irq_uninstall, - .irq = sde_irq, - .prepare_commit = sde_prepare_commit, - .complete_commit = sde_complete_commit, - .wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done, - .enable_vblank = sde_enable_vblank, - .disable_vblank = sde_disable_vblank, - .get_format = mdp_get_format, - .round_pixclk = sde_round_pixclk, - .preclose = sde_preclose, - .destroy = sde_destroy, - }, - .set_irqmask = sde_set_irqmask, +static const struct msm_kms_funcs kms_funcs = { + .hw_init = sde_hw_init, + .irq_preinstall = sde_irq_preinstall, + .irq_postinstall = sde_irq_postinstall, + .irq_uninstall = sde_irq_uninstall, + .irq = sde_irq, + .prepare_commit = sde_prepare_commit, + .complete_commit = sde_complete_commit, + .wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done, + .enable_vblank = sde_enable_vblank, + .disable_vblank = sde_disable_vblank, + .get_format = mdp_get_format, + .round_pixclk = sde_round_pixclk, + .preclose = sde_preclose, + .destroy = sde_destroy, }; static int get_clk(struct platform_device *pdev, struct clk **clkp, @@ -219,9 +215,9 @@ struct sde_kms *sde_hw_setup(struct platform_device *pdev) if (!sde_kms) return NULL; - mdp_kms_init(&sde_kms->base, &kms_funcs); + msm_kms_init(&sde_kms->base, &kms_funcs); - kms = &sde_kms->base.base; + kms = &sde_kms->base; sde_kms->mmio = msm_ioremap(pdev, "mdp_phys", "SDE"); if (IS_ERR(sde_kms->mmio)) { @@ -440,7 +436,7 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) } 
sde_kms->dev = dev; - msm_kms = &sde_kms->base.base; + msm_kms = &sde_kms->base; /* * Currently hardcoding to MDSS version 1.7.0 (8996) @@ -483,6 +479,13 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth; dev->mode_config.max_height = 4096; + sde_enable(sde_kms); + sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog); + sde_disable(sde_kms); + + if (IS_ERR_OR_NULL(sde_kms->hw_intr)) + goto fail; + return msm_kms; fail: diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 441398b7e824..5f1a52c52641 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -18,9 +18,32 @@ #include "mdp/mdp_kms.h" #include "sde_hw_catalog.h" #include "sde_hw_mdss.h" +#include "sde_hw_interrupts.h" + +/* + * struct sde_irq_callback - IRQ callback handlers + * @func: intr handler + * @arg: argument for the handler + */ +struct sde_irq_callback { + void (*func)(void *arg, int irq_idx); + void *arg; +}; + +/** + * struct sde_irq: IRQ structure contains callback registration info + * @total_irq: total number of irq_idx obtained from HW interrupts mapping + * @irq_cb_tbl: array of IRQ callbacks setting + * @cb_lock: callback lock + */ +struct sde_irq { + u32 total_irqs; + struct sde_irq_callback *irq_cb_tbl; + spinlock_t cb_lock; +}; struct sde_kms { - struct mdp_kms base; + struct msm_kms base; struct drm_device *dev; int rev; struct sde_mdss_cfg *catalog; @@ -48,6 +71,14 @@ struct sde_kms { unsigned long enabled_mask; struct irq_domain *domain; } irqcontroller; + + struct sde_hw_intr *hw_intr; + struct sde_irq irq_obj; +}; + +struct vsync_info { + u32 frame_count; + u32 line_count; }; #define to_sde_kms(x) container_of(x, struct sde_kms, base) @@ -77,12 +108,95 @@ struct sde_plane_state { int sde_disable(struct sde_kms *sde_kms); int sde_enable(struct sde_kms *sde_kms); -void sde_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, - 
uint32_t old_irqmask); +/** + * IRQ functions + */ +int sde_irq_domain_init(struct sde_kms *sde_kms); +int sde_irq_domain_fini(struct sde_kms *sde_kms); void sde_irq_preinstall(struct msm_kms *kms); int sde_irq_postinstall(struct msm_kms *kms); void sde_irq_uninstall(struct msm_kms *kms); irqreturn_t sde_irq(struct msm_kms *kms); + +/** + * sde_set_irqmask - IRQ helper function for writing IRQ mask + * to SDE HW interrupt register. + * @sde_kms: SDE handle + * @reg_off: SDE HW interrupt register offset + * @irqmask: IRQ mask + */ +void sde_set_irqmask( + struct sde_kms *sde_kms, + uint32_t reg_off, + uint32_t irqmask); + +/** + * sde_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW + * interrupt mapping table. + * @sde_kms: SDE handle + * @intr_type: SDE HW interrupt type for lookup + * @instance_idx: SDE HW block instance defined in sde_hw_mdss.h + * @return: irq_idx or -EINVAL when fail to lookup + */ +int sde_irq_idx_lookup( + struct sde_kms *sde_kms, + enum sde_intr_type intr_type, + uint32_t instance_idx); + +/** + * sde_enable_irq - IRQ helper function for enabling one or more IRQs + * @sde_kms: SDE handle + * @irq_idxs: Array of irq index + * @irq_count: Number of irq_idx provided in the array + * @return: 0 for success enabling IRQ, otherwise failure + */ +int sde_enable_irq( + struct sde_kms *sde_kms, + int *irq_idxs, + uint32_t irq_count); + +/** + * sde_disable_irq - IRQ helper function for diabling one of more IRQs + * @sde_kms: SDE handle + * @irq_idxs: Array of irq index + * @irq_count: Number of irq_idx provided in the array + * @return: 0 for success disabling IRQ, otherwise failure + */ +int sde_disable_irq( + struct sde_kms *sde_kms, + int *irq_idxs, + uint32_t irq_count); + +/** + * sde_register_irq_callback - For registering callback function on IRQ + * interrupt + * @sde_kms: SDE handle + * @irq_idx: irq index + * @irq_cb: IRQ callback structure, containing callback function + * and argument. 
Passing NULL for irq_cb will unregister + * the callback for the given irq_idx + * @return: 0 for success registering callback, otherwise failure + */ +int sde_register_irq_callback( + struct sde_kms *sde_kms, + int irq_idx, + struct sde_irq_callback *irq_cb); + +/** + * sde_clear_all_irqs - Clearing all SDE IRQ interrupt status + * @sde_kms: SDE handle + */ +void sde_clear_all_irqs(struct sde_kms *sde_kms); + +/** + * sde_disable_all_irqs - Diabling all SDE IRQ interrupt + * @sde_kms: SDE handle + */ +void sde_disable_all_irqs(struct sde_kms *sde_kms); + +/** + * Vblank enable/disable functions + */ int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index fc27a7ede026..f0f4b4e6e5e2 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -690,7 +690,16 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type; priv = dev->dev_private; - kms = to_sde_kms(to_mdp_kms(priv->kms)); + if (!priv) { + DRM_ERROR("[%u]Private data is NULL\n", pipe); + goto exit; + } + + if (!priv->kms) { + DRM_ERROR("[%u]Invalid KMS reference\n", pipe); + goto exit; + } + kms = to_sde_kms(priv->kms); psde = kzalloc(sizeof(*psde), GFP_KERNEL); if (!psde) { @@ -753,6 +762,6 @@ fail: pr_err("%s: Plane creation failed\n", __func__); if (plane) sde_plane_destroy(plane); - +exit: return ERR_PTR(ret); } -- GitLab From a3d4b37283c0ababc5cb0335d5eb2840e4741b8f Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Fri, 24 Jun 2016 18:27:48 -0400 Subject: [PATCH 015/310] drm/msm/sde: adding implementation for mdp_top in hw driver Add mdp_top block support to hw layer, provides split pipe control, and interface select. Use mdp_top from intf block to program interface select. 
Change-Id: I15f2070f7d552a6ec11bda0302f362e22ca6e84e Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/sde/sde_hw_intf.c | 14 ++- drivers/gpu/drm/msm/sde/sde_hw_intf.h | 1 + drivers/gpu/drm/msm/sde/sde_hw_lm.c | 4 +- drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h | 1 + drivers/gpu/drm/msm/sde/sde_hw_mdp_top.c | 110 +++++++++++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_mdp_top.h | 66 ++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_sspp.c | 2 +- 8 files changed, 193 insertions(+), 6 deletions(-) create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_top.c create mode 100644 drivers/gpu/drm/msm/sde/sde_hw_mdp_top.h diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 7c73657b399e..b7fa5c05e12d 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -89,5 +89,6 @@ obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \ sde/sde_hw_sspp.o \ sde/sde_hw_wb.o \ sde/sde_hw_pingpong.o \ + sde/sde_hw_mdp_top.o \ sde/sde_hw_interrupts.o \ sde/sde_mdp_formats.o diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 7a1c37c65366..95d82bcd1dbf 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_intf.h" +#include "sde_hw_mdp_top.h" #define INTF_TIMING_ENGINE_EN 0x000 #define INTF_CONFIG 0x004 @@ -205,10 +206,16 @@ static void sde_hw_intf_enable_timing_engine( /* Display interface select */ if (enable) { - intf_sel = SDE_REG_READ(c, DISP_INTF_SEL); + /* top block */ + struct sde_hw_mdp *mdp = sde_hw_mdptop_init(MDP_TOP, + c->base_off, + intf->mdss); + struct sde_hw_blk_reg_map *top = &mdp->hw; - intf_sel |= (intf->cap->type << ((intf->idx) * 8)); - SDE_REG_WRITE(c, DISP_INTF_SEL, intf_sel); + intf_sel = SDE_REG_READ(top, DISP_INTF_SEL); + + intf_sel |= (intf->cap->type << ((intf->idx - INTF_0) * 8)); + SDE_REG_WRITE(top, DISP_INTF_SEL, 
intf_sel); } SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN, @@ -366,6 +373,7 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, */ c->idx = idx; c->cap = cfg; + c->mdss = m; _setup_intf_ops(&c->ops, c->cap->features); /* diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h index 28ff5c71163d..63623f48e6b6 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h @@ -85,6 +85,7 @@ struct sde_hw_intf { /* intf */ enum sde_intf idx; const struct sde_intf_cfg *cap; + const struct sde_mdss_cfg *mdss; /* ops */ struct sde_hw_intf_ops ops; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c index 0f055faad4b2..97f5645a6f39 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -76,7 +76,7 @@ static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx, SDE_REG_WRITE(c, LM_OUT_SIZE, outsize); /* SPLIT_LEFT_RIGHT */ - opmode = (opmode & ~(1 << 31)) | (mixer->right_mixer & 1 << 31); + opmode = (opmode & ~(1 << 31)) | ((mixer->right_mixer) ? 
(1 << 31) : 0); SDE_REG_WRITE(c, LM_OP_MODE, opmode); } @@ -128,7 +128,7 @@ static void sde_hw_lm_setup_blendcfg(struct sde_hw_mixer *ctx, fg->const_alpha); SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg->const_alpha); - SDE_REG_WRITE(c, LM_OP_MODE, blend_op); + SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); } static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h index 00f1ee4ff468..4ed35ffdd245 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h @@ -14,6 +14,7 @@ #define _SDE_HW_MDP_CTL_H #include "sde_hw_mdss.h" +#include "sde_hw_mdp_util.h" #include "sde_hw_catalog.h" struct sde_hw_ctl; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_top.c b/drivers/gpu/drm/msm/sde/sde_hw_mdp_top.c new file mode 100644 index 000000000000..66a0e612c8fe --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_top.c @@ -0,0 +1,110 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_hwio.h" +#include "sde_hw_catalog.h" +#include "sde_hw_mdp_top.h" + +#define SPLIT_DISPLAY_ENABLE 0x2F4 +#define LOWER_PIPE_CTRL 0x2F8 +#define UPPER_PIPE_CTRL 0x3F0 +#define TE_LINE_INTERVAL 0x3F4 + +static void sde_hw_setup_split_pipe_control(struct sde_hw_mdp *mdp, + struct split_pipe_cfg *cfg) +{ + struct sde_hw_blk_reg_map *c = &mdp->hw; + u32 upper_pipe; + u32 lower_pipe; + + if (cfg->en) { + upper_pipe = BIT(8); + lower_pipe = BIT(8); + + if (cfg->mode == INTF_MODE_CMD) { + upper_pipe |= BIT(0); + lower_pipe |= BIT(0); + } + + SDE_REG_WRITE(c, LOWER_PIPE_CTRL, lower_pipe); + SDE_REG_WRITE(c, UPPER_PIPE_CTRL, upper_pipe); + } + + SDE_REG_WRITE(c, SPLIT_DISPLAY_ENABLE, cfg->en & 0x1); +} + +static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, + unsigned long cap) +{ + ops->setup_split_pipe = sde_hw_setup_split_pipe_control; +} + +static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp, + const struct sde_mdss_cfg *m, + void __iomem *addr, + struct sde_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->mdp_count; i++) { + if (mdp == m->mdp[i].id) { + b->base_off = addr; + b->blk_off = m->mdp[i].base; + b->hwversion = m->hwversion; + return &m->mdp[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx, + void __iomem *addr, + const struct sde_mdss_cfg *m) +{ + static struct sde_hw_mdp *c; + const struct sde_mdp_cfg *cfg; + + /* mdp top is singleton */ + if (c) { + pr_err(" %s returning %p", __func__, c); + return c; + } + + c = kzalloc(sizeof(*c), GFP_KERNEL); + pr_err(" %s returning %p", __func__, c); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _top_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* + * Assign ops + */ + c->idx = idx; + c->cap = cfg; + _setup_mdp_ops(&c->ops, c->cap->features); + + /* + * Perform any default initialization for the intf + */ + return c; +} + +void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp) 
+{ +} + diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_top.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_top.h new file mode 100644 index 000000000000..10c008c4675d --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_top.h @@ -0,0 +1,66 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_HW_MDP_TOP_H +#define _SDE_HW_MDP_TOP_H + +#include "sde_hw_catalog.h" +#include "sde_hw_mdss.h" +#include "sde_hw_mdp_util.h" + +struct sde_hw_mdp; + +/** + * struct split_pipe_cfg - pipe configuration for dual display panels + * @en : Enable/disable dual pipe confguration + * @mode : Panel interface mode + */ +struct split_pipe_cfg { + bool en; + enum sde_intf_mode mode; +}; + +/** + * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions + * Assumption is these functions will be called after clocks are enabled. 
+ * @setup_split_pipe : Programs the pipe control registers + */ +struct sde_hw_mdp_ops { + void (*setup_split_pipe)(struct sde_hw_mdp *mdp, + struct split_pipe_cfg *p); +}; + +struct sde_hw_mdp { + /* base */ + struct sde_hw_blk_reg_map hw; + + /* intf */ + enum sde_mdp idx; + const struct sde_mdp_cfg *cap; + + /* ops */ + struct sde_hw_mdp_ops ops; +}; + +/** + * sde_hw_intf_init - initializes the intf driver for the passed interface idx + * @idx: Interface index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: Pointer to mdss catalog data + */ +struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx, + void __iomem *addr, + const struct sde_mdss_cfg *m); + +void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp); + +#endif /*_SDE_HW_MDP_TOP_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index e5f673fffbff..2800a71fea0e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -414,7 +414,7 @@ static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx, return; for (i = 0; i < cfg->src.num_planes; i++) - SDE_REG_WRITE(c, SSPP_SRC0_ADDR + idx + i*0x4, + SDE_REG_WRITE(c, SSPP_SRC0_ADDR + idx + i*0x4, cfg->addr.plane[i]); } -- GitLab From 7af0bbf65f8b5193a2efa0b0633e5631a6e67884 Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Sun, 26 Jun 2016 22:12:09 -0400 Subject: [PATCH 016/310] drm/msm/sde: add resource manager to enable dual dsi Add resource manager for retrieving the control paths and layer mixers. Encoder and CRTC use this to get the hw driver contexts for those blocks. 
Change-Id: Id6789ef24616197a295bcb5687a0de659cc11e5d Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/sde/sde_crtc.c | 282 +++++++++++------- drivers/gpu/drm/msm/sde/sde_crtc.h | 79 +++++ drivers/gpu/drm/msm/sde/sde_encoder.c | 62 +++- drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 7 +- .../gpu/drm/msm/sde/sde_encoder_phys_vid.c | 87 +++++- drivers/gpu/drm/msm/sde/sde_kms.c | 23 +- drivers/gpu/drm/msm/sde/sde_kms.h | 88 +++++- drivers/gpu/drm/msm/sde/sde_kms_utils.c | 173 +++++++++++ 9 files changed, 655 insertions(+), 147 deletions(-) create mode 100644 drivers/gpu/drm/msm/sde/sde_crtc.h create mode 100644 drivers/gpu/drm/msm/sde/sde_kms_utils.c diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index b7fa5c05e12d..05b6ca9b5c55 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -43,6 +43,7 @@ msm-y := \ sde/sde_encoder_phys_vid.o \ sde/sde_encoder_phys_cmd.o \ sde/sde_irq.o \ + sde/sde_kms_utils.o \ sde/sde_kms.o \ sde/sde_plane.o \ msm_atomic.o \ diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index d5bdf0c71658..69d445987049 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -19,40 +19,11 @@ #include "sde_kms.h" #include "sde_hw_lm.h" #include "sde_hw_mdp_ctl.h" +#include "sde_crtc.h" -#define CRTC_DUAL_MIXERS 2 -#define PENDING_FLIP 2 - -#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages) - -struct sde_crtc_mixer { - struct sde_hw_dspp *hw_dspp; - struct sde_hw_mixer *hw_lm; - struct sde_hw_ctl *hw_ctl; - u32 flush_mask; -}; - -struct sde_crtc { - struct drm_crtc base; - char name[8]; - struct drm_plane *plane; - struct drm_plane *planes[8]; - struct drm_encoder *encoder; - int id; - bool enabled; - - spinlock_t lm_lock; /* protect registers */ - - /* HW Resources reserved for the crtc */ - u32 num_ctls; - u32 num_mixers; - struct sde_crtc_mixer 
mixer[CRTC_DUAL_MIXERS]; - - /*if there is a pending flip, these will be non-null */ - struct drm_pending_vblank_event *event; -}; - -#define to_sde_crtc(x) container_of(x, struct sde_crtc, base) +#define CTL(i) (CTL_0 + (i)) +#define LM(i) (LM_0 + (i)) +#define INTF(i) (INTF_0 + (i)) static struct sde_kms *get_kms(struct drm_crtc *crtc) { @@ -60,89 +31,91 @@ static struct sde_kms *get_kms(struct drm_crtc *crtc) return to_sde_kms(priv->kms); } -static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx, - void __iomem *addr, - struct sde_mdss_cfg *m) -{ - /* - * This module keeps track of the requested hw resources state, - * if the requested resource is being used it returns NULL, - * otherwise it returns the hw driver struct - */ - return sde_hw_ctl_init(idx, addr, m); -} - -static inline struct sde_hw_mixer *sde_crtc_rm_get_mixer(enum sde_lm idx, - void __iomem *addr, - struct sde_mdss_cfg *m) -{ - /* - * This module keeps track of the requested hw resources state, - * if the requested resource is being used it returns NULL, - * otherwise it returns the hw driver struct - */ - return sde_hw_lm_init(idx, addr, m); -} - static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc, struct drm_encoder *encoder) { - /* - * Assign CRTC resources - * num_ctls; - * num_mixers; - * sde_lm mixer[CRTC_MAX_PIPES]; - * sde_ctl ctl[CRTC_MAX_PIPES]; - */ struct sde_crtc *sde_crtc = to_sde_crtc(crtc); - struct sde_kms *kms = get_kms(crtc); - enum sde_lm lm_id[CRTC_DUAL_MIXERS]; - enum sde_ctl ctl_id[CRTC_DUAL_MIXERS]; - int i; - - if (!kms) { - DBG("[%s] invalid kms\n", __func__); + struct sde_kms *sde_kms = get_kms(crtc); + struct sde_encoder_hw_resources enc_hw_res; + const struct sde_hw_res_map *plat_hw_res_map; + enum sde_lm unused_lm_id[CRTC_DUAL_MIXERS] = {0}; + enum sde_lm lm_idx; + int i, count = 0; + + if (!sde_kms) { + DBG("[%s] invalid kms", __func__); return -EINVAL; } - if (!kms->mmio) + if (!sde_kms->mmio) return -EINVAL; - /* - * simple check 
validate against catalog - */ - sde_crtc->num_ctls = 1; - sde_crtc->num_mixers = 1; - ctl_id[0] = CTL_0; - lm_id[0] = LM_0; - - /* - * need to also enable MDP core clock and AHB CLK - * before touching HW driver - */ - DBG("%s Enable clocks\n", __func__); - sde_enable(kms); - for (i = 0; i < sde_crtc->num_ctls; i++) { - sde_crtc->mixer[i].hw_ctl = sde_crtc_rm_get_ctl_path(ctl_id[i], - kms->mmio, kms->catalog); - if (!sde_crtc->mixer[i].hw_ctl) { - DBG("[%s], Invalid ctl_path", __func__); - return -EACCES; + /* Get unused LMs */ + for (i = 0; i < sde_kms->catalog->mixer_count; i++) { + if (!sde_rm_get_mixer(sde_kms, LM(i))) { + unused_lm_id[count++] = LM(i); + if (count == CRTC_DUAL_MIXERS) + break; } } - for (i = 0; i < sde_crtc->num_mixers; i++) { - sde_crtc->mixer[i].hw_lm = sde_crtc_rm_get_mixer(lm_id[i], - kms->mmio, kms->catalog); - if (!sde_crtc->mixer[i].hw_lm) { - DBG("[%s], Invalid ctl_path", __func__); - return -EACCES; + /* query encoder resources */ + sde_encoder_get_hw_resources(sde_crtc->encoder, &enc_hw_res); + + /* parse encoder hw resources, find CTL paths */ + for (i = CTL_0; i <= sde_kms->catalog->ctl_count; i++) { + WARN_ON(sde_crtc->num_ctls > CRTC_DUAL_MIXERS); + if (enc_hw_res.ctls[i]) { + struct sde_crtc_mixer *mixer = + &sde_crtc->mixer[sde_crtc->num_ctls]; + mixer->hw_ctl = sde_rm_get_ctl_path(sde_kms, i); + if (IS_ERR_OR_NULL(mixer->hw_ctl)) { + DBG("[%s], Invalid ctl_path", __func__); + return -EACCES; + } + sde_crtc->num_ctls++; } } + + /* shortcut this process if encoder has no ctl paths */ + if (!sde_crtc->num_ctls) + return 0; + /* - * need to disable MDP core clock and AHB CLK + * Get default LMs if specified in platform config + * other wise acquire the free LMs */ - sde_disable(kms); + for (i = INTF_0; i <= sde_kms->catalog->intf_count; i++) { + if (enc_hw_res.intfs[i]) { + struct sde_crtc_mixer *mixer = + &sde_crtc->mixer[sde_crtc->num_mixers]; + plat_hw_res_map = sde_rm_get_res_map(sde_kms, i); + + lm_idx = plat_hw_res_map->lm; 
+ if (!lm_idx) + lm_idx = unused_lm_id[sde_crtc->num_mixers]; + + DBG("Acquiring LM %d", lm_idx); + mixer->hw_lm = sde_rm_acquire_mixer(sde_kms, lm_idx); + if (IS_ERR_OR_NULL(mixer->hw_lm)) { + DBG("[%s], Invalid mixer", __func__); + return -EACCES; + } + /* interface info */ + mixer->intf_idx = i; + mixer->mode = enc_hw_res.intfs[i]; + sde_crtc->num_mixers++; + } + } + + DBG("control paths %d, num_mixers %d, lm[0] %d, ctl[0] %d ", + sde_crtc->num_ctls, sde_crtc->num_mixers, + sde_crtc->mixer[0].hw_lm->idx, + sde_crtc->mixer[0].hw_ctl->idx); + if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) + DBG("lm[1] %d, ctl[1], %d", + sde_crtc->mixer[1].hw_lm->idx, + sde_crtc->mixer[1].hw_ctl->idx); return 0; } @@ -278,6 +251,7 @@ static void blend_setup(struct drm_crtc *crtc) unsigned long flags; int i, j, plane_cnt = 0; + DBG(""); spin_lock_irqsave(&sde_crtc->lm_lock, flags); /* ctl could be reserved already */ @@ -353,10 +327,104 @@ out: spin_unlock_irqrestore(&sde_crtc->lm_lock, flags); } +/* if file!=NULL, this is preclose potential cancel-flip path */ +static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) +{ + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_pending_vblank_event *event; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = sde_crtc->event; + if (event) { + /* if regular vblank case (!file) or if cancel-flip from + * preclose on file that requested flip, then send the + * event: + */ + if (!file || (event->base.file_priv == file)) { + sde_crtc->event = NULL; + DBG("%s: send event: %pK", sde_crtc->name, event); + drm_send_vblank_event(dev, sde_crtc->id, event); + } + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static void sde_crtc_vblank_cb(void *data) +{ + struct drm_crtc *crtc = (struct drm_crtc *)data; + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + unsigned pending; + + /* unregister callback */ + 
sde_encoder_register_vblank_callback(sde_crtc->encoder, NULL, NULL); + + pending = atomic_xchg(&sde_crtc->pending, 0); + + if (pending & PENDING_FLIP) + complete_flip(crtc, NULL); +} + +static int frame_flushed(struct sde_crtc *sde_crtc) +{ + struct vsync_info vsync; + + /* encoder get vsync_info */ + /* if frame_count does not match frame is flushed */ + sde_encoder_get_vsync_info(sde_crtc->encoder, &vsync); + + return (vsync.frame_count & sde_crtc->vsync_count); + +} + +void sde_crtc_wait_for_commit_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + u32 pending; + int i, ret; + + /* ref count the vblank event */ + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + /* register callback */ + sde_encoder_register_vblank_callback(sde_crtc->encoder, + sde_crtc_vblank_cb, + (void *)crtc); + + /* wait */ + pending = atomic_read(&sde_crtc->pending); + if (pending & PENDING_FLIP) { + wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + (frame_flushed(sde_crtc) != 0), + msecs_to_jiffies(CRTC_MAX_WAIT_ONE_FRAME)); + if (ret <= 0) + dev_warn(dev->dev, "vblank time out, crtc=%d\n", + sde_crtc->id); + } + + for (i = 0; i < sde_crtc->num_ctls; i++) + sde_crtc->mixer[i].flush_mask = 0; + + /* release */ + drm_crtc_vblank_put(crtc); +} + static void request_pending(struct drm_crtc *crtc, u32 pending) { - DBG(""); + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct vsync_info vsync; + + /* request vsync info, cache the current frame count */ + sde_encoder_get_vsync_info(sde_crtc->encoder, &vsync); + sde_crtc->vsync_count = vsync.frame_count; + + atomic_or(pending, &sde_crtc->pending); } + /** * Flush the CTL PATH */ @@ -369,14 +437,12 @@ static u32 crtc_flush_all(struct drm_crtc *crtc) DBG(""); for (i = 0; i < sde_crtc->num_ctls; i++) { - /* - * Query flush_mask from encoder - * and append to the ctl_path flush_mask - */ ctl = sde_crtc->mixer[i].hw_ctl; ctl->ops.get_bitmask_intf(ctl, 
&(sde_crtc->mixer[i].flush_mask), - INTF_1); + sde_crtc->mixer[i].intf_idx); + DBG("Flushing CTL_ID %d, flush_mask %x", ctl->idx, + sde_crtc->mixer[i].flush_mask); ctl->ops.setup_flush(ctl, sde_crtc->mixer[i].flush_mask); } @@ -425,7 +491,7 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; unsigned long flags; - DBG(""); + DBG("%s: event: %pK", sde_crtc->name, crtc->state->event); WARN_ON(sde_crtc->event); @@ -605,6 +671,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, return ERR_PTR(-EINVAL); } - DBG("%s: Successfully initialized crtc\n", __func__); + DBG("%s: Successfully initialized crtc", __func__); return crtc; } diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h new file mode 100644 index 000000000000..9f14f999913d --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SDE_CRTC_H_ +#define _SDE_CRTC_H_ + +#include "drm_crtc.h" + +#define DBG(fmt, ...) 
DRM_DEBUG(fmt"\n", ##__VA_ARGS__) + +#define CRTC_DUAL_MIXERS 2 +#define PENDING_FLIP 2 +/* worst case one frame wait time based on 30 FPS : 33.33ms*/ +#define CRTC_MAX_WAIT_ONE_FRAME 34 +#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages) + +/** + * struct sde_crtc_mixer - stores the map for each virtual pipeline in the CRTC + * @hw_dspp : DSPP HW Driver context + * @hw_lm : LM HW Driver context + * @hw_ctl : CTL Path HW driver context + * @intf_idx : Interface idx + * @mode : Interface mode Active/CMD + * @flush_mask : Flush mask value for this commit + */ +struct sde_crtc_mixer { + struct sde_hw_dspp *hw_dspp; + struct sde_hw_mixer *hw_lm; + struct sde_hw_ctl *hw_ctl; + enum sde_intf intf_idx; + enum sde_intf_mode mode; + u32 flush_mask; +}; + +/** + * struct sde_crtc - virtualized CRTC data structure + * @base : Base drm crtc structure + * @name : ASCII description of this crtc + * @encoder : Associated drm encoder object + * @id : Unique crtc identifier + * @lm_lock : LM register access spinlock + * @num_ctls : Number of ctl paths in use + * @num_mixers : Number of mixers in use + * @mixer : List of active mixers + * @event : Pointer to last received drm vblank event + * @pending : Whether or not an update is pending + * @vsync_count : Running count of received vsync events + */ +struct sde_crtc { + struct drm_crtc base; + char name[8]; + struct drm_encoder *encoder; + int id; + + spinlock_t lm_lock; /* protect registers */ + + /* HW Resources reserved for the crtc */ + u32 num_ctls; + u32 num_mixers; + struct sde_crtc_mixer mixer[CRTC_DUAL_MIXERS]; + + /*if there is a pending flip, these will be non-null */ + struct drm_pending_vblank_event *event; + atomic_t pending; + u32 vsync_count; +}; + +#define to_sde_crtc(x) container_of(x, struct sde_crtc, base) + +#endif /* _SDE_CRTC_H_ */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 43be77e26f2d..63bd58e90b6c 100644 --- 
a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -148,7 +148,7 @@ static void sde_encoder_destroy(struct drm_encoder *drm_enc) if (sde_enc->num_phys_encs) { DRM_ERROR("Expected num_phys_encs to be 0 not %d\n", - sde_enc->num_phys_encs); + sde_enc->num_phys_encs); } drm_encoder_cleanup(drm_enc); @@ -201,6 +201,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc, { struct sde_encoder_virt *sde_enc = NULL; int i = 0; + bool splitmode = false; DBG(""); @@ -211,11 +212,23 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc, sde_enc = to_sde_encoder_virt(drm_enc); + /* + * Panel is driven by two interfaces ,each interface drives half of + * the horizontal + */ + if (sde_enc->num_phys_encs == 2) + splitmode = true; + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - - if (phys && phys->phys_ops.mode_set) - phys->phys_ops.mode_set(phys, mode, adjusted_mode); + if (phys) { + phys->phys_ops.mode_set(phys, + mode, + adjusted_mode, + splitmode); + if (memcmp(mode, adjusted_mode, sizeof(*mode)) != 0) + DRM_ERROR("adjusted modes not supported\n"); + } } } @@ -223,6 +236,7 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc = NULL; int i = 0; + bool splitmode = false; DBG(""); @@ -235,10 +249,19 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) bs_set(sde_enc, 1); + if (sde_enc->num_phys_encs == 2) + splitmode = true; + + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; if (phys && phys->phys_ops.enable) + + /* enable/disable dual interface top config */ + if (phys->phys_ops.enable_split_config) + phys->phys_ops.enable_split_config(phys, + splitmode); phys->phys_ops.enable(phys); } } @@ -380,13 +403,11 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 
* h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right */ + const struct sde_hw_res_map *hw_res_map = NULL; enum sde_intf intf_idx = INTF_MAX; - enum sde_ctl ctl_idx = CTL_0; + enum sde_ctl ctl_idx = CTL_MAX; u32 controller_id = disp_info->h_tile_instance[i]; - if (intf_type == INTF_HDMI) - ctl_idx = CTL_2; - DBG("h_tile_instance %d = %d", i, controller_id); intf_idx = sde_encoder_get_intf(sde_kms->catalog, @@ -396,6 +417,12 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, ret = -EINVAL; } + hw_res_map = sde_rm_get_res_map(sde_kms, intf_idx); + if (IS_ERR_OR_NULL(hw_res_map)) + ret = -EINVAL; + else + ctl_idx = hw_res_map->ctl; + /* Create both VID and CMD Phys Encoders here */ if (!ret) ret = sde_encoder_virt_add_phys_vid_enc( @@ -461,6 +488,25 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags); } +void sde_encoder_get_vsync_info(struct drm_encoder *drm_enc, + struct vsync_info *vsync) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct sde_encoder_phys *phys; + + DBG(""); + + if (!vsync) { + DRM_ERROR("Invalid pointer"); + return; + } + + /* we get the vsync info from the intf at index 0: master index */ + phys = sde_enc->phys_encs[0]; + if (phys) + phys->phys_ops.get_vsync_info(phys, vsync); +} + /* encoders init, * initialize encoder based on displays */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 27fc11175c19..d35e084f9bef 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -30,7 +30,8 @@ struct sde_encoder_virt_ops { struct sde_encoder_phys_ops { void (*mode_set)(struct sde_encoder_phys *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); + struct drm_display_mode *adjusted_mode, + bool splitmode); bool (*mode_fixup)(struct sde_encoder_phys *encoder, const struct 
drm_display_mode *mode, struct drm_display_mode *adjusted_mode); @@ -39,6 +40,10 @@ struct sde_encoder_phys_ops { void (*destroy)(struct sde_encoder_phys *encoder); void (*get_hw_resources)(struct sde_encoder_phys *encoder, struct sde_encoder_hw_resources *hw_res); + void (*get_vsync_info)(struct sde_encoder_phys *enc, + struct vsync_info *vsync); + void (*enable_split_config)(struct sde_encoder_phys *enc, + bool enable); }; struct sde_encoder_phys { diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 33d1a8eef7a5..aefa11d5cdde 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -1,5 +1,4 @@ -/* - * Copyright (c) 2015 The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -9,7 +8,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * */ #include "msm_drv.h" @@ -19,6 +17,7 @@ #include "sde_encoder_phys.h" #include "sde_mdp_formats.h" +#include "sde_hw_mdp_top.h" #define VBLANK_TIMEOUT msecs_to_jiffies(100) @@ -232,14 +231,26 @@ static void sde_encoder_phys_vid_flush_intf(struct sde_encoder_phys *phys_enc) ctl->idx, flush_mask, intf->idx); } -static void sde_encoder_phys_vid_mode_set( - struct sde_encoder_phys *phys_enc, - struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) +static void sde_encoder_phys_vid_mode_set(struct sde_encoder_phys *phys_enc, + struct drm_display_mode *mode, + struct drm_display_mode + *adjusted_mode, + bool splitmode) { - phys_enc->cached_mode = *adj_mode; - DBG("intf %d, caching mode:", phys_enc->hw_intf->idx); - drm_mode_debug_printmodeline(adj_mode); + mode = adjusted_mode; + phys_enc->cached_mode = *adjusted_mode; + if (splitmode) { + phys_enc->cached_mode.hdisplay >>= 1; + phys_enc->cached_mode.htotal >>= 1; + phys_enc->cached_mode.hsync_start >>= 1; + phys_enc->cached_mode.hsync_end >>= 1; + } + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, + mode->type, mode->flags); } static void sde_encoder_phys_vid_setup_timing_engine( @@ -428,8 +439,57 @@ static void sde_encoder_phys_vid_get_hw_resources( struct sde_encoder_phys *phys_enc, struct sde_encoder_hw_resources *hw_res) { + struct msm_drm_private *priv = phys_enc->parent->dev->dev_private; + struct sde_kms *sde_kms = to_sde_kms(priv->kms); + const struct sde_hw_res_map *hw_res_map; + + DBG("Intf %d\n", phys_enc->hw_intf->idx); + + hw_res->intfs[phys_enc->hw_intf->idx] = INTF_MODE_VIDEO; + /* + * defaults should not be in use, + * otherwise signal/return failure + */ + hw_res_map = sde_rm_get_res_map(sde_kms, phys_enc->hw_intf->idx); + + /* This is video mode panel so PINGPONG will be 
in by-pass mode + * only assign ctl path.For cmd panel check if pp_split is + * enabled, override default map + */ + hw_res->ctls[hw_res_map->ctl] = true; +} + +/** + * video mode will use the intf (get_status) + * cmd mode will use the pingpong (get_vsync_info) + * to get this information + */ +static void sde_encoder_intf_get_vsync_info(struct sde_encoder_phys *phys_enc, + struct vsync_info *vsync) +{ + struct intf_status status; + DBG(""); - hw_res->intfs[phys_enc->hw_intf->idx] = true; + phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &status); + vsync->frame_count = status.frame_count; + vsync->line_count = status.line_count; + DBG(" sde_encoder_intf_get_vsync_info, count %d", vsync->frame_count); +} + +static void sde_encoder_intf_split_config(struct sde_encoder_phys *phys_enc, + bool enable) +{ + struct msm_drm_private *priv = phys_enc->parent->dev->dev_private; + struct sde_kms *sde_kms = to_sde_kms(priv->kms); + struct sde_hw_mdp *mdp = sde_hw_mdptop_init(MDP_TOP, sde_kms->mmio, + sde_kms->catalog); + struct split_pipe_cfg cfg; + + DBG("%p", mdp); + cfg.en = true; + cfg.mode = INTF_MODE_VIDEO; + if (!IS_ERR_OR_NULL(mdp)) + mdp->ops.setup_split_pipe(mdp, &cfg); } static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops) @@ -440,6 +500,8 @@ static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops) ops->disable = sde_encoder_phys_vid_disable; ops->destroy = sde_encoder_phys_vid_destroy; ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources; + ops->get_vsync_info = sde_encoder_intf_get_vsync_info; + ops->enable_split_config = sde_encoder_intf_split_config; } struct sde_encoder_phys *sde_encoder_phys_vid_init( @@ -472,8 +534,7 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init( goto fail; } - phys_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio, - sde_kms->catalog); + phys_enc->hw_ctl = sde_rm_acquire_ctl_path(sde_kms, ctl_idx); if (!phys_enc->hw_ctl) { ret = -ENOMEM; goto fail; diff --git 
a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 789b870f9da9..9ca4e325e13b 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -21,18 +21,21 @@ static const char * const iommu_ports[] = { "mdp_0", }; +static const struct sde_hw_res_map res_table[INTF_MAX] = { + { SDE_NONE, SDE_NONE, SDE_NONE, SDE_NONE}, + { INTF_0, SDE_NONE, SDE_NONE, SDE_NONE}, + { INTF_1, LM_0, PINGPONG_0, CTL_0}, + { INTF_2, LM_1, PINGPONG_1, CTL_1}, + { INTF_3, SDE_NONE, SDE_NONE, CTL_2}, +}; + + #define DEFAULT_MDP_SRC_CLK 200000000 int sde_disable(struct sde_kms *sde_kms) { DBG(""); - clk_disable_unprepare(sde_kms->ahb_clk); - clk_disable_unprepare(sde_kms->axi_clk); - clk_disable_unprepare(sde_kms->core_clk); - if (sde_kms->lut_clk) - clk_disable_unprepare(sde_kms->lut_clk); - return 0; } @@ -64,8 +67,9 @@ static void sde_complete_commit(struct msm_kms *kms, } static void sde_wait_for_crtc_commit_done(struct msm_kms *kms, - struct drm_crtc *crtc) + struct drm_crtc *crtc) { + sde_crtc_wait_for_commit_done(crtc); } static int modeset_init(struct sde_kms *sde_kms) { @@ -455,6 +459,7 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) clk_set_rate(sde_kms->src_clk, DEFAULT_MDP_SRC_CLK); sde_enable(sde_kms); + sde_kms->hw_res.res_table = res_table; /* * Now we need to read the HW catalog and initialize resources such as @@ -479,9 +484,7 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth; dev->mode_config.max_height = 4096; - sde_enable(sde_kms); - sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog); - sde_disable(sde_kms); + sde_kms->hw_intr = sde_rm_acquire_intr(sde_kms); if (IS_ERR_OR_NULL(sde_kms->hw_intr)) goto fail; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 5f1a52c52641..7ae00110633d 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -17,7 +17,8 @@ 
#include "msm_kms.h" #include "mdp/mdp_kms.h" #include "sde_hw_catalog.h" -#include "sde_hw_mdss.h" +#include "sde_hw_mdp_ctl.h" +#include "sde_hw_lm.h" #include "sde_hw_interrupts.h" /* @@ -42,6 +43,38 @@ struct sde_irq { spinlock_t cb_lock; }; +/** + * struct sde_hw_res_map : Default resource table identifying default + * hw resource map. Primarily used for forcing DSI to use CTL_0/1 + * and Pingpong 0/1, if the field is set to SDE_NONE means any HW + * intstance for that tpye is allowed as long as it is unused. + */ +struct sde_hw_res_map { + enum sde_intf intf; + enum sde_lm lm; + enum sde_pingpong pp; + enum sde_ctl ctl; +}; + +/* struct sde_hw_resource_manager : Resource mananger maintains the current + * platform configuration and manages shared + * hw resources ex:ctl_path hw driver context + * is needed by CRTCs/PLANEs/ENCODERs + * @ctl : table of control path hw driver contexts allocated + * @mixer : list of mixer hw drivers contexts allocated + * @intr : pointer to hw interrupt context + * @res_table : pointer to default hw_res table for this platform + * @feature_map :BIT map for default enabled features ex:specifies if PP_SPLIT + * is enabled/disabled by defalt for this platform + */ +struct sde_hw_resource_manager { + struct sde_hw_ctl *ctl[CTL_MAX]; + struct sde_hw_mixer *mixer[LM_MAX]; + struct sde_hw_intr *intr; + const struct sde_hw_res_map *res_table; + bool feature_map; +}; + struct sde_kms { struct msm_kms base; struct drm_device *dev; @@ -74,6 +107,7 @@ struct sde_kms { struct sde_hw_intr *hw_intr; struct sde_irq irq_obj; + struct sde_hw_resource_manager hw_res; }; struct vsync_info { @@ -108,6 +142,36 @@ struct sde_plane_state { int sde_disable(struct sde_kms *sde_kms); int sde_enable(struct sde_kms *sde_kms); +/** + * HW resource manager functions + * @sde_rm_acquire_ctl_path : Allocates control path + * @sde_rm_get_ctl_path : returns control path driver context for already + * acquired ctl path + * @sde_rm_release_ctl_path : Frees control 
path driver context + * @sde_rm_acquire_mixer : Allocates mixer hw driver context + * @sde_rm_get_mixer : returns mixer context for already + * acquired mixer + * @sde_rm_release_mixer : Frees mixer hw driver context + * @sde_rm_get_res_map : Returns map for the passed INTF + */ +struct sde_hw_ctl *sde_rm_acquire_ctl_path(struct sde_kms *sde_kms, + enum sde_ctl idx); +struct sde_hw_ctl *sde_rm_get_ctl_path(struct sde_kms *sde_kms, + enum sde_ctl idx); +void sde_rm_release_ctl_path(struct sde_kms *sde_kms, + enum sde_ctl idx); +struct sde_hw_mixer *sde_rm_acquire_mixer(struct sde_kms *sde_kms, + enum sde_lm idx); +struct sde_hw_mixer *sde_rm_get_mixer(struct sde_kms *sde_kms, + enum sde_lm idx); +void sde_rm_release_mixer(struct sde_kms *sde_kms, + enum sde_lm idx); +struct sde_hw_intr *sde_rm_acquire_intr(struct sde_kms *sde_kms); +struct sde_hw_intr *sde_rm_get_intr(struct sde_kms *sde_kms); + +const struct sde_hw_res_map *sde_rm_get_res_map(struct sde_kms *sde_kms, + enum sde_intf idx); + /** * IRQ functions */ @@ -200,31 +264,41 @@ void sde_disable_all_irqs(struct sde_kms *sde_kms); int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +/** + * Plane functions + */ enum sde_sspp sde_plane_pipe(struct drm_plane *plane); struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, bool private_plane); +/** + * CRTC functions + */ uint32_t sde_crtc_vblank(struct drm_crtc *crtc); - +void sde_crtc_wait_for_commit_done(struct drm_crtc *crtc); void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); -void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane); -void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane); struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_encoder *encoder, struct drm_plane *plane, int id); +/** + * Encoder functions and data types + */ struct sde_encoder_hw_resources { - bool
intfs[INTF_MAX]; + enum sde_intf_mode intfs[INTF_MAX]; bool pingpongs[PINGPONG_MAX]; + bool ctls[CTL_MAX]; + bool pingpongsplit; }; + void sde_encoder_get_hw_resources(struct drm_encoder *encoder, struct sde_encoder_hw_resources *hw_res); void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, void (*cb)(void *), void *data); void sde_encoders_init(struct drm_device *dev); +void sde_encoder_get_vsync_info(struct drm_encoder *encoder, + struct vsync_info *vsync); -int sde_irq_domain_init(struct sde_kms *sde_kms); -int sde_irq_domain_fini(struct sde_kms *sde_kms); #endif /* __sde_kms_H__ */ diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c new file mode 100644 index 000000000000..9d6f28cfc06c --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c @@ -0,0 +1,173 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sde_kms.h" +#include "sde_hw_lm.h" +#include "sde_hw_mdp_ctl.h" + +struct sde_hw_intr *sde_rm_acquire_intr(struct sde_kms *sde_kms) +{ + struct sde_hw_intr *hw_intr; + + if (!sde_kms) { + DRM_ERROR("Invalid KMS Driver"); + return ERR_PTR(-EINVAL); + } + + if (sde_kms->hw_res.intr) { + DRM_ERROR("intr already in use "); + return ERR_PTR(-ENODEV); + } + + sde_enable(sde_kms); + hw_intr = sde_hw_intr_init(sde_kms->mmio, + sde_kms->catalog); + sde_disable(sde_kms); + + if (!IS_ERR_OR_NULL(hw_intr)) + sde_kms->hw_res.intr = hw_intr; + + return hw_intr; +} + +struct sde_hw_intr *sde_rm_get_intr(struct sde_kms *sde_kms) +{ + if (!sde_kms) { + DRM_ERROR("Invalid KMS Driver"); + return ERR_PTR(-EINVAL); + } + + return sde_kms->hw_res.intr; +} + +struct sde_hw_ctl *sde_rm_acquire_ctl_path(struct sde_kms *sde_kms, + enum sde_ctl idx) +{ + struct sde_hw_ctl *hw_ctl; + + if (!sde_kms) { + DRM_ERROR("Invalid KMS driver"); + return ERR_PTR(-EINVAL); + } + + if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) { + DRM_ERROR("Invalid Ctl Path Idx %d", idx); + return ERR_PTR(-EINVAL); + } + + if (sde_kms->hw_res.ctl[idx]) { + DRM_ERROR("CTL path %d already in use ", idx); + return ERR_PTR(-ENODEV); + } + + sde_enable(sde_kms); + hw_ctl = sde_hw_ctl_init(idx, sde_kms->mmio, sde_kms->catalog); + sde_disable(sde_kms); + + if (!IS_ERR_OR_NULL(hw_ctl)) + sde_kms->hw_res.ctl[idx] = hw_ctl; + + return hw_ctl; +} + +struct sde_hw_ctl *sde_rm_get_ctl_path(struct sde_kms *sde_kms, + enum sde_ctl idx) +{ + if (!sde_kms) { + DRM_ERROR("Invalid KMS Driver"); + return ERR_PTR(-EINVAL); + } + if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) { + DRM_ERROR("Invalid Ctl path Idx %d", idx); + return ERR_PTR(-EINVAL); + } + + return sde_kms->hw_res.ctl[idx]; +} + +void sde_rm_release_ctl_path(struct sde_kms *sde_kms, enum sde_ctl idx) +{ + if (!sde_kms) { + DRM_ERROR("Invalid pointer\n"); + return; + } + if ((idx == SDE_NONE) || (idx > 
sde_kms->catalog->ctl_count)) { + DRM_ERROR("Invalid Ctl path Idx %d", idx); + return; + } +} + +struct sde_hw_mixer *sde_rm_acquire_mixer(struct sde_kms *sde_kms, + enum sde_lm idx) +{ + struct sde_hw_mixer *mixer; + + if (!sde_kms) { + DRM_ERROR("Invalid KMS Driver"); + return ERR_PTR(-EINVAL); + } + + if ((idx == SDE_NONE) || (idx > sde_kms->catalog->mixer_count)) { + DBG("Invalid mixer id %d", idx); + return ERR_PTR(-EINVAL); + } + + if (sde_kms->hw_res.mixer[idx]) { + DRM_ERROR("mixer %d already in use ", idx); + return ERR_PTR(-ENODEV); + } + + sde_enable(sde_kms); + mixer = sde_hw_lm_init(idx, sde_kms->mmio, sde_kms->catalog); + sde_disable(sde_kms); + + if (!IS_ERR_OR_NULL(mixer)) + sde_kms->hw_res.mixer[idx] = mixer; + + return mixer; +} + +struct sde_hw_mixer *sde_rm_get_mixer(struct sde_kms *sde_kms, + enum sde_lm idx) +{ + if (!sde_kms) { + DRM_ERROR("Invalid KMS Driver"); + return ERR_PTR(-EINVAL); + } + + if ((idx == SDE_NONE) || (idx > sde_kms->catalog->mixer_count)) { + DRM_ERROR("Invalid mixer id %d", idx); + return ERR_PTR(-EINVAL); + } + + return sde_kms->hw_res.mixer[idx]; +} + +const struct sde_hw_res_map *sde_rm_get_res_map(struct sde_kms *sde_kms, + enum sde_intf idx) +{ + if (!sde_kms) { + DRM_ERROR("Invalid KMS Driver"); + return ERR_PTR(-EINVAL); + } + if ((idx == SDE_NONE) || (idx > sde_kms->catalog->intf_count)) { + DRM_ERROR("Invalid intf id %d", idx); + return ERR_PTR(-EINVAL); + } + + DBG(" Platform Resource map for INTF %d -> lm %d, pp %d ctl %d", + sde_kms->hw_res.res_table[idx].intf, + sde_kms->hw_res.res_table[idx].lm, + sde_kms->hw_res.res_table[idx].pp, + sde_kms->hw_res.res_table[idx].ctl); + return &(sde_kms->hw_res.res_table[idx]); +} -- GitLab From 989b6c672e98c8c9c217bcc88ac291f7c6c7f2b0 Mon Sep 17 00:00:00 2001 From: Ajay Singh Parmar Date: Wed, 22 Jun 2016 17:31:21 -0700 Subject: [PATCH 017/310] drm/msm/dsi-staging: add dsi controller driver Add dsi controller driver. 
Initialize and implement hw modules, memory map, interrupt handling, check and set states, dsi message send and receive, low power mode and driver initialization. Change-Id: I9905e775ed73ba8dc8b57c717d32c4383b51d361 Signed-off-by: Ajay Singh Parmar --- .../devicetree/bindings/drm/msm/sde-dsi.txt | 63 + drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c | 2170 +++++++++++++++++ drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h | 471 ++++ 3 files changed, 2704 insertions(+) create mode 100644 Documentation/devicetree/bindings/drm/msm/sde-dsi.txt create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c create mode 100644 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt new file mode 100644 index 000000000000..e435df862d26 --- /dev/null +++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt @@ -0,0 +1,63 @@ +Qualcomm Technologies, Inc. + +mdss-dsi is the master DSI device which supports multiple DSI host controllers +that are compatible with MIPI display serial interface specification. + +DSI Controller: +Required properties: +- compatible: Should be "qcom,dsi-ctrl-hw-v". Supported + versions include 1.4 and 2.0. + eg: qcom,dsi-ctrl-hw-v1.4, qcom,dsi-ctrl-hw-v2.0 +- reg: Base address and length of DSI controller's memory + mapped regions. +- reg-names: A list of strings that name the list of regs. + "dsi_ctrl" - DSI controller memory region. + "mmss_misc" - MMSS misc memory region. +- cell-index: Specifies the controller instance. +- clocks: Clocks required for DSI controller operation. +- clock-names: Names of the clocks corresponding to handles. Following + clocks are required: + "mdp_core_clk" + "iface_clk" + "core_mmss_clk" + "bus_clk" + "byte_clk" + "pixel_clk" + "core_clk" + "byte_clk_rcg" + "pixel_clk_rcg" +- gdsc-supply: phandle to gdsc regulator node. +- vdda-supply: phandle to vdda regulator node. +- vcca-supply: phandle to vcca regulator node. 
+- interrupt-parent phandle to the interrupt parent device node. +- interrupts: The interrupt signal from the DSI block. + +Bus Scaling Data: +- qcom,msm-bus,name: String property describing MDSS client. +- qcom,msm-bus,num-cases: This is the number of bus scaling use cases + defined in the vectors property. This must be + set to <2> for MDSS DSI driver where use-case 0 + is used to remove BW votes from the system. Use + case 1 is used to generate bandwidth requestes + when sending command packets. +- qcom,msm-bus,num-paths: This represents number of paths in each bus + scaling usecase. This value depends on number of + AXI master ports dedicated to MDSS for + particular chipset. +- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, with a format + of (src, dst, ab, ib) which is defined at + Documentation/devicetree/bindings/arm/msm/msm_bus.txt. + DSI driver should always set average bandwidth + (ab) to 0 and always use instantaneous + bandwidth(ib) values. + +Optional properties: +- label: String to describe controller. +- qcom,platform-te-gpio: Specifies the gpio used for TE. +- qcom,-supply-entries: A node that lists the elements of the supply used by the + a particular "type" of DSI module. The module "types" + can be "core", "ctrl", and "phy". Within the same type, + there can be more than one instance of this binding, + in which case the entry would be appended with the + supply entry index. + diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c new file mode 100644 index 000000000000..a282fce38354 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -0,0 +1,2170 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "dsi-ctrl:[%s] " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include