diff --git a/Documentation/devicetree/bindings/fb/adv7533.txt b/Documentation/devicetree/bindings/fb/adv7533.txt new file mode 100644 index 0000000000000000000000000000000000000000..b198f37f8fc610ad8bb77c7110fed95008e85cdc --- /dev/null +++ b/Documentation/devicetree/bindings/fb/adv7533.txt @@ -0,0 +1,54 @@ +ADV7533 DSI to HDMI bridge + + +Required properties: +- compatible: Must be "adv7533" +- reg: Main I2C slave ID (for I2C host driver) +- adi,video-mode: Excepted a number and possible inputs are 0 to 3, while: + 3 = 1080p + 2 = 720p + 1 = 480p + 0 = 1080p pattern +- adi,main-addr: Main I2C slave ID +- adi,cec-dsi-addr: CEC DSI I2C slave ID + +Optional properties: +- adi,enable-audio: +- adi,disable-gpios: +- adi,irq-gpio: Main IRQ gpio mapping +- adi,hpd-irq-gpio: HPD IRQ gpio mapping +- adi,switch-gpio: DSI switch gpio mapping +- qcom,supply-names: Regulator names that supply 5v to bridge chip +- qcom,min-voltage-level Minimum voltage level to be supplied to bridge chip +- qcom,max-voltage-level Maximum voltage level to be supplied to bridge chip +- qcom,enable-load Load current to bridge chip when enabled +- qcom,disable-load Load current to bridge chip when disabled +- qcom,post-on-sleep Sleep time (ms) to indicate the sleep + time after the vreg is enabled + +Example: +&soc { + i2c@78b8000 { + adv7533@39 { + compatible = "adv7533"; + reg = <0x39>; + adi,video-mode = <3>; /* 3 = 1080p */ + adi,main-addr = <0x39>; + adi,cec-dsi-addr = <0x3C>; + adi,enable-audio; + pinctrl-names = "pmx_adv7533_active","pmx_adv7533_suspend"; + pinctrl-0 = <&adv7533_int_active &adv7533_hpd_int_active &adv7533_switch_active>; + pinctrl-1 = <&adv7533_int_suspend &adv7533_hpd_int_suspend &adv7533_switch_suspend>; + adi,irq-gpio = <&msm_gpio 31 0x2002>; + adi,hpd-irq-gpio = <&msm_gpio 20 0x2003>; + adi,switch-gpio = <&msm_gpio 32 0x0>; + hpd-5v-en-supply = <&adv_vreg>; + qcom,supply-names = "hpd-5v-en"; + qcom,min-voltage-level = <0>; + qcom,max-voltage-level = <0>; + qcom,enable-load = <0>; + qcom,disable-load = <0>; + qcom,post-on-sleep = <10>; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/fb/lt8912.txt b/Documentation/devicetree/bindings/fb/lt8912.txt new file mode 100644 index 0000000000000000000000000000000000000000..daeb15fe3ab555c2de51d733487b84ce98957391 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/lt8912.txt @@ -0,0 +1,20 @@ +LT8912 DSI to HDMI bridge + + +Required properties: +- compatible: Must be "lontium,lt8912" +- reg: Main I2C slave ID (for I2C host driver) + +Optional properties: +- qcom,hdmi-reset: Main reset gpio mapping + +Example: +&soc { + i2c@78b8000 { + lt8912@48 { + compatible = "lontium,lt8912"; + reg = <0x48>; + qcom,hdmi-reset = <&tlmm 64 0x0>; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt index 608b4260a0ab6182c568a3f2196c358ce9efef6d..78566c9b9b36883710f1ae32ac307c00a3dbd0ab 100644 --- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt +++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt @@ -1,4 +1,4 @@ -Qualcomm mdss-dsi-panel +Qualcomm Technologies, Inc. mdss-dsi-panel mdss-dsi-panel is a dsi panel device which supports panels that are compatible with MIPI display serial interface specification. @@ -12,6 +12,7 @@ Required properties: This property specifies the version for DSI HW that this panel will work with "qcom,dsi-panel-v2" = DSI V2.0 + "qcom,msm-dsi-v2" = DSI V2.0 - status: This property applies to DSI V2 panels only. 
This property should not be added for panels that work based on version "V6.0" @@ -37,8 +38,10 @@ Required properties: "display_2" = DISPLAY_2 - qcom,mdss-dsi-panel-timings: An array of length 12 that specifies the PHY timing settings for the panel. -- qcom,mdss-dsi-panel-timings-8996: An array of length 40 char that specifies the 8996 PHY lane - timing settings for the panel. +- qcom,mdss-dsi-panel-timings-phy-v2: An array of length 40 char that specifies the PHY version 2 + lane timing settings for the panel. +- qcom,mdss-dsi-panel-timings-phy-12nm: An array of length 8 char that specifies the 12nm DSI PHY + lane timing settings for the panel. - qcom,mdss-dsi-on-command: A byte stream formed by multiple dcs packets base on qcom dsi controller protocol. byte 0: dcs data type @@ -61,9 +64,39 @@ Required properties: transmitted byte 5, 6: 16 bits length in network byte order byte 7 and beyond: number byte of payload +- qcom,mdss-dsi-lp-mode-on: This is used to enable display low persistence mode. + A byte stream formed by multiple dcs packets base on + qcom dsi controller protocol. + byte 0: dcs data type + byte 1: set to indicate this is an individual packet + (no chain) + byte 2: virtual channel number + byte 3: expect ack from client (dcs read command) + byte 4: wait number of specified ms after dcs command + transmitted + byte 5, 6: 16 bits length in network byte order + byte 7 and beyond: number byte of payload +- qcom,mdss-dsi-lp-mode-off: This is used to disable display low persistence mode. + A byte stream formed by multiple dcs packets base on + qcom dsi controller protocol. + byte 0: dcs data type + byte 1: set to indicate this is an individual packet + (no chain) + byte 2: virtual channel number + byte 3: expect ack from client (dcs read command) + byte 4: wait number of specified ms after dcs command + transmitted + byte 5, 6: 16 bits length in network byte order + byte 7 and beyond: number byte of payload - qcom,mdss-dsi-post-panel-on-command: same as "qcom,mdss-dsi-on-command" except commands are sent after displaying an image. +- qcom,mdss-dsi-idle-on-command: same as "qcom,mdss-dsi-on-command". Set of DCS command + used for idle mode entry. + +- qcom,mdss-dsi-idle-off-command: same as "qcom,mdss-dsi-on-command". Set of DCS command + used for idle mode exit. + Note, if a short DCS packet(i.e packet with Byte 0:dcs data type as 05) mentioned in qcom,mdss-dsi-on-command/qcom,mdss-dsi-off-command stream fails to transmit, then 3 options can be tried. @@ -248,6 +281,35 @@ Optional properties: 60 = 60 frames per second (default) - qcom,mdss-dsi-panel-clockrate: A 64 bit value specifies the panel clock speed in Hz. 0 = default value. +- qcom,mdss-mdp-kickoff-threshold: This property can be used to define a region + (in terms of scanlines) where the +hardware is allowed + to trigger a data transfer from MDP to DSI. + If this property is used, the region must be defined setting + two values, the low and the high thresholds: + + Where following condition must be met: + low_threshold < high_threshold + These values will be used by the driver in such way that if + the Driver receives a request to kickoff a transfer (MDP to DSI), + the transfer will be triggered only if the following condition + is satisfied: + low_threshold < scanline < high_threshold + If the condition is not met, then the driver will delay the + transfer by the time defined in the following property: + "qcom,mdss-mdp-kickoff-delay". 
+ So in order to use this property, the delay property must + be defined as well and greater than 0. +- qcom,mdss-mdp-kickoff-delay: This property defines the delay in microseconds that + the driver will delay before triggering an MDP transfer if the + thresholds defined by the following property are not met: + "qcom,mdss-mdp-kickoff-threshold". + So in order to use this property, the threshold property must + be defined as well. Note that this delay cannot be zero + and also should not be greater than +the fps window. + i.e. For 60fps value should not exceed +16666 uS. - qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode panels in microseconds. Driver uses this number to adjust the clock rate according to the expected transfer time. @@ -275,14 +337,10 @@ Optional properties: to the physical width in the framebuffer information. - qcom,mdss-pan-physical-height-dimension: Specifies panel physical height in mm which corresponds to the physical height in the framebuffer information. -- qcom,mdss-dsi-mode-sel-gpio-state: String that specifies the lcd mode for panel - (such as single-port/dual-port), if qcom,panel-mode-gpio - binding is defined in dsi controller. - "dual_port" = Set GPIO to LOW - "single_port" = Set GPIO to HIGH +- qcom,mdss-dsi-panel-mode-gpio-state: String that specifies the mode state for panel if it is defined + in dsi controller. "high" = Set GPIO to HIGH "low" = Set GPIO to LOW - The default value is "dual_port". - qcom,mdss-tear-check-disable: Boolean to disable mdp tear check. Tear check is enabled by default to avoid tearing. Other tear-check properties are ignored if this property is present. The below tear check configuration properties can be individually tuned if @@ -330,6 +388,28 @@ Optional properties: 2A/2B command. - qcom,dcs-cmd-by-left: Boolean to indicate that dcs command are sent through the left DSI controller only in a dual-dsi configuration +- qcom,mdss-dsi-panel-hdr-enabled: Boolean to indicate HDR support in panel. +- qcom,mdss-dsi-panel-hdr-color-primaries: + Array of 8 unsigned integers denoting chromaticity of panel.These + values are specified in nits units. The value range is 0 through 50000. + To obtain real chromacity, these values should be divided by factor of + 50000. The structure of array is defined in below order + value 1: x value of white chromaticity of display panel + value 2: y value of white chromaticity of display panel + value 3: x value of red chromaticity of display panel + value 4: y value of red chromaticity of display panel + value 5: x value of green chromaticity of display panel + value 6: y value of green chromaticity of display panel + value 7: x value of blue chromaticity of display panel + value 8: y value of blue chromaticity of display panel +- qcom,mdss-dsi-panel-peak-brightness: Maximum brightness supported by panel.In absence of maximum value + typical value becomes peak brightness. Value is specified in nits units. + To obtail real peak brightness, this value should be divided by factor of + 10000. +- qcom,mdss-dsi-panel-blackness-level: Blackness level supported by panel. Blackness level is defined as + ratio of peak brightness to contrast. Value is specified in nits units. + To obtail real blackness level, this value should be divided by factor of + 10000. - qcom,mdss-dsi-lp11-init: Boolean used to enable the DSI clocks and data lanes (low power 11) before issuing hardware reset line. 
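Note, the HDR properties above are easier to read with a worked example. The
sketch below assumes a hypothetical panel with BT.709 primaries, a D65 white
point, 420 nits peak brightness and a 1000:1 contrast ratio; the chromaticity
coordinates are scaled by 50000 and the luminance values by 10000, as described
above:

	qcom,mdss-dsi-panel-hdr-enabled;
	qcom,mdss-dsi-panel-hdr-color-primaries = <15635 16450	/* white x, y (0.3127, 0.3290) */
						   32000 16500	/* red   x, y (0.640, 0.330) */
						   15000 30000	/* green x, y (0.300, 0.600) */
						   7500  3000>;	/* blue  x, y (0.150, 0.060) */
	qcom,mdss-dsi-panel-peak-brightness = <4200000>;	/* 420 nits */
	qcom,mdss-dsi-panel-blackness-level = <4200>;		/* 0.42 nits = 420 nits / 1000 */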
- qcom,mdss-dsi-init-delay-us: Delay in microseconds(us) before performing any DSI activity in lp11 @@ -424,7 +504,11 @@ Optional properties: fields in the supply entry, refer to the qcom,ctrl-supply-entries binding above. - qcom,config-select: Optional property to select default configuration. - +- qcom,panel-allow-phy-poweroff: A boolean property indicates that panel allows to turn off the phy power + supply during idle screen. A panel should able to handle the dsi lanes + in floating state(not LP00 or LP11) to turn on this property. Software + turns off PHY pmic power supply, phy ldo and DSI Lane ldo during + idle screen (footswitch control off) when this property is enabled. [[Optional config sub-nodes]] These subnodes provide different configurations for a given same panel. Default configuration can be chosen by specifying phandle of the selected subnode in the qcom,config-select. @@ -471,6 +555,7 @@ Optional properites: to a non-DSI interface. - qcom,bridge-name: A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name is required if qcom,dba-panel is defined for the panel. +- qcom,hdmi-mode: Indicates where current panel is HDMI mode, otherwise, it will be DVI mode. - qcom,adjust-timer-wakeup-ms: An integer value to indicate the timer delay(in ms) to accommodate s/w delay while configuring the event timer wakeup logic. @@ -493,6 +578,8 @@ Additional properties added to the second level nodes that represent timings pro Note, if a given optional qcom,* binding is not present, then the driver will configure the default values specified. +Note, all the "qcom,supply-*" properties have their definitions in mdss-dsi-txt. + Example: &mdss_mdp { dsi_sim_vid: qcom,mdss_dsi_sim_video { @@ -538,7 +625,6 @@ Example: qcom,mdss-dsi-underflow-color = <0xff>; qcom,mdss-dsi-bl-min-level = <1>; qcom,mdss-dsi-bl-max-level = < 15>; - qcom,mdss-brightness-max-level = <255>; qcom,mdss-dsi-interleave-mode = <0>; qcom,mdss-dsi-panel-type = "dsi_video_mode"; qcom,mdss-dsi-te-check-enable; @@ -568,19 +654,26 @@ Example: qcom,mdss-dsi-dma-trigger = <0>; qcom,mdss-dsi-panel-framerate = <60>; qcom,mdss-dsi-panel-clockrate = <424000000>; + qcom,mdss-mdp-kickoff-threshold = <11 2430>; + qcom,mdss-mdp-kickoff-delay = <1000>; qcom,mdss-mdp-transfer-time-us = <12500>; qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33 22 27 1e 03 04 00]; - qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0 + qcom,mdss-dsi-panel-timings-phy-v2 = [23 20 06 09 05 03 04 a0 23 20 06 09 05 03 04 a0 23 20 06 09 05 03 04 a0 23 20 06 09 05 03 04 a0 23 2e 06 08 05 03 04 a0]; + qcom,mdss-dsi-panel-timings-phy-12nm = + [a9 4e 56 0b 8a 4d 0b d6]; qcom,mdss-dsi-on-command = [32 01 00 00 00 00 02 00 00 29 01 00 00 10 00 02 FF 99]; qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; qcom,mdss-dsi-off-command = [22 01 00 00 00 00 00]; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-lp-mode-on = [32 01 00 00 00 00 02 00 00 + 29 01 00 00 10 00 02 FF 99]; + qcom,mdss-dsi-lp-mode-off = [22 01 00 00 00 00 00]; qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; qcom,mdss-dsi-pan-enable-dynamic-fps; qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode"; @@ -592,7 +685,7 @@ Example: qcom,5v-boost-gpio = <&pm8994_gpios 14 0>; qcom,mdss-pan-physical-width-dimension = <60>; qcom,mdss-pan-physical-height-dimension = <140>; - qcom,mdss-dsi-mode-sel-gpio-state = "dsc_mode"; + qcom,mdss-dsi-panel-mode-gpio-state = "low"; qcom,mdss-tear-check-sync-cfg-height = <0xfff0>; qcom,mdss-tear-check-sync-init-val = <1280>; 
qcom,mdss-tear-check-sync-threshold-start = <4>; @@ -611,6 +704,7 @@ Example: qcom,suspend-ulps-enabled; qcom,panel-roi-alignment = <4 4 2 2 20 20>; qcom,esd-check-enabled; + qcom,panel-allow-phy-poweroff; qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 02 0A 08]; qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode"; qcom,mdss-dsi-panel-status-check-mode = "reg_read"; @@ -682,6 +776,7 @@ Example: qcom,supply-max-voltage = <2800000>; qcom,supply-enable-load = <100000>; qcom,supply-disable-load = <100>; + qcom,supply-ulp-load = <100>; qcom,supply-pre-on-sleep = <0>; qcom,supply-post-on-sleep = <0>; qcom,supply-pre-off-sleep = <0>; @@ -695,6 +790,7 @@ Example: qcom,supply-max-voltage = <1800000>; qcom,supply-enable-load = <100000>; qcom,supply-disable-load = <100>; + qcom,supply-ulp-load = <100>; qcom,supply-pre-on-sleep = <0>; qcom,supply-post-on-sleep = <0>; qcom,supply-pre-off-sleep = <0>; diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi.txt b/Documentation/devicetree/bindings/fb/mdss-dsi.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b593a97ef0d80dc4fb9785bd96c89cc2f1dd8ba --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-dsi.txt @@ -0,0 +1,261 @@ +Qualcomm Technologies, Inc. mdss-dsi + +mdss-dsi is the master DSI device which supports multiple DSI host controllers that +are compatible with MIPI display serial interface specification. + +Required properties: +- compatible: Must be "qcom,mdss-dsi" +- hw-config: Specifies the DSI host setup configuration + "hw-config" = "single_dsi" + "hw-config" = "dual_dsi" + "hw-config" = "split_dsi" +- ranges: The standard property which specifies the child address + space, parent address space and the length. +- vdda-supply: Phandle for vreg regulator device node. + +Bus Scaling Data: +- qcom,msm-bus,name: String property describing MDSS client. +- qcom, msm-bus,num-cases: This is the number of bus scaling use cases + defined in the vectors property. This must be + set to <2> for MDSS DSI driver where use-case 0 + is used to remove BW votes from the system. Use + case 1 is used to generate bandwidth requestes + when sending command packets. +- qcom,msm-bus,num-paths: This represents number of paths in each bus + scaling usecase. This value depends on number of + AXI master ports dedicated to MDSS for + particular chipset. +- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, with a format + of (src, dst, ab, ib) which is defined at + Documentation/devicetree/bindings/arm/msm/msm_bus.txt. + DSI driver should always set average bandwidth + (ab) to 0 and always use instantaneous + bandwidth(ib) values. + +Optional properties: +- vcca-supply: Phandle for vcca regulator device node. +- qcom,-supply-entries: A node that lists the elements of the supply used by the + a particular "type" of DSI modulee. The module "types" + can be "core", "ctrl", and "phy". Within the same type, + there can be more than one instance of this binding, + in which case the entry would be appended with the + supply entry index. + e.g. 
qcom,ctrl-supply-entry@0 + -- qcom,supply-name: name of the supply (vdd/vdda/vddio) + -- qcom,supply-min-voltage: minimum voltage level (uV) + -- qcom,supply-max-voltage: maximum voltage level (uV) + -- qcom,supply-enable-load: load drawn (uA) from enabled supply + -- qcom,supply-disable-load: load drawn (uA) from disabled supply + -- qcom,supply-ulp-load: load drawn (uA) from supply in ultra-low power mode + -- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on + -- qcom,supply-post-on-sleep: time to sleep (ms) after turning on + -- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off + -- qcom,supply-post-off-sleep: time to sleep (ms) after turning off +- pll-src-config Specified the source PLL for the DSI + link clocks: + "PLL0" - Clocks sourced out of DSI PLL0 + "PLL1" - Clocks sourced out of DSI PLL1 + This property is only valid for + certain DSI hardware configurations + mentioned in the "hw-config" binding above. + For example, in split_dsi config, the clocks can + only be sourced out of PLL0. For + dual_dsi, both PLL would be active. + For single DSI, it is possible to + select either PLL. If no value is specified, + the default value for single DSI is set as PLL0. +- qcom,mmss-ulp-clamp-ctrl-offset: Specifies the offset for dsi ulps clamp control register. +- qcom,mmss-phyreset-ctrl-offset: Specifies the offset for dsi phy reset control register. +- qcom,dsi-clk-ln-recovery: Boolean which enables the clk lane recovery + +mdss-dsi-ctrl is a dsi controller device which is treated as a subnode of the mdss-dsi device. + +Required properties: +- compatible: Must be "qcom,mdss-dsi-ctrl" +- cell-index: Specifies the controller used among the two controllers. +- reg: Base address and length of the different register + regions(s) required for DSI device functionality. +- reg-names: A list of strings that map in order to the list of regs. + "dsi_ctrl" - MDSS DSI controller register region + "dsi_phy" - MDSS DSI PHY register region + "dsi_phy_regulator" - MDSS DSI PHY REGULATOR region + "mmss_misc_phys" - Register region for MMSS DSI clamps +- vdd-supply: Phandle for vdd regulator device node. +- vddio-supply: Phandle for vdd-io regulator device node. +- qcom,mdss-fb-map-prim: pHandle that specifies the framebuffer to which the + primary interface is mapped. +- qcom,mdss-mdp: pHandle that specifies the mdss-mdp device. +- qcom,platform-regulator-settings: An array of length 7 or 5 that specifies the PHY + regulator settings. It use 5 bytes for 8996 pll. +- qcom,platform-strength-ctrl: An array of length 2 or 10 that specifies the PHY + strengthCtrl settings. It use 10 bytes for 8996 pll. +- qcom,platform-lane-config: An array of length 45 or 20 that specifies the PHY + lane configuration settings. It use 20 bytes for 8996 pll. +- qcom,platform-bist-ctrl: An array of length 6 that specifies the PHY + BIST ctrl settings. +- qcom,dsi-pref-prim-pan: phandle that specifies the primary panel to be used + with the controller. + +Optional properties: +- label: A string used to describe the controller used. +- qcom,mdss-fb-map: pHandle that specifies the framebuffer to which the + interface is mapped. +- qcom,mdss-fb-map-sec: pHandle that specifies the framebuffer to which the + secondary interface is mapped. +- qcom,platform-enable-gpio: Specifies the panel lcd/display enable gpio. +- qcom,platform-reset-gpio: Specifies the panel reset gpio. +- qcom,platform-te-gpio: Specifies the gpio used for TE. 
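Note, the "Bus Scaling Data" properties listed earlier in this file are not
shown in the example at the end. A minimal sketch for the parent mdss-dsi node
is given below; the master/slave port ids (22/512) and the ib value are purely
illustrative, the real ids are chipset specific and defined in
include/linux/msm-bus-board.h:

	qcom,msm-bus,name = "mdss_dsi";
	qcom,msm-bus,num-cases = <2>;
	qcom,msm-bus,num-paths = <1>;
	qcom,msm-bus,vectors-KBps =
		<22 512 0 0>,		/* use case 0: removes BW votes */
		<22 512 0 7000000>;	/* use case 1: ab kept at 0, ib vote for command packets */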
+- qcom,platform-bklight-en-gpio: Specifies the gpio used to enable display back-light +- qcom,platform-mode-gpio: Select video/command mode of panel through gpio when it supports + both modes. +- qcom,platform-intf-mux-gpio: Select dsi/external(hdmi) interface through gpio when it supports + either dsi or external interface. +- pinctrl-names: List of names to assign mdss pin states defined in pinctrl device node + Refer to pinctrl-bindings.txt +- pinctrl-<0..n>: Lists phandles each pointing to the pin configuration node within a pin + controller. These pin configurations are installed in the pinctrl + device node. Refer to pinctrl-bindings.txt +- qcom,regulator-ldo-mode: Boolean to enable ldo mode for the dsi phy regulator +- qcom,null-insertion-enabled: Boolean to enable NULL packet insertion + feature for DSI controller. +- qcom,dsi-irq-line: Boolean specifies if DSI has a different irq line than mdp. +- qcom,lane-map: Specifies the data lane swap configuration. + "lane_map_0123" = <0 1 2 3> (default value) + "lane_map_3012" = <3 0 1 2> + "lane_map_2301" = <2 3 0 1> + "lane_map_1230" = <1 2 3 0> + "lane_map_0321" = <0 3 2 1> + "lane_map_1032" = <1 0 3 2> + "lane_map_2103" = <2 1 0 3> + "lane_map_3210" = <3 2 1 0> +- qcom,pluggable Boolean to enable hotplug feature. +- qcom,timing-db-mode: Boolean specifies dsi timing mode registers are supported or not. +- qcom,display-id A string indicates the display ID for the controller. + The possible values are: + - "primary" + - "secondary" + - "tertiary" +- qcom,bridge-index: Instance id of the bridge chip connected to DSI. qcom,bridge-index is + required if a bridge chip panel is used. + +Example: + mdss_dsi: qcom,mdss_dsi@0 { + compatible = "qcom,mdss-dsi"; + hw-config = "single_dsi"; + pll-src-config = "PLL0"; + #address-cells = <1>; + #size-cells = <1>; + vdda-supply = <&pm8226_l4>; + vcca-supply = <&pm8226_l28>; + reg = <0x1a98000 0x1a98000 0x25c + 0x1a98500 0x1a98500 0x280 + 0x1a98780 0x1a98780 0x30 + 0x193e000 0x193e000 0x30>; + + qcom,dsi-clk-ln-recovery; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + qcom,supply-ulp-load = <0>; + qcom,supply-pre-on-sleep = <0>; + qcom,supply-post-on-sleep = <0>; + qcom,supply-pre-off-sleep = <0>; + qcom,supply-post-off-sleep = <0>; + }; + }; + + qcom,phy-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,phy-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vddio"; + qcom,supply-min-voltage = <1800000>; + qcom,supply-max-voltage = <1800000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + qcom,supply-ulp-load = <100>; + qcom,supply-pre-on-sleep = <0>; + qcom,supply-post-on-sleep = <20>; + qcom,supply-pre-off-sleep = <0>; + qcom,supply-post-off-sleep = <0>; + }; + }; + + qcom,ctrl-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,ctrl-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda"; + qcom,supply-min-voltage = <1200000>; + qcom,supply-max-voltage = <1200000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + qcom,supply-ulp-load = <1000>; + qcom,supply-pre-on-sleep = <0>; + qcom,supply-post-on-sleep = <20>; + qcom,supply-pre-off-sleep = <0>; + qcom,supply-post-off-sleep = <0>; + }; + }; + + mdss_dsi0: mdss_dsi_ctrl0@fd922800 { + compatible = "qcom,mdss-dsi-ctrl"; + 
label = "MDSS DSI CTRL->0"; + cell-index = <0>; + reg = <0xfd922800 0x1f8>, + <0xfd922b00 0x2b0>, + <0xfd998780 0x30>, + <0xfd828000 0x108>; + reg-names = "dsi_ctrl", "dsi_phy", + "dsi_phy_regulator", "mmss_misc_phys"; + + vdd-supply = <&pm8226_l15>; + vddio-supply = <&pm8226_l8>; + qcom,mdss-fb-map-prim = <&mdss_fb0>; + qcom,mdss-mdp = <&mdss_mdp>; + + qcom,dsi-pref-prim-pan = <&dsi_tosh_720_vid>; + + qcom,platform-strength-ctrl = [ff 06]; + qcom,platform-bist-ctrl = [00 00 b1 ff 00 00]; + qcom,platform-regulator-settings = [07 09 03 00 20 00 01]; + qcom,platform-lane-config = [00 00 00 00 00 00 00 01 97 + 00 00 00 00 05 00 00 01 97 + 00 00 00 00 0a 00 00 01 97 + 00 00 00 00 0f 00 00 01 97 + 00 c0 00 00 00 00 00 01 bb]; + + qcom,mmss-ulp-clamp-ctrl-offset = <0x20>; + qcom,mmss-phyreset-ctrl-offset = <0x24>; + qcom,regulator-ldo-mode; + qcom,null-insertion-enabled; + qcom,timing-db-mode; + + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_dsi_active>; + pinctrl-1 = <&mdss_dsi_suspend>; + qcom,platform-reset-gpio = <&msmgpio 25 1>; + qcom,platform-te-gpio = <&msmgpio 24 0>; + qcom,platform-enable-gpio = <&msmgpio 58 1>; + qcom,platform-bklight-en-gpio = <&msmgpio 86 0>; + qcom,platform-mode-gpio = <&msmgpio 7 0>; + qcom,platform-intf-mux-gpio = <&tlmm 115 0>; + qcom,dsi-irq-line; + qcom,lane-map = "lane_map_3012"; + qcom,display-id = "primary"; + qcom,bridge-index = <00>; + }; + }; diff --git a/Documentation/devicetree/bindings/fb/mdss-edp.txt b/Documentation/devicetree/bindings/fb/mdss-edp.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d649e5a6a0eb4ee3e422ed900569b3e31520240 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-edp.txt @@ -0,0 +1,52 @@ +Qualcomm Technologies, Inc. MDSS EDP + +MDSS EDP is a edp driver which supports panels that are compatible with +VESA EDP display interface specification. + +When configuring the optional properties for external backlight, one should also +configure the gpio that drives the pwm to it. + +Required properties +- compatible : Must be "qcom,mdss-edp". +- reg : Offset and length of the register set for the + device. +- reg-names : Names to refer to register sets related to this + device +- vdda-supply : Phandle for vdd regulator device node. +- gpio-panel-en : GPIO for supplying power to panel and backlight + driver. +- gpio-lvl-en : GPIO to enable HPD be received by host. +- status : A string that has to be set to "okay/ok" to enable + the driver. By default this property will be set to + "disable". Will be set to "ok/okay" status for + specific platforms. +- qcom,mdss-fb-map: pHandle that specifies the framebuffer to which the + interface is mapped. +- gpio-panel-hpd : gpio pin use for edp hpd + +Optional properties +- qcom,panel-lpg-channel : LPG channel for backlight. +- qcom,panel-pwm-period : PWM period in microseconds. + + +Optional properties: +- qcom,mdss-brightness-max-level: Specifies the max brightness level supported. + 255 = default value. 
+ +Example: + mdss_edp: qcom,mdss_edp@fd923400 { + compatible = "qcom,mdss-edp"; + reg = <0xfd923400 0x700>, + <0xfd8c2000 0x1000>; + reg-names = "edp_base", "mmss_cc_base"; + vdda-supply = <&pm8941_l12>; + gpio-panel-en = <&msmgpio 58 0>; + gpio-lvl-en = <&msmgpio 91 0>; + qcom,panel-lpg-channel = <7>; /* LPG Channel 8 */ + qcom,panel-pwm-period = <53>; + status = "disable"; + qcom,mdss-fb-map = <&mdss_fb0>; + gpio-panel-hpd = <&msmgpio 102 0>; + }; + + diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt new file mode 100644 index 0000000000000000000000000000000000000000..5624321b47d68d19866f0c50f240ac59ebbfc7b9 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt @@ -0,0 +1,898 @@ +Qualcomm Technologies, Inc. MDSS MDP + +MDSS is Mobile Display SubSystem which implements Linux framebuffer APIs to +drive user interface to different panel interfaces. MDP driver is the core of +MDSS which manage all data paths to different panel interfaces. + +Required properties +- compatible : Must be "qcom,mdss_mdp" + - "qcom,mdss_mdp3" for mdp3 +- reg : offset and length of the register set for the device. +- reg-names : names to refer to register sets related to this device +- interrupts : Interrupt associated with MDSS. +- interrupt-controller: Mark the device node as an interrupt controller. + This is an empty, boolean property. +- #interrupt-cells: Should be one. The first cell is interrupt number. +- vdd-supply : Phandle for vdd regulator device node. +- qcom,max-clk-rate: Specify maximum MDP core clock rate in hz that this + device supports. +- qcom,mdss-pipe-vig-off: Array of offset for MDP source surface pipes of + type VIG, the offsets are calculated from + register "mdp_phys" defined in reg property. + The number of offsets defined here should + reflect the amount of VIG pipes that can be + active in MDP for this configuration. +- qcom,mdss-pipe-vig-fetch-id: Array of shared memory pool fetch ids + corresponding to the VIG pipe offsets defined in + previous property, the amount of fetch ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-vig-off +- qcom,mdss-pipe-vig-xin-id: Array of VBIF clients ids (xins) corresponding + to the respective VIG pipes. Number of xin ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-vig-off +- qcom,mdss-pipe-vig-clk-ctrl-off: Array of offsets describing clk control + offsets for dynamic clock gating. 1st value + in the array represents offset of the control + register. 2nd value represents bit offset within + control register and 3rd value represents bit + offset within status register. Number of tuples + defined should match the number of offsets + defined in property: qcom,mdss-pipe-vig-off +- qcom,mdss-pipe-rgb-off: Array of offsets for MDP source surface pipes of + type RGB, the offsets are calculated from + register "mdp_phys" defined in reg property. + The number of offsets defined here should + reflect the amount of RGB pipes that can be + active in MDP for this configuration. +- qcom,mdss-pipe-rgb-fetch-id: Array of shared memory pool fetch ids + corresponding to the RGB pipe offsets defined in + previous property, the amount of fetch ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-rgb-off +- qcom,mdss-pipe-rgb-xin-id: Array of VBIF clients ids (xins) corresponding + to the respective RGB pipes. 
Number of xin ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-rgb-off +- qcom,mdss-pipe-rgb-clk-ctrl-off: Array of offsets describing clk control + offsets for dynamic clock gating. 1st value + in the array represents offset of the control + register. 2nd value represents bit offset within + control register and 3rd value represents bit + offset within status register. Number of tuples + defined should match the number of offsets + defined in property: qcom,mdss-pipe-rgb-off +- qcom,mdss-pipe-dma-off: Array of offsets for MDP source surface pipes of + type DMA, the offsets are calculated from + register "mdp_phys" defined in reg property. + The number of offsets defined here should + reflect the amount of DMA pipes that can be + active in MDP for this configuration. +- qcom,mdss-pipe-dma-fetch-id: Array of shared memory pool fetch ids + corresponding to the DMA pipe offsets defined in + previous property, the amount of fetch ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-dma-off +- qcom,mdss-pipe-dma-xin-id: Array of VBIF clients ids (xins) corresponding + to the respective DMA pipes. Number of xin ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-dma-off +- qcom,mdss-pipe-dma-clk-ctrl-off: Array of offsets describing clk control + offsets for dynamic clock gating. 1st value + in the array represents offset of the control + register. 2nd value represents bit offset within + control register and 3rd value represents bit + offset within status register. Number of tuples + defined should match the number of offsets + defined in property: qcom,mdss-pipe-dma-off +- qcom,mdss-pipe-cursor-off: Array of offsets for MDP source surface pipes of + type cursor, the offsets are calculated from + register "mdp_phys" defined in reg property. + The number of offsets defined here should + reflect the amount of cursor pipes that can be + active in MDP for this configuration. Meant for + hardware that has hw cursors support as a + source pipe. +- qcom,mdss-pipe-cursor-xin-id: Array of VBIF clients ids (xins) corresponding + to the respective cursor pipes. Number of xin ids + defined should match the number of offsets + defined in property: qcom,mdss-pipe-cursor-off +- qcom,mdss-pipe-cursor-clk-ctrl-off: Array of offsets describing clk control + offsets for dynamic clock gating. 1st value + in the array represents offset of the control + register. 2nd value represents bit offset within + control register and 3rd value represents bit + offset within status register. Number of tuples + defined should match the number of offsets + defined in property: qcom,mdss-pipe-cursor-off +- qcom,mdss-ctl-off: Array of offset addresses for the available ctl + hw blocks within MDP, these offsets are + calculated from register "mdp_phys" defined in + reg property. The number of ctl offsets defined + here should reflect the number of control paths + that can be configured concurrently on MDP for + this configuration. +- qcom,mdss-wb-off: Array of offset addresses for the progammable + writeback blocks within MDP. The number of + offsets defined should match the number of ctl + blocks defined in property: qcom,mdss-ctl-off +- qcom,mdss-mixer-intf-off: Array of offset addresses for the available + mixer blocks that can drive data to panel + interfaces. + These offsets are be calculated from register + "mdp_phys" defined in reg property. 
+ The number of offsets defined should reflect the + amount of mixers that can drive data to a panel + interface. +- qcom,mdss-dspp-off: Array of offset addresses for the available dspp + blocks. These offsets are calculated from + register "mdp_phys" defined in reg property. + The number of dspp blocks should match the + number of mixers driving data to interface + defined in property: qcom,mdss-mixer-intf-off +- qcom,mdss-pingpong-off: Array of offset addresses for the available + pingpong blocks. These offsets are calculated + from register "mdp_phys" defined in reg property. + The number of pingpong blocks should match the + number of mixers driving data to interface + defined in property: qcom,mdss-mixer-intf-off +- qcom,mdss-mixer-wb-off: Array of offset addresses for the available + mixer blocks that can be drive data to writeback + block. These offsets will be calculated from + register "mdp_phys" defined in reg property. + The number of writeback mixer offsets defined + should reflect the number of mixers that can + drive data to a writeback block. +- qcom,mdss-intf-off: Array of offset addresses for the available MDP + video interface blocks that can drive data to a + panel controller through timing engine. + The offsets are calculated from "mdp_phys" + defined in reg property. The number of offsets + defiend should reflect the number of progammable + interface blocks available in hardware. +- qcom,mdss-pref-prim-intf: A string which indicates the configured hardware + interface between MDP and the primary panel. + Individual panel controller drivers initialize + hardware based on this property. + Based on the interfaces supported at present, + possible values are: + - "dsi" + - "edp" + - "hdmi" + +Bus Scaling Data: +- qcom,msm-bus,name: String property describing MDSS client. +- qcom,msm-bus,num-cases: This is the the number of Bus Scaling use cases + defined in the vectors property. This must be + set to <3> for MDSS driver where use-case 0 is + used to take off MDSS BW votes from the system. + And use-case 1 & 2 are used in ping-pong fashion + to generate run-time BW requests. +- qcom,msm-bus,active-only: A boolean flag indicating if it is active only. +- qcom,msm-bus,num-paths: This represents the number of paths in each + Bus Scaling Usecase. This value depends on + how many number of AXI master ports are + dedicated to MDSS for particular chipset. This + value represents the RT + NRT AXI master ports. +- qcom,msm-bus,vectors-KBps: * A series of 4 cell properties, with a format + of (src, dst, ab, ib) which is defined at + Documentation/devicetree/bindings/arm/msm/msm_bus.txt + * Current values of src & dst are defined at + include/linux/msm-bus-board.h + src values allowed for MDSS are: + 22 = MSM_BUS_MASTER_MDP_PORT0 + 23 = MSM_BUS_MASTER_MDP_PORT1 + 25 = MSM_BUS_MASTER_ROTATOR + dst values allowed for MDSS are: + 512 = MSM_BUS_SLAVE_EBI_CH0 + ab: Represents aggregated bandwidth. + ib: Represents instantaneous bandwidth. + * Total number of 4 cell properties will be + (number of use-cases * number of paths). + * These values will be overridden by the driver + based on the run-time requirements. So initial + ab and ib values defined here are random and + bare no logic except for the use-case 0 where ab + and ib values needs to be 0. + * Define realtime vector properties followed by + non-realtime vector properties. + +- qcom,mdss-prefill-outstanding-buffer-bytes: The size of mdp outstanding buffer + in bytes. 
The buffer is filled during prefill + time and the buffer size shall be included in + prefill bandwidth calculation. +- qcom,mdss-prefill-y-buffer-bytes: The size of mdp y plane buffer in bytes. The + buffer is filled during prefill time when format + is YUV and the buffer size shall be included in + prefill bandwidth calculation. +- qcom,mdss-prefill-scaler-buffer-lines-bilinear: The value indicates how many lines + of scaler line buffer need to be filled during + prefill time. If bilinear scalar is enabled, then this + number of lines is used to determine how many bytes + of scaler buffer to be included in prefill bandwidth + calculation. +- qcom,mdss-prefill-scaler-buffer-lines-caf: The value indicates how many lines of + of scaler line buffer need to be filled during + prefill time. If CAF mode filter is enabled, then + this number of lines is used to determine how many + bytes of scaler buffer to be included in prefill + bandwidth calculation. +- qcom,mdss-prefill-post-scaler-buffer: The size of post scaler buffer in bytes. + The buffer is used to smooth the output of the + scaler. If the buffer is present in h/w, it is + filled during prefill time and the number of bytes + shall be included in prefill bandwidth calculation. +- qcom,mdss-prefill-pingpong-buffer-pixels: The size of pingpong buffer in pixels. + The buffer is used to keep pixels flowing to the + panel interface. If the vertical start position of a + layer is in the beginning of the active area, pingpong + buffer must be filled during prefill time to generate + starting lines. The number of bytes to be filled is + determined by the line width, starting position, + byte per pixel and scaling ratio, this number shall be + included in prefill bandwidth calculation. +- qcom,mdss-prefill-fbc-lines: The value indicates how many lines are required to fill + fbc buffer during prefill time if FBC (Frame Buffer + Compressor) is enabled. The number of bytes to be filled + is determined by the line width, bytes per pixel and + scaling ratio, this number shall be included in prefill bandwidth + calculation. +- qcom,max-mixer-width: Specify maximum MDP mixer width that the device supports. + This is a mandatory property, if not specified then + mdp probe will fail. + +Optional properties: +- batfet-supply : Phandle for battery FET regulator device node. +- vdd-cx-supply : Phandle for vdd CX regulator device node. +- qcom,vbif-settings : Array with key-value pairs of constant VBIF register + settings used to setup MDSS QoS for optimum performance. + The key used should be offset from "vbif_phys" register + defined in reg property. +- qcom,vbif-nrt-settings : The key used should be offset from "vbif_nrt_phys" + register defined in reg property. Refer qcom,vbif-settings + for a detailed description of this binding. +- qcom,mdp-settings : Array with key-value pairs of constant MDP register + settings used to setup MDSS QoS for best performance. + The key used should be offset from "mdp_phys" register + defined in reg property. +- qcom,mdss-smp-data: Array of shared memory pool data for dynamic SMP. There + should be only two values in this property. The first + value corresponds to the number of smp blocks and the + second is the size of each block present in the mdss + hardware. This property is optional for MDP hardware + with fix pixel latency ram. +- qcom,mdss-rot-block-size: The size of a memory block (in pixels) to be used + by the rotator. If this property is not specified, + then a default value of 128 pixels would be used. 
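Note, the encoding of the two pool related properties above is easier to see
with an annotated sketch; the values are the ones used in the example at the
end of this file:

	qcom,mdss-smp-data = <22 4096>;		/* 22 SMP blocks of 4096 bytes each */
	qcom,mdss-rot-block-size = <64>;	/* rotator works on 64 pixel memory blocks */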
+- qcom,mdss-has-bwc: Boolean property to indicate the presence of bandwidth + compression feature in the rotator. +- qcom,mdss-has-non-scalar-rgb: Boolean property to indicate the presence of RGB + pipes which have no scaling support. +- qcom,mdss-has-decimation: Boolean property to indicate the presence of + decimation feature in fetch. +- qcom,mdss-has-fixed-qos-arbiter-enabled: Boolean property to indicate the + presence of rt/nrt feature. This feature enables + increased performance by prioritizing the real time + (rt) traffic over non real time (nrt) traffic to + access the memory. +- qcom,mdss-num-nrt-paths: Integer property represents the number of non-realtime + paths in each Bus Scaling Usecase. This value depends on + number of AXI ports are dedicated to non-realtime VBIF for + particular chipset. This property is mandatory when + "qcom,mdss-has-fixed-qos-arbiter-enabled" is enabled. + These paths must be defined after rt-paths in + "qcom,msm-bus,vectors-KBps" vector request. +- qcom,mdss-has-source-split: Boolean property to indicate if source split + feature is available or not. +- qcom,mdss-has-rotator-downscale: Boolean property to indicate if rotator + downscale feature is available or not. +- qcom,mdss-rot-downscale-min: This integer value indicates the Minimum + downscale factor supported by rotator. +- qcom,mdss-rot-downscale-max: This integer value indicates the Maximum + downscale factor supported by rotator. +- qcom,mdss-ad-off: Array of offset addresses for the available + Assertive Display (AD) blocks. These offsets + are calculated from the register "mdp_phys" + defined in reg property. The number of AD + offsets should be less than or equal to the + number of mixers driving interfaces defined in + property: qcom,mdss-mixer-intf-off. Assumes + that AD blocks are aligned with the mixer + offsets as well (i.e. the first mixer offset + corresponds to the same pathway as the first + AD offset). +- qcom,mdss-has-wb-ad: Boolean property to indicate assertive display feature + support on write back framebuffer. +- qcom,mdss-no-lut-read: Boolean property to indicate reading of LUT is + not supported. +- qcom,mdss-no-hist-vote Boolean property to indicate histogram reads + and histogram LUT writes do not need additional + bandwidth voting. +- qcom,mdss-mdp-wfd-mode: A string that specifies what is the mode of + writeback wfd block. + "intf" = Writeback wfd block is + connected to the interface mixer. + "shared" = Writeback block shared + between wfd and rotator. + "dedicated" = Dedicated writeback + block for wfd using writeback mixer. +- qcom,mdss-smp-mb-per-pipe: Maximum number of shared memory pool blocks + restricted for a source surface pipe. If this + property is not specified, no such restriction + would be applied. +- qcom,mdss-highest-bank-bit: Property to indicate tile format as opposed to usual + linear format. The value tells the GPU highest memory + bank bit used. +- qcom,mdss-pipe-rgb-fixed-mmb: Array of indexes describing fixed Memory Macro + Blocks (MMBs) for rgb pipes. First value denotes + total numbers of MMBs per pipe while values, if + any, following first one denotes indexes of MMBs + to that RGB pipe. +- qcom,mdss-pipe-vig-fixed-mmb: Array of indexes describing fixed Memory Macro + Blocks (MMBs) for vig pipes. First value denotes + total numbers of MMBs per pipe while values, if + any, following first one denotes indexes of MMBs + to that VIG pipe. 
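Note, the fixed MMB arrays above encode a count followed by the MMB indexes.
The annotated sketch below uses the first entries from the example at the end
of this file:

	qcom,mdss-pipe-rgb-fixed-mmb = <2 0 1>,	/* RGB pipe 0: 2 MMBs, indexes 0 and 1 */
				       <2 2 3>;	/* RGB pipe 1: 2 MMBs, indexes 2 and 3 */
	qcom,mdss-pipe-vig-fixed-mmb = <1 8>,	/* VIG pipe 0: 1 MMB, index 8 */
				       <1 9>;	/* VIG pipe 1: 1 MMB, index 9 */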
+- qcom,mdss-pipe-sw-reset-off: Property to indicate offset to the register which + holds sw_reset bitmap for different MDSS + components. +- qcom,mdss-pipe-vig-sw-reset-map: Array of bit offsets for vig pipes within + sw_reset register bitmap. Number of offsets + defined should match the number of offsets + defined in property: qcom,mdss-pipe-vig-off +- qcom,mdss-pipe-rgb-sw-reset-map: Array of bit offsets for rgb pipes within + sw_reset register bitmap. Number of offsets + defined should match the number of offsets + defined in property: qcom,mdss-pipe-rgb-off +- qcom,mdss-pipe-dma-sw-reset-map: Array of bit offsets for dma pipes within + sw_reset register bitmap. Number of offsets + defined should match the number of offsets + defined in property: qcom,mdss-pipe-dma-off +- qcom,mdss-default-ot-wr-limit: This integer value indicates maximum number of pending + writes that can be allowed on each WR xin. + This value can be used to reduce the pending writes + limit and can be tuned to match performance + requirements depending upon system state. + Some platforms require a dynamic ot limiting in + some cases. Setting this default ot write limit + will enable this dynamic limiting for the write + operations in the platforms that require these + limits. +- qcom,mdss-default-ot-rd-limit: This integer value indicates the default number of pending + reads that can be allowed on each RD xin. + Some platforms require a dynamic ot limiting in + some cases. Setting this default ot read limit + will enable this dynamic limiting for the read + operations in the platforms that require these + limits. +- qcom,mdss-clk-levels: This array indicates the mdp core clock level selection + array. Core clock is calculated for each frame and + hence depending upon calculated value, clock rate + will be rounded up to the next level according to + this table. Order of entries need to be ordered in + ascending order. +- qcom,mdss-vbif-qos-rt-setting: This array is used to program vbif qos remapper register + priority for real time clients. +- qcom,mdss-vbif-qos-nrt-setting: This array is used to program vbif qos remapper register + priority for non real time clients. +- qcom,mdss-traffic-shaper-enabled: This boolean property enables traffic shaper functionality + for MDSS rotator which spread out rotator bandwidth request + so that rotator don't compete with other real time read + clients. +- qcom,mdss-dram-channels: This represents the number of channels in the + Bus memory controller. +- qcom,regs-dump-mdp: This array represents the registers offsets that + will be dumped from the mdp when the debug logging + is enabled; each entry in the table is an start and + end offset from the MDP address "mdp_phys", the + format of each entry is as follows: + + Ex: + <0x01000 0x01404> + Will dump the MDP registers + from the address: "mdp_phys + 0x01000" + to the address: "mdp_phys + 0x01404" +- qcom,regs-dump-names-mdp: This array represents the tag that will be used + for each of the entries defined within regs-dump. + Note that each tag matches with one of the + regs-dump entries in the same order as they + are defined. +- qcom,regs-dump-xin-id-mdp: Array of VBIF clients ids (xins) corresponding + to mdp block. Xin id property is not valid for mdp + internal blocks like ctl, lm, dspp. It should set + to 0xff for such blocks. + +Fudge Factors: Fudge factors are used to boost demand for + resources like bus bandswidth, clk rate etc. to + overcome system inefficiencies and avoid any + glitches. 
These fudge factors are expressed in + terms of numerator and denominator. First value + is numerator followed by denominator. They all + are optional but highly recommended. + Ex: + x = value to be fudged + a = numerator, default value is 1 + b = denominator, default value is 1 + FUDGE(x, a, b) = ((x * a) / b) +- qcom,mdss-ib-factor: This fudge factor is applied to calculated ib + values in default conditions. +- qcom,mdss-ib-factor-overlap: This fudge factor is applied to calculated ib + values when the overlap bandwidth is the + predominant value compared to prefill bandwidth + value. +- qcom,mdss-clk-factor: This fudge factor is applied to calculated mdp + clk rate in default conditions. + +- qcom,max-bandwidth-low-kbps: This value indicates the max bandwidth in KB + that can be supported without underflow. + This is a low bandwidth threshold which should + be applied in most scenarios to be safe from + underflows when unable to satisfy bandwidth + requirements. +- qcom,max-bandwidth-high-kbps: This value indicates the max bandwidth in KB + that can be supported without underflow. + This is a high bandwidth threshold which can be + applied in scenarios where panel interface can + be more tolerant to memory latency such as + command mode panels. +- qcom,max-bandwidth-per-pipe-kbps: A two dimensional array indicating the max + bandwidth in KB that a single pipe can support + without underflow for various usecases. The + first parameter indicates the usecase and the + second parameter gives the max bw allowed for + the usecase. Following are the enum values for + modes in different cases: + For default case, mode = 1 + camera usecase, mode = 2 + hflip usecase, mode = 4 + vflip usecase, mode = 8 + First parameter/mode value need to match enum, + mdss_mdp_max_bw_mode, present in + include/uapi/linux/msm_mdp.h. +- qcom,max-bw-settings: This two dimension array indicates the max bandwidth + in KB that has to be supported when particular + scenarios are involved such as camera, flip. + The first parameter indicate the + scenario/usecase and second parameter indicate + the maximum bandwidth for that usecase. + Following are the enum values for modes in different + cases: + For default case, mode = 1 + camera usecase, mode = 2 + hflip usecase, mode = 4 + vflip usecase, mode = 8 + First parameter/mode value need to match enum, + mdss_mdp_max_bw_mode, present in + include/uapi/linux/msm_mdp.h. + +- qcom,mdss-has-panic-ctrl: Boolean property to indicate if panic/robust signal + control feature is available or not. +- qcom,mdss-en-svs-high: Boolean property to indicate if this target needs to + enable the svs high voltage level for CX rail. +- qcom,mdss-pipe-vig-panic-ctrl-offsets: Array of panic/robust signal offsets + corresponding to the respective VIG pipes. + Number of signal offsets should match the + number of offsets defined in property: + qcom,mdss-pipe-vig-off +- qcom,mdss-pipe-rgb-panic-ctrl-offsets: Array of panic/robust signal offsets + corresponding to the respective RGB pipes. + Number of signal offsets should match the + number of offsets defined in property: + qcom,mdss-pipe-rgb-off +- qcom,mdss-pipe-dma-panic-ctrl-offsets: Array of panic/robust signal offsets + corresponding to the respective DMA pipes. + Number of signal offsets should match the + number of offsets defined in property: + qcom,mdss-pipe-dma-off +- qcom,mdss-per-pipe-panic-luts: Array to configure the panic/robust luts for + each rt and nrt clients. 
This property is + for the MDPv1.7 and above, which configures + the panic independently on each client. + Each element of the array corresponds to: + First element - panic for linear formats + Second element - panic for tile formats + Third element - robust for linear formats + Fourth element - robust for tile formats +- qcom,mdss-has-pingpong-split: Boolean property to indicate if destination + split feature is available or not in the target. +- qcom,mdss-slave-pingpong-off: Offset address for the extra TE block which needs + to be programmed when pingpong split feature is enabled. + Offset is calculated from the "mdp_phys" + register value. Mandatory when qcom,mdss-has-pingpong-split + is enabled. +- qcom,mdss-ppb-ctl-off: Array of offset addresses of ping pong buffer control registers. + The offsets are calculated from the "mdp_phys" base address + specified. The number of offsets should match the + number of ping pong buffers available in the hardware. + Mandatory when qcom,mdss-has-pingpong-split is enabled. +- qcom,mdss-ppb-cfg-off: Array of offset addresses of ping pong buffer config registers. + The offsets are calculated from the "mdp_phys" base address + specified. The number of offsets should match the + number of ping pong buffers available in the hardware. + Mandatory when qcom,mdss-has-pingpong-split is enabled. +- qcom,mdss-cdm-off: Array of offset addresses for the available + chroma down modules that can convert RGB data + to YUV before sending it to the interface + block. These offsets will be calculated from + register "mdp_phys" define in reg property. The + number of cdm offsets should reflect the number + of cdm blocks present in hardware. +- qcom,mdss-dsc-off: Array of offset addresses for the available + display stream compression module block. + These offsets will be calculated from + register "mdp_phys" define in reg property. The + number of dsc offsets should reflect the number + of dsc blocks present in hardware. +- qcom,max-pipe-width: This value specifies the maximum MDP SSPP width + the device supports. If not specified, a default value + of 2048 will be applied. +- qcom,mdss-reg-bus: Property to provide Bus scaling for register access for + MDP and DSI Blocks. + +- qcom,mdss-rot-reg-bus: Property to provide Bus scaling for register access for + Rotator Block. + +- qcom,mdss-hw-rt: Optional Property to request min vote on the bus. + Few Low tier targets expect min vote on the bus during SMMU + and TZ operations. use this handle to request the vote needed. + +Optional subnodes: +- mdss_fb: Child nodes representing the frame buffer virtual devices. + +Subnode properties: +- compatible : Must be "qcom,mdss-fb" +- cell-index : Index representing frame buffer +- qcom,mdss-mixer-swap: A boolean property that indicates if the mixer muxes + need to be swapped based on the target panel. + By default the property is not defined. +- qcom,memblock-reserve: Specifies the memory location and the size reserved + for the framebuffer used to display the splash screen. + This property is required whenever the continuous splash + screen feature is enabled for the corresponding + framebuffer device. It should be used for only 32bit + kernel. +- qcom,cont-splash-memory: Specifies the memory block region reserved for + continuous splash screen feature. This property should be + defined for corresponding framebuffer device if + "qcom,memblock-reserve" is not defined when continuous + splash screen feature is enabled. 
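Note, as a concrete illustration of the continuous splash bindings above, a
minimal frame buffer child node might look as follows; the &cont_splash_mem
phandle is assumed to be declared elsewhere in the platform memory map:

	mdss_fb0: qcom,mdss_fb_primary {
		compatible = "qcom,mdss-fb";
		cell-index = <0>;
		qcom,mdss-fb-splash-logo-enabled;
		qcom,cont-splash-memory {
			linux,contiguous-region = <&cont_splash_mem>;
		};
	};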
+- linux,contiguous-region: Phandle to the continuous memory region reserved for + frame-buffer or continuous splash screen. Size of this + region is dependent on the display panel resolution and + buffering scheme for frame-buffer node. Currently driver + uses double buffering. + + Example: Width = 1920, Height = 1080, BytesPerPixel = 4, + Number of frame-buffers reserved = 2. + Size = 1920*1080*4*2 = ROUND_1MB(15.8MB) = 16MB. +- qcom,mdss-fb-splash-logo-enabled: The boolean entry enables the framebuffer + driver to display the splash logo image. + It is independent of continuous splash + screen feature and has no relation with + qcom,cont-splash-enabled entry present in + panel configuration. +- qcom,mdss-idle-power-collapse-enabled: Boolean property that enables support + for mdss power collapse in idle + screen use cases with smart panels. +- qcom,boot-indication-enabled: Boolean property that enables turning on the blue + LED for notifying that the device is in boot + process. + +- qcom,mdss-pp-offets: A node that lists the offsets of post processing blocks + from base module. + -- qcom,mdss-mdss-sspp-igc-lut-off: This 32 bit value provides the + offset to the IGC lut rams from mdp_phys base. + -- qcom,mdss-sspp-vig-pcc-off: This 32 bit value provides the offset + to PCC block from the VIG pipe base address. + -- qcom,mdss-sspp-rgb-pcc-off: This 32 bit value provides the offset + to PCC block from the RGB pipe base address. + -- qcom,mdss-sspp-dma-pcc-off: This 32 bit value provides the offset + to PCC block from the DMA pipe base address. + -- qcom,mdss-dspp-pcc-off: This 32 bit value provides the offset + to PCC block from the DSPP pipe base address. + -- qcom,mdss-lm-pgc-off: This 32 bit value provides the offset + to PGC block from the layer mixer base address. + -- qcom,mdss-dspp-gamut-off: This 32 bit value provides the offset + to gamut block from DSPP base address. + -- qcom,mdss-dspp-pgc-off: This 32 bit value provides the offset to + PGC block from the DSPP base address. + +- qcom,mdss-scaler-offsets: A node that lists the offsets of scaler blocks + from base module. + -- qcom,mdss-vig-scaler-off: This 32 bit value provides the + offset to vig scaler from vig pipe base. + -- qcom,mdss-vig-scaler-lut-off: This 32 bit value provides the + offset to vig scaler lut from vig pipe base. + -- qcom,mdss-has-dest-scaler: Boolean property to indicate the + presence of destination scaler block. + -- qcom,mdss-dest-block-off: This 32 bit value provides the + offset from mdp base to destination scaler block. + -- qcom,mdss-dest-scaler-off: Array containing offsets of + destination scalar modules from the scaler block. + -- qcom,mdss-dest-scaler-lut-off: Array containing offsets of destination + scaler lut tables from scalar block. + +- qcom,mdss-has-separate-rotator: Boolean property to indicate support of + indpendent rotator. Indpendent rotator has + separate DMA pipe working in block mode only. + +- smmu_mdp_***: Child nodes representing the mdss smmu virtual devices. + Mandatory smmu v2 and not required for smmu v1. + +Subnode properties: +- compatible : Compatible name used in smmu v2. + smmu_v2 names should be: + "qcom,smmu_mdp_unsec" - smmu context bank device for + unsecure mdp domain. + "qcom,smmu_rot_unsec" - smmu context bank device for + unsecure rotation domain. + "qcom,smmu_mdp_sec" - smmu context bank device for + secure mdp domain. + "qcom,smmu_rot_sec" - smmu context bank device for + secure rotation domain. 
+ "qcom,smmu_kms_unsec" - smmu context bank device for + unsecure mdp domain for KMS driver. + "qcom,smmu_nrt_unsec" - smmu context bank device for + unsecure rotation domain for KMS driver. + "qcom,smmu_kms_sec" - smmu context bank device for + secure mdp domain for KMS driver. + "qcom,smmu_nrt_sec" - smmu context bank device for + secure rotation domain for KMS driver. + "qcom,smmu_arm_mdp_unsec" - arm smmu context bank device for + unsecure mdp domain. + "qcom,smmu_arm_mdp_sec" - arm smmu context bank device for + secure mdp domain. +- gdsc-mmagic-mdss-supply: Phandle for mmagic mdss supply regulator device node. +- reg : offset and length of the register set for the device. +- reg-names : names to refer to register sets related to this device +- clocks: List of Phandles for clock device nodes + needed by the device. +- clock-names: List of clock names needed by the device. + +Subnode properties: +Required properties: +- compatible: Must be "qcom,mdss_wb" +- qcom,mdss_pan_res: Array containing two elements, width and height which + specifies size of writeback buffer. +- qcom,mdss_pan_bpp: Specifies bits per pixel for writeback buffer. +- qcom,mdss-fb-map: Specifies the handle for frame buffer. + +Example: + mdss_mdp: qcom,mdss_mdp@fd900000 { + compatible = "qcom,mdss_mdp"; + reg = <0xfd900000 0x22100>, + <0xfd924000 0x1000>, + <0xfd925000 0x1000>; + reg-names = "mdp_phys", "vbif_phys", "vbif_nrt_phys"; + interrupts = <0 72 0>; + interrupt-controller; + #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <1>; + vdd-supply = <&gdsc_mdss>; + batfet-supply = <&pm8941_chg_batif>; + vdd-cx-supply = <&pm8841_s2_corner>; + + /* Bus Scale Settings */ + qcom,msm-bus,name = "mdss_mdp"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,mdss-dram-channels = <2>; + qcom,mdss-num-nrt-paths = <1>; + qcom,msm-bus,vectors-KBps = + <22 512 0 0>, <23 512 0 0>, + <22 512 0 6400000>, <23 512 0 6400000>, + <22 512 0 6400000>, <23 512 0 6400000>; + + /* Fudge factors */ + qcom,mdss-ab-factor = <2 1>; /* 2 times */ + qcom,mdss-ib-factor = <3 2>; /* 1.5 times */ + qcom,mdss-high-ib-factor = <2 1>; /* 2 times */ + qcom,mdss-clk-factor = <5 4>; /* 1.25 times */ + + /* Clock levels */ + qcom,mdss-clk-levels = <92310000, 177780000, 200000000>; + + /* VBIF QoS remapper settings*/ + qcom,mdss-vbif-qos-rt-setting = <2 2 2 2>; + qcom,mdss-vbif-qos-nrt-setting = <1 1 1 1>; + + qcom,max-bandwidth-low-kbps = <2300000>; + qcom,max-bandwidth-high-kbps = <3000000>; + qcom,max-bandwidth-per-pipe-kbps = <4 2100000>, + <8 1800000>; + qcom,max-bw-settings = <1 2300000>, + <2 1700000>, + <4 2300000>, + <8 2000000>; + + qcom,max-mixer-width = <2048>; + qcom,max-pipe-width = <2048>; + qcom,max-clk-rate = <320000000>; + qcom,vbif-settings = <0x0004 0x00000001>, + <0x00D8 0x00000707>; + qcom,vbif-nrt-settings = <0x0004 0x00000001>, + <0x00D8 0x00000707>; + qcom,mdp-settings = <0x02E0 0x000000AA>, + <0x02E4 0x00000055>; + qcom,mdss-pipe-vig-off = <0x00001200 0x00001600 + 0x00001A00>; + qcom,mdss-pipe-rgb-off = <0x00001E00 0x00002200 + 0x00002600>; + qcom,mdss-pipe-dma-off = <0x00002A00 0x00002E00>; + qcom,mdss-pipe-cursor-off = <0x00035000 0x00037000>; + qcom,mdss-dsc-off = <0x00081000 0x00081400>; + qcom,mdss-pipe-vig-fetch-id = <1 4 7>; + qcom,mdss-pipe-rgb-fetch-id = <16 17 18>; + qcom,mdss-pipe-dma-fetch-id = <10 13>; + qcom,mdss-pipe-rgb-fixed-mmb = <2 0 1>, + <2 2 3>, + <2 4 5>, + <2 6 7>; + qcom,mdss-pipe-vig-fixed-mmb = <1 8>, + <1 9>, + <1 10>, + <1 11>; + qcom,mdss-smp-data = <22 4096>; + 
qcom,mdss-rot-block-size = <64>; + qcom,mdss-rotator-ot-limit = <2>; + qcom,mdss-smp-mb-per-pipe = <2>; + qcom,mdss-pref-prim-intf = "dsi"; + qcom,mdss-has-non-scalar-rgb; + qcom,mdss-has-bwc; + qcom,mdss-has-decimation; + qcom,mdss-has-fixed-qos-arbiter-enabled; + qcom,mdss-has-source-split; + qcom,mdss-wfd-mode = "intf"; + qcom,mdss-no-lut-read; + qcom,mdss-no-hist-vote; + qcom,mdss-traffic-shaper-enabled; + qcom,mdss-has-rotator-downscale; + qcom,mdss-rot-downscale-min = <2>; + qcom,mdss-rot-downscale-max = <16>; + + qcom,mdss-has-pingpong-split; + qcom,mdss-pipe-vig-xin-id = <0 4 8>; + qcom,mdss-pipe-rgb-xin-id = <1 5 9>; + qcom,mdss-pipe-dma-xin-id = <2 10>; + qcom,mdss-pipe-cursor-xin-id = <7 7>; + + qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>, + <0x3B4 0 0>, + <0x3BC 0 0>, + <0x3C4 0 0>; + + qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>, + <0x3B4 4 8>, + <0x3BC 4 8>, + <0x3C4 4 8>; + + qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>, + <0x3B4 8 12>; + + qcom,mdss-per-pipe-panic-luts = <0x000f>, + <0xffff>, + <0xfffc>, + <0xff00>; + + qcom,mdss-has-panic-ctrl; + qcom,mdss-pipe-vig-panic-ctrl-offsets = <0 1 2 3>; + qcom,mdss-pipe-rgb-panic-ctrl-offsets = <4 5 6 7>; + qcom,mdss-pipe-dma-panic-ctrl-offsets = <8 9>; + + qcom,mdss-pipe-sw-reset-off = <0x0128>; + qcom,mdss-pipe-vig-sw-reset-map = <5 6 7 8>; + qcom,mdss-pipe-rgb-sw-reset-map = <9 10 11 12>; + qcom,mdss-pipe-dma-sw-reset-map = <13 14>; + + qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800 + 0x00000900 0x0000A00>; + qcom,mdss-mixer-intf-off = <0x00003200 0x00003600 + 0x00003A00>; + qcom,mdss-mixer-wb-off = <0x00003E00 0x00004200>; + qcom,mdss-dspp-off = <0x00004600 0x00004A00 0x00004E00>; + qcom,mdss-pingpong-off = <0x00012D00 0x00012E00 0x00012F00>; + qcom,mdss-wb-off = <0x00011100 0x00013100 0x00015100 + 0x00017100 0x00019100>; + qcom,mdss-intf-off = <0x00021100 0x00021300 + 0x00021500 0x00021700>; + qcom,mdss-cdm-off = <0x0007A200>; + qcom,mdss-ppb-ctl-off = <0x0000420>; + qcom,mdss-ppb-cfg-off = <0x0000424>; + qcom,mdss-slave-pingpong-off = <0x00073000> + + /* buffer parameters to calculate prefill bandwidth */ + qcom,mdss-prefill-outstanding-buffer-bytes = <1024>; + qcom,mdss-prefill-y-buffer-bytes = <4096>; + qcom,mdss-prefill-scaler-buffer-lines-bilinear = <2>; + qcom,mdss-prefill-scaler-buffer-lines-caf = <4>; + qcom,mdss-prefill-post-scaler-buffer-pixels = <2048>; + qcom,mdss-prefill-pingpong-buffer-pixels = <5120>; + qcom,mdss-prefill-fbc-lines = <2>; + qcom,mdss-idle-power-collapse-enabled; + + qcom,regs-dump-xin-id-mdp = <0xff 0xff 0xff 0xff 0x0 0x0>; + mdss_fb0: qcom,mdss_fb_primary { + cell-index = <0>; + compatible = "qcom,mdss-fb"; + qcom,mdss-mixer-swap; + linux,contiguous-region = <&fb_mem>; + qcom,mdss-fb-splash-logo-enabled: + qcom,cont-splash-memory { + linux,contiguous-region = <&cont_splash_mem>; + }; + }; + + qcom,mdss-pp-offsets { + qcom,mdss-sspp-mdss-igc-lut-off = <0x3000>; + qcom,mdss-sspp-vig-pcc-off = <0x1580>; + qcom,mdss-sspp-rgb-pcc-off = <0x180>; + qcom,mdss-sspp-dma-pcc-off = <0x180>; + qcom,mdss-lm-pgc-off = <0x3C0>; + qcom,mdss-dspp-gamut-off = <0x1600>; + qcom,mdss-dspp-pcc-off = <0x1700>; + qcom,mdss-dspp-pgc-off = <0x17C0>; + }; + + qcom,mdss-scaler-offsets { + qcom,mdss-vig-scaler-off = <0xA00>; + qcom,mdss-vig-scaler-lut-off = <0xB00>; + qcom,mdss-has-dest-scaler; + qcom,mdss-dest-block-off = <0x00061000>; + qcom,mdss-dest-scaler-off = <0x800 0x1000>; + qcom,mdss-dest-scaler-lut-off = <0x900 0x1100>; + }; + + qcom,mdss-reg-bus { + /* Reg Bus Scale Settings */ + 
qcom,msm-bus,name = "mdss_reg"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,active-only; + qcom,msm-bus,vectors-KBps = + <1 590 0 0>, + <1 590 0 76800>, + <1 590 0 160000>, + <1 590 0 320000>; + }; + + qcom,mdss-hw-rt-bus { + /* hw-rt Bus Scale Settings */ + qcom,msm-bus,name = "mdss_hw_rt"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <22 512 0 0>, + <22 512 0 1000>; + }; + + smmu_mdp_sec: qcom,smmu_mdp_sec_cb { + compatible = "qcom,smmu_mdp_sec"; + iommus = <&mdp_smmu 1>; + reg = <0xd09000 0x000d00>, + reg-names = "mmu_cb"; + gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>; + clocks = <&clock_mmss clk_smmu_mdp_ahb_clk>, + <&clock_mmss clk_smmu_mdp_axi_clk>; + clock-names = "dummy_clk", "dummy_clk"; + }; + + qcom,mdss_wb_panel { + compatible = "qcom,mdss_wb"; + qcom,mdss_pan_res = <1280 720>; + qcom,mdss_pan_bpp = <24>; + qcom,mdss-fb-map = <&mdss_fb1>; + }; + + qcom,mdss-rot-reg-bus { + /* Reg Bus Scale Settings */ + qcom,msm-bus,name = "mdss_rot_reg"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,active-only; + qcom,msm-bus,vectors-KBps = + <1 590 0 0>, + <1 590 0 76800>; + }; + }; + diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt index e92e4deb0469432cb0b94f6a314974e1771cf438..6b9238c4bf302d0dc9824b79b23a5596741d02b9 100644 --- a/Documentation/devicetree/bindings/fb/mdss-pll.txt +++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt @@ -1,24 +1,20 @@ -Qualcomm Technologies MDSS pll for DSI/EDP/HDMI +Qualcomm Technologies, Inc. MDSS pll for DSI/EDP/HDMI -mdss-pll is a pll controller device which supports pll devices that -are compatible with MIPI display serial interface specification, -HDMI and edp. +mdss-pll is a pll controller device which supports pll devices that are +compatiable with MIPI display serial interface specification, HDMI and edp. Required properties: -- compatible: Compatible name used in the driver - "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939", - "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994", - "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909", - "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994", - "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992", - "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996", - "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2", - "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8", - "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8", - "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998", - "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm", - "qcom,mdss_dsi_pll_7nm", - "qcom,mdss_dp_pll_7nm". +- compatible: Compatible name used in the driver. Should be one of: + "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939", + "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994", + "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909", + "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994", + "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992", + "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996", + "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2", + "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_dsi_pll_8952", + "qcom,mdss_dsi_pll_8937", "qcom,mdss_hdmi_pll_8996_v3_1p8", + "qcom,mdss_dsi_pll_8953" - cell-index: Specifies the controller used - reg: offset and length of the register set for the device. 
- reg-names : names to refer to register sets related to this device diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt new file mode 100644 index 0000000000000000000000000000000000000000..8c11a438f5d8f569ec77f77484f0388dc436b8e6 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt @@ -0,0 +1,25 @@ +Qualcomm Technologies, Inc. mdss-qpic-panel + +mdss-qpic-panel is a panel device which can be driven by qpic. + +Required properties: +- compatible: Must be "qcom,mdss-qpic-panel" +- qcom,mdss-pan-res: A two dimensional array that specifies the panel + resolution. +- qcom,mdss-pan-bpp: Specifies the panel bits per pixel. +- qcom,refresh_rate: Panel refresh rate + +Optional properties: +- label: A string used as a descriptive name of the panel + + +Example: +/ { + qcom,mdss_lcdc_ili9341_qvga { + compatible = "qcom,mdss-qpic-panel"; + label = "ili qvga lcdc panel"; + qcom,mdss-pan-res = <240 320>; + qcom,mdss-pan-bpp = <18>; + qcom,refresh_rate = <60>; + }; +}; diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic.txt b/Documentation/devicetree/bindings/fb/mdss-qpic.txt new file mode 100644 index 0000000000000000000000000000000000000000..16d5b3547bdc28725ca1f04922f27a2a530c939c --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-qpic.txt @@ -0,0 +1,49 @@ +Qualcomm Technolgies, Inc. mdss-qpic + +mdss-qpic is a qpic controller device which supports dma transmission to MIPI +and LCDC panel. + +Required properties: +- compatible: must be "qcom,mdss_qpic" +- reg: offset and length of the register set for the device. +- reg-names : names to refer to register sets related to this device +- interrupts: IRQ line +- vdd-supply: Phandle for vdd regulator device node. +- avdd-supply: Phandle for avdd regulator device node. +- qcom,cs-gpio: Phandle for cs gpio device node. +- qcom,te-gpio: Phandle for te gpio device node. +- qcom,rst-gpio: Phandle for rst gpio device node. +- qcom,ad8-gpio: Phandle for ad8 gpio device node. +- qcom,bl-gpio: Phandle for backlight gpio device node. + +Optional properties: +- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for +below Bus Scaling properties: + - qcom,msm-bus,name + - qcom,msm-bus,num-cases + - qcom,msm-bus,num-paths + - qcom,msm-bus,vectors-KBps + +Example: + qcom,msm_qpic@f9ac0000 { + compatible = "qcom,mdss_qpic"; + reg = <0xf9ac0000 0x24000>; + reg-names = "qpic_base"; + interrupts = <0 251 0>; + + qcom,msm-bus,name = "mdss_qpic"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + + qcom,msm-bus,vectors-KBps = + <91 512 0 0>, + <91 512 400000 800000>; + + vdd-supply = <&pm8019_l11>; + avdd-supply = <&pm8019_l14>; + qcom,cs-gpio = <&msmgpio 21 0>; + qcom,te-gpio = <&msmgpio 22 0>; + qcom,rst-gpio = <&msmgpio 23 0>; + qcom,ad8-gpio = <&msmgpio 20 0>; + qcom,bl-gpio = <&msmgpio 84 0>; + }; diff --git a/Documentation/devicetree/bindings/fb/mdss-rotator.txt b/Documentation/devicetree/bindings/fb/mdss-rotator.txt new file mode 100644 index 0000000000000000000000000000000000000000..5e077ac23819adc6804913465beb5b17ee4ec53f --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-rotator.txt @@ -0,0 +1,78 @@ +QTI MDSS Rotator + +MDSS rotator is a rotator driver, which manages the rotator hw +block inside the Mobile Display Subsystem. + +Required properties +- compatible : Must be "qcom,mdss-rotator". +- qcom,mdss-wb-count: The number of writeback block + in the hardware +- -supply: Phandle for regulator device node. 
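+
+		As a sketch only (the regulator phandles below are illustrative
+		and target specific), a rotator powered from the MDSS GDSC, as
+		in the example at the end of this document, would declare its
+		supplies as:
+
+			vdd-supply = <&gdsc_mdss>;
+			gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;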
+
+Bus Scaling Data:
+- qcom,msm-bus,name:		String property describing MDSS client.
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
+				defined in the vectors property. This must be
+				set to <3> for the MDSS driver, where use-case 0 is
+				used to take off MDSS BW votes from the system.
+				Use-cases 1 & 2 are used in a ping-pong fashion
+				to generate run-time BW requests.
+- qcom,msm-bus,num-paths:	This represents the number of paths in each
+				Bus Scaling Usecase. This value depends on
+				how many AXI master ports are
+				dedicated to MDSS for a particular chipset.
+- qcom,msm-bus,vectors-KBps:	* A series of 4 cell properties, with a format
+				of (src, dst, ab, ib) which is defined at
+				Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+				* Current values of src & dst are defined at
+				include/linux/msm-bus-board.h
+				src values allowed for MDSS are:
+					22 = MSM_BUS_MASTER_MDP_PORT0
+					23 = MSM_BUS_MASTER_MDP_PORT1
+					25 = MSM_BUS_MASTER_ROTATOR
+				dst values allowed for MDSS are:
+					512 = MSM_BUS_SLAVE_EBI_CH0
+				ab: Represents aggregated bandwidth.
+				ib: Represents instantaneous bandwidth.
+				* Total number of 4 cell properties will be
+				(number of use-cases * number of paths).
+				* These values will be overridden by the driver
+				based on the run-time requirements, so the initial
+				ab and ib values defined here are arbitrary and
+				bear no logic except for use-case 0, where the ab
+				and ib values need to be 0.
+				* Define realtime vector properties followed by
+				non-realtime vector properties.
+
+Optional properties
+- qcom,mdss-has-reg-bus:	Boolean property to indicate
+				if the rotator needs to vote for the register bus.
+				This property is needed starting with 8996.
+- qcom,mdss-has-ubwc:		Boolean property to indicate
+				if the hw supports universal
+				bandwidth compression (ubwc).
+- qcom,mdss-has-downscale:	Boolean property to indicate
+				if the hw supports downscale.
+
+Example:
+	mdss_rotator: qcom,mdss_rotator {
+		compatible = "qcom,mdss_rotator";
+		qcom,mdss-has-downscale;
+		qcom,mdss-has-ubwc;
+		qcom,mdss-wb-count = <2>;
+
+		qcom,mdss-has-reg-bus;
+		/* Bus Scale Settings */
+		qcom,msm-bus,name = "mdss_rotator";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,mdss-num-nrt-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<25 512 0 0>,
+			<25 512 0 6400000>,
+			<25 512 0 6400000>;
+
+		vdd-supply = <&gdsc_mdss>;
+		gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
+		qcom,supply-names = "vdd", "gdsc-mmagic-mdss";
+	};
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
new file mode 100644
index 0000000000000000000000000000000000000000..285a14f7ff6918ecafec26df5a737f85a2a38f2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -0,0 +1,116 @@
+* Qualcomm Technologies, Inc. HDMI Tx
+
+Required properties:
+- cell-index: hdmi tx controller index
+- compatible: must be "qcom,hdmi-tx"
+- reg: offset and length of the register region(s) for the device.
+- reg-names: a list of strings that map in order to the list of regs.
+
+- hpd-gdsc-supply: phandle to the mdss gdsc regulator device tree node.
+- hpd-5v-supply: phandle to the 5V regulator device tree node.
+- core-vdda-supply: phandle to the HDMI vdda regulator device tree node.
+- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
+- qcom,supply-names: a list of strings that map in order
+		to the list of supplies.
+- qcom,min-voltage-level: specifies minimum voltage (uV) level
+		of supply(ies) mentioned above.
+- qcom,max-voltage-level: specifies maximum voltage (uV) level + of supply(ies) mentioned above. +- qcom,enable-load: specifies the current (uA) that will be + drawn from the enabled supply(ies) mentioned above. +- qcom,disable-load: specifies the current (uA) that will be + drawn from the disabled supply(ies) mentioned above. + +- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line. +- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line. +- qcom,hdmi-tx-ddc-data: gpio for ddc data line. + +Optional properties: +- hpd-5v-en-supply: phandle to the 5V boost enable regulator device tree node. +- qcom,hdmi-tx-mux-sel: gpio required to toggle HDMI output between + docking station, type A, and liquid device, type D, ports. Required + property for liquid devices. +- qcom,hdmi-tx-ddc-mux-sel: gpio for ddc mux select. +- qcom,hdmi-tx-mux-en: gpio required to enable mux for HDMI output + on liquid devices. Required property for liquid devices. +- qcom,hdmi-tx-mux-lpm: gpio required for hdmi mux configuration + selection on liquid devices. Required property for liquid devices. +- qcom,conditional-power-on: Enables HPD conditionally on MTP targets. + Required property for MTP devices which are reworked to expose HDMI port. +- qcom,hdmi-tx-hpd: gpio required for HDMI hot-plug detect. Required on + platforms where companion chip is not used. +- pinctrl-names: a list of strings that map to the pinctrl states. +- pinctrl-0: list of phandles, each pointing at a pin configuration node. +... +- pinctrl-n: list of phandles, each pointing at a pin configuration node. +- qcom,conti-splash-enabled: Enables the hdmi continuous splash screen feature. + HDMI interface will remain powered on from LK to kernel with continuous + display of bootup logo. +- qcom,pluggable: boolean to enable hotplug feature. +- qcom,display-id: A string indicates the display ID for the controller. + The possible values are: + - "primary" + - "secondary" + - "tertiary" + +[Optional child nodes]: These nodes are for devices which are +dependent on HDMI Tx controller. If HDMI Tx controller is disabled then +these devices will be disabled as well. Ex. HDMI Audio Codec device. + +- qcom,msm-hdmi-audio-rx: Node for HDMI audio codec. 
+Required properties: +- compatible : "msm-hdmi-audio-codec-rx"; + +Example: + mdss_hdmi_tx: qcom,hdmi_tx@fd922100 { + cell-index = <0>; + compatible = "qcom,hdmi-tx"; + reg = <0xfd922100 0x35C>, + <0xfd922500 0x7C>, + <0xfc4b8000 0x60F0>, + <0xfe2a0000 0xFFF>; + reg-names = "core_physical", "phy_physical", "qfprom_physical", + "hdcp_physical"; + + hpd-gdsc-supply = <&gdsc_mdss>; + hpd-5v-supply = <&pm8941_mvs2>; + hpd-5v-en-supply = <&hdmi_vreg>; + core-vdda-supply = <&pm8941_l12>; + core-vcc-supply = <&pm8941_s3>; + qcom,supply-names = "hpd-gdsc", "hpd-5v", "hpd-5v-en", "core-vdda", "core-vcc"; + qcom,min-voltage-level = <0 0 0 1800000 1800000>; + qcom,max-voltage-level = <0 0 0 1800000 1800000>; + qcom,enable-load = <0 0 0 1800000 0>; + qcom,disable-load = <0 0 0 0 0>; + + qcom,hdmi-tx-ddc-mux-sel = <&pma8084_gpios 6 0>; + qcom,hdmi-tx-cec = <&msmgpio 31 0>; + qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>; + qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>; + qcom,hdmi-tx-hpd = <&msmgpio 34 0>; + + qcom,hdmi-tx-mux-lpm = <&msmgpio 27 0>; + qcom,hdmi-tx-mux-en = <&msmgpio 83 0>; + qcom,hdmi-tx-mux-sel = <&msmgpio 85 0>; + + qcom,conditional-power-on; + qcom,pluggable; + qcom,display-id = "secondary"; + + qcom,msm-hdmi-audio-rx { + compatible = "qcom,msm-hdmi-audio-codec-rx"; + }; + pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", + "hdmi_cec_active", "hdmi_active", + "hdmi_sleep"; + pinctrl-0 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_suspend + &mdss_hdmi_cec_suspend>; + pinctrl-1 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active + &mdss_hdmi_cec_suspend>; + pinctrl-2 = <&mdss_hdmi_hpd_active &mdss_hdmi_cec_active + &mdss_hdmi_ddc_suspend>; + pinctrl-3 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active + &mdss_hdmi_cec_active>; + pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend + &mdss_hdmi_cec_suspend>; + }; diff --git a/Documentation/devicetree/bindings/fb/mxsfb.txt b/Documentation/devicetree/bindings/fb/mxsfb.txt new file mode 100644 index 0000000000000000000000000000000000000000..96ec5179c8a00199e2056ee574c3c55aef37bae3 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mxsfb.txt @@ -0,0 +1,49 @@ +* Freescale MXS LCD Interface (LCDIF) + +Required properties: +- compatible: Should be "fsl,-lcdif". Supported chips include + imx23 and imx28. +- reg: Address and length of the register set for lcdif +- interrupts: Should contain lcdif interrupts +- display : phandle to display node (see below for details) + +* display node + +Required properties: +- bits-per-pixel : <16> for RGB565, <32> for RGB888/666. +- bus-width : number of data lines. Could be <8>, <16>, <18> or <24>. + +Required sub-node: +- display-timings : Refer to binding doc display-timing.txt for details. 
+ +Examples: + +lcdif@80030000 { + compatible = "fsl,imx28-lcdif"; + reg = <0x80030000 2000>; + interrupts = <38 86>; + + display: display { + bits-per-pixel = <32>; + bus-width = <24>; + + display-timings { + native-mode = <&timing0>; + timing0: timing0 { + clock-frequency = <33500000>; + hactive = <800>; + vactive = <480>; + hfront-porch = <164>; + hback-porch = <89>; + hsync-len = <10>; + vback-porch = <23>; + vfront-porch = <10>; + vsync-len = <10>; + hsync-active = <0>; + vsync-active = <0>; + de-active = <1>; + pixelclk-active = <0>; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/fb/sm501fb.txt b/Documentation/devicetree/bindings/fb/sm501fb.txt new file mode 100644 index 0000000000000000000000000000000000000000..9d9f0098092b927b48dabbf198ee252e91d08a58 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/sm501fb.txt @@ -0,0 +1,34 @@ +* SM SM501 + +The SM SM501 is a LCD controller, with proper hardware, it can also +drive DVI monitors. + +Required properties: +- compatible : should be "smi,sm501". +- reg : contain two entries: + - First entry: System Configuration register + - Second entry: IO space (Display Controller register) +- interrupts : SMI interrupt to the cpu should be described here. +- interrupt-parent : the phandle for the interrupt controller that + services interrupts for this device. + +Optional properties: +- mode : select a video mode: + x[-][@] +- edid : verbatim EDID data block describing attached display. + Data from the detailed timing descriptor will be used to + program the display controller. +- little-endian: available on big endian systems, to + set different foreign endian. +- big-endian: available on little endian systems, to + set different foreign endian. + +Example for MPC5200: + display@1,0 { + compatible = "smi,sm501"; + reg = <1 0x00000000 0x00800000 + 1 0x03e00000 0x00200000>; + interrupts = <1 1 3>; + mode = "640x480-32@60"; + edid = [edid-data]; + }; diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt index e4d8c1487ad1419b3374d94ab0567ee5b4c58908..a764fcdff19a890757d7c59c53a5a1ce844984e6 100644 --- a/Documentation/devicetree/bindings/pci/msm_pcie.txt +++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt @@ -74,6 +74,8 @@ Optional Properties: - qcom,max-link-speed: Max Gen speed Root complex supports. - qcom,n-fts: The number of fast training sequences sent when the link state is changed from L0s to L0. + - qcom,phy-power-down-offset: Offset from PCIe PHY base to control the power state + of the PHY. - qcom,pcie-phy-ver: version of PCIe PHY. - qcom,phy-sequence: The initialization sequence to bring up the PCIe PHY. Should be specified in groups (offset, value, delay). 
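+	For illustration, each group is one (offset, value, delay) triplet;
+	the register offsets and values in this sketch are hypothetical
+	placeholders, not a real PHY initialization sequence:
+
+		qcom,phy-sequence = <0x0840 0x03 0x0>,
+				<0x0094 0x08 0x0>,
+				<0x0154 0x34 0x0>;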
@@ -274,6 +276,7 @@ Example: qcom,wr-halt-size = <0xa>; /* 1KB */ qcom,slv-addr-space-size = <0x1000000>; /* 16MB */ qcom,phy-status-offset = <0x800>; + qcom,phy-power-down-offset = <0x840>; qcom,cpl-timeout = <0x2>; iommus = <&anoc0_smmu>; diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt index ac31663c9d9974df12ee09f3ed5c979ab6d546b0..15611971426db68acc56224d077f0f1e6304f83d 100644 --- a/Documentation/devicetree/bindings/platform/msm/ipa.txt +++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt @@ -141,14 +141,12 @@ IPA SMMU sub nodes IPA SMP2P sub nodes --compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from +-compatible: "qcom,smp2p-map-ipa-1-out" - represents the out smp2p from ipa driver to modem. --compatible: "qcom,smp2pgpio-map-ipa-1-in" - represents the in gpio to +-compatible: "qcom,smp2p-map-ipa-1-in" - represents the in smp2p to ipa driver from modem. --gpios: Binding to the gpio defined in XXX-smp2p.dtsi - Example: @@ -201,15 +199,13 @@ qcom,ipa@fd4c0000 { qcom,descriptor-fifo-size = <0x300>; }; - /* smp2p gpio information */ - qcom,smp2pgpio_map_ipa_1_out { - compatible = "qcom,smp2pgpio-map-ipa-1-out"; - gpios = <&smp2pgpio_ipa_1_out 0 0>; + /* smp2p information */ + qcom,smp2p_map_ipa_1_out { + compatible = "qcom,smp2p-map-ipa-1-out"; }; - qcom,smp2pgpio_map_ipa_1_in { - compatible = "qcom,smp2pgpio-map-ipa-1-in"; - gpios = <&smp2pgpio_ipa_1_in 0 0>; + qcom,smp2p_map_ipa_1_in { + compatible = "qcom,smp2p-map-ipa-1-in"; }; ipa_smmu_ap: ipa_smmu_ap { diff --git a/Documentation/devicetree/bindings/platform/msm/ipa_mhi_proxy.txt b/Documentation/devicetree/bindings/platform/msm/ipa_mhi_proxy.txt new file mode 100644 index 0000000000000000000000000000000000000000..d3483d81572c3052d86228259a8ca44a18928463 --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/ipa_mhi_proxy.txt @@ -0,0 +1,27 @@ +* Qualcomm Technologies, Inc. IPA MHI proxy driver module + +This module enables modem to modem communication using IPA +and MHI. + +Required properties: +- compatible: Must be "qcom,ipa-mhi-proxy" +- qcom,mhi-chdb-base: MHI channel doorbell base address in MMIO space +- qcom,mhi-erdb-base: MHI event doorbell base address in MMIO space + +Optional: +- qcom,ctrl-iova: Pair of start address and size of the IOVA space + dedicated for MHI control structures + (such as transfer rings and event rings). + If not present, SMMU S1 is considered to be in bypass mode. +- qcom,data-iova: Pair of start address and size of the IOVA space + dedicated for MHI data buffers. + If not present, SMMU S1 is considered to be in bypass mode. + +Example: + imp: qcom,ipa-mhi-proxy { + compatible = "qcom,ipa-mhi-proxy"; + qcom,ctrl-iova = <0x00010000 0x0FFF0000>; + qcom,data-iova = <0x10000000 0x0FFFFFFF>; + qcom,mhi-chdb-base = <0x40300300>; + qcom,mhi-erdb-base = <0x40300700>; + }; diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt index 3002e4ce6b517cc1da47f7ba8acfff2c6ce3a649..dcb4bda84f2bd4707049cf7727172a7ee3ae2ad2 100644 --- a/Documentation/devicetree/bindings/thermal/tsens.txt +++ b/Documentation/devicetree/bindings/thermal/tsens.txt @@ -21,7 +21,7 @@ Required properties: should be "qcom,tsens24xx" for 2.4 TSENS controller. should be "qcom,msm8937-tsens" for 8937 TSENS driver. should be "qcom,qcs405-tsens" for QCS405 TSENS driver. - should be "qcom,sdm640-tsens" for 640 TSENS driver. 
+ should be "qcom,sm6150-tsens" for 6150 TSENS driver. The compatible property is used to identify the respective controller to use for the corresponding SoC. diff --git a/arch/arm/configs/qcs405-perf_defconfig b/arch/arm/configs/qcs405-perf_defconfig index cd1dfe5b0910705e24543e268f7c0de1e5bdac95..82c5f5a9d2e7499c34dfb148167b4f2373593fdc 100644 --- a/arch/arm/configs/qcs405-perf_defconfig +++ b/arch/arm/configs/qcs405-perf_defconfig @@ -252,6 +252,14 @@ CONFIG_SPI_SPIDEV=y CONFIG_PINCTRL_QCS405=y CONFIG_GPIOLIB=y CONFIG_THERMAL=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FAN53555=y diff --git a/arch/arm/configs/qcs405_defconfig b/arch/arm/configs/qcs405_defconfig index 263182e8027faed475e9665bb974c832704d7227..23a0718ad009b3b9da98337800a6bb296adcd192 100644 --- a/arch/arm/configs/qcs405_defconfig +++ b/arch/arm/configs/qcs405_defconfig @@ -261,6 +261,14 @@ CONFIG_PINCTRL_QCS405=y CONFIG_GPIOLIB=y CONFIG_POWER_SUPPLY=y CONFIG_THERMAL=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FAN53555=y diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 05b24751131e6cfb0ef4ddb88c76f8370bf8adca..57eeeaff9a6f2046e197dbc3fce1ddda7d1b177b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -645,6 +645,13 @@ config HOTPLUG_CPU Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. +config ARCH_ENABLE_MEMORY_HOTPLUG + depends on !NUMA + def_bool y + +config ARCH_ENABLE_MEMORY_HOTREMOVE + def_bool y + # The GPIO number here must be sorted by descending number. In case of # a multiplatform kernel, we just want the highest value required by the # selected platforms. @@ -762,6 +769,10 @@ config ARM64_DMA_IOMMU_ALIGNMENT endif +config ARCH_MEMORY_PROBE + def_bool y + depends on MEMORY_HOTPLUG + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" ---help--- diff --git a/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi b/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ff42137a9bc1442522a13f0e50753efa29ae073b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi @@ -0,0 +1,1070 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + + replicator_qdss: replicator@6046000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b909>; + + reg = <0x6046000 0x1000>; + reg-names = "replicator-base"; + + coresight-name = "coresight-replicator-qdss"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + replicator_out_tmc_etr: endpoint { + remote-endpoint= + <&tmc_etr_in_replicator>; + }; + }; + + port@1 { + reg = <0>; + replicator_in_tmc_etf: endpoint { + slave-mode; + remote-endpoint= + <&tmc_etf_out_replicator>; + }; + }; + }; + }; + + tmc_etr: tmc@6048000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b961>; + + reg = <0x6048000 0x1000>, + <0x6064000 0x15000>; + reg-names = "tmc-base", "bam-base"; + + arm,buffer-size = <0x400000>; + + coresight-name = "coresight-tmc-etr"; + coresight-ctis = <&cti0>; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + interrupts = ; + interrupt-names = "byte-cntr-irq"; + + port { + tmc_etr_in_replicator: endpoint { + slave-mode; + remote-endpoint = <&replicator_out_tmc_etr>; + }; + }; + }; + + tmc_etf: tmc@6047000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b961>; + + reg = <0x6047000 0x1000>; + reg-names = "tmc-base"; + + coresight-name = "coresight-tmc-etf"; + coresight-ctis = <&cti0>; + arm,default-sink; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + tmc_etf_out_replicator: endpoint { + remote-endpoint = + <&replicator_in_tmc_etf>; + }; + }; + + port@1 { + reg = <0>; + tmc_etf_in_funnel_merg: endpoint { + slave-mode; + remote-endpoint = + <&funnel_merg_out_tmc_etf>; + }; + }; + }; + + }; + + funnel_merg: funnel@6045000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6045000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-merg"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_merg_out_tmc_etf: endpoint { + remote-endpoint = + <&tmc_etf_in_funnel_merg>; + }; + }; + + port@1 { + reg = <0>; + funnel_merg_in_funnel_in0: endpoint { + slave-mode; + remote-endpoint = + <&funnel_in0_out_funnel_merg>; + }; + }; + + port@2 { + reg = <1>; + funnel_merg_in_funnel_in1: endpoint { + slave-mode; + remote-endpoint = + <&funnel_in1_out_funnel_merg>; + }; + }; + + port@3 { + reg = <2>; + funnel_merg_in_funnel_in2: endpoint { + slave-mode; + remote-endpoint = + <&funnel_in2_out_funnel_merg>; + }; + }; + }; + }; + + funnel_in0: funnel@6041000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6041000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-in0"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_in0_out_funnel_merg: endpoint { + remote-endpoint = + <&funnel_merg_in_funnel_in0>; + }; + }; + + port@1 { + reg = <0>; + funnel_in0_in_rpm_etm0: endpoint { + slave-mode; + remote-endpoint = + <&rpm_etm0_out_funnel_in0>; + }; + }; + + port@2 { + reg = <6>; + funnel_in0_in_funnel_qatb: 
endpoint { + slave-mode; + remote-endpoint = + <&funnel_qatb_out_funnel_in0>; + }; + }; + + port@3 { + reg = <7>; + funnel_in0_in_stm: endpoint { + slave-mode; + remote-endpoint = + <&stm_out_funnel_in0>; + }; + }; + }; + }; + + funnel_in1: funnel@6042000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6042000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-in1"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_in1_out_funnel_merg: endpoint { + remote-endpoint = + <&funnel_merg_in_funnel_in1>; + }; + }; + port@1 { + reg = <3>; + funnel_in1_in_audio_etm0: endpoint { + slave-mode; + remote-endpoint = + <&audio_etm0_out_funnel_in1>; + }; + }; + port@2 { + reg = <4>; + funnel_in1_in_modem_etm0: endpoint { + slave-mode; + remote-endpoint = + <&modem_etm0_out_funnel_in1>; + }; + }; + }; + }; + + funnel_in2: funnel@6043000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6043000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-in2"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_in2_out_funnel_merg: endpoint { + remote-endpoint = + <&funnel_merg_in_funnel_in2>; + }; + }; + + port@1 { + reg = <3>; + funnel_in2_in_turing_etm0: endpoint { + slave-mode; + remote-endpoint = + <&turing_etm0_out_funnel_in2>; + }; + }; + + port@2 { + reg = <7>; + funnel_in2_in_funnel_apss: endpoint { + slave-mode; + remote-endpoint = + <&funnel_apss_out_funnel_in2>; + }; + }; + }; + }; + + stm: stm@6002000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b962>; + + reg = <0x6002000 0x1000>, + <0x09000000 0x1000000>; + reg-names = "stm-base", "stm-stimulus-base"; + + coresight-name = "coresight-stm"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + stm_out_funnel_in0: endpoint { + remote-endpoint = <&funnel_in0_in_stm>; + }; + }; + }; + + tpda: tpda@6004000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x6004000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda"; + + qcom,tpda-atid = <65>; + qcom,bc-elem-size = <10 32>, + <13 32>; + qcom,tc-elem-size = <13 32>; + qcom,dsb-elem-size = <0 32>, + <2 32>, + <3 32>, + <5 32>, + <6 32>, + <10 32>, + <11 32>, + <13 32>; + qcom,cmb-elem-size = <3 64>, + <7 64>, + <13 64>; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_out_funnel_qatb: endpoint { + remote-endpoint = + <&funnel_qatb_in_tpda>; + }; + }; + + port@1 { + reg = <0>; + tpda_in_tpdm_wcss: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_wcss_out_tpda>; + }; + }; + + port@2 { + reg = <7>; + tpda_in_tpdm_dcc: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_dcc_out_tpda>; + }; + }; + + port@3 { + reg = <9>; + tpda_in_tpdm_0_north: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_0_north_out_tpda>; + }; + }; + port@4 { + reg = <10>; + tpda_in_tpdm_1_south: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_1_south_out_tpda>; + }; + }; + + port@5 { + reg = <11>; + tpda_in_tpdm_2_center: 
endpoint { + slave-mode; + remote-endpoint = + <&tpdm_2_center_out_tpda>; + }; + }; + + + port@6 { + reg = <12>; + tpda_in_tpdm_3_center: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_3_center_out_tpda>; + }; + }; + }; + }; + + tpdm_0_north: tpdm@6114000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6114000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-0-north"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + qcom,msr-fix-req; + + port { + tpdm_0_north_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_0_north>; + }; + }; + }; + + tpdm_1_south: tpdm@6115000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6115000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-1-south"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + qcom,msr-fix-req; + + port { + tpdm_1_south_out_tpda: endpoint { + remote-endpoint = + <&tpda_in_tpdm_1_south>; + }; + }; + }; + + + tpdm_2_center: tpdm@6116000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6116000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-2-center"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + tpdm_2_center_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_2_center>; + }; + }; + }; + + + tpdm_3_center: tpdm@6117000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6117000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-3-center"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + tpdm_3_center_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_3_center>; + }; + }; + }; + + tpdm_dcc: tpdm@6178000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6178000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-dcc"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + qcom,msr-fix-req; + + port { + tpdm_dcc_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_dcc>; + }; + }; + }; + + tpdm_wcss: tpdm@1440000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x1440000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-wcss"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + tpdm_wcss_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_wcss>; + }; + }; + }; + + funnel_qatb: funnel@6005000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6005000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-qatb"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_qatb_out_funnel_in0: endpoint { + remote-endpoint = + <&funnel_in0_in_funnel_qatb>; + }; + }; + + port@1 { + reg = <1>; + funnel_qatb_in_tpda: endpoint { + slave-mode; + remote-endpoint = + <&tpda_out_funnel_qatb>; + }; + }; + }; + }; + + cti_cpu0: cti@61b8000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg 
= <0x61b8000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu0"; + cpu = <&CPU0>; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti_cpu1: cti@61b9000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x61b9000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu1"; + cpu = <&CPU1>; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti_cpu2: cti@61ba000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x61ba000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu2"; + cpu = <&CPU2>; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti_cpu3: cti@61bb000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x61bb000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu3"; + cpu = <&CPU3>; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti0: cti@6010000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6010000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti0"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti1: cti@6011000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6011000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti1"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti2: cti@6012000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6012000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti2"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti3: cti@6013000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6013000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti3"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti4: cti@6014000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6014000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti4"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti5: cti@6015000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6015000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti5"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti6: cti@6016000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6016000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti6"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti7: cti@6017000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6017000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti7"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc 
QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti8: cti@6018000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6018000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti8"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti9: cti@6019000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6019000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti9"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti10: cti@601a000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601a000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti10"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti11: cti@601b000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601b000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti11"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti12: cti@601c000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601c000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti12"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti13: cti@601d000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601d000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti13"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti14: cti@601e000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601e000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti14"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + cti15: cti@601f000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601f000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti15"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + }; + + rpm_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-rpm-etm0"; + qcom,inst-id = <4>; + + port{ + rpm_etm0_out_funnel_in0: endpoint { + remote-endpoint = + <&funnel_in0_in_rpm_etm0>; + }; + }; + }; + + turing_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-turing-etm0"; + qcom,inst-id = <13>; + + port{ + turing_etm0_out_funnel_in2: endpoint { + remote-endpoint = + <&funnel_in2_in_turing_etm0>; + }; + }; + }; + + modem_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-modem-etm0"; + qcom,inst-id = <2>; + + port { + modem_etm0_out_funnel_in1: endpoint { + remote-endpoint = + <&funnel_in1_in_modem_etm0>; + }; + }; + }; + + audio_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-audio-etm0"; + qcom,inst-id = <5>; + + port { + audio_etm0_out_funnel_in1: endpoint { + remote-endpoint = + <&funnel_in1_in_audio_etm0>; + }; + }; + }; + + etm0: etm@61bc000 { + compatible = "arm,primecell"; + arm,primecell-periphid = 
<0x000bb95d>; + + reg = <0x61bc000 0x1000>; + cpu = <&CPU0>; + + coresight-name = "coresight-etm0"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + etm0_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm0>; + }; + }; + }; + + etm1: etm@61bd000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x61bd000 0x1000>; + cpu = <&CPU1>; + + coresight-name = "coresight-etm1"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + etm1_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm1>; + }; + }; + }; + + etm2: etm@61be000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x61be000 0x1000>; + cpu = <&CPU2>; + + coresight-name = "coresight-etm2"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + etm2_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm2>; + }; + }; + }; + + etm3: etm@61bf000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x61bf000 0x1000>; + cpu = <&CPU3>; + + coresight-name = "coresight-etm3"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port { + etm3_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm3>; + }; + }; + }; + + funnel_apss: funnel@61a1000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x61a1000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-apss"; + + clocks = <&clock_gcc QDSS_CLK>, + <&clock_gcc QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_apss_out_funnel_in2: endpoint { + remote-endpoint = + <&funnel_in2_in_funnel_apss>; + }; + }; + port@1 { + reg = <0>; + funnel_apss_in_etm0: endpoint { + slave-mode; + remote-endpoint = + <&etm0_out_funnel_apss>; + }; + }; + + port@2 { + reg = <1>; + funnel_apss_in_etm1: endpoint { + slave-mode; + remote-endpoint = + <&etm1_out_funnel_apss>; + }; + }; + + port@3 { + reg = <2>; + funnel_apss_in_etm2: endpoint { + slave-mode; + remote-endpoint = + <&etm2_out_funnel_apss>; + }; + }; + + port@4 { + reg = <3>; + funnel_apss_in_etm3: endpoint { + slave-mode; + remote-endpoint = + <&etm3_out_funnel_apss>; + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi index b34ce592856cfbfc7897a879dfae6f438b710299..3f83fd04b6de1712608b59de3d98f47f599c0938 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi @@ -43,6 +43,7 @@ reg = <0x100>; enable-method = "psci"; next-level-cache = <&L2_1>; + #cooling-cells = <2>; L2_1: l2-cache { compatible = "arm,arch-cache"; cache-level = <2>; @@ -65,6 +66,7 @@ reg = <0x101>; enable-method = "psci"; next-level-cache = <&L2_1>; + #cooling-cells = <2>; L1_I_101: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x8800>; @@ -81,6 +83,7 @@ reg = <0x102>; enable-method = "psci"; next-level-cache = <&L2_1>; + #cooling-cells = <2>; L1_I_102: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x8800>; @@ -97,6 +100,7 @@ reg = <0x103>; enable-method = "psci"; next-level-cache = <&L2_1>; + #cooling-cells = <2>; L1_I_103: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = 
<0x8800>; diff --git a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi index 3546fb08a5ed3ffe9baa0915b0dd9b7f7a61bc6d..5d2842fb2ad2c6b90b43f54f33e750d67a3283b7 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi @@ -50,6 +50,14 @@ ; qcom,use-voltage-level; }; + + cx_cdev: cx-cdev-lvl { + compatible = "qcom,regulator-cooling-device"; + regulator-cdev-supply = <&pms405_s1_floor_level>; + regulator-levels = ; + #cooling-cells = <2>; + }; }; /* PMS405 S2 - VDD_LPI_CX supply */ diff --git a/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi b/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi index d9d3d53be1ec198cc1f1b2a11d8db587199b6862..12fdb0f555ce3b143f2ce39550105fe06680c3f2 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi @@ -81,6 +81,10 @@ rpm-standalone; }; +&thermal_zones { + /delete-node/ aoss-lowf; +}; + #include "qcs405-stub-regulator.dtsi" &sdhc_1 { diff --git a/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi b/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi index 63a49cd149ec6d3e47c2a19ffcd2eba916e63613..64559e4f8d32eae4cd9bbce1ccb6b5bb06a38711 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi @@ -12,6 +12,36 @@ #include +&soc { + qmi-tmd-devices { + compatible = "qcom,qmi-cooling-devices"; + + modem { + qcom,instance-id = <0x0>; + + modem_pa: modem_pa { + qcom,qmi-dev-name = "pa"; + #cooling-cells = <2>; + }; + + modem_proc: modem_proc { + qcom,qmi-dev-name = "modem"; + #cooling-cells = <2>; + }; + + modem_current: modem_current { + qcom,qmi-dev-name = "modem_current"; + #cooling-cells = <2>; + }; + + modem_vdd: modem_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + }; +}; + &thermal_zones { aoss-usr { polling-delay-passive = <0>; @@ -152,4 +182,175 @@ }; }; }; + + cpuss-max-step { + polling-delay-passive = <50>; + polling-delay = <100>; + thermal-governor = "step_wise"; + trips { + cpu_trip:cpu-trip { + temperature = <85000>; + hysteresis = <0>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&cpu_trip>; + cooling-device = + <&CPU0 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-1)>; + }; + cpu1_cdev { + trip = <&cpu_trip>; + cooling-device = + <&CPU1 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-1)>; + }; + cpu2_cdev { + trip = <&cpu_trip>; + cooling-device = + <&CPU2 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-1)>; + }; + cpu3_cdev { + trip = <&cpu_trip>; + cooling-device = + <&CPU3 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-1)>; + }; + }; + }; + + gpu-step { + polling-delay-passive = <250>; + polling-delay = <0>; + thermal-sensors = <&tsens0 6>; + thermal-governor = "step_wise"; + trips { + gpu_step_trip: gpu-step-trip { + temperature = <95000>; + hysteresis = <0>; + type = "passive"; + }; + }; + }; + + cpuss-0-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 1>; + thermal-governor = "step_wise"; + trips { + cpuss_0_step_trip: cpuss-0-step-trip { + temperature = <105000>; + hysteresis = <15000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&cpuss_0_step_trip>; + cooling-device = + <&CPU0 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpuss-1-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 2>; + thermal-governor = "step_wise"; + trips { + cpuss_1_step_trip: cpuss-1-step-trip { + temperature = <105000>; + hysteresis = <15000>; + 
type = "passive"; + }; + }; + cooling-maps { + cpu1_cdev { + trip = <&cpuss_1_step_trip>; + cooling-device = + <&CPU1 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpuss-2-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 3>; + thermal-governor = "step_wise"; + trips { + cpuss_2_step_trip: cpuss-2-step-trip { + temperature = <105000>; + hysteresis = <15000>; + type = "passive"; + }; + }; + cooling-maps { + cpu2_cdev { + trip = <&cpuss_2_step_trip>; + cooling-device = + <&CPU2 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpuss-3-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 4>; + thermal-governor = "step_wise"; + trips { + cpuss_3_step_trip: cpuss-3-step-trip { + temperature = <105000>; + hysteresis = <15000>; + type = "passive"; + }; + }; + cooling-maps { + cpu3_cdev { + trip = <&cpuss_3_step_trip>; + cooling-device = + <&CPU3 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + aoss-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 0>; + tracks-low; + trips { + aoss_lowf: aoss-lowf { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&aoss_lowf>; + cooling-device = <&CPU0 (THERMAL_MAX_LIMIT-2) + (THERMAL_MAX_LIMIT-2)>; + }; + cx_vdd_cdev { + trip = <&aoss_lowf>; + cooling-device = <&cx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&aoss_lowf>; + cooling-device = <&modem_vdd 0 0>; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index c9ea22a9501b70d32984c9b8df3805db76661fc6..df5ca407bf2731009b0930e0caac586a4a664f1f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -273,12 +273,12 @@ }; - dcc: dcc@b3000 { - compatible = "qcom,dcc"; - reg = <0xb3000 0x1000>, - <0xb4000 0x2000>; + dcc: dcc_v2@b2000 { + compatible = "qcom,dcc_v2"; + reg = <0x000b2000 0x1000>, + <0x000bf800 0x800>; reg-names = "dcc-base", "dcc-ram-base"; - qcom,save-reg; + dcc-ram-offset = <0x400>; }; rpm_bus: qcom,rpm-smd { @@ -658,10 +658,10 @@ }; #include "qcs405-gdsc.dtsi" -#include "qcs405-thermal.dtsi" #include "pms405.dtsi" #include "pms405-rpm-regulator.dtsi" #include "qcs405-regulator.dtsi" +#include "qcs405-thermal.dtsi" &gdsc_mdss { status = "ok"; @@ -670,3 +670,5 @@ &gdsc_oxili_gx { status = "ok"; }; + +#include "qcs405-coresight.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index 6ceba108843ef8ed88951e433ae16359403a8a6b..fac1e15aacf02dcdb871af9701e2d630cf3e716b 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -334,6 +334,23 @@ soc: soc { }; + firmware: firmware { + android { + compatible = "android,firmware"; + fstab { + compatible = "android,fstab"; + vendor { + compatible = "android,vendor"; + dev = "/dev/block/platform/soc/7c4000.sdhci/by-name/vendor"; + type = "ext4"; + mnt_flags = "ro,barrier=1,discard"; + fsmgr_flags = "wait,slotselect,avb"; + status = "ok"; + }; + }; + }; + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi index 23bf63b975317e16c6afdebab1245e6f344f8f3a..c8cbf4000dd1a8ea7d03cf0695a03b1498cc5759 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi @@ -81,11 +81,11 @@ 
linux,can-disable; }; }; +}; - sound-tavil { - qcom,model = "sm8150-tavil-cdp-snd-card"; - qcom,us-euro-gpios = <&tavil_us_euro_switch>; - }; +&snd_934x { + qcom,model = "sm8150-tavil-cdp-snd-card"; + qcom,us-euro-gpios = <&tavil_us_euro_switch>; }; &dsi_sharp_4k_dsc_cmd { diff --git a/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi b/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi index 8247f89057c87b176656b3a2572a775b2510d3f4..5da67a724097827ff04a75642d9360d74c447f30 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi @@ -1968,6 +1968,9 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,cti-gpio-trigout = <4>; + pinctrl-names = "cti-trigout-pctrl"; + pinctrl-0 = <&trigout_a>; }; cti3: cti@6013000 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi index 15138c70df066f96ebe56b0e6b057f55972e4a6a..fe51104a1b2557f31718ea7f80e801c51be10c54 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi @@ -214,6 +214,7 @@ qcom,slv-addr-space-size = <0x4000000>; qcom,phy-status-offset = <0x814>; + qcom,phy-power-down-offset = <0x840>; qcom,boot-option = <0x1>; @@ -530,6 +531,7 @@ qcom,slv-addr-space-size = <0x20000000>; qcom,phy-status-offset = <0xa14>; + qcom,phy-power-down-offset = <0xa40>; qcom,boot-option = <0x1>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi index 80d43a2cf6224ff05fcfce78f19a38d240bf0d5f..618c6044d1e5dcaae53b70d831cd672ca00fd72a 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi @@ -3926,5 +3926,17 @@ drive-strength = <2>; /* 2 MA */ }; }; + + trigout_a: trigout_a { + mux { + pins = "gpio49"; + function = "qdss_cti"; + }; + config { + pins = "gpio49"; + drive-strength = <2>; + bias-disable; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi index 258b23860f1bad1d1c26f6413d22d0f9a8874e87..c003c56ff41cb63595aa32f2d882b69310fd9039 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi @@ -54,3 +54,13 @@ &pcie1 { dma-coherent; }; + +&soc { + imp: qcom,ipa-mhi-proxy { + compatible = "qcom,ipa-mhi-proxy"; + qcom,ctrl-iova = <0x00010000 0x0FFF0000>; + qcom,data-iova = <0x10000000 0x0FFFFFFF>; + qcom,mhi-chdb-base = <0x40300300>; + qcom,mhi-erdb-base = <0x40300700>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi index 3b8a17867a989ed65f1b5375bb16bf4a56255d95..97a4a7690b9ed21b6e94f9041c1f77445b4c5aff 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi @@ -55,15 +55,24 @@ qcom,dwc-usb3-msm-tx-fifo-size = <27696>; qcom,msm-bus,name = "usb0"; - qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-cases = <3>; qcom,msm-bus,num-paths = <3>; qcom,msm-bus,vectors-KBps = + /* suspend vote */ , , , + + /* nominal vote */ , , + , + + /* svs vote */ + , + , ; dwc3@a600000 { diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index ac203b8226a7ba0e366496e59675e72a35c17f79..21e64d9c3aa1222dfac23af353ffe232f7d0463b 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -1087,11 +1087,13 @@ qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = < 300000 300000000 >, - < 576000 576000000 >, - < 672000 768000000 >, - < 864000 
960000000 >, - < 1171200 1228800000 >, - < 1267200 1344000000 >; + < 480000 403200000 >, + < 672000 480000000 >, + < 768000 576000000 >, + < 864000 672000000 >, + < 979200 768000000 >, + < 1075200 864000000 >, + < 1267200 960000000 >; }; cpu4_cpu_l3_lat: qcom,cpu4-cpu-l3-lat { @@ -1108,11 +1110,11 @@ qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = < 300000 300000000 >, - < 576000 576000000 >, - < 768000 768000000 >, - < 960000 960000000 >, - < 1248000 1228800000 >, - < 1593600 1344000000 >; + < 768000 576000000 >, + < 1152000 768000000 >, + < 1344000 960000000 >, + < 1689600 1228800000 >, + < 2016000 1344000000 >; }; cpu0_cpu_llcc_lat: qcom,cpu0-cpu-llcc-lat { @@ -1131,11 +1133,9 @@ qcom,cachemiss-ev = <0x2A>; qcom,core-dev-table = < 300000 MHZ_TO_MBPS(150, 16) >, - < 576000 MHZ_TO_MBPS(200, 16) >, - < 672000 MHZ_TO_MBPS(403, 16) >, - < 864000 MHZ_TO_MBPS(533, 16) >, - < 1171200 MHZ_TO_MBPS(666, 16) >, - < 1267200 MHZ_TO_MBPS(777, 16) >; + < 768000 MHZ_TO_MBPS(200, 16) >, + < 1075200 MHZ_TO_MBPS(403, 16) >, + < 1267200 MHZ_TO_MBPS(403, 16) >; }; cpu4_cpu_llcc_lat: qcom,cpu4-cpu-llcc-lat { @@ -1156,9 +1156,10 @@ < 300000 MHZ_TO_MBPS(150, 16) >, < 576000 MHZ_TO_MBPS(200, 16) >, < 768000 MHZ_TO_MBPS(403, 16) >, - < 960000 MHZ_TO_MBPS(533, 16) >, - < 1248000 MHZ_TO_MBPS(666, 16) >, - < 1593600 MHZ_TO_MBPS(777, 16) >; + < 960000 MHZ_TO_MBPS(403, 16) >, + < 1248000 MHZ_TO_MBPS(533, 16) >, + < 1728000 MHZ_TO_MBPS(666, 16) >, + < 2016000 MHZ_TO_MBPS(777, 16) >; }; cpu0_llcc_ddr_lat: qcom,cpu0-llcc-ddr-lat { @@ -1177,11 +1178,9 @@ qcom,cachemiss-ev = <0x1000>; qcom,core-dev-table = < 300000 MHZ_TO_MBPS( 200, 4) >, - < 576000 MHZ_TO_MBPS( 451, 4) >, - < 672000 MHZ_TO_MBPS( 768, 4) >, - < 864000 MHZ_TO_MBPS(1017, 4) >, - < 1171200 MHZ_TO_MBPS(1555, 4) >, - < 1267200 MHZ_TO_MBPS(1804, 4) >; + < 768000 MHZ_TO_MBPS( 451, 4) >, + < 1075200 MHZ_TO_MBPS( 547, 4) >, + < 1267200 MHZ_TO_MBPS( 768, 4) >; }; cpu4_llcc_ddr_lat: qcom,cpu4-llcc-ddr-lat { @@ -1201,11 +1200,12 @@ qcom,core-dev-table = < 300000 MHZ_TO_MBPS( 200, 4) >, < 576000 MHZ_TO_MBPS( 451, 4) >, - < 768000 MHZ_TO_MBPS( 768, 4) >, - < 960000 MHZ_TO_MBPS(1017, 4) >, - < 1248000 MHZ_TO_MBPS(1555, 4) >, - < 1593600 MHZ_TO_MBPS(1804, 4) >, - < 1689600 MHZ_TO_MBPS(2092, 4) >; + < 768000 MHZ_TO_MBPS( 547, 4) >, + < 960000 MHZ_TO_MBPS( 768, 4) >, + < 1248000 MHZ_TO_MBPS(1017, 4) >, + < 1728000 MHZ_TO_MBPS(1555, 4) >, + < 2016000 MHZ_TO_MBPS(1804, 4) >, + < 2054400 MHZ_TO_MBPS(2092, 4) >; }; cpu4_cpu_ddr_latfloor: qcom,cpu4-cpu-ddr-latfloor { @@ -2093,10 +2093,10 @@ <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>, <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>; freq-table-hz = - <37500000 300000000>, + <50000000 200000000>, <0 0>, <0 0>, - <37500000 300000000>, + <37500000 150000000>, <75000000 300000000>, <0 0>, <0 0>, @@ -2188,14 +2188,6 @@ dma-coherent; }; - qcom,msm_fastrpc_compute_cb3 { - compatible = "qcom,msm-fastrpc-compute-cb"; - label = "cdsprpc-smd"; - iommus = <&apps_smmu 0x3 0x3440>, - <&apps_smmu 0x23 0x3400>; - dma-coherent; - }; - qcom,msm_fastrpc_compute_cb4 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; @@ -2233,19 +2225,28 @@ dma-coherent; }; - qcom,msm_fastrpc_compute_cb9 { + qcom,msm_fastrpc_compute_cb2 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; - qcom,secure-context-bank; - iommus = <&apps_smmu 0x9 0x3460>; + iommus = <&apps_smmu 0x2 0x3440>, + <&apps_smmu 0x22 0x3400>; dma-coherent; }; - qcom,msm_fastrpc_compute_cb2 { + qcom,msm_fastrpc_compute_cb3 { compatible = "qcom,msm-fastrpc-compute-cb"; 
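The qcom,core-dev-table updates above map a CPU frequency (kHz, left column) to an L3/LLCC/DDR vote (Hz, or MB/s via MHZ_TO_MBPS). A plausible reading of such a table is "use the first row whose CPU frequency is at or above the current one, clamping to the last row"; the sketch below works through that lookup with the new cpu0_cpu_l3_lat rows so the retuned break points are easy to sanity-check. The helper is illustrative only, not the memlat governor code:

/* Illustrative lookup over a qcom,core-dev-table style map.
 * Rows are (CPU kHz -> device Hz); values below are the updated
 * cpu0_cpu_l3_lat entries from the hunk above.
 */
#include <stdio.h>

struct core_dev_row {
	unsigned int cpu_khz;
	unsigned long dev_hz;
};

static const struct core_dev_row cpu0_l3_map[] = {
	{  300000,  300000000UL },
	{  480000,  403200000UL },
	{  672000,  480000000UL },
	{  768000,  576000000UL },
	{  864000,  672000000UL },
	{  979200,  768000000UL },
	{ 1075200,  864000000UL },
	{ 1267200,  960000000UL },
};

static unsigned long core_to_dev_freq(unsigned int cpu_khz)
{
	size_t n = sizeof(cpu0_l3_map) / sizeof(cpu0_l3_map[0]);

	for (size_t i = 0; i < n; i++)
		if (cpu_khz <= cpu0_l3_map[i].cpu_khz)
			return cpu0_l3_map[i].dev_hz;
	return cpu0_l3_map[n - 1].dev_hz;	/* clamp to the top row */
}

int main(void)
{
	printf("900 MHz CPU -> %lu Hz L3 vote\n", core_to_dev_freq(900000));
	printf("2.0 GHz CPU -> %lu Hz L3 vote\n", core_to_dev_freq(2000000));
	return 0;
}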
label = "cdsprpc-smd"; - iommus = <&apps_smmu 0x2 0x3440>, - <&apps_smmu 0x22 0x3400>; + iommus = <&apps_smmu 0x3 0x3440>, + <&apps_smmu 0x1423 0x0>, + <&apps_smmu 0x2023 0x0>; + dma-coherent; + }; + + qcom,msm_fastrpc_compute_cb9 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "cdsprpc-smd"; + qcom,secure-context-bank; + iommus = <&apps_smmu 0x9 0x3460>; dma-coherent; }; @@ -3205,6 +3206,7 @@ reg = <0x0 0x200000>; reg-names = "rmtfs"; qcom,client-id = <0x00000001>; + qcom,guard-memory; }; qcom,msm_gsi { @@ -3243,30 +3245,35 @@ qcom,msm-bus,num-paths = <4>; qcom,msm-bus,vectors-KBps = /* No vote */ - <90 512 0 0>, - <90 585 0 0>, - <1 676 0 0>, - <143 777 0 0>, + , + , + , + , + /* SVS2 */ - <90 512 3616000 7232000>, - <90 585 300000 600000>, - <1 676 90000 180000>, /*gcc_config_noc_clk_src */ - <143 777 0 120>, /* IB defined for IPA2X_clk in MHz*/ + , + , + , + , + /* SVS */ - <90 512 6640000 13280000>, - <90 585 400000 800000>, - <1 676 100000 200000>, - <143 777 0 250>, /* IB defined for IPA2X_clk in MHz*/ + , + , + , + , + /* NOMINAL */ - <90 512 10400000 20800000>, - <90 585 800000 1600000>, - <1 676 200000 400000>, - <143 777 0 440>, /* IB defined for IPA2X_clk in MHz*/ + , + , + , + , + /* TURBO */ - <90 512 10400000 20800000>, - <90 585 960000 1920000>, - <1 676 266000 532000>, - <143 777 0 500>; /* IB defined for IPA clk in MHz*/ + , + , + , + ; + qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL", "TURBO"; qcom,throughput-threshold = <310 600 1000>; diff --git a/arch/arm64/configs/qcs405-perf_defconfig b/arch/arm64/configs/qcs405-perf_defconfig index fe97e9138a818568de649bdddc9b588eeed685e0..924c745c5d1a2ed3839e8d084d0c2214dd1bfed7 100644 --- a/arch/arm64/configs/qcs405-perf_defconfig +++ b/arch/arm64/configs/qcs405-perf_defconfig @@ -43,6 +43,7 @@ CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -249,6 +250,15 @@ CONFIG_SLIMBUS=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PINCTRL_QCS405=y CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_MFD_SYSCON=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y diff --git a/arch/arm64/configs/qcs405_defconfig b/arch/arm64/configs/qcs405_defconfig index d4644e9a00f40f6094ec650fc5a2475ec6e946f5..b858c18e51848574141180a2ac093ba849f86db2 100644 --- a/arch/arm64/configs/qcs405_defconfig +++ b/arch/arm64/configs/qcs405_defconfig @@ -49,6 +49,7 @@ CONFIG_PM_WAKELOCKS_LIMIT=0 CONFIG_PM_DEBUG=y CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -259,6 +260,15 @@ CONFIG_SLIMBUS=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PINCTRL_QCS405=y CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_MFD_SYSCON=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y @@ -401,11 +411,16 @@ CONFIG_IPC_LOGGING=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_LKDTM=y CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y CONFIG_CORESIGHT_STM=y CONFIG_CORESIGHT_CTI=y 
CONFIG_CORESIGHT_TPDA=y CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y diff --git a/arch/arm64/configs/sm8150-perf_defconfig b/arch/arm64/configs/sm8150-perf_defconfig index 6944ada2a504977f95bb7b322401d03581c3a54b..4910c404849beefb319254a61d9e4e5e9b97c3a9 100644 --- a/arch/arm64/configs/sm8150-perf_defconfig +++ b/arch/arm64/configs/sm8150-perf_defconfig @@ -463,6 +463,7 @@ CONFIG_IPA3=y CONFIG_IPA_WDI_UNIFIED_API=y CONFIG_RMNET_IPA3=y CONFIG_RNDIS_IPA=y +CONFIG_IPA3_MHI_PROXY=y CONFIG_IPA_UT=y CONFIG_MSM_11AD=m CONFIG_QCOM_MDSS_PLL=y diff --git a/arch/arm64/configs/sm8150_defconfig b/arch/arm64/configs/sm8150_defconfig index eef653aada37cf0314a11e0be359ae43345e9af9..139384e3e24d9b491027bbbca37a135611bcab15 100644 --- a/arch/arm64/configs/sm8150_defconfig +++ b/arch/arm64/configs/sm8150_defconfig @@ -483,6 +483,7 @@ CONFIG_IPA3=y CONFIG_IPA_WDI_UNIFIED_API=y CONFIG_RMNET_IPA3=y CONFIG_RNDIS_IPA=y +CONFIG_IPA3_MHI_PROXY=y CONFIG_IPA_UT=y CONFIG_MSM_11AD=m CONFIG_QCOM_MDSS_PLL=y diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 6dd83d75b82ab8b9808f8b60b3ac6252344222a8..dd6c38205afc5b01bedcf3fe1b2d1498d7e27f1c 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -87,6 +87,13 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, pgprot_t prot, bool page_mappings_only); extern void *fixmap_remap_fdt(phys_addr_t dt_phys); extern void mark_linear_text_alias_ro(void); +#ifdef CONFIG_MEMORY_HOTPLUG +extern void hotplug_paging(phys_addr_t start, phys_addr_t size); +#ifdef CONFIG_MEMORY_HOTREMOVE +extern void remove_pagetable(unsigned long start, + unsigned long end, bool direct); +#endif +#endif #endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index fe38e0ec2ebad83d96dd0dbf9c85b05b1fecf437..90ac66bd39ee81e7be8b2fb7cff65302c4ce7ce0 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -449,6 +449,11 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK; } +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long) __va(pmd_page_paddr(pmd)); +} + /* Find an entry in the third-level page table. */ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) @@ -500,6 +505,11 @@ static inline phys_addr_t pud_page_paddr(pud_t pud) return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK; } +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long) __va(pud_page_paddr(pud)); +} + /* Find an entry in the second-level page table. */ #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) @@ -552,6 +562,11 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd) return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK; } +static inline unsigned long pgd_page_vaddr(pgd_t pgd) +{ + return (unsigned long) __va(pgd_page_paddr(pgd)); +} + /* Find an entry in the frst-level page table. 
*/ #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 2734f7d213f48f78bb6796cf54a854adaea1e2ec..2bac45329bb218320f15e603d47b8377b74b3258 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -726,3 +726,113 @@ static int __init register_mem_limit_dumper(void) return 0; } __initcall(register_mem_limit_dumper); + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) +{ + pg_data_t *pgdat; + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long end_pfn = start_pfn + nr_pages; + unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); + int ret; + + if (end_pfn > max_sparsemem_pfn) { + pr_err("end_pfn too big"); + return -1; + } + hotplug_paging(start, size); + + /* + * Mark the first page in the range as unusable. This is needed + * because __add_section (within __add_pages) wants pfn_valid + * of it to be false, and in arm64 pfn falid is implemented by + * just checking at the nomap flag for existing blocks. + * + * A small trick here is that __add_section() requires only + * phys_start_pfn (that is the first pfn of a section) to be + * invalid. Regardless of whether it was assumed (by the function + * author) that all pfns within a section are either all valid + * or all invalid, it allows to avoid looping twice (once here, + * second when memblock_clear_nomap() is called) through all + * pfns of the section and modify only one pfn. Thanks to that, + * further, in __add_zone() only this very first pfn is skipped + * and corresponding page is not flagged reserved. Therefore it + * is enough to correct this setup only for it. + * + * When arch_add_memory() returns the walk_memory_range() function + * is called and passed with online_memory_block() callback, + * which execution finally reaches the memory_block_action() + * function, where also only the first pfn of a memory block is + * checked to be reserved. Above, it was first pfn of a section, + * here it is a block but + * (drivers/base/memory.c): + * sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; + * (include/linux/memory.h): + * #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) + * so we can consider block and section equivalently + */ + memblock_mark_nomap(start, 1<> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + struct page *page = pfn_to_page(start_pfn); + struct zone *zone; + int ret = 0; + + zone = page_zone(page); + ret = __remove_pages(zone, start_pfn, nr_pages); + WARN_ON_ONCE(ret); + + kernel_physical_mapping_remove(start, start + size); + + return ret; +} + +#endif /* CONFIG_MEMORY_HOTREMOVE */ +#endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3e350ae97369f196c92225ad83f2776f13b19dd5..a7c6f6239e34c8bf219d7a6dc02c0e1038c42900 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -688,6 +688,412 @@ void __init paging_init(void) SWAPPER_DIR_SIZE - PAGE_SIZE); } +#ifdef CONFIG_MEMORY_HOTPLUG +/* + * hotplug_paging() is used by memory hotplug to build new page tables + * for hot added memory. 
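arch_add_memory() above derives pfn ranges by shifting byte addresses right by PAGE_SHIFT and rejects anything past 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT), while the long comment leans on a memory block being a whole number of sections (sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE). The standalone snippet below just works that arithmetic with illustrative constants (4 KB pages, 48-bit MAX_PHYSMEM_BITS, 1 GB sections, all configuration dependent) so the limit check is easy to verify; it is not kernel code:

/* Worked pfn/section arithmetic behind arch_add_memory() above.
 * PAGE_SHIFT=12, MAX_PHYSMEM_BITS=48 and SECTION_SIZE_BITS=30 are
 * example values for a common arm64 configuration.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define MAX_PHYSMEM_BITS	48
#define SECTION_SIZE_BITS	30

int main(void)
{
	uint64_t start = 0x180000000ULL;	/* hypothetical hot-added base */
	uint64_t size  = 0x40000000ULL;		/* 1 GB */

	uint64_t start_pfn = start >> PAGE_SHIFT;
	uint64_t nr_pages  = size >> PAGE_SHIFT;
	uint64_t end_pfn   = start_pfn + nr_pages;
	uint64_t max_sparsemem_pfn = 1ULL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
	uint64_t section_pages = 1ULL << (SECTION_SIZE_BITS - PAGE_SHIFT);

	printf("start_pfn=%#llx nr_pages=%#llx end_pfn=%#llx\n",
	       (unsigned long long)start_pfn,
	       (unsigned long long)nr_pages,
	       (unsigned long long)end_pfn);
	printf("max_sparsemem_pfn=%#llx -> %s\n",
	       (unsigned long long)max_sparsemem_pfn,
	       end_pfn > max_sparsemem_pfn ? "rejected" : "accepted");
	printf("pages per section=%#llx (range spans %llu section(s))\n",
	       (unsigned long long)section_pages,
	       (unsigned long long)(nr_pages / section_pages));
	return 0;
}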
+ */ +void hotplug_paging(phys_addr_t start, phys_addr_t size) +{ + + struct page *pg; + phys_addr_t pgd_phys = pgd_pgtable_alloc(); + pgd_t *pgd = pgd_set_fixmap(pgd_phys); + + memcpy(pgd, swapper_pg_dir, PAGE_SIZE); + + __create_pgd_mapping(pgd, start, __phys_to_virt(start), size, + PAGE_KERNEL, pgd_pgtable_alloc, !debug_pagealloc_enabled()); + + cpu_replace_ttbr1(__va(pgd_phys)); + memcpy(swapper_pg_dir, pgd, PAGE_SIZE); + cpu_replace_ttbr1(swapper_pg_dir); + + pgd_clear_fixmap(); + + pg = phys_to_page(pgd_phys); + pgtable_page_dtor(pg); + __free_pages(pg, 0); +} + +#ifdef CONFIG_MEMORY_HOTREMOVE +#define PAGE_INUSE 0xFD + +static void free_pagetable(struct page *page, int order, bool direct) +{ + unsigned long magic; + unsigned int nr_pages = 1 << order; + + /* bootmem page has reserved flag */ + if (PageReserved(page)) { + __ClearPageReserved(page); + + magic = (unsigned long)page->lru.next; + if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { + while (nr_pages--) + put_page_bootmem(page++); + } else { + while (nr_pages--) + free_reserved_page(page++); + } + } else { + /* + * Only direct pagetable allocation (those allocated via + * hotplug) call the pgtable_page_ctor; vmemmap pgtable + * allocations don't. + */ + if (direct) + pgtable_page_dtor(page); + + free_pages((unsigned long)page_address(page), order); + } +} + +static void free_pte_table(pmd_t *pmd, bool direct) +{ + pte_t *pte_start, *pte; + struct page *page; + int i; + + pte_start = (pte_t *) pmd_page_vaddr(*pmd); + /* Check if there is no valid entry in the PMD */ + for (i = 0; i < PTRS_PER_PTE; i++) { + pte = pte_start + i; + if (!pte_none(*pte)) + return; + } + + page = pmd_page(*pmd); + + free_pagetable(page, 0, direct); + + /* + * This spin lock could be only taken in _pte_aloc_kernel + * in mm/memory.c and nowhere else (for arm64). Not sure if + * the function above can be called concurrently. In doubt, + * I am living it here for now, but it probably can be removed + */ + spin_lock(&init_mm.page_table_lock); + pmd_clear(pmd); + spin_unlock(&init_mm.page_table_lock); +} + +static void free_pmd_table(pud_t *pud, bool direct) +{ + pmd_t *pmd_start, *pmd; + struct page *page; + int i; + + pmd_start = (pmd_t *) pud_page_vaddr(*pud); + /* Check if there is no valid entry in the PMD */ + for (i = 0; i < PTRS_PER_PMD; i++) { + pmd = pmd_start + i; + if (!pmd_none(*pmd)) + return; + } + + page = pud_page(*pud); + + free_pagetable(page, 0, direct); + + /* + * This spin lock could be only taken in _pte_aloc_kernel + * in mm/memory.c and nowhere else (for arm64). Not sure if + * the function above can be called concurrently. In doubt, + * I am living it here for now, but it probably can be removed + */ + spin_lock(&init_mm.page_table_lock); + pud_clear(pud); + spin_unlock(&init_mm.page_table_lock); +} + +/* + * When the PUD is folded on the PGD (three levels of paging), + * there's no need to free PUDs + */ +#if CONFIG_PGTABLE_LEVELS > 3 +static void free_pud_table(pgd_t *pgd, bool direct) +{ + pud_t *pud_start, *pud; + struct page *page; + int i; + + pud_start = (pud_t *) pgd_page_vaddr(*pgd); + /* Check if there is no valid entry in the PUD */ + for (i = 0; i < PTRS_PER_PUD; i++) { + pud = pud_start + i; + if (!pud_none(*pud)) + return; + } + + page = pgd_page(*pgd); + + free_pagetable(page, 0, direct); + + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. 
In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pgd_clear(pgd); + spin_unlock(&init_mm.page_table_lock); +} +#endif + +static void remove_pte_table(pte_t *pte, unsigned long addr, + unsigned long end, bool direct) +{ + unsigned long next; + void *page_addr; + + for (; addr < end; addr = next, pte++) { + next = (addr + PAGE_SIZE) & PAGE_MASK; + if (next > end) + next = end; + + if (!pte_present(*pte)) + continue; + + if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { + /* + * Do not free direct mapping pages since they were + * freed when offlining, or simplely not in use. + */ + if (!direct) + free_pagetable(pte_page(*pte), 0, direct); + + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pte_clear(&init_mm, addr, pte); + spin_unlock(&init_mm.page_table_lock); + } else { + /* + * If we are here, we are freeing vmemmap pages since + * direct mapped memory ranges to be freed are aligned. + * + * If we are not removing the whole page, it means + * other page structs in this page are being used and + * we canot remove them. So fill the unused page_structs + * with 0xFD, and remove the page when it is wholly + * filled with 0xFD. + */ + memset((void *)addr, PAGE_INUSE, next - addr); + + page_addr = page_address(pte_page(*pte)); + if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) { + free_pagetable(pte_page(*pte), 0, direct); + + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pte_clear(&init_mm, addr, pte); + spin_unlock(&init_mm.page_table_lock); + } + } + } + + // I am adding this flush here in simmetry to the x86 code. + // Why do I need to call it here and not in remove_p[mu]d + flush_tlb_all(); +} + +static void remove_pmd_table(pmd_t *pmd, unsigned long addr, + unsigned long end, bool direct) +{ + unsigned long next; + void *page_addr; + pte_t *pte; + + for (; addr < end; addr = next, pmd++) { + next = pmd_addr_end(addr, end); + + if (!pmd_present(*pmd)) + continue; + + // check if we are using 2MB section mappings + if (pmd_sect(*pmd)) { + if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { + if (!direct) { + free_pagetable(pmd_page(*pmd), + get_order(PMD_SIZE), direct); + } + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pmd_clear(pmd); + spin_unlock(&init_mm.page_table_lock); + } else { + /* If here, we are freeing vmemmap pages. */ + memset((void *)addr, PAGE_INUSE, next - addr); + + page_addr = page_address(pmd_page(*pmd)); + if (!memchr_inv(page_addr, PAGE_INUSE, + PMD_SIZE)) { + free_pagetable(pmd_page(*pmd), + get_order(PMD_SIZE), direct); + + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. 
In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pmd_clear(pmd); + spin_unlock(&init_mm.page_table_lock); + } + } + continue; + } + + BUG_ON(!pmd_table(*pmd)); + + pte = pte_offset_map(pmd, addr); + remove_pte_table(pte, addr, next, direct); + free_pte_table(pmd, direct); + } +} + +static void remove_pud_table(pud_t *pud, unsigned long addr, + unsigned long end, bool direct) +{ + unsigned long next; + pmd_t *pmd; + void *page_addr; + + for (; addr < end; addr = next, pud++) { + next = pud_addr_end(addr, end); + if (!pud_present(*pud)) + continue; + /* + * If we are using 4K granules, check if we are using + * 1GB section mapping. + */ + if (pud_sect(*pud)) { + if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { + if (!direct) { + free_pagetable(pud_page(*pud), + get_order(PUD_SIZE), direct); + } + + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pud_clear(pud); + spin_unlock(&init_mm.page_table_lock); + } else { + /* If here, we are freeing vmemmap pages. */ + memset((void *)addr, PAGE_INUSE, next - addr); + + page_addr = page_address(pud_page(*pud)); + if (!memchr_inv(page_addr, PAGE_INUSE, + PUD_SIZE)) { + + free_pagetable(pud_page(*pud), + get_order(PUD_SIZE), direct); + + /* + * This spin lock could be only + * taken in _pte_aloc_kernel in + * mm/memory.c and nowhere else + * (for arm64). Not sure if the + * function above can be called + * concurrently. In doubt, + * I am living it here for now, + * but it probably can be removed. + */ + spin_lock(&init_mm.page_table_lock); + pud_clear(pud); + spin_unlock(&init_mm.page_table_lock); + } + } + continue; + } + + BUG_ON(!pud_table(*pud)); + + pmd = pmd_offset(pud, addr); + remove_pmd_table(pmd, addr, next, direct); + free_pmd_table(pud, direct); + } +} + +void remove_pagetable(unsigned long start, unsigned long end, bool direct) +{ + unsigned long next; + unsigned long addr; + pgd_t *pgd; + pud_t *pud; + + for (addr = start; addr < end; addr = next) { + next = pgd_addr_end(addr, end); + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + continue; + + pud = pud_offset(pgd, addr); + remove_pud_table(pud, addr, next, direct); + /* + * When the PUD is folded on the PGD (three levels of paging), + * I did already clear the PMD page in free_pmd_table, + * and reset the corresponding PGD==PUD entry. + */ +#if CONFIG_PGTABLE_LEVELS > 3 + free_pud_table(pgd, direct); +#endif + } + + flush_tlb_all(); +} + + +#endif /* CONFIG_MEMORY_HOTREMOVE */ +#endif /* CONFIG_MEMORY_HOTPLUG */ + /* * Check whether a kernel address is valid (derived from arch/x86/). 
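free_pte_table(), free_pmd_table() and free_pud_table() above share one pattern: scan every entry of the lower-level table, bail out as soon as one is still live, and otherwise free the page backing the table. remove_pte_table() adds a second check for vmemmap teardown: partially freed pages are filled with the 0xFD PAGE_INUSE marker and only released once memchr_inv() confirms every byte carries it. Below is a tiny self-contained model of both checks; the arrays stand in for a page table and a vmemmap page, and the helpers are hypothetical stand-ins, not the kernel primitives:

/* Minimal model of the two checks used by the teardown code above:
 * 1) "is every entry in this table empty?"  (free_p?d_table pattern)
 * 2) "is every byte of this page 0xFD?"     (PAGE_INUSE / memchr_inv)
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_INUSE 0xFD

static bool table_is_empty(const uint64_t *entries, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (entries[i] != 0)	/* stand-in for !pte_none() */
			return false;
	return true;
}

static bool page_fully_marked(const uint8_t *page, size_t len)
{
	/* userspace stand-in for !memchr_inv(page, PAGE_INUSE, len) */
	for (size_t i = 0; i < len; i++)
		if (page[i] != PAGE_INUSE)
			return false;
	return true;
}

int main(void)
{
	uint64_t ptes[512] = { 0 };
	uint8_t page[4096];

	printf("empty table -> free it: %d\n", table_is_empty(ptes, 512));
	ptes[7] = 0xdeadbeef;	/* one live entry keeps the table alive */
	printf("one live pte -> keep it: %d\n", table_is_empty(ptes, 512));

	memset(page, PAGE_INUSE, sizeof(page));
	printf("page wholly 0xFD -> can be freed: %d\n",
	       page_fully_marked(page, sizeof(page)));
	return 0;
}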
*/ @@ -769,6 +1175,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) #endif /* CONFIG_ARM64_64K_PAGES */ void vmemmap_free(unsigned long start, unsigned long end) { +#ifdef CONFIG_MEMORY_HOTREMOVE + remove_pagetable(start, end, false); +#endif } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index f6678c6b6708adf1136f726f9e71b7f2d264c724..14177c6cb46088b247e346c7473028e1820db962 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -2191,9 +2191,12 @@ void diag_send_updates_peripheral(uint8_t peripheral) if (driver->time_sync_enabled) diag_send_time_sync_update(peripheral); mutex_lock(&driver->md_session_lock); - diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID); - diag_send_log_mask_update(peripheral, ALL_EQUIP_ID); - diag_send_event_mask_update(peripheral); + if (driver->set_mask_cmd) { + diag_send_msg_mask_update(peripheral, + ALL_SSID, ALL_SSID); + diag_send_log_mask_update(peripheral, ALL_EQUIP_ID); + diag_send_event_mask_update(peripheral); + } mutex_unlock(&driver->md_session_lock); diag_send_real_time_update(peripheral, driver->real_time_mode[DIAG_LOCAL_PROC]); @@ -2230,6 +2233,7 @@ int diag_process_apps_masks(unsigned char *buf, int len, int pid) break; case DIAG_CMD_OP_SET_LOG_MASK: hdlr = diag_cmd_set_log_mask; + driver->set_mask_cmd = 1; break; case DIAG_CMD_OP_GET_LOG_MASK: hdlr = diag_cmd_get_log_mask; @@ -2249,17 +2253,21 @@ int diag_process_apps_masks(unsigned char *buf, int len, int pid) break; case DIAG_CMD_OP_SET_MSG_MASK: hdlr = diag_cmd_set_msg_mask; + driver->set_mask_cmd = 1; break; case DIAG_CMD_OP_SET_ALL_MSG_MASK: hdlr = diag_cmd_set_all_msg_mask; + driver->set_mask_cmd = 1; break; } } else if (*buf == DIAG_CMD_GET_EVENT_MASK) { hdlr = diag_cmd_get_event_mask; } else if (*buf == DIAG_CMD_SET_EVENT_MASK) { hdlr = diag_cmd_update_event_mask; + driver->set_mask_cmd = 1; } else if (*buf == DIAG_CMD_EVENT_TOGGLE) { hdlr = diag_cmd_toggle_events; + driver->set_mask_cmd = 1; } if (hdlr) diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index 4055608f88e28571222289ba22465ec87c210838..bf945f450a69d66ba761432615e5554854ea583c 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -668,6 +668,7 @@ struct diagchar_dev { struct diag_mask_info *log_mask; struct diag_mask_info *event_mask; struct diag_mask_info *build_time_mask; + uint8_t set_mask_cmd; uint8_t msg_mask_tbl_count; uint8_t bt_msg_mask_tbl_count; uint16_t event_mask_size; diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 434ea0bc3b1adf5ee5a239942235c0531cd1134a..81f6fde79f899521293846afe518161a75469877 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -304,6 +304,7 @@ static struct clk_osm l3_clk = { static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0); static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0); static DEFINE_CLK_VOTER(l3_misc_vote_clk, l3_clk, 0); +static DEFINE_CLK_VOTER(l3_gpu_vote_clk, l3_clk, 0); static struct clk_osm pwrcl_clk = { .cluster_num = 1, @@ -432,6 +433,7 @@ static struct clk_hw *osm_qcom_clk_hws[] = { [L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw, [L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw, [L3_MISC_VOTE_CLK] = &l3_misc_vote_clk.hw, + [L3_GPU_VOTE_CLK] = &l3_gpu_vote_clk.hw, [L3_CLK] = &l3_clk.hw, [CPU0_PWRCL_CLK] = &cpu0_pwrcl_clk.hw, [CPU1_PWRCL_CLK] = &cpu1_pwrcl_clk.hw, @@ -1119,6 +1121,8 @@ static int 
clk_cpu_osm_driver_probe(struct platform_device *pdev) "clk: Failed to enable cluster1 clock for L3\n"); WARN(clk_prepare_enable(l3_misc_vote_clk.hw.clk), "clk: Failed to enable misc clock for L3\n"); + WARN(clk_prepare_enable(l3_gpu_vote_clk.hw.clk), + "clk: Failed to enable gpu clock for L3\n"); populate_opp_table(pdev); diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c index 33ba7bf6ee2e4fc5ee9033c07c50193b301b2dde..7f7626f29a792e4eee4be1076cc15f50ca914fb9 100644 --- a/drivers/cpufreq/qcom-cpufreq.c +++ b/drivers/cpufreq/qcom-cpufreq.c @@ -27,10 +27,13 @@ #include #include #include +#include +#include #include static DEFINE_MUTEX(l2bw_lock); +static struct thermal_cooling_device *cdev[NR_CPUS]; static struct clk *cpu_clk[NR_CPUS]; static struct clk *l2_clk; static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table); @@ -306,6 +309,50 @@ static struct freq_attr *msm_freq_attr[] = { NULL, }; +static void msm_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct device_node *np, *lmh_node; + unsigned int cpu = 0; + + if (cdev[policy->cpu]) + return; + + np = of_cpu_device_node_get(policy->cpu); + if (WARN_ON(!np)) + return; + + /* + * For now, just loading the cooling device; + * thermal DT code takes care of matching them. + */ + if (of_find_property(np, "#cooling-cells", NULL)) { + lmh_node = of_parse_phandle(np, "qcom,lmh-dcvs", 0); + if (lmh_node) { + of_node_put(lmh_node); + goto ready_exit; + } + + for_each_cpu(cpu, policy->related_cpus) { + + of_node_put(np); + np = of_cpu_device_node_get(cpu); + if (WARN_ON(!np)) + return; + + cdev[cpu] = of_cpufreq_cooling_register(np, policy); + if (IS_ERR(cdev[cpu])) { + pr_err( + "running cpufreq for CPU%d without cooling dev: %ld\n", + cpu, PTR_ERR(cdev[cpu])); + cdev[cpu] = NULL; + } + } + } + +ready_exit: + of_node_put(np); +} + static struct cpufreq_driver msm_cpufreq_driver = { /* lps calculations are handled here. */ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | @@ -317,6 +364,7 @@ static struct cpufreq_driver msm_cpufreq_driver = { .get = msm_cpufreq_get_freq, .name = "msm", .attr = msm_freq_attr, + .ready = msm_cpufreq_ready, }; static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev, diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c index ee07f2d947b7977f10d52178960dda1a6549d9b2..f2e6f7af446f93f7715f1db595fe08cecc914d2f 100644 --- a/drivers/esoc/esoc-mdm-pon.c +++ b/drivers/esoc/esoc-mdm-pon.c @@ -57,13 +57,13 @@ static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic) * Allow PS hold assert to be detected */ if (!atomic) - usleep_range(203000, 300000); + usleep_range(80000,180000); else /* * The flow falls through this path as a part of the * panic handler, which has to executed atomically. */ - mdelay(203); + mdelay(100); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert); return 0; @@ -145,7 +145,7 @@ static int sdx50m_power_down(struct mdm_ctrl *mdm) * for the reset to fully take place. Sleep here to ensure the * reset has occurred before the function exits. */ - msleep(406); + msleep(300); return 0; } @@ -175,7 +175,7 @@ static void sdx50m_cold_reset(struct mdm_ctrl *mdm) * The function is executed as a part of the atomic reboot handler. * Hence, go with a busy loop instead of sleep. 
*/ - mdelay(334); + mdelay(600); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !mdm->soft_reset_inverted); diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 1c937d99a5a1c806752daa036f5ddf8ab720ea3b..9e58cb4510a8a0aa1bdb290855c3cdd532bd61f9 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -385,7 +385,8 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .major = 4, .minor = 0, .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU, + .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | + ADRENO_IFPC, .sqefw_name = "a630_sqe.fw", .zap_name = "a640_zap", .gpudev = &adreno_a6xx_gpudev, diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 4b28e527efff106cbd2305faa9e26865df533659..85320e48b26b8a0855f7911e8d497c736b155715 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -56,13 +56,6 @@ #define PCIE20_PARF_DBI_BASE_ADDR 0x350 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358 -#define PCS_BASE 0x800 - -#define PCS_PORT(n) (PCS_BASE + n * 0x1000) - -#define PCIE_N_SW_RESET(n) (PCS_PORT(n) + 0x00) -#define PCIE_N_POWER_DOWN_CONTROL(n) (PCS_PORT(n) + 0x04) - #define PCIE_GEN3_SPCIE_CAP 0x0154 #define PCIE_GEN3_GEN2_CTRL 0x080c #define PCIE_GEN3_RELATED 0x0890 @@ -600,6 +593,7 @@ struct msm_pcie_dev_t { uint32_t wr_halt_size; uint32_t slv_addr_space_size; uint32_t phy_status_offset; + uint32_t phy_power_down_offset; uint32_t cpl_timeout; uint32_t current_bdf; uint32_t perst_delay_us_min; @@ -1251,6 +1245,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev) dev->slv_addr_space_size); PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n", dev->phy_status_offset); + PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n", + dev->phy_power_down_offset); PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n", dev->cpl_timeout); PCIE_DBG_FS(dev, "current_bdf: 0x%x\n", @@ -4042,12 +4038,8 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num, 1 - dev->gpio[MSM_PCIE_GPIO_EP].on); - if (dev->max_link_speed != GEN3_SPEED) { - msm_pcie_write_reg(dev->phy, - PCIE_N_SW_RESET(dev->rc_idx), 0x1); - msm_pcie_write_reg(dev->phy, - PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx), 0); - } + if (dev->phy_power_down_offset) + msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0); msm_pcie_pipe_clk_deinit(dev); msm_pcie_clk_deinit(dev); @@ -4085,12 +4077,8 @@ static void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options) gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, dev->gpio[MSM_PCIE_GPIO_PERST].on); - if (dev->max_link_speed != GEN3_SPEED) { - msm_pcie_write_reg(dev->phy, - PCIE_N_SW_RESET(dev->rc_idx), 0x1); - msm_pcie_write_reg(dev->phy, - PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx), 0); - } + if (dev->phy_power_down_offset) + msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0); if (options & PM_CLK) { msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0, @@ -5906,6 +5894,19 @@ static int msm_pcie_probe(struct platform_device *pdev) rc_idx, msm_pcie_dev[rc_idx].phy_status_offset); } + msm_pcie_dev[rc_idx].phy_power_down_offset = 0; + ret = of_property_read_u32(pdev->dev.of_node, + "qcom,phy-power-down-offset", + &msm_pcie_dev[rc_idx].phy_power_down_offset); + if (ret) + PCIE_DBG(&msm_pcie_dev[rc_idx], + "RC%d: qcom,phy-power-down-offset not found.\n", + rc_idx); + else + PCIE_DBG(&msm_pcie_dev[rc_idx], + "RC%d: phy-power-down-offset: 0x%x.\n", + rc_idx, msm_pcie_dev[rc_idx].phy_power_down_offset); + 
msm_pcie_dev[rc_idx].cpl_timeout = 0; ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,cpl-timeout", diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 9a7e6980f990436d6ab328e8730f074c8d9a104c..45350332238404b79df758832ee62ca31e40d3b0 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -147,6 +147,17 @@ config RNDIS_IPA This Network interface is aimed to allow data path go through IPA core while using RNDIS protocol. +config IPA3_MHI_PROXY + tristate "IPA3 MHI proxy driver" + depends on RMNET_IPA3 + help + This driver is used as a proxy between modem and MHI host driver. + Its main functionality is to setup MHI Satellite channels on behalf of + modem and provide the ability of modem to MHI device communication. + Once the configuration is done modem will communicate directly with + the MHI device without AP involvement, with the exception of + power management. + config IPA_UT tristate "IPA Unit-Test Framework and Test Suites" depends on IPA3 && DEBUG_FS diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 87c4e488e626bcb3b2bd6b0ee62b13ad65459594..6551d99a0fe2aa49e67fdf38823f52e9fa1e12be 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -2771,8 +2771,8 @@ static const struct of_device_id ipa_plat_drv_match[] = { { .compatible = "qcom,ipa-smmu-ap-cb", }, { .compatible = "qcom,ipa-smmu-wlan-cb", }, { .compatible = "qcom,ipa-smmu-uc-cb", }, - { .compatible = "qcom,smp2pgpio-map-ipa-1-in", }, - { .compatible = "qcom,smp2pgpio-map-ipa-1-out", }, + { .compatible = "qcom,smp2p-map-ipa-1-in", }, + { .compatible = "qcom,smp2p-map-ipa-1-out", }, {} }; diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c index 9f75c6c13ab089b429d419f4fb76c24adeb91c79..a6f713b271c853bec92ce9ae7a08b6fd7130fd03 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -1871,12 +1871,15 @@ static int ipa3_usb_xdci_connect_internal( } if (ipa_pm_is_used()) { - result = ipa_pm_set_perf_profile( - ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl, - params->max_supported_bandwidth_mbps); - if (result) { - IPA_USB_ERR("failed to set perf profile\n"); - return result; + /* perf profile is not set on USB DPL pipe */ + if (ttype != IPA_USB_TRANSPORT_DPL) { + result = ipa_pm_set_perf_profile( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl, + params->max_supported_bandwidth_mbps); + if (result) { + IPA_USB_ERR("failed to set perf profile\n"); + return result; + } } result = ipa_pm_activate_sync( diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile index ae4dccfe75c3ad762212ea54bcafb94c6473d521..9901e642a6a56fc0922f92c5987146cb5de378bd 100644 --- a/drivers/platform/msm/ipa/ipa_v3/Makefile +++ b/drivers/platform/msm/ipa/ipa_v3/Makefile @@ -7,3 +7,5 @@ ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o + +obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 11790bdd8603db5cd815bbaf9c5a082984ef21ea..0cbbb42e52618a151d03bed4b4123cf8d0f2f8ec 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -39,6 +39,8 @@ #include #include #include +#include 
+#include #ifdef CONFIG_ARM64 @@ -61,6 +63,8 @@ #define IPA_GPIO_IN_QUERY_CLK_IDX 0 #define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0 #define IPA_GPIO_OUT_CLK_VOTE_IDX 1 +#define IPA_SMP2P_SMEM_STATE_MASK 3 + #define IPA_SUMMING_THRESHOLD (0x10) #define IPA_PIPE_MEM_START_OFST (0x0) @@ -4245,8 +4249,9 @@ static void ipa3_freeze_clock_vote_and_notify_modem(void) if (ipa3_ctx->smp2p_info.res_sent) return; - if (ipa3_ctx->smp2p_info.out_base_id == 0) { - IPAERR("smp2p out gpio not assigned\n"); + if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) { + IPAERR("fail to get smp2p clk resp bit %d\n", + PTR_ERR(ipa3_ctx->smp2p_info.smem_state)); return; } @@ -4257,11 +4262,9 @@ static void ipa3_freeze_clock_vote_and_notify_modem(void) else ipa3_ctx->smp2p_info.ipa_clk_on = true; - gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + - IPA_GPIO_OUT_CLK_VOTE_IDX, - ipa3_ctx->smp2p_info.ipa_clk_on); - gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + - IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1); + qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, + BIT(IPA_SMP2P_SMEM_STATE_MASK), + BIT(ipa3_ctx->smp2p_info.ipa_clk_on | (1<<1))); ipa3_ctx->smp2p_info.res_sent = true; IPADBG("IPA clocks are %s\n", @@ -6250,56 +6253,42 @@ static int ipa3_smp2p_probe(struct device *dev) { struct device_node *node = dev->of_node; int res; + int irq = 0; if (ipa3_ctx == NULL) { IPAERR("ipa3_ctx was not initialized\n"); - return -ENXIO; + return -EPROBE_DEFER; } IPADBG("node->name=%s\n", node->name); - if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) { - res = of_get_gpio(node, 0); - if (res < 0) { - IPADBG("of_get_gpio returned %d\n", res); - return res; + if (strcmp("qcom,smp2p_map_ipa_1_out", node->name) == 0) { + if (of_find_property(node, "qcom,smem-states", NULL)) { + ipa3_ctx->smp2p_info.smem_state = + qcom_smem_state_get(dev, "ipa-smp2p-out", + &ipa3_ctx->smp2p_info.smem_bit); + if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) { + IPAERR("fail to get smp2p clk resp bit %d\n", + PTR_ERR(ipa3_ctx->smp2p_info.smem_state)); + return PTR_ERR(ipa3_ctx->smp2p_info.smem_state); + } + IPADBG("smem_bit=%d\n", ipa3_ctx->smp2p_info.smem_bit); } - - ipa3_ctx->smp2p_info.out_base_id = res; - IPADBG("smp2p out_base_id=%d\n", - ipa3_ctx->smp2p_info.out_base_id); - } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) { - int irq; - - res = of_get_gpio(node, 0); + } else if (strcmp("qcom,smp2p_map_ipa_1_in", node->name) == 0) { + res = irq = of_irq_get_byname(node, "ipa-smp2p-in"); if (res < 0) { - IPADBG("of_get_gpio returned %d\n", res); + IPADBG("of_irq_get_byname returned %d\n", irq); return res; } - ipa3_ctx->smp2p_info.in_base_id = res; - IPADBG("smp2p in_base_id=%d\n", - ipa3_ctx->smp2p_info.in_base_id); - - /* register for modem clk query */ - irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id + - IPA_GPIO_IN_QUERY_CLK_IDX); - if (irq < 0) { - IPAERR("gpio_to_irq failed %d\n", irq); - return -ENODEV; - } + ipa3_ctx->smp2p_info.in_base_id = irq; IPADBG("smp2p irq#=%d\n", irq); - res = request_irq(irq, + res = devm_request_threaded_irq(dev, irq, NULL, (irq_handler_t)ipa3_smp2p_modem_clk_query_isr, IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev); if (res) { IPAERR("fail to register smp2p irq=%d\n", irq); return -ENODEV; } - res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id + - IPA_GPIO_IN_QUERY_CLK_IDX); - if (res) - IPAERR("failed to enable irq wake\n"); } - return 0; } @@ -6339,11 +6328,10 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p, } if (of_device_is_compatible(dev->of_node, - 
"qcom,smp2pgpio-map-ipa-1-in")) + "qcom,smp2p-map-ipa-1-out")) return ipa3_smp2p_probe(dev); - if (of_device_is_compatible(dev->of_node, - "qcom,smp2pgpio-map-ipa-1-out")) + "qcom,smp2p-map-ipa-1-in")) return ipa3_smp2p_probe(dev); result = get_ipa_dts_configuration(pdev_p, &ipa3_res); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 17017a462579fe3b05001997341b31f1efc06e0f..569ab9d5edd2e68cc9fff39e163f750cc4852d66 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -650,7 +650,6 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout) atomic_set(&comp->cnt, 2); sys = ipa3_ctx->ep[ep_idx].sys; - IPA_ACTIVE_CLIENTS_INC_SIMPLE(); if (num_desc == 1) { if (descr->callback || descr->user1) @@ -689,7 +688,6 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout) kfree(comp); bail: - IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return result; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index b03657e15d707bde027afe9795f8973bef010240..8a39857d0b675892929ac1b36fb83737dd459873 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1092,6 +1092,8 @@ struct ipa3_smp2p_info { u32 in_base_id; bool ipa_clk_on; bool res_sent; + unsigned int smem_bit; + struct qcom_smem_state *smem_state; }; /** @@ -2238,6 +2240,7 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected); int ipa3_uc_mhi_stop_event_update_channel(int channelHandle); int ipa3_uc_mhi_print_stats(char *dbg_buff, int size); int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); +int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n); void ipa3_tag_destroy_imm(void *user1, int user2); const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info (enum ipa_client_type client); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c new file mode 100644 index 0000000000000000000000000000000000000000..e05c0232f00be05c34af50d387619463842afa65 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c @@ -0,0 +1,995 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include "../ipa_common_i.h" +#include "ipa_i.h" + +#define IMP_DRV_NAME "ipa_mhi_proxy" + +#define IMP_DBG(fmt, args...) \ + do { \ + pr_debug(IMP_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IMP_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IMP_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IMP_ERR(fmt, args...) 
\ + do { \ + pr_err(IMP_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IMP_FUNC_ENTRY() \ + IMP_DBG_LOW("ENTRY\n") +#define IMP_FUNC_EXIT() \ + IMP_DBG_LOW("EXIT\n") + +#define IMP_IPA_UC_UL_CH_n 0 +#define IMP_IPA_UC_UL_EV_n 1 +#define IMP_IPA_UC_DL_CH_n 2 +#define IMP_IPA_UC_DL_EV_n 3 +#define IMP_IPA_UC_m 1 + +/* each pair of UL/DL channels are defined below */ +static const struct mhi_device_id mhi_driver_match_table[] = { + { .chan = "IP_HW_OFFLOAD_0" }, + {}, +}; + +static int imp_mhi_probe_cb(struct mhi_device *, const struct mhi_device_id *); +static void imp_mhi_remove_cb(struct mhi_device *); +static void imp_mhi_status_cb(struct mhi_device *, enum MHI_CB); + +static struct mhi_driver mhi_driver = { + .id_table = mhi_driver_match_table, + .probe = imp_mhi_probe_cb, + .remove = imp_mhi_remove_cb, + .status_cb = imp_mhi_status_cb, + .driver = { + .name = IMP_DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +struct imp_channel_context_type { + u32 chstate:8; + u32 brsmode:2; + u32 pollcfg:6; + u32 reserved:16; + + u32 chtype; + + u32 erindex; + + u64 rbase; + + u64 rlen; + + u64 rpp; + + u64 wpp; +} __packed; + +struct imp_event_context_type { + u32 reserved:8; + u32 intmodc:8; + u32 intmodt:16; + + u32 ertype; + + u32 msivec; + + u64 rbase; + + u64 rlen; + + u64 rpp; + + u64 wpp; +} __packed; + +struct imp_iova_addr { + dma_addr_t base; + unsigned int size; +}; + +struct imp_dev_info { + struct platform_device *pdev; + bool smmu_enabled; + struct imp_iova_addr ctrl; + struct imp_iova_addr data; + u32 chdb_base; + u32 erdb_base; +}; + +struct imp_event_props { + u16 id; + phys_addr_t doorbell; + u16 uc_mbox_n; + struct imp_event_context_type ev_ctx; +}; + +struct imp_event { + struct imp_event_props props; +}; + +struct imp_channel_props { + enum dma_data_direction dir; + u16 id; + phys_addr_t doorbell; + u16 uc_mbox_n; + struct imp_channel_context_type ch_ctx; + +}; + +struct imp_channel { + struct imp_channel_props props; + struct imp_event event; +}; + +enum imp_state { + IMP_INVALID = 0, + IMP_PROBED, + IMP_READY, + IMP_STARTED +}; + +struct imp_qmi_cache { + struct ipa_mhi_ready_indication_msg_v01 ready_ind; + struct ipa_mhi_alloc_channel_req_msg_v01 alloc_ch_req; + struct ipa_mhi_alloc_channel_resp_msg_v01 alloc_ch_resp; +}; + +struct imp_mhi_driver { + struct mhi_device *mhi_dev; + struct imp_channel ul_chan; + struct imp_channel dl_chan; +}; + +struct imp_context { + struct imp_dev_info dev_info; + struct imp_mhi_driver md; + struct mutex mutex; + enum imp_state state; + bool in_lpm; + bool lpm_disabled; + struct imp_qmi_cache qmi; + +}; + +static struct imp_context *imp_ctx; + +static void _populate_smmu_info(struct ipa_mhi_ready_indication_msg_v01 *req) +{ + req->smmu_info_valid = true; + req->smmu_info.iova_ctl_base_addr = imp_ctx->dev_info.ctrl.base; + req->smmu_info.iova_ctl_size = imp_ctx->dev_info.ctrl.size; + req->smmu_info.iova_data_base_addr = imp_ctx->dev_info.data.base; + req->smmu_info.iova_data_size = imp_ctx->dev_info.data.size; +} + +static void imp_mhi_trigger_ready_ind(void) +{ + struct ipa_mhi_ready_indication_msg_v01 *req + = &imp_ctx->qmi.ready_ind; + int ret; + struct imp_channel *ch; + struct ipa_mhi_ch_init_info_type_v01 *ch_info; + + IMP_FUNC_ENTRY(); + if (imp_ctx->state != IMP_PROBED) { + IMP_ERR("invalid state %d\n", imp_ctx->state); + goto 
exit; + } + + if (imp_ctx->dev_info.smmu_enabled) + _populate_smmu_info(req); + + req->ch_info_arr_len = 0; + BUILD_BUG_ON(QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 < 2); + + /* UL channel */ + ch = &imp_ctx->md.ul_chan; + ch_info = &req->ch_info_arr[req->ch_info_arr_len]; + + ch_info->ch_id = ch->props.id; + ch_info->direction_type = ch->props.dir; + ch_info->er_id = ch->event.props.id; + + /* uC is a doorbell proxy between local Q6 and remote Q6 */ + ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->props.uc_mbox_n); + + ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->event.props.uc_mbox_n); + req->ch_info_arr_len++; + + /* DL channel */ + ch = &imp_ctx->md.dl_chan; + ch_info = &req->ch_info_arr[req->ch_info_arr_len]; + + ch_info->ch_id = ch->props.id; + ch_info->direction_type = ch->props.dir; + ch_info->er_id = ch->event.props.id; + + /* uC is a doorbell proxy between local Q6 and remote Q6 */ + ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->props.uc_mbox_n); + + ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->event.props.uc_mbox_n); + req->ch_info_arr_len++; + + IMP_DBG("sending IND to modem\n"); + ret = ipa3_qmi_send_mhi_ready_indication(req); + if (ret) { + IMP_ERR("failed to send ready indication to modem %d\n", ret); + return; + } + + imp_ctx->state = IMP_READY; + +exit: + IMP_FUNC_EXIT(); +} + +static struct imp_channel *imp_get_ch_by_id(u16 id) +{ + if (imp_ctx->md.ul_chan.props.id == id) + return &imp_ctx->md.ul_chan; + + if (imp_ctx->md.dl_chan.props.id == id) + return &imp_ctx->md.dl_chan; + + return NULL; +} + +static struct ipa_mhi_er_info_type_v01 * + _find_ch_in_er_info_arr(struct ipa_mhi_alloc_channel_req_msg_v01 *req, + u16 id) +{ + int i; + + if (req->er_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) + return NULL; + + for (i = 0; i < req->tr_info_arr_len; i++) + if (req->er_info_arr[i].er_id == id) + return &req->er_info_arr[i]; + return NULL; +} + +/* round addresses for closest page per SMMU requirements */ +static inline void imp_smmu_round_to_page(uint64_t iova, uint64_t pa, + uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p) +{ + *iova_p = rounddown(iova, PAGE_SIZE); + *pa_p = rounddown(pa, PAGE_SIZE); + *size_p = roundup(size + pa - *pa_p, PAGE_SIZE); +} + +static void __map_smmu_info(struct device *dev, + struct imp_iova_addr *partition, int num_mapping, + struct ipa_mhi_mem_addr_info_type_v01 *map_info, + bool map) +{ + int i; + struct iommu_domain *domain; + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) { + IMP_ERR("domain is NULL for dev\n"); + return; + } + + for (i = 0; i < num_mapping; i++) { + imp_smmu_round_to_page(map_info[i].iova, map_info[i].pa, + map_info[i].size, &iova_p, &pa_p, &size_p); + + if (map) { + /* boundary check */ + WARN_ON(partition->base > iova_p || + (partition->base + partition->size) < + (iova_p + size_p)); + + IMP_DBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + iommu_map(domain, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } else { + IMP_DBG("unmapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, 
size_p); + iommu_unmap(domain, iova_p, size_p); + } + } +} + +static int __imp_configure_mhi_device( + struct ipa_mhi_alloc_channel_req_msg_v01 *req, + struct ipa_mhi_alloc_channel_resp_msg_v01 *resp) +{ + struct mhi_buf ch_config[2]; + int i; + struct ipa_mhi_er_info_type_v01 *er_info; + struct imp_channel *ch; + int ridx = 0; + int ret; + + IMP_FUNC_ENTRY(); + + /* configure MHI */ + for (i = 0; i < req->tr_info_arr_len; i++) { + ch = imp_get_ch_by_id(req->tr_info_arr[i].ch_id); + if (!ch) { + IMP_ERR("unknown channel %d\n", + req->tr_info_arr[i].ch_id); + resp->alloc_resp_arr[ridx].ch_id = + req->tr_info_arr[i].ch_id; + resp->alloc_resp_arr[ridx].is_success = 0; + ridx++; + resp->alloc_resp_arr_len = ridx; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01; + return -EINVAL; + } + + /* populate CCA */ + if (req->tr_info_arr[i].brst_mode_type == + QMI_IPA_BURST_MODE_ENABLED_V01) + ch->props.ch_ctx.brsmode = 3; + else if (req->tr_info_arr[i].brst_mode_type == + QMI_IPA_BURST_MODE_DISABLED_V01) + ch->props.ch_ctx.brsmode = 2; + else + ch->props.ch_ctx.brsmode = 0; + + ch->props.ch_ctx.pollcfg = req->tr_info_arr[i].poll_cfg; + ch->props.ch_ctx.chtype = ch->props.dir; + ch->props.ch_ctx.erindex = ch->event.props.id; + ch->props.ch_ctx.rbase = req->tr_info_arr[i].ring_iova; + ch->props.ch_ctx.rlen = req->tr_info_arr[i].ring_len; + ch->props.ch_ctx.rpp = req->tr_info_arr[i].rp; + ch->props.ch_ctx.wpp = req->tr_info_arr[i].wp; + + ch_config[0].buf = &ch->props.ch_ctx; + ch_config[0].len = sizeof(ch->props.ch_ctx); + ch_config[0].name = "CCA"; + + /* populate ECA */ + er_info = _find_ch_in_er_info_arr(req, ch->event.props.id); + if (!er_info) { + IMP_ERR("no event ring for ch %d\n", + req->tr_info_arr[i].ch_id); + resp->alloc_resp_arr[ridx].ch_id = + req->tr_info_arr[i].ch_id; + resp->alloc_resp_arr[ridx].is_success = 0; + ridx++; + resp->alloc_resp_arr_len = ridx; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + return -EINVAL; + } + + ch->event.props.ev_ctx.intmodc = er_info->intmod_count; + ch->event.props.ev_ctx.intmodt = er_info->intmod_cycles; + ch->event.props.ev_ctx.ertype = 1; + ch->event.props.ev_ctx.msivec = er_info->msi_addr; + ch->event.props.ev_ctx.rbase = er_info->ring_iova; + ch->event.props.ev_ctx.rlen = er_info->ring_len; + ch->event.props.ev_ctx.rpp = er_info->rp; + ch->event.props.ev_ctx.wpp = er_info->wp; + ch_config[1].buf = &ch->event.props.ev_ctx; + ch_config[1].len = sizeof(ch->event.props.ev_ctx); + ch_config[1].name = "ECA"; + + IMP_DBG("Configuring MHI device for ch %d\n", ch->props.id); + ret = mhi_device_configure(imp_ctx->md.mhi_dev, ch->props.dir, + ch_config, 2); + if (ret) { + IMP_ERR("mhi_device_configure failed for ch %d\n", + req->tr_info_arr[i].ch_id); + resp->alloc_resp_arr[ridx].ch_id = + req->tr_info_arr[i].ch_id; + resp->alloc_resp_arr[ridx].is_success = 0; + ridx++; + resp->alloc_resp_arr_len = ridx; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + return -EINVAL; + } + } + + IMP_FUNC_EXIT(); + + return 0; +} + +/** + * imp_handle_allocate_channel_req() - Allocate a new MHI channel + * + * Allocates MHI channel and start them. 
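+ * Caches the request, maps the CTRL and DATA regions when SMMU is enabled, + * programs the channel and event ring contexts via mhi_device_configure(), + * and starts both channels with mhi_prepare_for_transfer().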
+ * + * Return: QMI return codes + */ +struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( + struct ipa_mhi_alloc_channel_req_msg_v01 *req) +{ + int ret; + struct ipa_mhi_alloc_channel_resp_msg_v01 *resp = + &imp_ctx->qmi.alloc_ch_resp; + + IMP_FUNC_ENTRY(); + + memset(resp, 0, sizeof(*resp)); + + if (imp_ctx->state != IMP_READY) { + IMP_ERR("invalid state %d\n", imp_ctx->state); + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01; + return resp; + } + + /* cache the req */ + memcpy(&imp_ctx->qmi.alloc_ch_req, req, sizeof(*req)); + + if (req->tr_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) { + IMP_ERR("invalid tr_info_arr_len %d\n", req->tr_info_arr_len); + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_NO_MEMORY_V01; + return resp; + } + + if ((req->ctrl_addr_map_info_len == 0 || + req->data_addr_map_info_len == 0) && + imp_ctx->dev_info.smmu_enabled) { + IMP_ERR("no mapping provided, but smmu is enabled\n"); + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + return resp; + } + + mutex_lock(&imp_ctx->mutex); + + if (imp_ctx->dev_info.smmu_enabled) { + /* map CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + req->ctrl_addr_map_info_len, + req->ctrl_addr_map_info, + true); + + /* map DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + req->data_addr_map_info_len, + req->data_addr_map_info, + true); + } + + resp->alloc_resp_arr_valid = true; + ret = __imp_configure_mhi_device(req, resp); + if (ret) + goto fail_smmu; + + IMP_DBG("Starting MHI channels %d and %d\n", + imp_ctx->md.ul_chan.props.id, + imp_ctx->md.dl_chan.props.id); + ret = mhi_prepare_for_transfer(imp_ctx->md.mhi_dev); + if (ret) { + IMP_ERR("mhi_prepare_for_transfer failed %d\n", ret); + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.ul_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 0; + resp->alloc_resp_arr_len++; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.dl_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 0; + resp->alloc_resp_arr_len++; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + goto fail_smmu; + } + + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.ul_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 1; + resp->alloc_resp_arr_len++; + + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.dl_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 1; + resp->alloc_resp_arr_len++; + + imp_ctx->state = IMP_STARTED; + mutex_unlock(&imp_ctx->mutex); + IMP_FUNC_EXIT(); + + resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01; + return resp; + +fail_smmu: + if (imp_ctx->dev_info.smmu_enabled) { + /* unmap CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + req->ctrl_addr_map_info_len, + req->ctrl_addr_map_info, + false); + + /* unmap DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + req->data_addr_map_info_len, + req->data_addr_map_info, + false); + } + mutex_unlock(&imp_ctx->mutex); + return resp; +} + +/** + * imp_handle_vote_req() - Votes for MHI / PCIe clocks + * + * Hold a vote to prevent / allow low power mode on MHI. 
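+ * A vote of true takes an MHI vote via mhi_device_get_sync() so the link + * stays out of low power mode; a vote of false releases it with + * mhi_device_put(). Votes are only accepted in the IMP_READY state and + * duplicate votes/devotes are rejected.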
+ * + * Return: 0 on success, negative otherwise + */ +int imp_handle_vote_req(bool vote) +{ + int ret; + + IMP_DBG_LOW("vote %d\n", vote); + + mutex_lock(&imp_ctx->mutex); + if (imp_ctx->state != IMP_READY) { + IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state); + mutex_unlock(&imp_ctx->mutex); + return -EPERM; + } + + if (vote == imp_ctx->lpm_disabled) { + IMP_ERR("already voted/devoted %d\n", vote); + mutex_unlock(&imp_ctx->mutex); + return -EPERM; + } + + if (vote) { + ret = mhi_device_get_sync(imp_ctx->md.mhi_dev); + if (ret) { + IMP_ERR("mhi_sync_get failed %d\n", ret); + mutex_unlock(&imp_ctx->mutex); + return ret; + } + imp_ctx->lpm_disabled = true; + } else { + mhi_device_put(imp_ctx->md.mhi_dev); + imp_ctx->lpm_disabled = false; + } + + return 0; +} + +static int imp_read_iova_from_dtsi(const char *node, struct imp_iova_addr *out) +{ + u32 iova_mapping[2]; + struct device_node *of_node = imp_ctx->dev_info.pdev->dev.of_node; + + if (of_property_read_u32_array(of_node, node, iova_mapping, 2)) { + IMP_DBG("failed to read of_node %s\n", node); + return -EINVAL; + } + + out->base = iova_mapping[0]; + out->size = iova_mapping[1]; + IMP_DBG("%s: base: 0x%pad size: 0x%x\n", node, &out->base, out->size); + + return 0; +} + +static void imp_mhi_shutdown(void) +{ + struct ipa_mhi_cleanup_req_msg_v01 req = { 0 }; + + IMP_FUNC_ENTRY(); + + if (imp_ctx->state == IMP_STARTED) { + req.cleanup_valid = true; + req.cleanup = true; + ipa3_qmi_send_mhi_cleanup_request(&req); + if (imp_ctx->dev_info.smmu_enabled) { + struct ipa_mhi_alloc_channel_req_msg_v01 *creq + = &imp_ctx->qmi.alloc_ch_req; + + /* unmap CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + creq->ctrl_addr_map_info_len, + creq->ctrl_addr_map_info, + false); + + /* unmap DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + creq->data_addr_map_info_len, + creq->data_addr_map_info, + false); + } + if (imp_ctx->lpm_disabled) { + mhi_device_put(imp_ctx->md.mhi_dev); + imp_ctx->lpm_disabled = false; + } + + } + imp_ctx->state = IMP_PROBED; + + IMP_FUNC_EXIT(); +} + +static int imp_mhi_probe_cb(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct imp_channel *ch; + struct imp_event *ev; + int ret; + + IMP_FUNC_ENTRY(); + + if (id != &mhi_driver_match_table[0]) { + IMP_ERR("only chan=%s is supported for now\n", + mhi_driver_match_table[0].chan); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + imp_ctx->md.mhi_dev = mhi_dev; + + mutex_lock(&imp_ctx->mutex); + /* store UL channel properties */ + ch = &imp_ctx->md.ul_chan; + ev = &imp_ctx->md.ul_chan.event; + + ch->props.id = mhi_dev->ul_chan_id; + ch->props.dir = DMA_TO_DEVICE; + ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8; + ch->props.uc_mbox_n = IMP_IPA_UC_UL_CH_n; + IMP_DBG("ul ch id %d doorbell 0x%pa uc_mbox_n %d\n", + ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n); + + ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell, + ch->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell, + ch->props.uc_mbox_n); + + ev->props.id = mhi_dev->ul_event_id; + ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8; + ev->props.uc_mbox_n = IMP_IPA_UC_UL_EV_n; + IMP_DBG("allocated ev %d\n", ev->props.id); + + ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell, + ev->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell, + 
ev->props.uc_mbox_n); + + /* store DL channel properties */ + ch = &imp_ctx->md.dl_chan; + ev = &imp_ctx->md.dl_chan.event; + + ch->props.dir = DMA_FROM_DEVICE; + ch->props.id = mhi_dev->dl_chan_id; + ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8; + ch->props.uc_mbox_n = IMP_IPA_UC_DL_CH_n; + IMP_DBG("dl ch id %d doorbell 0x%pa uc_mbox_n %d\n", + ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n); + + ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell, + ch->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell, + ch->props.uc_mbox_n); + + ev->props.id = mhi_dev->dl_event_id; + ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8; + ev->props.uc_mbox_n = IMP_IPA_UC_DL_EV_n; + IMP_DBG("allocated ev %d\n", ev->props.id); + + ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell, + ev->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell, + ev->props.uc_mbox_n); + + imp_mhi_trigger_ready_ind(); + + mutex_unlock(&imp_ctx->mutex); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + mhi_device_get_sync(imp_ctx->md.mhi_dev); + + + IMP_FUNC_EXIT(); + return 0; + +fail: + mutex_unlock(&imp_ctx->mutex); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return ret; +} + +static void imp_mhi_remove_cb(struct mhi_device *mhi_dev) +{ + IMP_FUNC_ENTRY(); + + mutex_lock(&imp_ctx->mutex); + imp_mhi_shutdown(); + mutex_unlock(&imp_ctx->mutex); + IMP_FUNC_EXIT(); +} + +static void imp_mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + IMP_DBG("%d\n", mhi_cb); + + mutex_lock(&imp_ctx->mutex); + if (mhi_dev != imp_ctx->md.mhi_dev) { + IMP_DBG("ignoring secondary callbacks\n"); + mutex_unlock(&imp_ctx->mutex); + return; + } + + switch (mhi_cb) { + case MHI_CB_IDLE: + break; + case MHI_CB_LPM_ENTER: + if (imp_ctx->state == IMP_STARTED) { + if (!imp_ctx->in_lpm) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + imp_ctx->in_lpm = true; + } else { + IMP_ERR("already in LPM\n"); + } + } + break; + case MHI_CB_LPM_EXIT: + if (imp_ctx->state == IMP_STARTED) { + if (imp_ctx->in_lpm) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP"); + imp_ctx->in_lpm = false; + } else { + IMP_ERR("not in LPM\n"); + } + } + break; + + case MHI_CB_EE_RDDM: + case MHI_CB_PENDING_DATA: + IMP_ERR("unexpected event %d\n", mhi_cb); + break; + } + mutex_unlock(&imp_ctx->mutex); +} + +static int imp_probe(struct platform_device *pdev) +{ + int ret; + + IMP_FUNC_ENTRY(); + + if (ipa3_uc_state_check()) { + IMP_DBG("uC not ready yet\n"); + return -EPROBE_DEFER; + } + + imp_ctx->dev_info.pdev = pdev; + imp_ctx->dev_info.smmu_enabled = true; + ret = imp_read_iova_from_dtsi("qcom,ctrl-iova", + &imp_ctx->dev_info.ctrl); + if (ret) + imp_ctx->dev_info.smmu_enabled = false; + + ret = imp_read_iova_from_dtsi("qcom,data-iova", + &imp_ctx->dev_info.data); + if (ret) + imp_ctx->dev_info.smmu_enabled = false; + + IMP_DBG("smmu_enabled=%d\n", imp_ctx->dev_info.smmu_enabled); + + if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base", + &imp_ctx->dev_info.chdb_base)) { + IMP_ERR("failed to read of_node %s\n", "qcom,mhi-chdb-base"); + return -EINVAL; + } + IMP_DBG("chdb-base=0x%x\n", imp_ctx->dev_info.chdb_base); + + if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base", + &imp_ctx->dev_info.erdb_base)) { + IMP_ERR("failed to read of_node %s\n", "qcom,mhi-erdb-base"); + return -EINVAL; + } + IMP_DBG("erdb-base=0x%x\n", imp_ctx->dev_info.erdb_base); + + imp_ctx->state = IMP_PROBED; + ret = 
mhi_driver_register(&mhi_driver); + if (ret) { + IMP_ERR("mhi_driver_register failed %d\n", ret); + mutex_unlock(&imp_ctx->mutex); + return ret; + } + + IMP_FUNC_EXIT(); + return 0; +} + +static int imp_remove(struct platform_device *pdev) +{ + IMP_FUNC_ENTRY(); + mutex_lock(&imp_ctx->mutex); + mhi_driver_unregister(&mhi_driver); + + if (!imp_ctx->in_lpm) + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + imp_ctx->in_lpm = false; + imp_ctx->lpm_disabled = false; + + imp_ctx->state = IMP_INVALID; + mutex_unlock(&imp_ctx->mutex); + + return 0; +} + +static const struct of_device_id imp_dt_match[] = { + { .compatible = "qcom,ipa-mhi-proxy" }, + {}, +}; +MODULE_DEVICE_TABLE(of, imp_dt_match); + +static struct platform_driver ipa_mhi_proxy_driver = { + .driver = { + .name = "ipa_mhi_proxy", + .owner = THIS_MODULE, + .of_match_table = imp_dt_match, + }, + .probe = imp_probe, + .remove = imp_remove, +}; + +/** + * imp_handle_modem_ready() - Registers IMP as a platform device + * + * This function is called after modem is loaded and QMI handshake is done. + * IMP will register itself as a platform device, and on support device the + * probe function will get called. + * + * Return: None + */ +void imp_handle_modem_ready(void) +{ + + if (!imp_ctx) { + imp_ctx = kzalloc(sizeof(*imp_ctx), GFP_KERNEL); + if (!imp_ctx) + return; + + mutex_init(&imp_ctx->mutex); + } + + if (imp_ctx->state != IMP_INVALID) { + IMP_ERR("unexpected state %d\n", imp_ctx->state); + return; + } + + IMP_DBG("register platform device\n"); + platform_driver_register(&ipa_mhi_proxy_driver); +} + +/** + * imp_handle_modem_shutdown() - Handles modem SSR + * + * Performs MHI cleanup when modem is going to SSR (Subsystem Restart). + * + * Return: None + */ +void imp_handle_modem_shutdown(void) +{ + IMP_FUNC_ENTRY(); + + mutex_lock(&imp_ctx->mutex); + + if (imp_ctx->state == IMP_INVALID) { + mutex_unlock(&imp_ctx->mutex); + return; + } + if (imp_ctx->state == IMP_STARTED) { + mhi_unprepare_from_transfer(imp_ctx->md.mhi_dev); + imp_ctx->state = IMP_READY; + } + + if (imp_ctx->state == IMP_READY) { + if (imp_ctx->dev_info.smmu_enabled) { + struct ipa_mhi_alloc_channel_req_msg_v01 *creq + = &imp_ctx->qmi.alloc_ch_req; + + /* unmap CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + creq->ctrl_addr_map_info_len, + creq->ctrl_addr_map_info, + false); + + /* unmap DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + creq->data_addr_map_info_len, + creq->data_addr_map_info, + false); + } + } + + imp_ctx->state = IMP_PROBED; + mutex_unlock(&imp_ctx->mutex); + + IMP_FUNC_EXIT(); + + platform_driver_unregister(&ipa_mhi_proxy_driver); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA MHI Proxy Driver"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h new file mode 100644 index 0000000000000000000000000000000000000000..3a1d97d188c8964727f6d7dd4a17322f35051913 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef __IMP_H_ +#define __IMP_H_ + +#ifdef CONFIG_IPA3_MHI_PROXY + +#include "ipa_qmi_service.h" + +void imp_handle_modem_ready(void); + +struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( + struct ipa_mhi_alloc_channel_req_msg_v01 *req); + +int imp_handle_vote_req(bool vote); + +#else /* CONFIG_IPA3_MHI_PROXY */ + +static inline void imp_handle_modem_ready(void) +{ + +} + +static inline struct ipa_mhi_alloc_channel_resp_msg_v01 + *imp_handle_allocate_channel_req( + struct ipa_mhi_alloc_channel_req_msg_v01 *req) +{ + return NULL; +} + +static inline int imp_handle_vote_req(bool vote) +{ + return -EPERM; +} + +#endif /* CONFIG_IPA3_MHI_PROXY */ + +#endif /* __IMP_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 1249b1408b196d2dd94453b42443b7ceafc25051..dfb94a683c286ebb7ddccaa8fd09262166f9ad88 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -23,6 +23,7 @@ #include #include "ipa_qmi_service.h" +#include "ipa_mhi_proxy.h" #define IPA_Q6_SVC_VERS 1 #define IPA_A5_SVC_VERS 1 @@ -35,6 +36,7 @@ #define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000 #define QMI_SEND_REQ_TIMEOUT_MS 60000 +#define QMI_MHI_SEND_REQ_TIMEOUT_MS 1000 #define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000 @@ -69,6 +71,9 @@ static void ipa3_handle_indication_req(struct qmi_handle *qmi_handle, indication_req = (struct ipa_indication_reg_req_msg_v01 *)decoded_msg; IPAWANDBG("Received INDICATION Request\n"); + /* cache the client sq */ + memcpy(&ipa3_qmi_ctx->client_sq, sq, sizeof(*sq)); + memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01)); resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; @@ -95,7 +100,7 @@ static void ipa3_handle_indication_req(struct qmi_handle *qmi_handle, IPA_QMI_RESULT_SUCCESS_V01; rc = qmi_send_indication(qmi_handle, - &(ipa3_qmi_ctx->ipa_q6_client_params.sq), + &(ipa3_qmi_ctx->client_sq), QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, ipa3_master_driver_init_complt_ind_msg_data_v01_ei, @@ -261,6 +266,64 @@ static void ipa3_handle_modem_init_cmplt_req(struct qmi_handle *qmi_handle, IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n"); } +static void ipa3_handle_mhi_alloc_channel_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_mhi_alloc_channel_req_msg_v01 *ch_alloc_req; + struct ipa_mhi_alloc_channel_resp_msg_v01 *resp; + int rc; + + IPAWANDBG("Received QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01\n"); + ch_alloc_req = (struct ipa_mhi_alloc_channel_req_msg_v01 *)decoded_msg; + + resp = imp_handle_allocate_channel_req(ch_alloc_req); + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01, + IPA_MHI_ALLOC_CHANNEL_RESP_MSG_V01_MAX_MSG_LEN, + ipa_mhi_alloc_channel_resp_msg_v01_ei, + resp); + + if (rc < 0) + IPAWANERR("QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01 failed\n"); + else + IPAWANDBG("Sent QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01\n"); +} + +static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_mhi_clk_vote_req_msg_v01 *vote_req; + struct ipa_mhi_clk_vote_resp_msg_v01 resp; + int rc; + + IPAWANDBG_LOW("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01\n"); + vote_req = (struct ipa_mhi_clk_vote_req_msg_v01 *)decoded_msg; + + 
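/* Forward the modem's clock vote to the MHI proxy; any failure is + * reported back to the modem as a generic QMI internal error. */ +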
rc = imp_handle_vote_req(vote_req->mhi_vote); + if (rc) { + resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp.resp.error = IPA_QMI_ERR_INTERNAL_V01; + } else { + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + } + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_MHI_CLK_VOTE_RESP_V01, + IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN, + ipa_mhi_clk_vote_resp_msg_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("QMI_IPA_MHI_CLK_VOTE_RESP_V01 failed\n"); + else + IPAWANDBG("Sent QMI_IPA_MHI_CLK_VOTE_RESP_V01\n"); +} + static void ipa3_a5_svc_disconnect_cb(struct qmi_handle *qmi, unsigned int node, unsigned int port) { @@ -325,7 +388,7 @@ static int ipa3_qmi_send_req_wait(struct qmi_handle *client_handle, } ret = qmi_send_request(client_handle, - &ipa3_qmi_ctx->ipa_q6_client_params.sq, + &ipa3_qmi_ctx->server_sq, &txn, req_desc->msg_id, req_desc->max_msg_len, @@ -988,8 +1051,8 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) struct ipa_master_driver_init_complt_ind_msg_v01 ind; rc = kernel_connect(ipa_q6_clnt->sock, - (struct sockaddr *) &ipa3_qmi_ctx->ipa_q6_client_params.sq, - sizeof(ipa3_qmi_ctx->ipa_q6_client_params.sq), + (struct sockaddr *) &ipa3_qmi_ctx->server_sq, + sizeof(ipa3_qmi_ctx->server_sq), 0); if (rc < 0) { @@ -1044,7 +1107,7 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) IPA_QMI_RESULT_SUCCESS_V01; rc = qmi_send_indication(ipa3_svc_handle, - &ipa3_qmi_ctx->ipa_q6_client_params.sq, + &ipa3_qmi_ctx->client_sq, QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, ipa3_master_driver_init_complt_ind_msg_data_v01_ei, @@ -1059,9 +1122,9 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) static void ipa3_q6_clnt_svc_exit(struct work_struct *work) { - ipa3_qmi_ctx->ipa_q6_client_params.sq.sq_family = 0; - ipa3_qmi_ctx->ipa_q6_client_params.sq.sq_node = 0; - ipa3_qmi_ctx->ipa_q6_client_params.sq.sq_port = 0; + ipa3_qmi_ctx->server_sq.sq_family = 0; + ipa3_qmi_ctx->server_sq.sq_node = 0; + ipa3_qmi_ctx->server_sq.sq_port = 0; } static int ipa3_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, @@ -1071,9 +1134,9 @@ static int ipa3_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, service->service, service->version, service->instance, service->node, service->port); - ipa3_qmi_ctx->ipa_q6_client_params.sq.sq_family = AF_QIPCRTR; - ipa3_qmi_ctx->ipa_q6_client_params.sq.sq_node = service->node; - ipa3_qmi_ctx->ipa_q6_client_params.sq.sq_port = service->port; + ipa3_qmi_ctx->server_sq.sq_family = AF_QIPCRTR; + ipa3_qmi_ctx->server_sq.sq_node = service->node; + ipa3_qmi_ctx->server_sq.sq_port = service->port; if (!workqueues_stopped) { queue_delayed_work(ipa_clnt_req_workqueue, @@ -1158,6 +1221,22 @@ static struct qmi_msg_handler server_handlers[] = { QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01, .fn = ipa3_handle_modem_init_cmplt_req, }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01, + .ei = ipa_mhi_alloc_channel_req_msg_v01_ei, + .decoded_size = + IPA_MHI_ALLOC_CHANNEL_REQ_MSG_V01_MAX_MSG_LEN, + .fn = ipa3_handle_mhi_alloc_channel_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_MHI_CLK_VOTE_REQ_V01, + .ei = ipa_mhi_clk_vote_req_msg_v01_ei, + .decoded_size = + IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN, + .fn = ipa3_handle_mhi_vote_req, + }, }; @@ -1605,6 +1684,55 @@ int ipa3_qmi_get_per_client_packet_stats( "struct ipa_get_stats_per_client_req_msg_v01"); } +int ipa3_qmi_send_mhi_ready_indication( + struct ipa_mhi_ready_indication_msg_v01 
*req) +{ + IPAWANDBG("Sending QMI_IPA_MHI_READY_IND_V01\n"); + + if (unlikely(!ipa3_svc_handle)) + return -ETIMEDOUT; + + return qmi_send_indication(ipa3_svc_handle, + &ipa3_qmi_ctx->client_sq, + QMI_IPA_MHI_READY_IND_V01, + IPA_MHI_READY_INDICATION_MSG_V01_MAX_MSG_LEN, + ipa_mhi_ready_indication_msg_v01_ei, + req); +} + +int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req) +{ + + struct ipa_msg_desc req_desc, resp_desc; + struct ipa_mhi_cleanup_resp_msg_v01 resp; + int rc; + + memset(&resp, 0, sizeof(resp)); + + IPAWANDBG("Sending QMI_IPA_MHI_CLEANUP_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + + req_desc.max_msg_len = IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_IPA_MHI_CLEANUP_REQ_V01; + req_desc.ei_array = ipa_mhi_cleanup_req_msg_v01_ei; + + resp_desc.max_msg_len = IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_IPA_MHI_CLEANUP_RESP_V01; + resp_desc.ei_array = ipa_mhi_cleanup_resp_msg_v01_ei; + + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_MHI_SEND_REQ_TIMEOUT_MS); + + IPAWANDBG("QMI_IPA_MHI_CLEANUP_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_MHI_CLEANUP_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_mhi_cleanup_req_msg"); +} + void ipa3_qmi_init(void) { mutex_init(&ipa3_qmi_lock); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index 9d47878fda989a6626896e72d5daa6c89f66c29f..ce2e34a683ba3aef13d0346d06382d322f2912ed 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -69,10 +69,6 @@ DEV_NAME " %s:%d " fmt, ## args); \ } while (0) -struct ipa_q6_all_client_params { - struct sockaddr_qrtr sq; -}; - extern struct ipa3_qmi_context *ipa3_qmi_ctx; struct ipa3_qmi_context { @@ -92,7 +88,8 @@ struct ipa3_qmi_context { ipa_configure_ul_firewall_rules_req_msg_cache [MAX_NUM_QMI_RULE_CACHE]; bool modem_cfg_emb_pipe_flt; - struct ipa_q6_all_client_params ipa_q6_client_params; + struct sockaddr_qrtr client_sq; + struct sockaddr_qrtr server_sq; }; struct ipa3_rmnet_mux_val { @@ -175,6 +172,18 @@ extern struct qmi_elem_info extern struct qmi_elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_cleanup_resp_msg_v01_ei[]; + /** * struct ipa3_rmnet_context - IPA rmnet context * @ipa_rmnet_ssr: support modem SSR @@ -285,6 +294,11 @@ int ipa3_qmi_get_per_client_packet_stats( struct ipa_get_stats_per_client_req_msg_v01 *req, struct ipa_get_stats_per_client_resp_msg_v01 *resp); +int ipa3_qmi_send_mhi_ready_indication( + struct ipa_mhi_ready_indication_msg_v01 *req); + +int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req); + void ipa3_qmi_init(void); void ipa3_qmi_cleanup(void); @@ -407,6 +421,18 @@ 
static inline int ipa3_qmi_stop_data_qouta(void) static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { } +static int ipa3_qmi_send_mhi_ready_indication( + struct ipa_mhi_ready_indication_msg_v01 *req) +{ + return -EPERM; +} + +static int ipa3_qmi_send_mhi_cleanup_request( + struct ipa_mhi_cleanup_req_msg_v01 *req) +{ + return -EPERM; +} + static inline int ipa3_wwan_set_modem_perf_profile(int throughput); static inline int ipa3_qmi_enable_per_client_stats( struct ipa_enable_per_client_stats_req_msg_v01 *req, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c index 05c4682e4c3b984c947f1f47e3fb6dfcb4827233..5a544becccb5a94e160f3fa7ec7e8a9e131100b5 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c @@ -1337,6 +1337,26 @@ struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = { struct ipa_indication_reg_req_msg_v01, data_usage_quota_reached), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + ipa_mhi_ready_ind_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + ipa_mhi_ready_ind), + }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, @@ -3465,3 +3485,568 @@ struct qmi_elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = { .tlv_type = QMI_COMMON_TLV_TYPE, }, }; + +static struct qmi_elem_info ipa_mhi_ch_init_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + ch_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + er_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + ch_doorbell_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + er_doorbell_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + direction_type), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_mhi_smmu_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_ctl_base_addr), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_ctl_size), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_data_base_addr), + }, + { + .data_type 
= QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_data_size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + + +struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + ch_info_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_ch_init_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + ch_info_arr), + .ei_array = ipa_mhi_ch_init_info_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + smmu_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_mhi_smmu_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + smmu_info), + .ei_array = ipa_mhi_smmu_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01, + pa), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01, + iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + ch_id), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + poll_cfg), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum ipa_mhi_brst_mode_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + brst_mode_type), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + ring_iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + ring_len), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + rp), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len 
= 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + wp), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + er_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + intmod_cycles), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + intmod_count), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + msi_addr), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + ring_iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + ring_len), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + rp), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + wp), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + tr_info_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_tr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + tr_info_arr), + .ei_array = ipa_mhi_tr_info_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + er_info_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_er_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + er_info_arr), + .ei_array = ipa_mhi_er_info_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + ctrl_addr_map_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_mem_addr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct 
ipa_mhi_alloc_channel_req_msg_v01, + ctrl_addr_map_info), + .ei_array = ipa_mhi_mem_addr_info_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x04, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + data_addr_map_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_mem_addr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x04, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + data_addr_map_info), + .ei_array = ipa_mhi_mem_addr_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_alloc_resp_type_v01, + ch_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_alloc_resp_type_v01, + is_success), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + alloc_resp_arr_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + alloc_resp_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_ch_alloc_resp_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + alloc_resp_arr), + .ei_array = ipa_mhi_ch_alloc_resp_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_clk_vote_req_msg_v01, + mhi_vote), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_clk_vote_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_cleanup_req_msg_v01, + 
cleanup_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_cleanup_req_msg_v01, + cleanup), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_cleanup_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_cleanup_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c index 01b66391ba0a96dbbefa67c2b17f676c0993fa52..def309a70ee60c43909cabc86cdb9c4cf90b1fe0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,6 +41,7 @@ * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB. * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug. * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness. + * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info */ enum ipa3_cpu_2_hw_commands { IPA_CPU_2_HW_CMD_NO_OP = @@ -65,6 +66,8 @@ enum ipa3_cpu_2_hw_commands { FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9), IPA_CPU_2_HW_CMD_GSI_CH_EMPTY = FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10), + IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11), }; /** @@ -161,6 +164,19 @@ union IpaHwChkChEmptyCmdData_t { u32 raw32b; } __packed; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO + * command. 
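+ * The uC acts as a doorbell proxy: a mailbox interrupt on @mboxN is + * forwarded as a doorbell write to @remoteIPAAddr.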
+ * @remoteIPAAddr: 5G IPA address : uC proxies Q6 doorbell to this address + * @mboxN: mbox on which Q6 will interrupt uC + */ +struct IpaHwDbAddrInfo_t { + u32 remoteIPAAddr; + uint32_t mboxN; +} __packed; + + /** * When resource group 10 limitation mitigation is enabled, uC send * cmd should be able to run in interrupt context, so using spin lock @@ -927,3 +943,36 @@ int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); return res; } + +int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n) +{ + int res; + struct ipa_mem_buffer cmd; + struct IpaHwDbAddrInfo_t *uc_info; + + cmd.size = sizeof(*uc_info); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) + return -ENOMEM; + + uc_info = (struct IpaHwDbAddrInfo_t *) cmd.base; + uc_info->remoteIPAAddr = remote_addr; + uc_info->mboxN = mbox_n; + + res = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO, 0, + false, 10 * HZ); + + if (res) { + IPAERR("fail to map 0x%x to mbox %d\n", + uc_info->remoteIPAAddr, + uc_info->mboxN); + goto free_coherent; + } + + res = 0; +free_coherent: + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 357e5efc4adda5055dd657481fe8cb1dd9b4ceb6..f1672ddbed614fdad125b158906295e5b22c3e54 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -36,6 +36,7 @@ #include #include #include +#include "ipa_mhi_proxy.h" #include "ipa_trace.h" @@ -3601,6 +3602,8 @@ void ipa3_q6_handshake_complete(bool ssr_bootup) */ rmnet_ipa_get_network_stats_and_update(); } + + imp_handle_modem_ready(); } static inline bool rmnet_ipa3_check_any_client_inited diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c index ad2b8399187780b99a200115c0168644090698a1..618aa60a4aecd1a0a9d60f8ea56132b45dcdeb66 100644 --- a/drivers/platform/msm/msm_ext_display.c +++ b/drivers/platform/msm/msm_ext_display.c @@ -442,6 +442,7 @@ int msm_ext_disp_select_audio_codec(struct platform_device *pdev, return ret; } +EXPORT_SYMBOL(msm_ext_disp_select_audio_codec); static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data) { diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c index 3d74f7e4452c9d7d980876150b8198f47fad7ebe..099e39c0eb4368a7e1c34e819cf2d96f09989df3 100644 --- a/drivers/power/supply/qcom/fg-alg.c +++ b/drivers/power/supply/qcom/fg-alg.c @@ -164,13 +164,13 @@ void cycle_count_update(struct cycle_counter *counter, int batt_soc, } /** - * get_cycle_count - + * get_bucket_cycle_count - * @counter: Cycle counter object * * Returns the cycle counter for a SOC bucket. 
* */ -int get_cycle_count(struct cycle_counter *counter) +static int get_bucket_cycle_count(struct cycle_counter *counter) { int count; @@ -186,6 +186,70 @@ int get_cycle_count(struct cycle_counter *counter) return count; } +/** + * get_cycle_count - + * @counter: Cycle counter object + * @count: Average cycle count returned to the caller + * + * Get average cycle count for all buckets + * + */ +int get_cycle_count(struct cycle_counter *counter, int *count) +{ + int i, rc, temp = 0; + + for (i = 1; i <= BUCKET_COUNT; i++) { + counter->id = i; + rc = get_bucket_cycle_count(counter); + if (rc < 0) { + pr_err("Couldn't get cycle count rc=%d\n", rc); + return rc; + } + temp += rc; + } + + /* + * Normalize the counter across each bucket so that we can get + * the overall charge cycle count. + */ + + *count = temp / BUCKET_COUNT; + return 0; +} + +/** + * get_cycle_counts - + * @counter: Cycle counter object + * @buf: Bucket cycle counts formatted in a string returned to the caller + * + * Get cycle count for all buckets in a string format + * + */ +int get_cycle_counts(struct cycle_counter *counter, const char **buf) +{ + int i, rc, len = 0; + + for (i = 1; i <= BUCKET_COUNT; i++) { + counter->id = i; + rc = get_bucket_cycle_count(counter); + if (rc < 0) { + pr_err("Couldn't get cycle count rc=%d\n", rc); + return rc; + } + + if (sizeof(counter->str_buf) - len < 8) { + pr_err("Invalid length %d\n", len); + return -EINVAL; + } + + len += snprintf(counter->str_buf + len, 8, "%d ", rc); + } + + counter->str_buf[len] = '\0'; + *buf = counter->str_buf; + return 0; +} + /** * cycle_count_init - * @counter: Cycle counter object diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h index ff5becee881bb835890ebb95db2b4e78cf1103fe..41d278a386ac3dd5e17d4a8dcd0035d14c04d783 100644 --- a/drivers/power/supply/qcom/fg-alg.h +++ b/drivers/power/supply/qcom/fg-alg.h @@ -18,6 +18,7 @@ struct cycle_counter { void *data; + char str_buf[BUCKET_COUNT * 8]; bool started[BUCKET_COUNT]; u16 count[BUCKET_COUNT]; u8 last_soc[BUCKET_COUNT]; @@ -60,7 +61,8 @@ int restore_cycle_count(struct cycle_counter *counter); void clear_cycle_count(struct cycle_counter *counter); void cycle_count_update(struct cycle_counter *counter, int batt_soc, int charge_status, bool charge_done, bool input_present); -int get_cycle_count(struct cycle_counter *counter); +int get_cycle_count(struct cycle_counter *counter, int *count); +int get_cycle_counts(struct cycle_counter *counter, const char **buf); int cycle_count_init(struct cycle_counter *counter); void cap_learning_abort(struct cap_learning *cl); void cap_learning_update(struct cap_learning *cl, int batt_temp, diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 8b066a630daccc12b94f4162203eb6c12b0407d3..4768a6704fd988650a090142da26ad8a8b943b18 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -148,7 +148,6 @@ struct fg_gen4_chip { struct ttf ttf; struct delayed_work ttf_work; char batt_profile[PROFILE_LEN]; - char counter_buf[BUCKET_COUNT * 8]; bool ki_coeff_dischg_en; bool slope_limit_en; }; @@ -2037,31 +2036,6 @@ static int fg_get_time_to_empty(struct fg_dev *fg, int *val) return 0; } -static const char *fg_gen4_get_cycle_counts(struct fg_gen4_chip *chip) -{ - int i, rc, len = 0; - char *buf; - - buf = chip->counter_buf; - for (i = 1; i <= BUCKET_COUNT; i++) { - chip->counter->id = i; - rc = get_cycle_count(chip->counter); - if (rc < 0) { - 
pr_err("Couldn't get cycle count rc=%d\n", rc); - return NULL; - } - - if (sizeof(chip->counter_buf) - len < 8) { - pr_err("Invalid length %d\n", len); - return NULL; - } - - len += snprintf(buf+len, 8, "%d ", rc); - } - - buf[len] = '\0'; - return buf; -} static void sram_dump_work(struct work_struct *work) { @@ -2240,8 +2214,13 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW: rc = fg_gen4_get_charge_counter_shadow(chip, &pval->intval); break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + rc = get_cycle_count(chip->counter, &pval->intval); + break; case POWER_SUPPLY_PROP_CYCLE_COUNTS: - pval->strval = fg_gen4_get_cycle_counts(chip); + rc = get_cycle_counts(chip->counter, &pval->strval); + if (rc < 0) + pval->strval = NULL; break; case POWER_SUPPLY_PROP_SOC_REPORTING_READY: pval->intval = fg->soc_reporting_ready; diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index 79f572a1b9410e85716a64f773130dd14155d41d..92360f96f20ccb64e67e65e019614e6b796916f6 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -1032,6 +1032,7 @@ static enum power_supply_property smb5_batt_props[] = { POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_RECHARGE_SOC, }; @@ -1133,6 +1134,9 @@ static int smb5_batt_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_CHARGE_COUNTER: rc = smblib_get_prop_batt_charge_counter(chg, val); break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + rc = smblib_get_prop_batt_cycle_count(chg, val); + break; case POWER_SUPPLY_PROP_RECHARGE_SOC: val->intval = chg->auto_recharge_soc; break; diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 1869fb848f80d513a6d208b181f06be4c7fd93cc..0435a94907ec006ecd69b32c5535cf19404a3483 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -1453,6 +1453,19 @@ int smblib_get_prop_batt_charge_counter(struct smb_charger *chg, return rc; } +int smblib_get_prop_batt_cycle_count(struct smb_charger *chg, + union power_supply_propval *val) +{ + int rc; + + if (!chg->bms_psy) + return -EINVAL; + + rc = power_supply_get_property(chg->bms_psy, + POWER_SUPPLY_PROP_CYCLE_COUNT, val); + return rc; +} + /*********************** * BATTERY PSY SETTERS * ***********************/ diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 29e067000349d302d002a1bcb6369939eb9d727d..b7a2b5a0d8e2c1529b4c4db89d9fa8c7cb4b63f9 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -457,6 +457,8 @@ int smblib_get_prop_batt_temp(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_batt_charge_counter(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_prop_batt_cycle_count(struct smb_charger *chg, + union power_supply_propval *val); int smblib_set_prop_input_suspend(struct smb_charger *chg, const union power_supply_propval *val); int smblib_set_prop_batt_capacity(struct smb_charger *chg, diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index f39317c63aa6dd11fefb313da28470a1831a12bd..d5f1cf0972afb2bdc70164947d91fe3d21d2efc1 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -307,6 +307,9 @@ static struct msm_soc_info cpu_of_id[] = { /* sm8150 ID */ [339] = {MSM_CPU_SM8150, 
"SM8150"}, + /* sa8150 ID */ + [362] = {MSM_CPU_SA8150, "SA8150"}, + /* sdmshrike ID */ [340] = {MSM_CPU_SDMSHRIKE, "SDMSHRIKE"}, @@ -1171,6 +1174,10 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 339; strlcpy(dummy_socinfo.build_id, "sm8150 - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_sa8150()) { + dummy_socinfo.id = 362; + strlcpy(dummy_socinfo.build_id, "sa8150 - ", + sizeof(dummy_socinfo.build_id)); } else if (early_machine_is_sdmshrike()) { dummy_socinfo.id = 340; strlcpy(dummy_socinfo.build_id, "sdmshrike - ", diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 0ddc663678c4be8342589c39c253f91f906c8184..b1efb3958a871e16c11bfbb2b9aaa9519abceaf9 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -48,6 +48,9 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) list_add_tail(&page->lru, &pool->low_items); pool->low_count++; } + + mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES, + (1 << (PAGE_SHIFT + pool->order))); mutex_unlock(&pool->mutex); return 0; } @@ -67,6 +70,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) } list_del(&page->lru); + mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES, + -(1 << (PAGE_SHIFT + pool->order))); return page; } diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c index c89ccc6d03b19fade6efbcd4d98bdcbcf2b39a4f..dfbb3befbc998e35e6dab9b5df2e2be2da81fd17 100644 --- a/drivers/thermal/msm-tsens.c +++ b/drivers/thermal/msm-tsens.c @@ -81,7 +81,7 @@ static const struct of_device_id tsens_table[] = { { .compatible = "qcom,sdm630-tsens", .data = &data_tsens23xx, }, - { .compatible = "qcom,sdm640-tsens", + { .compatible = "qcom,sm6150-tsens", .data = &data_tsens23xx, }, { .compatible = "qcom,sdm845-tsens", diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c index 903d8066667ff02be9e9667e191e90555d19c3a6..31fcfde67a0bc2c0605a55c5db237db4acd93bc2 100644 --- a/drivers/thermal/qcom/qti_virtual_sensor.c +++ b/drivers/thermal/qcom/qti_virtual_sensor.c @@ -110,6 +110,16 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { "gpuss-1-usr"}, .logic = VIRT_MAXIMUM, }, + { + .virt_zone_name = "cpuss-max-step", + .num_sensors = 5, + .sensor_names = {"cpuss-0-usr", + "cpuss-1-usr", + "cpuss-2-usr", + "cpuss-3-usr", + "mhm-usr"}, + .logic = VIRT_MAXIMUM, + }, }; int qti_virtual_sensor_register(struct device *dev) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index f6a855c0b516eecc2bd87de53e23a641cda54095..4b1d34d67db42284ec3abae8eaa9f483fdada040 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -724,7 +724,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, * based on the MACRO determine the default state to use or the * offset from the max_state. 
*/ - if (upper > (THERMAL_MAX_LIMIT - max_state)) { + if (upper >= (THERMAL_MAX_LIMIT - max_state)) { /* upper default max_state */ if (upper == THERMAL_NO_LIMIT) upper = max_state; @@ -732,7 +732,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, upper = max_state - (THERMAL_MAX_LIMIT - upper); } - if (lower > (THERMAL_MAX_LIMIT - max_state)) { + if (lower >= (THERMAL_MAX_LIMIT - max_state)) { /* lower default 0 */ if (lower == THERMAL_NO_LIMIT) lower = 0; diff --git a/drivers/thermal/tsens1xxx.c b/drivers/thermal/tsens1xxx.c index 02322afaf89c6acddb1b6ad697de7b5b7b909066..914e2ced7755c0d2c63058cae1b329bf3aa497de 100644 --- a/drivers/thermal/tsens1xxx.c +++ b/drivers/thermal/tsens1xxx.c @@ -351,8 +351,8 @@ static irqreturn_t tsens_irq_thread(int irq, void *data) th_temp = code_to_degc((threshold & TSENS_UPPER_THRESHOLD_MASK) >> TSENS_UPPER_THRESHOLD_SHIFT, - tm->sensor); - if (th_temp > temp) { + (tm->sensor + i)); + if (th_temp > (temp/TSENS_SCALE_MILLIDEG)) { pr_debug("Re-arm high threshold\n"); rc = tsens_tz_activate_trip_type( &tm->sensor[i], @@ -373,8 +373,8 @@ static irqreturn_t tsens_irq_thread(int irq, void *data) tm->tsens_tm_addr + addr_offset)); th_temp = code_to_degc((threshold & TSENS_LOWER_THRESHOLD_MASK), - tm->sensor); - if (th_temp < temp) { + (tm->sensor + i)); + if (th_temp < (temp/TSENS_SCALE_MILLIDEG)) { pr_debug("Re-arm Low threshold\n"); rc = tsens_tz_activate_trip_type( &tm->sensor[i], diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index f5b04265e5afd645e5a1728b5fbf9e8c3d393745..85142b7e5d7ed6bd842d397ccb61fb513252b09c 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -2113,6 +2113,60 @@ static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc, } } +enum bus_vote { + BUS_VOTE_INVALID, + BUS_VOTE_SUSPEND, + BUS_VOTE_NOMINAL, + BUS_VOTE_SVS +}; + +static int dwc3_msm_update_bus_bw(struct dwc3_msm *mdwc, enum bus_vote bv) +{ + int ret = 0; + struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); + + if (!mdwc->bus_perf_client) + return 0; + + dbg_event(0xFF, "bus_vote_start", bv); + + switch (bv) { + case BUS_VOTE_SVS: + /* On some platforms SVS does not have separate vote. 
Vote for + * nominal if svs usecase does not exist + */ + if (mdwc->bus_scale_table->num_usecases == 2) + goto nominal_vote; + + /* index starts from zero */ + ret = msm_bus_scale_client_update_request( + mdwc->bus_perf_client, 2); + if (ret) + dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); + break; + case BUS_VOTE_NOMINAL: +nominal_vote: + ret = msm_bus_scale_client_update_request( + mdwc->bus_perf_client, 1); + if (ret) + dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); + break; + case BUS_VOTE_SUSPEND: + ret = msm_bus_scale_client_update_request( + mdwc->bus_perf_client, 0); + if (ret) + dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); + break; + default: + dev_err(mdwc->dev, "Unsupported bus vote:%d\n", bv); + ret = -EINVAL; + } + + dbg_event(0xFF, "bus_vote_end", bv); + + return ret; + +} static int dwc3_msm_suspend(struct dwc3_msm *mdwc) { int ret; @@ -2244,15 +2298,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) } } - /* Remove bus voting */ - if (mdwc->bus_perf_client) { - dbg_event(0xFF, "bus_devote_start", 0); - ret = msm_bus_scale_client_update_request( - mdwc->bus_perf_client, 0); - dbg_event(0xFF, "bus_devote_finish", 0); - if (ret) - dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret); - } + dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_SUSPEND); /* * release wakeup source with timeout to defer system suspend to @@ -2310,15 +2356,10 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) pm_stay_awake(mdwc->dev); - /* Enable bus voting */ - if (mdwc->bus_perf_client) { - dbg_event(0xFF, "bus_vote_start", 1); - ret = msm_bus_scale_client_update_request( - mdwc->bus_perf_client, 1); - dbg_event(0xFF, "bus_vote_finish", 1); - if (ret) - dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); - } + if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) + dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_SVS); + else + dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NOMINAL); /* Vote for TCXO while waking up USB HSPHY */ ret = clk_prepare_enable(mdwc->xo_clk); @@ -2513,10 +2554,10 @@ static void dwc3_resume_work(struct work_struct *w) dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__); - if (mdwc->vbus_active && !mdwc->in_restart) { + if (mdwc->extcon && mdwc->vbus_active && !mdwc->in_restart) { extcon_id = EXTCON_USB; edev = mdwc->extcon[mdwc->ext_idx].edev; - } else if (mdwc->id_state == DWC3_ID_GROUND) { + } else if (mdwc->extcon && mdwc->id_state == DWC3_ID_GROUND) { extcon_id = EXTCON_USB_HOST; edev = mdwc->extcon[mdwc->ext_idx].edev; } @@ -3503,6 +3544,7 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb, "set hs core clk rate %ld\n", mdwc->core_clk_rate_hs); mdwc->max_rh_port_speed = USB_SPEED_HIGH; + dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_SVS); } else { mdwc->max_rh_port_speed = USB_SPEED_SUPER; } @@ -3512,6 +3554,7 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb, dev_dbg(mdwc->dev, "set core clk rate %ld\n", mdwc->core_clk_rate); mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN; + dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NOMINAL); } } diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 5e58f5ec0a28e449afa8813a652b1aa3469e0721..5dd7f6b3c45cb42e1e004b00da12b6648faf803a 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2334,6 +2334,19 @@ config FB_PRE_INIT_FB Select this option if display contents should be inherited as set by the bootloader. 
+config FB_MSM + tristate "MSM Framebuffer support" + depends on FB && ARCH_QCOM + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select SYNC_FILE + ---help--- + The MSM driver implements a frame buffer interface to + provide access to the display hardware and provide + a way for users to display graphics + on connected display panels. + config FB_MX3 tristate "MX3 Framebuffer support" depends on FB && MX3_IPU @@ -2454,6 +2467,7 @@ config FB_SIMPLE source "drivers/video/fbdev/omap/Kconfig" source "drivers/video/fbdev/omap2/Kconfig" source "drivers/video/fbdev/mmp/Kconfig" +source "drivers/video/fbdev/msm/Kconfig" config FB_SH_MOBILE_MERAM tristate "SuperH Mobile MERAM read ahead support" diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 8895536a20d648723197affff38d1bbf8fc140a8..653288d107c4beb960e94508615952bfce662e00 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -131,6 +131,11 @@ obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o obj-$(CONFIG_FB_OPENCORES) += ocfb.o obj-$(CONFIG_FB_SM712) += sm712fb.o +ifeq ($(CONFIG_FB_MSM),y) +obj-y += msm/ +else +obj-$(CONFIG_MSM_DBA) += msm/msm_dba/ +endif # Platform or fallback drivers go here obj-$(CONFIG_FB_UVESA) += uvesafb.o obj-$(CONFIG_FB_VESA) += vesafb.o diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 1b4f1d5d07d6ce6ffebcfb306cf2b6567493ec54..8634175da1fbf6e386ddea9741a4cd587e3e89af 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1087,7 +1087,7 @@ fb_blank(struct fb_info *info, int blank) EXPORT_SYMBOL(fb_blank); static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) + unsigned long arg, struct file *file) { struct fb_ops *fb; struct fb_var_screeninfo var; @@ -1106,6 +1106,13 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, memset(&cmap, 0, sizeof(cmap)); memset(&event, 0, sizeof(event)); + memset(&var, 0, sizeof(var)); + memset(&fix, 0, sizeof(fix)); + memset(&con2fb, 0, sizeof(con2fb)); + memset(&cmap_from, 0, sizeof(cmap_from)); + memset(&cmap, 0, sizeof(cmap)); + memset(&event, 0, sizeof(event)); + switch (cmd) { case FBIOGET_VSCREENINFO: if (!lock_fb_info(info)) @@ -1224,7 +1231,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, if (!lock_fb_info(info)) return -ENODEV; fb = info->fbops; - if (fb->fb_ioctl) + if (fb->fb_ioctl_v2) + ret = fb->fb_ioctl_v2(info, cmd, arg, file); + else if (fb->fb_ioctl) ret = fb->fb_ioctl(info, cmd, arg); else ret = -ENOTTY; @@ -1239,7 +1248,7 @@ static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (!info) return -ENODEV; - return do_fb_ioctl(info, cmd, arg); + return do_fb_ioctl(info, cmd, arg, file); } #ifdef CONFIG_COMPAT @@ -1270,7 +1279,7 @@ struct fb_cmap32 { }; static int fb_getput_cmap(struct fb_info *info, unsigned int cmd, - unsigned long arg) + unsigned long arg, struct file *file) { struct fb_cmap_user __user *cmap; struct fb_cmap32 __user *cmap32; @@ -1293,7 +1302,7 @@ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd, put_user(compat_ptr(data), &cmap->transp)) return -EFAULT; - err = do_fb_ioctl(info, cmd, (unsigned long) cmap); + err = do_fb_ioctl(info, cmd, (unsigned long) cmap, file); if (!err) { if (copy_in_user(&cmap32->start, @@ -1338,7 +1347,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix, } static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, - unsigned long arg) + unsigned long arg, 
struct file *file) { struct fb_fix_screeninfo fix; @@ -1367,20 +1376,22 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, case FBIOPUT_CON2FBMAP: arg = (unsigned long) compat_ptr(arg); case FBIOBLANK: - ret = do_fb_ioctl(info, cmd, arg); + ret = do_fb_ioctl(info, cmd, arg, file); break; case FBIOGET_FSCREENINFO: - ret = fb_get_fscreeninfo(info, cmd, arg); + ret = fb_get_fscreeninfo(info, cmd, arg, file); break; case FBIOGETCMAP: case FBIOPUTCMAP: - ret = fb_getput_cmap(info, cmd, arg); + ret = fb_getput_cmap(info, cmd, arg, file); break; default: - if (fb->fb_compat_ioctl) + if (fb->fb_compat_ioctl_v2) + ret = fb->fb_compat_ioctl_v2(info, cmd, arg, file); + else if (fb->fb_compat_ioctl) ret = fb->fb_compat_ioctl(info, cmd, arg); break; } } diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..e8f902bc0e19b87ec14ba0f7e2410457d6f952dc --- /dev/null +++ b/drivers/video/fbdev/msm/Kconfig @@ -0,0 +1,137 @@ +source "drivers/video/fbdev/msm/msm_dba/Kconfig" + +if FB_MSM + +config FB_MSM_MDSS_COMMON + bool + +choice + prompt "MDP HW version" + default FB_MSM_MDP + +config FB_MSM_MDP + bool "MDP HW" + select FB_MSM_MDP_HW + ---help--- + The Mobile Display Processor (MDP) driver supports devices which + contain the MDP hardware block. + + Support for MSM MDP HW revision 2.2. + Say Y here if this is an msm7201 variant platform. + +config FB_MSM_MDSS + bool "MDSS HW" + select SYNC_FILE + select FB_MSM_MDSS_COMMON + ---help--- + The Mobile Display Sub System (MDSS) driver supports devices which + contain the MDSS hardware block. + + The MDSS driver implements a frame buffer interface to provide access to + the display hardware and a way for users to display graphics + on connected display panels. + +config FB_MSM_MDP_NONE + bool "MDP HW None" + ---help--- + This is used for platforms without a Mobile Display Sub System (MDSS). + MDM platforms do not have the MDSS hardware block. + + Say Y here if this is an MDM platform. + +endchoice + +config FB_MSM_QPIC + bool + select FB_MSM_MDSS_COMMON + +config FB_MSM_QPIC_ILI_QVGA_PANEL + bool "Qpic MIPI ILI QVGA Panel" + select FB_MSM_QPIC + ---help--- + Support for the MIPI ILI QVGA (240x320) panel, ILI TECHNOLOGY 9341, + with on-chip full display RAM, using the parallel interface. + +config FB_MSM_QPIC_PANEL_DETECT + bool "Qpic Panel Detect" + select FB_MSM_QPIC_ILI_QVGA_PANEL + ---help--- + Support for QPIC panel auto-detection. + +config FB_MSM_MDSS_WRITEBACK + bool "MDSS Writeback Panel" + ---help--- + The MDSS Writeback Panel provides support for routing the output of + the MDSS frame buffer driver and MDP processing to memory. + +config FB_MSM_MDSS_HDMI_PANEL + bool "MDSS HDMI Tx Panel" + depends on FB_MSM_MDSS + select MSM_EXT_DISPLAY + default n + ---help--- + The MDSS HDMI Panel provides support for transmitting TMDS signals of + MDSS frame buffer data to connected HDMI-compliant TVs, monitors, etc. + +config FB_MSM_MDSS_HDMI_MHL_SII8334 + depends on FB_MSM_MDSS_HDMI_PANEL + bool 'MHL SII8334 support' + default n + ---help--- + Support for HDMI to MHL conversion. + MHL (Mobile High-Definition Link) technology + uses a USB connector to output HDMI content. + +config FB_MSM_MDSS_MHL3 + depends on FB_MSM_MDSS_HDMI_PANEL + bool "MHL3 SII8620 Support" + default n + ---help--- + Support for the Silicon Image 8620 MHL Tx transmitter, which uses a + USB connector to output HDMI content. The transmitter is an + i2c device acting as an HDMI to MHL bridge. The chip supports the + MHL 3.0 standard.
+ +config FB_MSM_MDSS_DSI_CTRL_STATUS + tristate "DSI controller status check feature" + ---help--- + Check the DSI controller status periodically (the default period is 5 + seconds) by sending a Bus-Turn-Around (BTA) command. If the DSI controller + fails to acknowledge the BTA command, the driver sends a PANEL_ALIVE=0 status + to the HAL layer so that it can reset the controller. + +config FB_MSM_MDSS_EDP_PANEL + depends on FB_MSM_MDSS + bool "MDSS eDP Panel" + ---help--- + The MDSS eDP Panel provides support for the eDP host controller driver, + which runs in video mode only and is responsible for transmitting the + frame buffer from the host SoC to the eDP display panel. + +config FB_MSM_MDSS_MDP3 + depends on FB_MSM_MDSS + bool "MDP3 display controller" + ---help--- + MDP3 provides support for an older version of the display controller + that is included in the latest display sub-system, known as MDSS. + +config FB_MSM_MDSS_XLOG_DEBUG + depends on FB_MSM_MDSS + bool "Enable MDSS debugging" + ---help--- + MDSS debugging provides support to enable display debugging + features that: dump MDSS registers during driver errors, panic the + driver during fatal errors and route some display-driver logging + into an internal buffer (this avoids logging overhead). + +config FB_MSM_MDSS_FRC_DEBUG + depends on DEBUG_FS && FB_MSM_MDSS + bool "Enable Video FRC debugging" + default n + ---help--- + MDSS FRC debugging provides support to enable the deterministic + frame rate control (FRC) debugging features that collect video frame + statistics and check whether the output pattern matches the expected + cadence. + +endif diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..81d4953828f099d1a65194dea02314c5d9c718f8 --- /dev/null +++ b/drivers/video/fbdev/msm/Makefile @@ -0,0 +1,74 @@ +ccflags-y += -I$(src) + +obj-$(CONFIG_FB_MSM_MDSS_MHL3) += mhl3/ +obj-$(CONFIG_MSM_DBA) += msm_dba/ + +mdss-mdp3-objs = mdp3.o mdp3_layer.o mdp3_dma.o mdp3_ctrl.o dsi_status_v2.o +mdss-mdp3-objs += mdp3_ppp.o mdp3_ppp_hwio.o mdp3_ppp_data.o +obj-$(CONFIG_FB_MSM_MDSS_MDP3) += mdss-mdp3.o +ifeq ($(CONFIG_FB_MSM_MDSS_MDP3), y) +ccflags-y += -DTARGET_HW_MDSS_MDP3 +endif +mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o dsi_status_6g.o +mdss-mdp-objs += mdss_mdp_pp.o mdss_mdp_pp_debug.o mdss_mdp_pp_cache_config.o mdss_sync.o +mdss-mdp-objs += mdss_mdp_intf_video.o +mdss-mdp-objs += mdss_mdp_intf_cmd.o +mdss-mdp-objs += mdss_mdp_intf_writeback.o +mdss-mdp-objs += mdss_rotator.o +mdss-mdp-objs += mdss_mdp_overlay.o +mdss-mdp-objs += mdss_mdp_layer.o +mdss-mdp-objs += mdss_mdp_splash_logo.o +mdss-mdp-objs += mdss_mdp_cdm.o +mdss-mdp-objs += mdss_smmu.o +mdss-mdp-objs += mdss_mdp_wfd.o +mdss-mdp-objs += mdss_io_util.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o + +mdss-mdp-objs += mdss_mdp_pp_v1_7.o +mdss-mdp-objs += mdss_mdp_pp_v3.o +mdss-mdp-objs += mdss_mdp_pp_common.o + +ifeq ($(CONFIG_FB_MSM_MDSS),y) +obj-$(CONFIG_DEBUG_FS) += mdss_debug.o mdss_debug_xlog.o +endif + +ifeq ($(CONFIG_FB_MSM_MDSS_FRC_DEBUG),y) +obj-$(CONFIG_DEBUG_FS) += mdss_debug_frc.o +endif + +mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o mdss_dsi_cmd.o mdss_dsi_status.o +mdss-dsi-objs += mdss_dsi_panel.o +mdss-dsi-objs += msm_mdss_io_8974.o +mdss-dsi-objs += mdss_dsi_phy.o +mdss-dsi-objs += mdss_dsi_phy_12nm.o +mdss-dsi-objs += mdss_dsi_clk.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_panel.o + +ifneq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
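+# HDMI/MHL, CEC, DBA-utils and eDP objects below are only built for full MDSS +# targets; MDP3-based targets skip them and the TARGET_HW_MDSS_HDMI define.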
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_util.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_edid.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_cec_core.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_dba_utils.o +obj-$(CONFIG_FB_MSM_MDSS_EDP_PANEL) += mdss_edp.o +obj-$(CONFIG_FB_MSM_MDSS_EDP_PANEL) += mdss_edp_aux.o + +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_panel.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_audio.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o +ccflags-y += -DTARGET_HW_MDSS_HDMI +endif + +obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o + +mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o mdss_sync.o +obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o +obj-$(CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL) += qpic_panel_ili_qvga.o + +obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o mdss_util.o +obj-$(CONFIG_COMPAT) += mdss_compat_utils.o diff --git a/drivers/video/fbdev/msm/dsi_host_v2.c b/drivers/video/fbdev/msm/dsi_host_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..58927a6d26c69859a6dd6db53bd014c2cc5744ef --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_host_v2.c @@ -0,0 +1,1889 @@ +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dsi_v2.h" +#include "dsi_io_v2.h" +#include "dsi_host_v2.h" +#include "mdss_debug.h" +#include "mdp3.h" + +#define DSI_POLL_SLEEP_US 1000 +#define DSI_POLL_TIMEOUT_US 16000 +#define DSI_ESC_CLK_RATE 19200000 +#define DSI_DMA_CMD_TIMEOUT_MS 200 +#define VSYNC_PERIOD 17 +#define DSI_MAX_PKT_SIZE 10 +#define DSI_SHORT_PKT_DATA_SIZE 2 +#define DSI_MAX_BYTES_TO_READ 16 + +struct dsi_host_v2_private { + unsigned char *dsi_base; + size_t dsi_reg_size; + struct device dis_dev; + int clk_count; + int dsi_on; + + void (*debug_enable_clk)(int on); +}; + +static struct dsi_host_v2_private *dsi_host_private; +static int msm_dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable); + +int msm_dsi_init(void) +{ + if (!dsi_host_private) { + dsi_host_private = kzalloc(sizeof(struct dsi_host_v2_private), + GFP_KERNEL); + if (!dsi_host_private) + return -ENOMEM; + + } + + return 0; +} + +void msm_dsi_deinit(void) +{ + kfree(dsi_host_private); + dsi_host_private = NULL; +} + +void msm_dsi_ack_err_status(unsigned char *ctrl_base) +{ + u32 status; + + status = MIPI_INP(ctrl_base + DSI_ACK_ERR_STATUS); + + if (status) { + MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, status); + + /* Writing of an extra 0 needed to clear error bits */ + MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, 0); + pr_err("%s: status=%x\n", __func__, status); + } +} + +void msm_dsi_timeout_status(unsigned char *ctrl_base) +{ + u32 status; + + status = MIPI_INP(ctrl_base + DSI_TIMEOUT_STATUS); + if (status & 0x0111) { + MIPI_OUTP(ctrl_base + DSI_TIMEOUT_STATUS, status); + pr_err("%s: status=%x\n", __func__, status); + } +} + +void msm_dsi_dln0_phy_err(unsigned char *ctrl_base) +{ + u32 status; + + status = MIPI_INP(ctrl_base + DSI_DLN0_PHY_ERR); + + if (status & 0x011111) { + MIPI_OUTP(ctrl_base + DSI_DLN0_PHY_ERR, status); + pr_err("%s: status=%x\n", __func__, status); + } +} + +void msm_dsi_fifo_status(unsigned char *ctrl_base) +{ + u32 status; + + status = MIPI_INP(ctrl_base + DSI_FIFO_STATUS); + + if (status & 0x44444489) { + MIPI_OUTP(ctrl_base + DSI_FIFO_STATUS, status); + pr_err("%s: status=%x\n", __func__, status); + } +} + +void msm_dsi_status(unsigned char *ctrl_base) +{ + u32 status; + + status = MIPI_INP(ctrl_base + DSI_STATUS); + + if (status & 0x80000000) { + MIPI_OUTP(ctrl_base + DSI_STATUS, status); + pr_err("%s: status=%x\n", __func__, status); + } +} + +void msm_dsi_error(unsigned char *ctrl_base) +{ + msm_dsi_ack_err_status(ctrl_base); + msm_dsi_timeout_status(ctrl_base); + msm_dsi_fifo_status(ctrl_base); + msm_dsi_status(ctrl_base); + msm_dsi_dln0_phy_err(ctrl_base); +} + +static void msm_dsi_set_irq_mask(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask) +{ + u32 intr_ctrl; + + intr_ctrl = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL); + intr_ctrl |= mask; + MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, intr_ctrl); +} + +static void msm_dsi_clear_irq_mask(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask) +{ + u32 intr_ctrl; + + intr_ctrl = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL); + intr_ctrl &= ~mask; + MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, intr_ctrl); +} + +static void msm_dsi_set_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask) +{ + unsigned long flags; + + spin_lock_irqsave(&ctrl->irq_lock, flags); + if (ctrl->dsi_irq_mask & mask) { + spin_unlock_irqrestore(&ctrl->irq_lock, flags); + return; + } + if (ctrl->dsi_irq_mask == 0) { + ctrl->mdss_util->enable_irq(ctrl->dsi_hw); + 
pr_debug("%s: IRQ Enable, mask=%x term=%x\n", __func__, + (int)ctrl->dsi_irq_mask, (int)mask); + } + + msm_dsi_set_irq_mask(ctrl, mask); + ctrl->dsi_irq_mask |= mask; + spin_unlock_irqrestore(&ctrl->irq_lock, flags); +} + +static void msm_dsi_clear_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask) +{ + unsigned long flags; + + spin_lock_irqsave(&ctrl->irq_lock, flags); + if (!(ctrl->dsi_irq_mask & mask)) { + spin_unlock_irqrestore(&ctrl->irq_lock, flags); + return; + } + ctrl->dsi_irq_mask &= ~mask; + if (ctrl->dsi_irq_mask == 0) { + ctrl->mdss_util->disable_irq(ctrl->dsi_hw); + pr_debug("%s: IRQ Disable, mask=%x term=%x\n", __func__, + (int)ctrl->dsi_irq_mask, (int)mask); + } + msm_dsi_clear_irq_mask(ctrl, mask); + spin_unlock_irqrestore(&ctrl->irq_lock, flags); +} + +irqreturn_t msm_dsi_isr_handler(int irq, void *ptr) +{ + u32 isr; + + struct mdss_dsi_ctrl_pdata *ctrl = + (struct mdss_dsi_ctrl_pdata *)ptr; + + spin_lock(&ctrl->mdp_lock); + + if (ctrl->dsi_irq_mask == 0) { + spin_unlock(&ctrl->mdp_lock); + return IRQ_HANDLED; + } + + isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL); + MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr); + + pr_debug("%s: isr=%x", __func__, isr); + + if (isr & DSI_INTR_ERROR) { + pr_err("%s: isr=%x %x", __func__, isr, (int)DSI_INTR_ERROR); + msm_dsi_error(dsi_host_private->dsi_base); + } + + if (isr & DSI_INTR_VIDEO_DONE) + complete(&ctrl->video_comp); + + if (isr & DSI_INTR_CMD_DMA_DONE) + complete(&ctrl->dma_comp); + + if (isr & DSI_INTR_BTA_DONE) + complete(&ctrl->bta_comp); + + if (isr & DSI_INTR_CMD_MDP_DONE) + complete(&ctrl->mdp_comp); + + spin_unlock(&ctrl->mdp_lock); + + return IRQ_HANDLED; +} + +int msm_dsi_irq_init(struct device *dev, int irq_no, + struct mdss_dsi_ctrl_pdata *ctrl) +{ + int ret; + u32 isr; + struct mdss_hw *dsi_hw; + + msm_dsi_ahb_ctrl(1); + isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL); + isr &= ~DSI_INTR_ALL_MASK; + MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr); + msm_dsi_ahb_ctrl(0); + + ret = devm_request_irq(dev, irq_no, msm_dsi_isr_handler, + IRQF_DISABLED, "DSI", ctrl); + if (ret) { + pr_err("%s request_irq() failed!\n", __func__); + return ret; + } + + dsi_hw = kzalloc(sizeof(struct mdss_hw), GFP_KERNEL); + if (!dsi_hw) + return -ENOMEM; + + ctrl->dsi_hw = dsi_hw; + + dsi_hw->irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL); + if (!dsi_hw->irq_info) { + kfree(dsi_hw); + pr_err("no mem to save irq info: kzalloc fail\n"); + return -ENOMEM; + } + + dsi_hw->hw_ndx = MDSS_HW_DSI0; + dsi_hw->irq_info->irq = irq_no; + dsi_hw->irq_info->irq_mask = 0; + dsi_hw->irq_info->irq_ena = false; + dsi_hw->irq_info->irq_buzy = false; + + ctrl->mdss_util->register_irq(ctrl->dsi_hw); + ctrl->mdss_util->disable_irq(ctrl->dsi_hw); + + return 0; +} + +static void msm_dsi_get_cmd_engine(struct mdss_dsi_ctrl_pdata *ctrl) +{ + unsigned char *ctrl_base = dsi_host_private->dsi_base; + u32 dsi_ctrl; + + if (ctrl->panel_mode == DSI_VIDEO_MODE) { + dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL); + MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl | 0x04); + } +} + +static void msm_dsi_release_cmd_engine(struct mdss_dsi_ctrl_pdata *ctrl) +{ + unsigned char *ctrl_base = dsi_host_private->dsi_base; + u32 dsi_ctrl; + + if (ctrl->panel_mode == DSI_VIDEO_MODE) { + dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL); + dsi_ctrl &= ~0x04; + MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl); + } +} + +static int msm_dsi_wait4mdp_done(struct mdss_dsi_ctrl_pdata *ctrl) +{ + int rc; + unsigned long flag; + + spin_lock_irqsave(&ctrl->mdp_lock, flag); + 
reinit_completion(&ctrl->mdp_comp); + msm_dsi_set_irq(ctrl, DSI_INTR_CMD_MDP_DONE_MASK); + spin_unlock_irqrestore(&ctrl->mdp_lock, flag); + + rc = wait_for_completion_timeout(&ctrl->mdp_comp, + msecs_to_jiffies(VSYNC_PERIOD * 4)); + + if (rc == 0) { + pr_err("DSI wait 4 mdp done time out\n"); + rc = -ETIME; + } else if (!IS_ERR_VALUE(rc)) { + rc = 0; + } + + msm_dsi_clear_irq(ctrl, DSI_INTR_CMD_MDP_DONE_MASK); + + return rc; +} + +void msm_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl) +{ + int rc; + u32 dsi_status; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + if (ctrl->panel_mode == DSI_VIDEO_MODE) + return; + + dsi_status = MIPI_INP(ctrl_base + DSI_STATUS); + if (dsi_status & 0x04) { + pr_debug("dsi command engine is busy\n"); + rc = msm_dsi_wait4mdp_done(ctrl); + if (rc) + pr_err("Timed out waiting for mdp done"); + } +} + +static int msm_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl) +{ + int rc; + unsigned long flag; + + spin_lock_irqsave(&ctrl->mdp_lock, flag); + reinit_completion(&ctrl->video_comp); + msm_dsi_set_irq(ctrl, DSI_INTR_VIDEO_DONE_MASK); + spin_unlock_irqrestore(&ctrl->mdp_lock, flag); + + rc = wait_for_completion_timeout(&ctrl->video_comp, + msecs_to_jiffies(VSYNC_PERIOD * 4)); + + if (rc == 0) { + pr_err("DSI wait 4 video done time out\n"); + rc = -ETIME; + } else if (!IS_ERR_VALUE(rc)) { + rc = 0; + } + + msm_dsi_clear_irq(ctrl, DSI_INTR_VIDEO_DONE_MASK); + + return rc; +} + +static int msm_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl) +{ + int rc = 0; + u32 dsi_status; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + if (ctrl->panel_mode == DSI_CMD_MODE) + return rc; + + dsi_status = MIPI_INP(ctrl_base + DSI_STATUS); + if (dsi_status & 0x08) { + pr_debug("dsi command in video mode wait for active region\n"); + rc = msm_dsi_wait4video_done(ctrl); + /* delay 4-5 ms to skip BLLP */ + if (!rc) + usleep_range(4000, 5000); + } + return rc; +} + +void msm_dsi_host_init(struct mdss_panel_data *pdata) +{ + u32 dsi_ctrl, data; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + struct mipi_panel_info *pinfo; + + pr_debug("%s\n", __func__); + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + pinfo = &pdata->panel_info.mipi; + + + if (pinfo->mode == DSI_VIDEO_MODE) { + data = 0; + if (pinfo->pulse_mode_hsa_he) + data |= BIT(28); + if (pinfo->hfp_power_stop) + data |= BIT(24); + if (pinfo->hbp_power_stop) + data |= BIT(20); + if (pinfo->hsa_power_stop) + data |= BIT(16); + if (pinfo->eof_bllp_power_stop) + data |= BIT(15); + if (pinfo->bllp_power_stop) + data |= BIT(12); + data |= ((pinfo->traffic_mode & 0x03) << 8); + data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */ + data |= (pinfo->vc & 0x03); + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_CTRL, data); + + data = 0; + data |= ((pinfo->rgb_swap & 0x07) << 12); + if (pinfo->b_sel) + data |= BIT(8); + if (pinfo->g_sel) + data |= BIT(4); + if (pinfo->r_sel) + data |= BIT(0); + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_DATA_CTRL, data); + } else if (pinfo->mode == DSI_CMD_MODE) { + data = 0; + data |= ((pinfo->interleave_max & 0x0f) << 20); + data |= ((pinfo->rgb_swap & 0x07) << 16); + if (pinfo->b_sel) + data |= BIT(12); + if (pinfo->g_sel) + data |= BIT(8); + if (pinfo->r_sel) + data |= BIT(4); + data |= (pinfo->dst_format & 0x0f); /* 4 bits */ + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_CTRL, data); + + /* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */ + data = pinfo->wr_mem_continue & 0x0ff; + data <<= 8; + 
data |= (pinfo->wr_mem_start & 0x0ff); + if (pinfo->insert_dcs_cmd) + data |= BIT(16); + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, + data); + } else + pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode); + + dsi_ctrl = BIT(8) | BIT(2); /* clock enable & cmd mode */ + + if (pinfo->crc_check) + dsi_ctrl |= BIT(24); + if (pinfo->ecc_check) + dsi_ctrl |= BIT(20); + if (pinfo->data_lane3) + dsi_ctrl |= BIT(7); + if (pinfo->data_lane2) + dsi_ctrl |= BIT(6); + if (pinfo->data_lane1) + dsi_ctrl |= BIT(5); + if (pinfo->data_lane0) + dsi_ctrl |= BIT(4); + + /* from frame buffer, low power mode */ + /* DSI_COMMAND_MODE_DMA_CTRL */ + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, 0x14000000); + + data = 0; + if (pinfo->te_sel) + data |= BIT(31); + data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */ + data |= pinfo->dma_trigger; /* cmd dma trigger */ + data |= (pinfo->stream & 0x01) << 8; + MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, data); + + /* DSI_LAN_SWAP_CTRL */ + MIPI_OUTP(ctrl_base + DSI_LANE_SWAP_CTRL, ctrl_pdata->dlane_swap); + + /* clock out ctrl */ + data = pinfo->t_clk_post & 0x3f; /* 6 bits */ + data <<= 8; + data |= pinfo->t_clk_pre & 0x3f; /* 6 bits */ + /* DSI_CLKOUT_TIMING_CTRL */ + MIPI_OUTP(ctrl_base + DSI_CLKOUT_TIMING_CTRL, data); + + data = 0; + if (pinfo->rx_eot_ignore) + data |= BIT(4); + if (pinfo->tx_eot_append) + data |= BIT(0); + MIPI_OUTP(ctrl_base + DSI_EOT_PACKET_CTRL, data); + + + /* allow only ack-err-status to generate interrupt */ + /* DSI_ERR_INT_MASK0 */ + MIPI_OUTP(ctrl_base + DSI_ERR_INT_MASK0, 0x13ff3fe0); + + /* turn esc, byte, dsi, pclk, sclk, hclk on */ + MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f); + + dsi_ctrl |= BIT(0); /* enable dsi */ + MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl); + + wmb(); /* ensure write is finished before progressing */ +} + +void dsi_set_tx_power_mode(int mode) +{ + u32 data; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + data = MIPI_INP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL); + + if (mode == 0) + data &= ~BIT(26); + else + data |= BIT(26); + + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data); +} + +void msm_dsi_sw_reset(void) +{ + u32 dsi_ctrl; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + pr_debug("%s\n", __func__); + + dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL); + dsi_ctrl &= ~0x01; + MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl); + wmb(); /* ensure write is finished before progressing */ + + /* turn esc, byte, dsi, pclk, sclk, hclk on */ + MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f); + wmb(); /* ensure write is finished before progressing */ + + MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x01); + wmb(); /* ensure write is finished before progressing */ + MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x00); + wmb(); /* ensure write is finished before progressing */ +} + +void msm_dsi_controller_cfg(int enable) +{ + u32 dsi_ctrl, status; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + pr_debug("%s\n", __func__); + + /* Check for CMD_MODE_DMA_BUSY */ + if (readl_poll_timeout((ctrl_base + DSI_STATUS), + status, + ((status & 0x02) == 0), + DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) { + pr_err("%s: DSI status=%x failed\n", __func__, status); + pr_err("%s: Doing sw reset\n", __func__); + msm_dsi_sw_reset(); + } + + /* Check for x_HS_FIFO_EMPTY */ + if (readl_poll_timeout((ctrl_base + DSI_FIFO_STATUS), + status, + ((status & 0x11111000) == 0x11111000), + DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) + pr_err("%s: FIFO status=%x failed\n", __func__, status); + + /* Check for VIDEO_MODE_ENGINE_BUSY */ 
+ if (readl_poll_timeout((ctrl_base + DSI_STATUS), + status, + ((status & 0x08) == 0), + DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) { + pr_err("%s: DSI status=%x\n", __func__, status); + pr_err("%s: Doing sw reset\n", __func__); + msm_dsi_sw_reset(); + } + + dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL); + if (enable) + dsi_ctrl |= 0x01; + else + dsi_ctrl &= ~0x01; + + MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl); + wmb(); /* ensure write is finished before progressing */ +} + +void msm_dsi_op_mode_config(int mode, struct mdss_panel_data *pdata) +{ + u32 dsi_ctrl; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + pr_debug("%s\n", __func__); + + dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL); + + if (dsi_ctrl & DSI_VIDEO_MODE_EN) + dsi_ctrl &= ~(DSI_CMD_MODE_EN|DSI_EN); + else + dsi_ctrl &= ~(DSI_CMD_MODE_EN|DSI_VIDEO_MODE_EN|DSI_EN); + + if (mode == DSI_VIDEO_MODE) { + dsi_ctrl |= (DSI_VIDEO_MODE_EN|DSI_EN); + } else { + dsi_ctrl |= (DSI_CMD_MODE_EN|DSI_EN); + /* For Video mode panel, keep Video and Cmd mode ON */ + if (pdata->panel_info.type == MIPI_VIDEO_PANEL) + dsi_ctrl |= DSI_VIDEO_MODE_EN; + } + + pr_debug("%s: dsi_ctrl=%x\n", __func__, dsi_ctrl); + + MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl); + wmb(); /* ensure write is finished before progressing */ +} + +int msm_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl, + struct dsi_buf *tp) +{ + int len, rc; + unsigned long size, addr; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + unsigned long flag; + + len = ALIGN(tp->len, 4); + size = ALIGN(tp->len, SZ_4K); + + tp->dmap = dma_map_single(&dsi_host_private->dis_dev, tp->data, size, + DMA_TO_DEVICE); + if (dma_mapping_error(&dsi_host_private->dis_dev, tp->dmap)) { + pr_err("%s: dmap mapp failed\n", __func__); + return -ENOMEM; + } + + addr = tp->dmap; + + msm_dsi_get_cmd_engine(ctrl); + + spin_lock_irqsave(&ctrl->mdp_lock, flag); + reinit_completion(&ctrl->dma_comp); + msm_dsi_set_irq(ctrl, DSI_INTR_CMD_DMA_DONE_MASK); + spin_unlock_irqrestore(&ctrl->mdp_lock, flag); + + MIPI_OUTP(ctrl_base + DSI_DMA_CMD_OFFSET, addr); + MIPI_OUTP(ctrl_base + DSI_DMA_CMD_LENGTH, len); + wmb(); /* ensure write is finished before progressing */ + + MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01); + wmb(); /* ensure write is finished before progressing */ + + rc = wait_for_completion_timeout(&ctrl->dma_comp, + msecs_to_jiffies(DSI_DMA_CMD_TIMEOUT_MS)); + if (rc == 0) { + pr_err("DSI command transaction time out\n"); + rc = -ETIME; + } else if (!IS_ERR_VALUE(rc)) { + rc = 0; + } + + dma_unmap_single(&dsi_host_private->dis_dev, tp->dmap, size, + DMA_TO_DEVICE); + tp->dmap = 0; + + msm_dsi_clear_irq(ctrl, DSI_INTR_CMD_DMA_DONE_MASK); + + msm_dsi_release_cmd_engine(ctrl); + + return rc; +} + +int msm_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl, + struct dsi_buf *rp, int rlen) +{ + u32 *lp, data; + int i, off, cnt; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + + lp = (u32 *)rp->data; + cnt = rlen; + cnt += 3; + cnt >>= 2; + + if (cnt > 4) + cnt = 4; /* 4 x 32 bits registers only */ + + off = DSI_RDBK_DATA0; + off += ((cnt - 1) * 4); + + for (i = 0; i < cnt; i++) { + data = (u32)MIPI_INP(ctrl_base + off); + *lp++ = ntohl(data); /* to network byte order */ + pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n", + __func__, data, ntohl(data)); + off -= 4; + rp->len += sizeof(*lp); + } + + return rlen; +} + +static int msm_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl, + struct dsi_cmd_desc *cmds, int cnt) +{ + struct dsi_buf *tp; + struct dsi_cmd_desc *cm; + struct dsi_ctrl_hdr 
*dchdr; + int len; + int rc = 0; + + tp = &ctrl->tx_buf; + mdss_dsi_buf_init(tp); + cm = cmds; + len = 0; + while (cnt--) { + dchdr = &cm->dchdr; + mdss_dsi_buf_reserve(tp, len); + len = mdss_dsi_cmd_dma_add(tp, cm); + if (!len) { + pr_err("%s: failed to add cmd = 0x%x\n", + __func__, cm->payload[0]); + rc = -EINVAL; + goto dsi_cmds_tx_err; + } + + if (dchdr->last) { + tp->data = tp->start; /* begin of buf */ + rc = msm_dsi_wait4video_eng_busy(ctrl); + if (rc) { + pr_err("%s: wait4video_eng failed\n", __func__); + goto dsi_cmds_tx_err; + + } + + rc = msm_dsi_cmd_dma_tx(ctrl, tp); + if (IS_ERR_VALUE(rc)) { + pr_err("%s: failed to call cmd_dma_tx for cmd = 0x%x\n", + __func__, cmds->payload[0]); + goto dsi_cmds_tx_err; + } + + if (dchdr->wait) + usleep_range(dchdr->wait * 1000, + dchdr->wait * 1000); + + mdss_dsi_buf_init(tp); + len = 0; + } + cm++; + } + +dsi_cmds_tx_err: + return rc; +} + +static int msm_dsi_parse_rx_response(struct dsi_buf *rp) +{ + int rc = 0; + unsigned char cmd; + + cmd = rp->data[0]; + switch (cmd) { + case DTYPE_ACK_ERR_RESP: + pr_debug("%s: rx ACK_ERR_PACKAGE\n", __func__); + rc = -EINVAL; + break; + case DTYPE_GEN_READ1_RESP: + case DTYPE_DCS_READ1_RESP: + mdss_dsi_short_read1_resp(rp); + break; + case DTYPE_GEN_READ2_RESP: + case DTYPE_DCS_READ2_RESP: + mdss_dsi_short_read2_resp(rp); + break; + case DTYPE_GEN_LREAD_RESP: + case DTYPE_DCS_LREAD_RESP: + mdss_dsi_long_read_resp(rp); + break; + default: + rc = -EINVAL; + pr_warn("%s: Unknown cmd received\n", __func__); + break; + } + + return rc; +} + +/* MIPI_DSI_MRPS, Maximum Return Packet Size */ +static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */ + +static struct dsi_cmd_desc pkt_size_cmd = { + {DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)}, + max_pktsize, +}; + +static int msm_dsi_set_max_packet_size(struct mdss_dsi_ctrl_pdata *ctrl, + int size) +{ + struct dsi_buf *tp; + int rc; + + tp = &ctrl->tx_buf; + mdss_dsi_buf_init(tp); + max_pktsize[0] = size; + + rc = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd); + if (!rc) { + pr_err("%s: failed to add max_pkt_size\n", __func__); + return -EINVAL; + } + + rc = msm_dsi_wait4video_eng_busy(ctrl); + if (rc) { + pr_err("%s: failed to wait4video_eng\n", __func__); + return rc; + } + + rc = msm_dsi_cmd_dma_tx(ctrl, tp); + if (IS_ERR_VALUE(rc)) { + pr_err("%s: failed to tx max_pkt_size\n", __func__); + return rc; + } + pr_debug("%s: max_pkt_size=%d sent\n", __func__, size); + return rc; +} + +/* read data length is less than or equal to 10 bytes*/ +static int msm_dsi_cmds_rx_1(struct mdss_dsi_ctrl_pdata *ctrl, + struct dsi_cmd_desc *cmds, int rlen) +{ + int rc; + struct dsi_buf *tp, *rp; + + tp = &ctrl->tx_buf; + rp = &ctrl->rx_buf; + mdss_dsi_buf_init(rp); + mdss_dsi_buf_init(tp); + + rc = mdss_dsi_cmd_dma_add(tp, cmds); + if (!rc) { + pr_err("%s: dsi_cmd_dma_add failed\n", __func__); + rc = -EINVAL; + goto dsi_cmds_rx_1_error; + } + + rc = msm_dsi_wait4video_eng_busy(ctrl); + if (rc) { + pr_err("%s: wait4video_eng failed\n", __func__); + goto dsi_cmds_rx_1_error; + } + + rc = msm_dsi_cmd_dma_tx(ctrl, tp); + if (IS_ERR_VALUE(rc)) { + pr_err("%s: msm_dsi_cmd_dma_tx failed\n", __func__); + goto dsi_cmds_rx_1_error; + } + + if (rlen <= DSI_SHORT_PKT_DATA_SIZE) { + msm_dsi_cmd_dma_rx(ctrl, rp, rlen); + } else { + msm_dsi_cmd_dma_rx(ctrl, rp, rlen + DSI_HOST_HDR_SIZE); + rp->len = rlen + DSI_HOST_HDR_SIZE; + } + rc = msm_dsi_parse_rx_response(rp); + +dsi_cmds_rx_1_error: + if (rc) + rp->len = 0; + + return rc; +} + +/* read data length is more than 10
bytes, which requires multiple DSI read*/ +static int msm_dsi_cmds_rx_2(struct mdss_dsi_ctrl_pdata *ctrl, + struct dsi_cmd_desc *cmds, int rlen) +{ + int rc; + struct dsi_buf *tp, *rp; + int pkt_size, data_bytes, total; + + tp = &ctrl->tx_buf; + rp = &ctrl->rx_buf; + mdss_dsi_buf_init(rp); + pkt_size = DSI_MAX_PKT_SIZE; + data_bytes = MDSS_DSI_LEN; + total = 0; + + while (true) { + rc = msm_dsi_set_max_packet_size(ctrl, pkt_size); + if (rc) + break; + + mdss_dsi_buf_init(tp); + rc = mdss_dsi_cmd_dma_add(tp, cmds); + if (!rc) { + pr_err("%s: dsi_cmd_dma_add failed\n", __func__); + rc = -EINVAL; + break; + } + rc = msm_dsi_wait4video_eng_busy(ctrl); + if (rc) { + pr_err("%s: wait4video_eng failed\n", __func__); + break; + } + + rc = msm_dsi_cmd_dma_tx(ctrl, tp); + if (IS_ERR_VALUE(rc)) { + pr_err("%s: msm_dsi_cmd_dma_tx failed\n", __func__); + break; + } + + msm_dsi_cmd_dma_rx(ctrl, rp, DSI_MAX_BYTES_TO_READ); + + rp->data += DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE; + total += data_bytes; + if (total >= rlen) + break; + + data_bytes = DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE; + pkt_size += data_bytes; + } + + if (!rc) { + rp->data = rp->start; + rp->len = rlen + DSI_HOST_HDR_SIZE; + rc = msm_dsi_parse_rx_response(rp); + } + + if (rc) + rp->len = 0; + + return rc; +} + +int msm_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl, + struct dsi_cmd_desc *cmds, int rlen) +{ + int rc; + + if (rlen <= DSI_MAX_PKT_SIZE) + rc = msm_dsi_cmds_rx_1(ctrl, cmds, rlen); + else + rc = msm_dsi_cmds_rx_2(ctrl, cmds, rlen); + + return rc; +} + +void msm_dsi_cmdlist_tx(struct mdss_dsi_ctrl_pdata *ctrl, + struct dcs_cmd_req *req) +{ + int ret; + + ret = msm_dsi_cmds_tx(ctrl, req->cmds, req->cmds_cnt); + + if (req->cb) + req->cb(ret); +} + +void msm_dsi_cmdlist_rx(struct mdss_dsi_ctrl_pdata *ctrl, + struct dcs_cmd_req *req) +{ + struct dsi_buf *rp; + int len = 0; + + if (req->rbuf) { + rp = &ctrl->rx_buf; + len = msm_dsi_cmds_rx(ctrl, req->cmds, req->rlen); + memcpy(req->rbuf, rp->data, rp->len); + } else { + pr_err("%s: No rx buffer provided\n", __func__); + } + + if (req->cb) + req->cb(len); +} +int msm_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp) +{ + struct dcs_cmd_req *req; + int dsi_on; + int ret = -EINVAL; + + mutex_lock(&ctrl->mutex); + dsi_on = dsi_host_private->dsi_on; + mutex_unlock(&ctrl->mutex); + if (!dsi_on) { + pr_err("try to send DSI commands while dsi is off\n"); + return ret; + } + + if (from_mdp) /* from mdp kickoff */ + mutex_lock(&ctrl->cmd_mutex); + req = mdss_dsi_cmdlist_get(ctrl, from_mdp); + + if (!req) { + mutex_unlock(&ctrl->cmd_mutex); + return ret; + } + /* + * mdss interrupt is generated in mdp core clock domain + * mdp clock need to be enabled to receive dsi interrupt + * also, axi bus bandwidth need since dsi controller will + * fetch dcs commands from axi bus + */ + mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P); + msm_dsi_clk_ctrl(&ctrl->panel_data, 1); + + if (0 == (req->flags & CMD_REQ_LP_MODE)) + dsi_set_tx_power_mode(0); + + if (req->flags & CMD_REQ_RX) + msm_dsi_cmdlist_rx(ctrl, req); + else + msm_dsi_cmdlist_tx(ctrl, req); + + if (0 == (req->flags & CMD_REQ_LP_MODE)) + dsi_set_tx_power_mode(1); + + msm_dsi_clk_ctrl(&ctrl->panel_data, 0); + mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P); + + if (from_mdp) /* from mdp kickoff */ + mutex_unlock(&ctrl->cmd_mutex); + return 0; +} + +static int msm_dsi_cal_clk_rate(struct mdss_panel_data *pdata, + u64 *bitclk_rate, + u32 *dsiclk_rate, + u32 *byteclk_rate, + u32 *pclk_rate) +{ + struct mdss_panel_info *pinfo; + struct 
mipi_panel_info *mipi; + u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height; + int lanes; + u64 clk_rate; + + pinfo = &pdata->panel_info; + mipi = &pdata->panel_info.mipi; + + hbp = pdata->panel_info.lcdc.h_back_porch; + hfp = pdata->panel_info.lcdc.h_front_porch; + vbp = pdata->panel_info.lcdc.v_back_porch; + vfp = pdata->panel_info.lcdc.v_front_porch; + hspw = pdata->panel_info.lcdc.h_pulse_width; + vspw = pdata->panel_info.lcdc.v_pulse_width; + width = pdata->panel_info.xres; + height = pdata->panel_info.yres; + + lanes = 0; + if (mipi->data_lane0) + lanes++; + if (mipi->data_lane1) + lanes++; + if (mipi->data_lane2) + lanes++; + if (mipi->data_lane3) + lanes++; + if (lanes == 0) + return -EINVAL; + + *bitclk_rate = (width + hbp + hfp + hspw) * (height + vbp + vfp + vspw); + *bitclk_rate *= mipi->frame_rate; + *bitclk_rate *= pdata->panel_info.bpp; + do_div(*bitclk_rate, lanes); + clk_rate = *bitclk_rate; + + do_div(clk_rate, 8U); + *byteclk_rate = (u32) clk_rate; + *dsiclk_rate = *byteclk_rate * lanes; + *pclk_rate = *byteclk_rate * lanes * 8 / pdata->panel_info.bpp; + + pr_debug("dsiclk_rate=%u, byteclk=%u, pck_=%u\n", + *dsiclk_rate, *byteclk_rate, *pclk_rate); + return 0; +} + +static int msm_dsi_on(struct mdss_panel_data *pdata) +{ + int ret = 0, i; + u64 clk_rate; + struct mdss_panel_info *pinfo; + struct mipi_panel_info *mipi; + u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height; + u32 ystride, bpp, data; + u32 dummy_xres, dummy_yres; + u64 bitclk_rate = 0; + u32 byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0; + unsigned char *ctrl_base = dsi_host_private->dsi_base; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + + pr_debug("%s\n", __func__); + + pinfo = &pdata->panel_info; + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + mutex_lock(&ctrl_pdata->mutex); + + + if (!pdata->panel_info.dynamic_switch_pending) { + for (i = 0; !ret && (i < DSI_MAX_PM); i++) { + ret = msm_mdss_enable_vreg( + ctrl_pdata->power_data[i].vreg_config, + ctrl_pdata->power_data[i].num_vreg, 1); + if (ret) { + pr_err("%s: failed to enable vregs for %s\n", + __func__, __mdss_dsi_pm_name(i)); + goto error_vreg; + } + } + } + + msm_dsi_ahb_ctrl(1); + msm_dsi_phy_sw_reset(dsi_host_private->dsi_base); + msm_dsi_phy_init(dsi_host_private->dsi_base, pdata); + + msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate, + &byteclk_rate, &pclk_rate); + msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate, + byteclk_rate, pclk_rate); + msm_dsi_prepare_clocks(); + msm_dsi_clk_enable(); + + clk_rate = pdata->panel_info.clk_rate; + clk_rate = min(clk_rate, pdata->panel_info.clk_max); + + hbp = pdata->panel_info.lcdc.h_back_porch; + hfp = pdata->panel_info.lcdc.h_front_porch; + vbp = pdata->panel_info.lcdc.v_back_porch; + vfp = pdata->panel_info.lcdc.v_front_porch; + hspw = pdata->panel_info.lcdc.h_pulse_width; + vspw = pdata->panel_info.lcdc.v_pulse_width; + width = pdata->panel_info.xres; + height = pdata->panel_info.yres; + + mipi = &pdata->panel_info.mipi; + if (pdata->panel_info.type == MIPI_VIDEO_PANEL) { + dummy_xres = pdata->panel_info.lcdc.xres_pad; + dummy_yres = pdata->panel_info.lcdc.yres_pad; + + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_H, + ((hspw + hbp + width + dummy_xres) << 16 | + (hspw + hbp))); + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_V, + ((vspw + vbp + height + dummy_yres) << 16 | + (vspw + vbp))); + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_TOTAL, + (vspw + vbp + height + dummy_yres + + vfp - 1) << 16 | (hspw + hbp + + width + dummy_xres + hfp - 1)); + +
MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_HSYNC, (hspw << 16)); + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC, 0); + MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC_VPOS, + (vspw << 16)); + + } else { /* command mode */ + if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888) + bpp = 3; + else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666) + bpp = 3; + else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565) + bpp = 2; + else + bpp = 3; /* Default format set to RGB888 */ + + ystride = width * bpp + 1; + + data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE; + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_CTRL, + data); + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_CTRL, + data); + + data = height << 16 | width; + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, + data); + MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, + data); + } + + msm_dsi_sw_reset(); + msm_dsi_host_init(pdata); + + if (mipi->force_clk_lane_hs) { + u32 tmp; + + tmp = MIPI_INP(ctrl_base + DSI_LANE_CTRL); + tmp |= (1<<28); + MIPI_OUTP(ctrl_base + DSI_LANE_CTRL, tmp); + wmb(); /* ensure write is finished before progressing */ + } + + msm_dsi_op_mode_config(mipi->mode, pdata); + + msm_dsi_set_irq(ctrl_pdata, DSI_INTR_ERROR_MASK); + dsi_host_private->clk_count = 1; + dsi_host_private->dsi_on = 1; + +error_vreg: + if (ret) { + for (; i >= 0; i--) + msm_mdss_enable_vreg( + ctrl_pdata->power_data[i].vreg_config, + ctrl_pdata->power_data[i].num_vreg, 0); + } + + mutex_unlock(&ctrl_pdata->mutex); + return ret; +} + +static int msm_dsi_off(struct mdss_panel_data *pdata) +{ + int ret = 0, i; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + + if (pdata == NULL) { + pr_err("%s: Invalid input data\n", __func__); + ret = -EINVAL; + return ret; + } + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + pr_debug("%s\n", __func__); + mutex_lock(&ctrl_pdata->mutex); + msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask); + msm_dsi_controller_cfg(0); + msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0); + msm_dsi_clk_disable(); + msm_dsi_unprepare_clocks(); + msm_dsi_phy_off(dsi_host_private->dsi_base); + msm_dsi_ahb_ctrl(0); + + if (!pdata->panel_info.dynamic_switch_pending) { + for (i = DSI_MAX_PM - 1; i >= 0; i--) { + ret = msm_mdss_enable_vreg( + ctrl_pdata->power_data[i].vreg_config, + ctrl_pdata->power_data[i].num_vreg, 0); + if (ret) + pr_err("%s: failed to disable vregs for %s\n", + __func__, __mdss_dsi_pm_name(i)); + } + } + dsi_host_private->clk_count = 0; + dsi_host_private->dsi_on = 0; + + mutex_unlock(&ctrl_pdata->mutex); + + return ret; +} + +static int msm_dsi_cont_on(struct mdss_panel_data *pdata) +{ + struct mdss_panel_info *pinfo; + int ret = 0, i; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + + if (pdata == NULL) { + pr_err("%s: Invalid input data\n", __func__); + ret = -EINVAL; + return ret; + } + + + pr_debug("%s:\n", __func__); + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + pinfo = &pdata->panel_info; + mutex_lock(&ctrl_pdata->mutex); + for (i = 0; !ret && (i < DSI_MAX_PM); i++) { + ret = msm_mdss_enable_vreg( + ctrl_pdata->power_data[i].vreg_config, + ctrl_pdata->power_data[i].num_vreg, 1); + if (ret) { + pr_err("%s: failed to enable vregs for %s\n", + __func__, __mdss_dsi_pm_name(i)); + goto error_vreg; + } + } + pinfo->panel_power_state = MDSS_PANEL_POWER_ON; + ret = mdss_dsi_panel_reset(pdata, 1); + if (ret) { + pr_err("%s: Panel reset failed\n", __func__); + mutex_unlock(&ctrl_pdata->mutex); + return ret; + } + + 
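+	/* + * Continuous-splash handoff: the bootloader has already programmed the + * DSI host, so only re-enable register (AHB) access, clocks and the + * error interrupt here instead of re-running the full host init sequence. + */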
msm_dsi_ahb_ctrl(1); + msm_dsi_prepare_clocks(); + msm_dsi_clk_enable(); + msm_dsi_set_irq(ctrl_pdata, DSI_INTR_ERROR_MASK); + dsi_host_private->clk_count = 1; + dsi_host_private->dsi_on = 1; + +error_vreg: + if (ret) { + for (; i >= 0; i--) + msm_mdss_enable_vreg( + ctrl_pdata->power_data[i].vreg_config, + ctrl_pdata->power_data[i].num_vreg, 0); + } + + mutex_unlock(&ctrl_pdata->mutex); + return ret; +} + +static int msm_dsi_read_status(struct mdss_dsi_ctrl_pdata *ctrl) +{ + struct dcs_cmd_req cmdreq; + + memset(&cmdreq, 0, sizeof(cmdreq)); + cmdreq.cmds = ctrl->status_cmds.cmds; + cmdreq.cmds_cnt = ctrl->status_cmds.cmd_cnt; + cmdreq.flags = CMD_REQ_COMMIT | CMD_REQ_RX; + cmdreq.rlen = 1; + cmdreq.cb = NULL; + cmdreq.rbuf = ctrl->status_buf.data; + + return mdss_dsi_cmdlist_put(ctrl, &cmdreq); +} + + +/** + * msm_dsi_reg_status_check() - Check dsi panel status through reg read + * @ctrl_pdata: pointer to the dsi controller structure + * + * This function can be used to check the panel status through reading the + * status register from the panel. + * + * Return: positive value if the panel is in good state, negative value or + * zero otherwise. + */ +int msm_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + int ret = 0; + + if (ctrl_pdata == NULL) { + pr_err("%s: Invalid input data\n", __func__); + return 0; + } + + pr_debug("%s: Checking Register status\n", __func__); + + msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 1); + + if (ctrl_pdata->status_cmds.link_state == DSI_HS_MODE) + dsi_set_tx_power_mode(0); + + ret = msm_dsi_read_status(ctrl_pdata); + + if (ctrl_pdata->status_cmds.link_state == DSI_HS_MODE) + dsi_set_tx_power_mode(1); + + if (ret == 0) { + if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf, + ctrl_pdata->status_value, 0)) { + pr_err("%s: Read back value from panel is incorrect\n", + __func__); + ret = -EINVAL; + } else { + ret = 1; + } + } else { + pr_err("%s: Read status register returned error\n", __func__); + } + + msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 0); + pr_debug("%s: Read register done with ret: %d\n", __func__, ret); + + return ret; +} + +/** + * msm_dsi_bta_status_check() - Check dsi panel status through bta check + * @ctrl_pdata: pointer to the dsi controller structure + * + * This function can be used to check status of the panel using bta check + * for the panel. + * + * Return: positive value if the panel is in good state, negative value or + * zero otherwise. 
+ */ +static int msm_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + int ret = 0; + + if (ctrl_pdata == NULL) { + pr_err("%s: Invalid input data\n", __func__); + return 0; + } + + mutex_lock(&ctrl_pdata->cmd_mutex); + msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 1); + msm_dsi_cmd_mdp_busy(ctrl_pdata); + msm_dsi_set_irq(ctrl_pdata, DSI_INTR_BTA_DONE_MASK); + reinit_completion(&ctrl_pdata->bta_comp); + + /* BTA trigger */ + MIPI_OUTP(dsi_host_private->dsi_base + DSI_CMD_MODE_BTA_SW_TRIGGER, + 0x01); + wmb(); /* ensure write is finished before progressing */ + ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp, + HZ/10); + msm_dsi_clear_irq(ctrl_pdata, DSI_INTR_BTA_DONE_MASK); + msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 0); + mutex_unlock(&ctrl_pdata->cmd_mutex); + + if (ret <= 0) + pr_err("%s: DSI BTA error: %i\n", __func__, __LINE__); + + pr_debug("%s: BTA done with ret: %d\n", __func__, ret); + return ret; +} + +static void msm_dsi_debug_enable_clock(int on) +{ + if (dsi_host_private->debug_enable_clk) + dsi_host_private->debug_enable_clk(on); + + if (on) + msm_dsi_ahb_ctrl(1); + else + msm_dsi_ahb_ctrl(0); +} + +static int msm_dsi_debug_init(void) +{ + int rc; + + if (!mdss_res) + return 0; + + dsi_host_private->debug_enable_clk = + mdss_res->debug_inf.debug_enable_clock; + + mdss_res->debug_inf.debug_enable_clock = msm_dsi_debug_enable_clock; + + + rc = mdss_debug_register_base("dsi0", + dsi_host_private->dsi_base, + dsi_host_private->dsi_reg_size, + NULL); + + return rc; +} + +static int dsi_get_panel_cfg(char *panel_cfg) +{ + int rc; + struct mdss_panel_cfg *pan_cfg = NULL; + + if (!panel_cfg) + return MDSS_PANEL_INTF_INVALID; + + pan_cfg = mdp3_panel_intf_type(MDSS_PANEL_INTF_DSI); + if (IS_ERR(pan_cfg)) { + panel_cfg[0] = 0; + return PTR_ERR(pan_cfg); + } else if (!pan_cfg) { + panel_cfg[0] = 0; + return 0; + } + + pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__, + pan_cfg->arg_cfg); + rc = strlcpy(panel_cfg, pan_cfg->arg_cfg, + MDSS_MAX_PANEL_LEN); + return rc; +} + +static struct device_node *dsi_pref_prim_panel( + struct platform_device *pdev) +{ + struct device_node *dsi_pan_node = NULL; + + pr_debug("%s:%d: Select primary panel from dt\n", + __func__, __LINE__); + dsi_pan_node = of_parse_phandle(pdev->dev.of_node, + "qcom,dsi-pref-prim-pan", 0); + if (!dsi_pan_node) + pr_err("%s:can't find panel phandle\n", __func__); + + return dsi_pan_node; +} + +/** + * dsi_find_panel_of_node(): find device node of dsi panel + * @pdev: platform_device of the dsi ctrl node + * @panel_cfg: string containing intf specific config data + * + * Function finds the panel device node using the interface + * specific configuration data. This configuration data + * could be derived from the result of bootloader's GCDB + * panel detection mechanism. If such config data doesn't + * exist then this function returns the default panel configured + * in the device tree. + * + * returns pointer to panel node on success, NULL on error.
+ */ +static struct device_node *dsi_find_panel_of_node( + struct platform_device *pdev, char *panel_cfg) +{ + int l; + char *panel_name; + struct device_node *dsi_pan_node = NULL, *mdss_node = NULL; + + if (!panel_cfg) + return NULL; + + l = strlen(panel_cfg); + if (!l) { + /* no panel cfg chg, parse dt */ + pr_debug("%s:%d: no cmd line cfg present\n", + __func__, __LINE__); + dsi_pan_node = dsi_pref_prim_panel(pdev); + } else { + if (panel_cfg[0] != '0') { + pr_err("%s:%d:ctrl id=[%d] not supported\n", + __func__, __LINE__, panel_cfg[0]); + return NULL; + } + /* + * skip first two chars '' and + * ':' to get to the panel name + */ + panel_name = panel_cfg + 2; + pr_debug("%s:%d:%s:%s\n", __func__, __LINE__, + panel_cfg, panel_name); + + mdss_node = of_parse_phandle(pdev->dev.of_node, + "qcom,mdss-mdp", 0); + + if (!mdss_node) { + pr_err("%s: %d: mdss_node null\n", + __func__, __LINE__); + return NULL; + } + dsi_pan_node = of_find_node_by_name(mdss_node, + panel_name); + if (!dsi_pan_node) { + pr_err("%s: invalid pan node\n", + __func__); + dsi_pan_node = dsi_pref_prim_panel(pdev); + } + } + return dsi_pan_node; +} + +static int msm_dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable) +{ + u64 bitclk_rate = 0; + u32 byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + + pr_debug("%s:\n", __func__); + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + mutex_lock(&ctrl_pdata->mutex); + + if (enable) { + dsi_host_private->clk_count++; + if (dsi_host_private->clk_count == 1) { + msm_dsi_ahb_ctrl(1); + msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate, + &byteclk_rate, &pclk_rate); + msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate, + byteclk_rate, pclk_rate); + msm_dsi_prepare_clocks(); + msm_dsi_clk_enable(); + } + } else { + dsi_host_private->clk_count--; + if (dsi_host_private->clk_count == 0) { + msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask); + msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0); + msm_dsi_clk_disable(); + msm_dsi_unprepare_clocks(); + msm_dsi_ahb_ctrl(0); + } + } + mutex_unlock(&ctrl_pdata->mutex); + return 0; +} + +void msm_dsi_ctrl_init(struct mdss_dsi_ctrl_pdata *ctrl) +{ + init_completion(&ctrl->dma_comp); + init_completion(&ctrl->mdp_comp); + init_completion(&ctrl->bta_comp); + init_completion(&ctrl->video_comp); + spin_lock_init(&ctrl->irq_lock); + spin_lock_init(&ctrl->mdp_lock); + mutex_init(&ctrl->mutex); + mutex_init(&ctrl->cmd_mutex); + complete(&ctrl->mdp_comp); + dsi_buf_alloc(&ctrl->tx_buf, SZ_4K); + dsi_buf_alloc(&ctrl->rx_buf, SZ_4K); + dsi_buf_alloc(&ctrl->status_buf, SZ_4K); + ctrl->cmdlist_commit = msm_dsi_cmdlist_commit; + ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode; + + if (ctrl->status_mode == ESD_REG) + ctrl->check_status = msm_dsi_reg_status_check; + else if (ctrl->status_mode == ESD_BTA) + ctrl->check_status = msm_dsi_bta_status_check; + + if (ctrl->status_mode == ESD_MAX) { + pr_err("%s: Using default BTA for ESD check\n", __func__); + ctrl->check_status = msm_dsi_bta_status_check; + } +} + +static void msm_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap) +{ + const char *data; + + *dlane_swap = DSI_LANE_MAP_0123; + data = of_get_property(np, "qcom,lane-map", NULL); + if (data) { + if (!strcmp(data, "lane_map_3012")) + *dlane_swap = DSI_LANE_MAP_3012; + else if (!strcmp(data, "lane_map_2301")) + *dlane_swap = DSI_LANE_MAP_2301; + else if (!strcmp(data, "lane_map_1230")) + *dlane_swap = DSI_LANE_MAP_1230; + else if (!strcmp(data,
"lane_map_0321")) + *dlane_swap = DSI_LANE_MAP_0321; + else if (!strcmp(data, "lane_map_1032")) + *dlane_swap = DSI_LANE_MAP_1032; + else if (!strcmp(data, "lane_map_2103")) + *dlane_swap = DSI_LANE_MAP_2103; + else if (!strcmp(data, "lane_map_3210")) + *dlane_swap = DSI_LANE_MAP_3210; + } +} + +static int msm_dsi_probe(struct platform_device *pdev) +{ + struct dsi_interface intf; + char panel_cfg[MDSS_MAX_PANEL_LEN]; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + int rc = 0; + struct device_node *dsi_pan_node = NULL; + bool cmd_cfg_cont_splash = false; + struct resource *mdss_dsi_mres; + int i; + + pr_debug("%s\n", __func__); + + rc = msm_dsi_init(); + if (rc) + return rc; + + if (!pdev->dev.of_node) { + pr_err("%s: Device node is not accessible\n", __func__); + rc = -ENODEV; + goto error_no_mem; + } + pdev->id = 0; + + ctrl_pdata = platform_get_drvdata(pdev); + if (!ctrl_pdata) { + ctrl_pdata = devm_kzalloc(&pdev->dev, + sizeof(struct mdss_dsi_ctrl_pdata), GFP_KERNEL); + if (!ctrl_pdata) { + rc = -ENOMEM; + goto error_no_mem; + } + platform_set_drvdata(pdev, ctrl_pdata); + } + + ctrl_pdata->mdss_util = mdss_get_util_intf(); + if (mdp3_res->mdss_util == NULL) { + pr_err("Failed to get mdss utility functions\n"); + return -ENODEV; + } + + mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mdss_dsi_mres) { + pr_err("%s:%d unable to get the MDSS reg resources", + __func__, __LINE__); + rc = -ENOMEM; + goto error_io_resource; + } else { + dsi_host_private->dsi_reg_size = resource_size(mdss_dsi_mres); + dsi_host_private->dsi_base = ioremap(mdss_dsi_mres->start, + dsi_host_private->dsi_reg_size); + if (!dsi_host_private->dsi_base) { + pr_err("%s:%d unable to remap dsi resources", + __func__, __LINE__); + rc = -ENOMEM; + goto error_io_resource; + } + } + + mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!mdss_dsi_mres || mdss_dsi_mres->start == 0) { + pr_err("%s:%d unable to get the MDSS irq resources", + __func__, __LINE__); + rc = -ENODEV; + goto error_irq_resource; + } + + rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (rc) { + dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n", + __func__, rc); + goto error_platform_pop; + } + + /* DSI panels can be different between controllers */ + rc = dsi_get_panel_cfg(panel_cfg); + if (!rc) + /* dsi panel cfg not present */ + pr_warn("%s:%d:dsi specific cfg not present\n", + __func__, __LINE__); + + /* find panel device node */ + dsi_pan_node = dsi_find_panel_of_node(pdev, panel_cfg); + if (!dsi_pan_node) { + pr_err("%s: can't find panel node %s\n", __func__, + panel_cfg); + goto error_pan_node; + } + + cmd_cfg_cont_splash = mdp3_panel_get_boot_cfg() ? 
true : false; + + rc = mdss_dsi_panel_init(dsi_pan_node, ctrl_pdata, cmd_cfg_cont_splash); + if (rc) { + pr_err("%s: dsi panel init failed\n", __func__); + goto error_pan_node; + } + + rc = dsi_ctrl_config_init(pdev, ctrl_pdata); + if (rc) { + dev_err(&pdev->dev, "%s: failed to parse mdss dtsi rc=%d\n", + __func__, rc); + goto error_pan_node; + } + + msm_dsi_parse_lane_swap(pdev->dev.of_node, &(ctrl_pdata->dlane_swap)); + + for (i = 0; i < DSI_MAX_PM; i++) { + rc = msm_dsi_io_init(pdev, &(ctrl_pdata->power_data[i])); + if (rc) { + dev_err(&pdev->dev, "%s: failed to init IO for %s\n", + __func__, __mdss_dsi_pm_name(i)); + goto error_io_init; + } + } + + pr_debug("%s: Dsi Ctrl->0 initialized\n", __func__); + + dsi_host_private->dis_dev = pdev->dev; + intf.on = msm_dsi_on; + intf.off = msm_dsi_off; + intf.cont_on = msm_dsi_cont_on; + intf.clk_ctrl = msm_dsi_clk_ctrl; + intf.op_mode_config = msm_dsi_op_mode_config; + intf.index = 0; + intf.private = NULL; + dsi_register_interface(&intf); + + msm_dsi_debug_init(); + + msm_dsi_ctrl_init(ctrl_pdata); + + rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start, + ctrl_pdata); + if (rc) { + dev_err(&pdev->dev, "%s: failed to init irq, rc=%d\n", + __func__, rc); + goto error_irq_init; + } + + rc = dsi_panel_device_register_v2(pdev, ctrl_pdata); + if (rc) { + pr_err("%s: dsi panel dev reg failed\n", __func__); + goto error_device_register; + } + pr_debug("%s success\n", __func__); + return 0; +error_device_register: + kfree(ctrl_pdata->dsi_hw->irq_info); + kfree(ctrl_pdata->dsi_hw); +error_irq_init: + for (i = DSI_MAX_PM - 1; i >= 0; i--) + msm_dsi_io_deinit(pdev, &(ctrl_pdata->power_data[i])); +error_io_init: + dsi_ctrl_config_deinit(pdev, ctrl_pdata); +error_pan_node: + of_node_put(dsi_pan_node); +error_platform_pop: + msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask); +error_irq_resource: + if (dsi_host_private->dsi_base) { + iounmap(dsi_host_private->dsi_base); + dsi_host_private->dsi_base = NULL; + } +error_io_resource: + devm_kfree(&pdev->dev, ctrl_pdata); +error_no_mem: + msm_dsi_deinit(); + + return rc; +} + +static int msm_dsi_remove(struct platform_device *pdev) +{ + int i; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev); + + if (!ctrl_pdata) { + pr_err("%s: no driver data\n", __func__); + return -ENODEV; + } + + msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask); + for (i = DSI_MAX_PM - 1; i >= 0; i--) + msm_dsi_io_deinit(pdev, &(ctrl_pdata->power_data[i])); + dsi_ctrl_config_deinit(pdev, ctrl_pdata); + iounmap(dsi_host_private->dsi_base); + dsi_host_private->dsi_base = NULL; + msm_dsi_deinit(); + devm_kfree(&pdev->dev, ctrl_pdata); + + return 0; +} + +static const struct of_device_id msm_dsi_v2_dt_match[] = { + {.compatible = "qcom,msm-dsi-v2"}, + {} +}; +MODULE_DEVICE_TABLE(of, msm_dsi_v2_dt_match); + +static struct platform_driver msm_dsi_v2_driver = { + .probe = msm_dsi_probe, + .remove = msm_dsi_remove, + .shutdown = NULL, + .driver = { + .name = "qcom,dsi-panel-v2", + .of_match_table = msm_dsi_v2_dt_match, + }, +}; + +static int msm_dsi_v2_register_driver(void) +{ + return platform_driver_register(&msm_dsi_v2_driver); +} + +static int __init msm_dsi_v2_driver_init(void) +{ + int ret; + + ret = msm_dsi_v2_register_driver(); + if (ret) { + pr_err("msm_dsi_v2_register_driver() failed!\n"); + return ret; + } + + return ret; +} +module_init(msm_dsi_v2_driver_init); + +static void __exit msm_dsi_v2_driver_cleanup(void) +{ + platform_driver_unregister(&msm_dsi_v2_driver); +} 
+module_exit(msm_dsi_v2_driver_cleanup); diff --git a/drivers/video/fbdev/msm/dsi_host_v2.h b/drivers/video/fbdev/msm/dsi_host_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..d61bcf9ea7f209518ae6edb073b13c5079b66a99 --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_host_v2.h @@ -0,0 +1,178 @@ +/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef DSI_HOST_V2_H +#define DSI_HOST_V2_H + +#include + +#define DSI_INTR_ERROR_MASK BIT(25) +#define DSI_INTR_ERROR BIT(24) +#define DSI_INTR_BTA_DONE_MASK BIT(21) +#define DSI_INTR_BTA_DONE BIT(20) +#define DSI_INTR_VIDEO_DONE_MASK BIT(17) +#define DSI_INTR_VIDEO_DONE BIT(16) +#define DSI_INTR_CMD_MDP_DONE_MASK BIT(9) +#define DSI_INTR_CMD_MDP_DONE BIT(8) +#define DSI_INTR_CMD_DMA_DONE_MASK BIT(1) +#define DSI_INTR_CMD_DMA_DONE BIT(0) +#define DSI_INTR_ALL_MASK 0x2220202 + +#define DSI_BTA_TERM BIT(1) + +#define DSI_CTRL 0x0000 +#define DSI_STATUS 0x0004 +#define DSI_FIFO_STATUS 0x0008 +#define DSI_VIDEO_MODE_CTRL 0x000C +#define DSI_VIDEO_MODE_DATA_CTRL 0x001C +#define DSI_VIDEO_MODE_ACTIVE_H 0x0020 +#define DSI_VIDEO_MODE_ACTIVE_V 0x0024 +#define DSI_VIDEO_MODE_TOTAL 0x0028 +#define DSI_VIDEO_MODE_HSYNC 0x002C +#define DSI_VIDEO_MODE_VSYNC 0x0030 +#define DSI_VIDEO_MODE_VSYNC_VPOS 0x0034 +#define DSI_COMMAND_MODE_DMA_CTRL 0x0038 +#define DSI_COMMAND_MODE_MDP_CTRL 0x003C +#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL 0x0040 +#define DSI_DMA_CMD_OFFSET 0x0044 +#define DSI_DMA_CMD_LENGTH 0x0048 +#define DSI_DMA_FIFO_CTRL 0x004C +#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL 0x0054 +#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL 0x0058 +#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL 0x005C +#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL 0x0060 +#define DSI_ACK_ERR_STATUS 0x0064 +#define DSI_RDBK_DATA0 0x0068 +#define DSI_RDBK_DATA1 0x006C +#define DSI_RDBK_DATA2 0x0070 +#define DSI_RDBK_DATA3 0x0074 +#define DSI_RDBK_DATATYPE0 0x0078 +#define DSI_RDBK_DATATYPE1 0x007C +#define DSI_TRIG_CTRL 0x0080 +#define DSI_EXT_MUX 0x0084 +#define DSI_EXT_TE_PULSE_DETECT_CTRL 0x0088 +#define DSI_CMD_MODE_DMA_SW_TRIGGER 0x008C +#define DSI_CMD_MODE_MDP_SW_TRIGGER 0x0090 +#define DSI_CMD_MODE_BTA_SW_TRIGGER 0x0094 +#define DSI_RESET_SW_TRIGGER 0x0098 +#define DSI_LANE_CTRL 0x00A8 +#define DSI_LANE_SWAP_CTRL 0x00AC +#define DSI_DLN0_PHY_ERR 0x00B0 +#define DSI_TIMEOUT_STATUS 0x00BC +#define DSI_CLKOUT_TIMING_CTRL 0x00C0 +#define DSI_EOT_PACKET 0x00C4 +#define DSI_EOT_PACKET_CTRL 0x00C8 +#define DSI_ERR_INT_MASK0 0x0108 +#define DSI_INT_CTRL 0x010c +#define DSI_SOFT_RESET 0x0114 +#define DSI_CLK_CTRL 0x0118 +#define DSI_CLK_STATUS 0x011C +#define DSI_PHY_SW_RESET 0x0128 +#define DSI_COMMAND_MODE_MDP_IDLE_CTRL 0x0190 +#define DSI_VERSION 0x01F0 + +#define DSI_DSIPHY_PLL_CTRL_0 0x0200 +#define DSI_DSIPHY_PLL_CTRL_1 0x0204 +#define DSI_DSIPHY_PLL_CTRL_2 0x0208 +#define DSI_DSIPHY_PLL_CTRL_3 0x020C +#define DSI_DSIPHY_PLL_CTRL_4 0x0210 +#define DSI_DSIPHY_PLL_CTRL_5 0x0214 +#define DSI_DSIPHY_PLL_CTRL_6 0x0218 +#define DSI_DSIPHY_PLL_CTRL_7 0x021C +#define 
DSI_DSIPHY_PLL_CTRL_8 0x0220 +#define DSI_DSIPHY_PLL_CTRL_9 0x0224 +#define DSI_DSIPHY_PLL_CTRL_10 0x0228 +#define DSI_DSIPHY_PLL_CTRL_11 0x022C +#define DSI_DSIPHY_PLL_CTRL_12 0x0230 +#define DSI_DSIPHY_PLL_CTRL_13 0x0234 +#define DSI_DSIPHY_PLL_CTRL_14 0x0238 +#define DSI_DSIPHY_PLL_CTRL_15 0x023C +#define DSI_DSIPHY_PLL_CTRL_16 0x0240 +#define DSI_DSIPHY_PLL_CTRL_17 0x0244 +#define DSI_DSIPHY_PLL_CTRL_18 0x0248 +#define DSI_DSIPHY_PLL_CTRL_19 0x024C +#define DSI_DSIPHY_ANA_CTRL0 0x0260 +#define DSI_DSIPHY_ANA_CTRL1 0x0264 +#define DSI_DSIPHY_ANA_CTRL2 0x0268 +#define DSI_DSIPHY_ANA_CTRL3 0x026C +#define DSI_DSIPHY_ANA_CTRL4 0x0270 +#define DSI_DSIPHY_ANA_CTRL5 0x0274 +#define DSI_DSIPHY_ANA_CTRL6 0x0278 +#define DSI_DSIPHY_ANA_CTRL7 0x027C +#define DSI_DSIPHY_PLL_RDY 0x0280 +#define DSI_DSIPHY_PLL_ANA_STATUS0 0x0294 +#define DSI_DSIPHY_PLL_ANA_STATUS1 0x0298 +#define DSI_DSIPHY_PLL_ANA_STATUS2 0x029C +#define DSI_DSIPHY_LN0_CFG0 0x0300 +#define DSI_DSIPHY_LN0_CFG1 0x0304 +#define DSI_DSIPHY_LN0_CFG2 0x0308 +#define DSI_DSIPHY_LN1_CFG0 0x0340 +#define DSI_DSIPHY_LN1_CFG1 0x0344 +#define DSI_DSIPHY_LN1_CFG2 0x0348 +#define DSI_DSIPHY_LN2_CFG0 0x0380 +#define DSI_DSIPHY_LN2_CFG1 0x0384 +#define DSI_DSIPHY_LN2_CFG2 0x0388 +#define DSI_DSIPHY_LN3_CFG0 0x03C0 +#define DSI_DSIPHY_LN3_CFG1 0x03C4 +#define DSI_DSIPHY_LN3_CFG2 0x03C8 +#define DSI_DSIPHY_LNCK_CFG0 0x0400 +#define DSI_DSIPHY_LNCK_CFG1 0x0404 +#define DSI_DSIPHY_LNCK_CFG2 0x0408 +#define DSI_DSIPHY_TIMING_CTRL_0 0x0440 +#define DSI_DSIPHY_TIMING_CTRL_1 0x0444 +#define DSI_DSIPHY_TIMING_CTRL_2 0x0448 +#define DSI_DSIPHY_TIMING_CTRL_3 0x044C +#define DSI_DSIPHY_TIMING_CTRL_4 0x0450 +#define DSI_DSIPHY_TIMING_CTRL_5 0x0454 +#define DSI_DSIPHY_TIMING_CTRL_6 0x0458 +#define DSI_DSIPHY_TIMING_CTRL_7 0x045C +#define DSI_DSIPHY_TIMING_CTRL_8 0x0460 +#define DSI_DSIPHY_TIMING_CTRL_9 0x0464 +#define DSI_DSIPHY_TIMING_CTRL_10 0x0468 +#define DSI_DSIPHY_TIMING_CTRL_11 0x046C +#define DSI_DSIPHY_CTRL_0 0x0470 +#define DSI_DSIPHY_CTRL_1 0x0474 +#define DSI_DSIPHY_CTRL_2 0x0478 +#define DSI_DSIPHY_CTRL_3 0x047C +#define DSI_DSIPHY_STRENGTH_CTRL_0 0x0480 +#define DSI_DSIPHY_STRENGTH_CTRL_1 0x0484 +#define DSI_DSIPHY_STRENGTH_CTRL_2 0x0488 +#define DSI_DSIPHY_LDO_CNTRL 0x04B0 +#define DSI_DSIPHY_REGULATOR_CTRL_0 0x0500 +#define DSI_DSIPHY_REGULATOR_CTRL_1 0x0504 +#define DSI_DSIPHY_REGULATOR_CTRL_2 0x0508 +#define DSI_DSIPHY_REGULATOR_CTRL_3 0x050C +#define DSI_DSIPHY_REGULATOR_CTRL_4 0x0510 +#define DSI_DSIPHY_REGULATOR_TEST 0x0514 +#define DSI_DSIPHY_REGULATOR_CAL_PWR_CFG 0x0518 +#define DSI_DSIPHY_CAL_HW_TRIGGER 0x0528 +#define DSI_DSIPHY_CAL_SW_CFG0 0x052C +#define DSI_DSIPHY_CAL_SW_CFG1 0x0530 +#define DSI_DSIPHY_CAL_SW_CFG2 0x0534 +#define DSI_DSIPHY_CAL_HW_CFG0 0x0538 +#define DSI_DSIPHY_CAL_HW_CFG1 0x053C +#define DSI_DSIPHY_CAL_HW_CFG2 0x0540 +#define DSI_DSIPHY_CAL_HW_CFG3 0x0544 +#define DSI_DSIPHY_CAL_HW_CFG4 0x0548 +#define DSI_DSIPHY_REGULATOR_CAL_STATUS0 0x0550 +#define DSI_DSIPHY_BIST_CTRL0 0x048C +#define DSI_DSIPHY_BIST_CTRL1 0x0490 +#define DSI_DSIPHY_BIST_CTRL2 0x0494 +#define DSI_DSIPHY_BIST_CTRL3 0x0498 +#define DSI_DSIPHY_BIST_CTRL4 0x049C +#define DSI_DSIPHY_BIST_CTRL5 0x04A0 + +#define DSI_EN BIT(0) +#define DSI_VIDEO_MODE_EN BIT(1) +#define DSI_CMD_MODE_EN BIT(2) + +#endif /* DSI_HOST_V2_H */ diff --git a/drivers/video/fbdev/msm/dsi_io_v2.c b/drivers/video/fbdev/msm/dsi_io_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..71c1d1d7c2a7dfab453acbeec3c4463a55e96aaa --- /dev/null +++ 
b/drivers/video/fbdev/msm/dsi_io_v2.c @@ -0,0 +1,389 @@ +/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include + + +#include "dsi_v2.h" +#include "dsi_io_v2.h" +#include "dsi_host_v2.h" + +struct msm_dsi_io_private { + struct clk *dsi_byte_clk; + struct clk *dsi_esc_clk; + struct clk *dsi_pixel_clk; + struct clk *dsi_ahb_clk; + struct clk *dsi_clk; + int msm_dsi_clk_on; + int msm_dsi_ahb_clk_on; +}; + +static struct msm_dsi_io_private *dsi_io_private; + +#define DSI_VDDA_VOLTAGE 1200000 + +void msm_dsi_ahb_ctrl(int enable) +{ + if (enable) { + dsi_io_private->msm_dsi_ahb_clk_on++; + if (dsi_io_private->msm_dsi_ahb_clk_on == 1) + clk_enable(dsi_io_private->dsi_ahb_clk); + } else { + dsi_io_private->msm_dsi_ahb_clk_on--; + if (dsi_io_private->msm_dsi_ahb_clk_on == 0) + clk_disable(dsi_io_private->dsi_ahb_clk); + } +} + +int msm_dsi_io_init(struct platform_device *pdev, struct mdss_module_power *mp) +{ + int rc; + + if (!dsi_io_private) { + dsi_io_private = kzalloc(sizeof(struct msm_dsi_io_private), + GFP_KERNEL); + if (!dsi_io_private) + return -ENOMEM; + } + + rc = msm_dsi_clk_init(pdev); + if (rc) { + pr_err("fail to initialize DSI clock\n"); + return rc; + } + + rc = msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, + mp->num_vreg, 1); + if (rc) { + pr_err("fail to initialize DSI regulator\n"); + return rc; + } + + return 0; +} + +void msm_dsi_io_deinit(struct platform_device *pdev, + struct mdss_module_power *mp) +{ + if (dsi_io_private) { + msm_dsi_clk_deinit(); + msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, + mp->num_vreg, 0); + kfree(dsi_io_private); + dsi_io_private = NULL; + } +} + +int msm_dsi_clk_init(struct platform_device *dev) +{ + int rc = 0; + + dsi_io_private->dsi_clk = clk_get(&dev->dev, "dsi_clk"); + if (IS_ERR(dsi_io_private->dsi_clk)) { + pr_err("can't find dsi core_clk\n"); + rc = PTR_ERR(dsi_io_private->dsi_clk); + dsi_io_private->dsi_clk = NULL; + return rc; + } + dsi_io_private->dsi_byte_clk = clk_get(&dev->dev, "byte_clk"); + if (IS_ERR(dsi_io_private->dsi_byte_clk)) { + pr_err("can't find dsi byte_clk\n"); + rc = PTR_ERR(dsi_io_private->dsi_byte_clk); + dsi_io_private->dsi_byte_clk = NULL; + return rc; + } + + dsi_io_private->dsi_esc_clk = clk_get(&dev->dev, "esc_clk"); + if (IS_ERR(dsi_io_private->dsi_esc_clk)) { + pr_err("can't find dsi esc_clk\n"); + rc = PTR_ERR(dsi_io_private->dsi_esc_clk); + dsi_io_private->dsi_esc_clk = NULL; + return rc; + } + + dsi_io_private->dsi_pixel_clk = clk_get(&dev->dev, "pixel_clk"); + if (IS_ERR(dsi_io_private->dsi_pixel_clk)) { + pr_err("can't find dsi pixel\n"); + rc = PTR_ERR(dsi_io_private->dsi_pixel_clk); + dsi_io_private->dsi_pixel_clk = NULL; + return rc; + } + + dsi_io_private->dsi_ahb_clk = clk_get(&dev->dev, "iface_clk"); + if (IS_ERR(dsi_io_private->dsi_ahb_clk)) { + pr_err("can't find dsi iface_clk\n"); + rc = PTR_ERR(dsi_io_private->dsi_ahb_clk); + dsi_io_private->dsi_ahb_clk = NULL; + return rc; + } + clk_prepare(dsi_io_private->dsi_ahb_clk); + + return 0; +} 
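msm_dsi_clk_init() above only acquires the clock handles and calls clk_prepare() on the AHB clock; the actual gating is done later by msm_dsi_ahb_ctrl() and msm_dsi_clk_enable()/msm_dsi_clk_disable(). The following is an illustrative sketch of that prepare/enable split, not part of the patch itself: it uses only the standard Linux clk API, the "iface_clk" name this driver requests, and hypothetical example_* wrappers with trimmed error handling.

/*
 * Illustrative sketch (not part of this patch): clk_prepare() may sleep,
 * so it is done once at init time; clk_enable()/clk_disable() are the
 * lightweight calls used when gating the clock at run time.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *example_ahb_clk;

static int example_ahb_clk_setup(struct platform_device *pdev)
{
	example_ahb_clk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(example_ahb_clk))
		return PTR_ERR(example_ahb_clk);

	return clk_prepare(example_ahb_clk);	/* once, may sleep */
}

static void example_ahb_clk_ctrl(bool on)
{
	if (on)
		clk_enable(example_ahb_clk);	/* cheap, non-sleeping */
	else
		clk_disable(example_ahb_clk);
}

msm_dsi_clk_ctrl() earlier in this patch builds on the same split, adding a clk_count reference count so that only the first enable and the last disable actually program the clock hardware.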
+ +void msm_dsi_clk_deinit(void) +{ + if (dsi_io_private->dsi_clk) { + clk_put(dsi_io_private->dsi_clk); + dsi_io_private->dsi_clk = NULL; + } + if (dsi_io_private->dsi_byte_clk) { + clk_put(dsi_io_private->dsi_byte_clk); + dsi_io_private->dsi_byte_clk = NULL; + } + if (dsi_io_private->dsi_esc_clk) { + clk_put(dsi_io_private->dsi_esc_clk); + dsi_io_private->dsi_esc_clk = NULL; + } + if (dsi_io_private->dsi_pixel_clk) { + clk_put(dsi_io_private->dsi_pixel_clk); + dsi_io_private->dsi_pixel_clk = NULL; + } + if (dsi_io_private->dsi_ahb_clk) { + clk_unprepare(dsi_io_private->dsi_ahb_clk); + clk_put(dsi_io_private->dsi_ahb_clk); + dsi_io_private->dsi_ahb_clk = NULL; + } +} + +int msm_dsi_prepare_clocks(void) +{ + clk_prepare(dsi_io_private->dsi_clk); + clk_prepare(dsi_io_private->dsi_byte_clk); + clk_prepare(dsi_io_private->dsi_esc_clk); + clk_prepare(dsi_io_private->dsi_pixel_clk); + return 0; +} + +int msm_dsi_unprepare_clocks(void) +{ + clk_unprepare(dsi_io_private->dsi_clk); + clk_unprepare(dsi_io_private->dsi_esc_clk); + clk_unprepare(dsi_io_private->dsi_byte_clk); + clk_unprepare(dsi_io_private->dsi_pixel_clk); + return 0; +} + +int msm_dsi_clk_set_rate(unsigned long esc_rate, + unsigned long dsi_rate, + unsigned long byte_rate, + unsigned long pixel_rate) +{ + int rc; + + rc = clk_set_rate(dsi_io_private->dsi_clk, dsi_rate); + if (rc) { + pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc); + return rc; + } + + rc = clk_set_rate(dsi_io_private->dsi_esc_clk, esc_rate); + if (rc) { + pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc); + return rc; + } + + rc = clk_set_rate(dsi_io_private->dsi_byte_clk, byte_rate); + if (rc) { + pr_err("dsi_byte_clk - clk_set_rate faile = %dd\n", rc); + return rc; + } + + rc = clk_set_rate(dsi_io_private->dsi_pixel_clk, pixel_rate); + if (rc) { + pr_err("dsi_pixel_clk - clk_set_rate failed = %d\n", rc); + return rc; + } + return 0; +} + +int msm_dsi_clk_enable(void) +{ + if (dsi_io_private->msm_dsi_clk_on) { + pr_debug("dsi_clks on already\n"); + return 0; + } + + clk_enable(dsi_io_private->dsi_clk); + clk_enable(dsi_io_private->dsi_esc_clk); + clk_enable(dsi_io_private->dsi_byte_clk); + clk_enable(dsi_io_private->dsi_pixel_clk); + + dsi_io_private->msm_dsi_clk_on = 1; + return 0; +} + +int msm_dsi_clk_disable(void) +{ + if (dsi_io_private->msm_dsi_clk_on == 0) { + pr_debug("mdss_dsi_clks already OFF\n"); + return 0; + } + + clk_disable(dsi_io_private->dsi_clk); + clk_disable(dsi_io_private->dsi_byte_clk); + clk_disable(dsi_io_private->dsi_esc_clk); + clk_disable(dsi_io_private->dsi_pixel_clk); + + dsi_io_private->msm_dsi_clk_on = 0; + return 0; +} + +static void msm_dsi_phy_strength_init(unsigned char *ctrl_base, + struct mdss_dsi_phy_ctrl *pd) +{ + MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_0, pd->strength[0]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_2, pd->strength[1]); +} + +static void msm_dsi_phy_ctrl_init(unsigned char *ctrl_base, + struct mdss_panel_data *pdata) +{ + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x5f); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_3, 0x10); +} + +static void msm_dsi_phy_regulator_init(unsigned char *ctrl_base, + struct mdss_dsi_phy_ctrl *pd) +{ + MIPI_OUTP(ctrl_base + DSI_DSIPHY_LDO_CNTRL, 0x25); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, pd->regulator[0]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_1, pd->regulator[1]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_2, pd->regulator[2]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_3, pd->regulator[3]); + MIPI_OUTP(ctrl_base + 
DSI_DSIPHY_REGULATOR_CTRL_4, pd->regulator[4]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_PWR_CFG, + pd->regulator[5]); + +} + +static int msm_dsi_phy_calibration(unsigned char *ctrl_base) +{ + int i = 0, term_cnt = 5000, ret = 0, cal_busy; + + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_SW_CFG2, 0x0); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG1, 0x5a); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG3, 0x10); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG4, 0x01); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG0, 0x01); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x01); + usleep_range(5000, 5100); /*per DSI controller spec*/ + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x00); + + cal_busy = MIPI_INP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_STATUS0); + while (cal_busy & 0x10) { + i++; + if (i > term_cnt) { + ret = -EINVAL; + pr_err("%s error\n", __func__); + break; + } + cal_busy = MIPI_INP(ctrl_base + + DSI_DSIPHY_REGULATOR_CAL_STATUS0); + } + + return ret; +} + +static void msm_dsi_phy_lane_init(unsigned char *ctrl_base, + struct mdss_dsi_phy_ctrl *pd) +{ + int ln, index; + + /*CFG0, CFG1, CFG2, TEST_DATAPATH, TEST_STR0, TEST_STR1*/ + for (ln = 0; ln < 5; ln++) { + unsigned char *off = ctrl_base + 0x0300 + (ln * 0x40); + + index = ln * 6; + + MIPI_OUTP(off, pd->lanecfg[index]); + MIPI_OUTP(off + 4, pd->lanecfg[index + 1]); + MIPI_OUTP(off + 8, pd->lanecfg[index + 2]); + MIPI_OUTP(off + 12, pd->lanecfg[index + 3]); + MIPI_OUTP(off + 20, pd->lanecfg[index + 4]); + MIPI_OUTP(off + 24, pd->lanecfg[index + 5]); + } + wmb(); /* ensure write is finished before progressing */ +} + +static void msm_dsi_phy_timing_init(unsigned char *ctrl_base, + struct mdss_dsi_phy_ctrl *pd) +{ + int i, off = DSI_DSIPHY_TIMING_CTRL_0; + + for (i = 0; i < 12; i++) { + MIPI_OUTP(ctrl_base + off, pd->timing[i]); + off += 4; + } + wmb(); /* ensure write is finished before progressing */ +} + +static void msm_dsi_phy_bist_init(unsigned char *ctrl_base, + struct mdss_dsi_phy_ctrl *pd) +{ + MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, pd->bistctrl[4]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL1, pd->bistctrl[1]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL0, pd->bistctrl[0]); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, 0); + wmb(); /* ensure write is finished before progressing */ +} + +int msm_dsi_phy_init(unsigned char *ctrl_base, + struct mdss_panel_data *pdata) +{ + struct mdss_dsi_phy_ctrl *pd; + + pd = &(pdata->panel_info.mipi.dsi_phy_db); + + msm_dsi_phy_strength_init(ctrl_base, pd); + + msm_dsi_phy_ctrl_init(ctrl_base, pdata); + + msm_dsi_phy_regulator_init(ctrl_base, pd); + + msm_dsi_phy_calibration(ctrl_base); + + msm_dsi_phy_lane_init(ctrl_base, pd); + + msm_dsi_phy_timing_init(ctrl_base, pd); + + msm_dsi_phy_bist_init(ctrl_base, pd); + + return 0; +} + +void msm_dsi_phy_sw_reset(unsigned char *ctrl_base) +{ + /* start phy sw reset */ + MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0001); + udelay(1000); /*per DSI controller spec*/ + wmb(); /* ensure write is finished before progressing */ + /* end phy sw reset */ + MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0000); + udelay(100); /*per DSI controller spec*/ + wmb(); /* ensure write is finished before progressing */ +} + +void msm_dsi_phy_off(unsigned char *ctrl_base) +{ + MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x05f); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, 0x02); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x00); + MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_1, 0x7f); + MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0); +} diff --git 
a/drivers/video/fbdev/msm/dsi_io_v2.h b/drivers/video/fbdev/msm/dsi_io_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..d0227ecc7b810a06c147daf6d578e441d05c7389 --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_io_v2.h @@ -0,0 +1,49 @@ +/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef DSI_IO_V2_H +#define DSI_IO_V2_H + +#include "mdss_panel.h" + +void msm_dsi_ahb_ctrl(int enable); + +int msm_dsi_io_init(struct platform_device *dev, + struct mdss_module_power *mp); + +void msm_dsi_io_deinit(struct platform_device *dev, + struct mdss_module_power *mp); + +int msm_dsi_clk_init(struct platform_device *dev); + +void msm_dsi_clk_deinit(void); + +int msm_dsi_prepare_clocks(void); + +int msm_dsi_unprepare_clocks(void); + +int msm_dsi_clk_set_rate(unsigned long esc_rate, + unsigned long dsi_rate, + unsigned long byte_rate, + unsigned long pixel_rate); + +int msm_dsi_clk_enable(void); + +int msm_dsi_clk_disable(void); + +int msm_dsi_phy_init(unsigned char *ctrl_base, + struct mdss_panel_data *pdata); + +void msm_dsi_phy_sw_reset(unsigned char *ctrl_base); + +void msm_dsi_phy_off(unsigned char *ctrl_base); +#endif /* DSI_IO_V2_H */ diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c new file mode 100644 index 0000000000000000000000000000000000000000..8a329026602cc82586ad8787d7f730cd69e31f04 --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_status_6g.c @@ -0,0 +1,188 @@ +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#include "mdss_dsi.h" +#include "mdss_mdp.h" +#include "mdss_debug.h" + +/* + * mdss_check_te_status() - Check the status of panel for TE based ESD. + * @ctrl_pdata : dsi controller data + * @pstatus_data : dsi status data + * @interval : duration in milliseconds for panel TE wait + * + * This function is called when the TE signal from the panel doesn't arrive + * after 'interval' milliseconds. If the TE IRQ is not ready, the workqueue + * gets re-scheduled. Otherwise, report the panel to be dead due to ESD attack. 
+ */ +static bool mdss_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata, + struct dsi_status_data *pstatus_data, uint32_t interval) +{ + bool ret; + + atomic_set(&ctrl_pdata->te_irq_ready, 0); + reinit_completion(&ctrl_pdata->te_irq_comp); + enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + /* Define TE interrupt timeout value as 3x(1/fps) */ + ret = wait_for_completion_timeout(&ctrl_pdata->te_irq_comp, + msecs_to_jiffies(interval)); + disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + pr_debug("%s: Panel TE check done with ret = %d\n", __func__, ret); + return ret; +} + +/* + * mdss_check_dsi_ctrl_status() - Check MDP5 DSI controller status periodically. + * @work : dsi controller status data + * @interval : duration in milliseconds to schedule work queue + * + * This function calls check_status API on DSI controller to send the BTA + * command. If DSI controller fails to acknowledge the BTA command, it sends + * the PANEL_ALIVE=0 status to HAL layer. + */ +void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval) +{ + struct dsi_status_data *pstatus_data = NULL; + struct mdss_panel_data *pdata = NULL; + struct mipi_panel_info *mipi = NULL; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + struct mdss_overlay_private *mdp5_data = NULL; + struct mdss_mdp_ctl *ctl = NULL; + int ret = 0; + + pstatus_data = container_of(to_delayed_work(work), + struct dsi_status_data, check_status); + if (!pstatus_data || !(pstatus_data->mfd)) { + pr_err("%s: mfd not available\n", __func__); + return; + } + + pdata = dev_get_platdata(&pstatus_data->mfd->pdev->dev); + if (!pdata) { + pr_err("%s: Panel data not available\n", __func__); + return; + } + mipi = &pdata->panel_info.mipi; + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + if (!ctrl_pdata || (!ctrl_pdata->check_status && + (ctrl_pdata->status_mode != ESD_TE))) { + pr_err("%s: DSI ctrl or status_check callback not available\n", + __func__); + return; + } + + if (!pdata->panel_info.esd_rdy) { + pr_debug("%s: unblank not complete, reschedule check status\n", + __func__); + schedule_delayed_work(&pstatus_data->check_status, + msecs_to_jiffies(interval)); + return; + } + + mdp5_data = mfd_to_mdp5_data(pstatus_data->mfd); + ctl = mfd_to_ctl(pstatus_data->mfd); + + if (!ctl) { + pr_err("%s: Display is off\n", __func__); + return; + } + + if (ctrl_pdata->status_mode == ESD_TE) { + uint32_t fps = mdss_panel_get_framerate(&pdata->panel_info, + FPS_RESOLUTION_HZ); + uint32_t timeout = ((1000 / fps) + 1) * + MDSS_STATUS_TE_WAIT_MAX; + + if (mdss_check_te_status(ctrl_pdata, pstatus_data, timeout)) + goto sim; + else + goto status_dead; + } + + /* + * TODO: Because mdss_dsi_cmd_mdp_busy has made sure DMA to + * be idle in mdss_dsi_cmdlist_commit, it is not necessary + * to acquire ov_lock in case of video mode. Removing this + * lock to fix issues so that ESD thread would not block other + * overlay operations. 
Need refine this lock for command mode + * + * If Burst mode is enabled then we dont have to acquire ov_lock as + * command and data arbitration is possible in h/w + */ + + if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled) + mutex_lock(&mdp5_data->ov_lock); + mutex_lock(&ctl->offlock); + + if (mdss_panel_is_power_off(pstatus_data->mfd->panel_power_state) || + pstatus_data->mfd->shutdown_pending) { + mutex_unlock(&ctl->offlock); + if ((mipi->mode == DSI_CMD_MODE) && + !ctrl_pdata->burst_mode_enabled) + mutex_unlock(&mdp5_data->ov_lock); + pr_err("%s: DSI turning off, avoiding panel status check\n", + __func__); + return; + } + + /* + * For the command mode panels, we return pan display + * IOCTL on vsync interrupt. So, after vsync interrupt comes + * and when DMA_P is in progress, if the panel stops responding + * and if we trigger BTA before DMA_P finishes, then the DSI + * FIFO will not be cleared since the DSI data bus control + * doesn't come back to the host after BTA. This may cause the + * display reset not to be proper. Hence, wait for DMA_P done + * for command mode panels before triggering BTA. + */ + if (ctl->ops.wait_pingpong && !ctrl_pdata->burst_mode_enabled) + ctl->ops.wait_pingpong(ctl, NULL); + + pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__); + MDSS_XLOG(mipi->mode); + + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); + ret = ctrl_pdata->check_status(ctrl_pdata); + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); + + mutex_unlock(&ctl->offlock); + if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled) + mutex_unlock(&mdp5_data->ov_lock); + + if (pstatus_data->mfd->panel_power_state == MDSS_PANEL_POWER_ON) { + if (ret > 0) + schedule_delayed_work(&pstatus_data->check_status, + msecs_to_jiffies(interval)); + else + goto status_dead; + } +sim: + if (pdata->panel_info.panel_force_dead) { + pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead); + pdata->panel_info.panel_force_dead--; + if (!pdata->panel_info.panel_force_dead) + goto status_dead; + } + + return; + +status_dead: + mdss_fb_report_panel_dead(pstatus_data->mfd); +} diff --git a/drivers/video/fbdev/msm/dsi_status_v2.c b/drivers/video/fbdev/msm/dsi_status_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..35b09849f3c88eabcecbe803ae0db1d3dd8f5325 --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_status_v2.c @@ -0,0 +1,167 @@ +/* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include + +#include "mdss_dsi.h" +#include "mdp3_ctrl.h" + +/* + * mdp3_check_te_status() - Check the status of panel for TE based ESD. + * @ctrl_pdata : dsi controller data + * @pstatus_data : dsi status data + * @interval : duration in milliseconds for panel TE wait + * + * This function waits for TE signal from the panel for a maximum + * duration of 3 vsyncs. If timeout occurs, report the panel to be + * dead due to ESD attack. + * NOTE: The TE IRQ handling is linked to the ESD thread scheduling, + * i.e. 
rate of TE IRQs firing is bound by the ESD interval. + */ +static int mdp3_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata, + struct dsi_status_data *pstatus_data, uint32_t interval) +{ + int ret; + + pr_debug("%s: Checking panel TE status\n", __func__); + + atomic_set(&ctrl_pdata->te_irq_ready, 0); + reinit_completion(&ctrl_pdata->te_irq_comp); + enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + + ret = wait_for_completion_timeout(&ctrl_pdata->te_irq_comp, + msecs_to_jiffies(interval)); + + disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + pr_debug("%s: Panel TE check done with ret = %d\n", __func__, ret); + + return ret; +} + +/* + * mdp3_check_dsi_ctrl_status() - Check MDP3 DSI controller status periodically. + * @work : dsi controller status data + * @interval : duration in milliseconds to schedule work queue + * + * This function calls check_status API on DSI controller to send the BTA + * command. If DSI controller fails to acknowledge the BTA command, it sends + * the PANEL_ALIVE=0 status to HAL layer. + */ +void mdp3_check_dsi_ctrl_status(struct work_struct *work, + uint32_t interval) +{ + struct dsi_status_data *pdsi_status = NULL; + struct mdss_panel_data *pdata = NULL; + struct mipi_panel_info *mipi = NULL; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + struct mdp3_session_data *mdp3_session = NULL; + int ret = 0; + + pdsi_status = container_of(to_delayed_work(work), + struct dsi_status_data, check_status); + + if (!pdsi_status || !(pdsi_status->mfd)) { + pr_err("%s: mfd not available\n", __func__); + return; + } + + pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev); + if (!pdata) { + pr_err("%s: Panel data not available\n", __func__); + return; + } + + mipi = &pdata->panel_info.mipi; + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + if (!ctrl_pdata || (!ctrl_pdata->check_status && + (ctrl_pdata->status_mode != ESD_TE))) { + pr_err("%s: DSI ctrl or status_check callback not available\n", + __func__); + return; + } + + if (!pdata->panel_info.esd_rdy) { + pr_err("%s: unblank not complete, reschedule check status\n", + __func__); + schedule_delayed_work(&pdsi_status->check_status, + msecs_to_jiffies(interval)); + return; + } + + mdp3_session = pdsi_status->mfd->mdp.private1; + if (!mdp3_session) { + pr_err("%s: Display is off\n", __func__); + return; + } + + if (mdp3_session->in_splash_screen) { + schedule_delayed_work(&pdsi_status->check_status, + msecs_to_jiffies(interval)); + pr_debug("%s: cont splash is on\n", __func__); + return; + } + + if (mipi->mode == DSI_CMD_MODE && + mipi->hw_vsync_mode && + mdss_dsi_is_te_based_esd(ctrl_pdata)) { + uint32_t fps = mdss_panel_get_framerate(&pdata->panel_info, + FPS_RESOLUTION_HZ); + uint32_t timeout = ((1000 / fps) + 1) * + MDSS_STATUS_TE_WAIT_MAX; + + if (mdp3_check_te_status(ctrl_pdata, pdsi_status, timeout) > 0) + goto sim; + goto status_dead; + } + + mutex_lock(&mdp3_session->lock); + if (!mdp3_session->status) { + pr_debug("%s: display off already\n", __func__); + mutex_unlock(&mdp3_session->lock); + return; + } + + if (mdp3_session->wait_for_dma_done) + ret = mdp3_session->wait_for_dma_done(mdp3_session); + mutex_unlock(&mdp3_session->lock); + + if (!ret) + ret = ctrl_pdata->check_status(ctrl_pdata); + else + pr_err("%s: wait_for_dma_done error\n", __func__); + + if (mdss_fb_is_power_on_interactive(pdsi_status->mfd)) { + if (ret > 0) + schedule_delayed_work(&pdsi_status->check_status, + msecs_to_jiffies(interval)); + else + goto status_dead; + } +sim: + if 
(pdata->panel_info.panel_force_dead) { + pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead); + pdata->panel_info.panel_force_dead--; + if (!pdata->panel_info.panel_force_dead) + goto status_dead; + } + return; + +status_dead: + mdss_fb_report_panel_dead(pdsi_status->mfd); +} + diff --git a/drivers/video/fbdev/msm/dsi_v2.c b/drivers/video/fbdev/msm/dsi_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..bfd29416cd1b06bd1877defd77f5e0adae1813d8 --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_v2.c @@ -0,0 +1,616 @@ +/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include + +#include "dsi_v2.h" + +static struct dsi_interface dsi_intf; + +static int dsi_off(struct mdss_panel_data *pdata) +{ + int rc = 0; + + pr_debug("turn off dsi controller\n"); + if (dsi_intf.off) + rc = dsi_intf.off(pdata); + + if (rc) { + pr_err("mdss_dsi_off DSI failed %d\n", rc); + return rc; + } + return rc; +} + +static int dsi_on(struct mdss_panel_data *pdata) +{ + int rc = 0; + + pr_debug("%s DSI controller on\n", __func__); + if (dsi_intf.on) + rc = dsi_intf.on(pdata); + + if (rc) { + pr_err("mdss_dsi_on DSI failed %d\n", rc); + return rc; + } + return rc; +} + +static int dsi_update_pconfig(struct mdss_panel_data *pdata, + int mode) +{ + int ret = 0; + struct mdss_panel_info *pinfo = &pdata->panel_info; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + + if (!pdata) + return -ENODEV; + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + if (mode == DSI_CMD_MODE) { + pinfo->mipi.mode = DSI_CMD_MODE; + pinfo->type = MIPI_CMD_PANEL; + pinfo->mipi.vsync_enable = 1; + pinfo->mipi.hw_vsync_mode = 1; + } else { + pinfo->mipi.mode = DSI_VIDEO_MODE; + pinfo->type = MIPI_VIDEO_PANEL; + pinfo->mipi.vsync_enable = 0; + pinfo->mipi.hw_vsync_mode = 0; + } + + ctrl_pdata->panel_mode = pinfo->mipi.mode; + mdss_panel_get_dst_fmt(pinfo->bpp, pinfo->mipi.mode, + pinfo->mipi.pixel_packing, &(pinfo->mipi.dst_format)); + pinfo->cont_splash_enabled = 0; + + return ret; +} + +static int dsi_panel_handler(struct mdss_panel_data *pdata, int enable) +{ + int rc = 0; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + + pr_debug("%s enable=%d\n", __func__, enable); + if (!pdata) + return -ENODEV; + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + + if (enable && + (pdata->panel_info.panel_power_state == MDSS_PANEL_POWER_OFF)) { + if (!pdata->panel_info.dynamic_switch_pending) { + mdss_dsi_panel_reset(pdata, 1); + rc = ctrl_pdata->on(pdata); + if (rc) + pr_err("%s panel on failed %d\n", __func__, rc); + } + pdata->panel_info.panel_power_state = MDSS_PANEL_POWER_ON; + if (pdata->panel_info.type == MIPI_CMD_PANEL) + mdss_dsi_set_tear_on(ctrl_pdata); + } else if (!enable && + (pdata->panel_info.panel_power_state == MDSS_PANEL_POWER_ON)) { + msm_dsi_sw_reset(); + if (dsi_intf.op_mode_config) + dsi_intf.op_mode_config(DSI_CMD_MODE, pdata); + if (pdata->panel_info.dynamic_switch_pending) { + 
pr_info("%s: switching to %s mode\n", __func__, + (pdata->panel_info.mipi.mode ? "video" : "command")); + if (pdata->panel_info.type == MIPI_CMD_PANEL) { + ctrl_pdata->switch_mode(pdata, DSI_VIDEO_MODE); + } else if (pdata->panel_info.type == MIPI_VIDEO_PANEL) { + ctrl_pdata->switch_mode(pdata, DSI_CMD_MODE); + mdss_dsi_set_tear_off(ctrl_pdata); + } + } + pdata->panel_info.panel_power_state = MDSS_PANEL_POWER_OFF; + if (!pdata->panel_info.dynamic_switch_pending) { + rc = ctrl_pdata->off(pdata); + mdss_dsi_panel_reset(pdata, 0); + } + } + return rc; +} + +static int dsi_splash_on(struct mdss_panel_data *pdata) +{ + int rc = 0; + + pr_debug("%s:\n", __func__); + + if (dsi_intf.cont_on) + rc = dsi_intf.cont_on(pdata); + + if (rc) { + pr_err("mdss_dsi_on DSI failed %d\n", rc); + return rc; + } + return rc; +} + +static int dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable) +{ + int rc = 0; + + pr_debug("%s:\n", __func__); + + if (dsi_intf.clk_ctrl) + rc = dsi_intf.clk_ctrl(pdata, enable); + + return rc; +} + +static int dsi_event_handler(struct mdss_panel_data *pdata, + int event, void *arg) +{ + int rc = 0; + + if (!pdata) { + pr_err("%s: Invalid input data\n", __func__); + return -ENODEV; + } + + switch (event) { + case MDSS_EVENT_UNBLANK: + rc = dsi_on(pdata); + break; + case MDSS_EVENT_BLANK: + rc = dsi_off(pdata); + break; + case MDSS_EVENT_PANEL_ON: + rc = dsi_panel_handler(pdata, 1); + break; + case MDSS_EVENT_PANEL_OFF: + rc = dsi_panel_handler(pdata, 0); + break; + case MDSS_EVENT_CONT_SPLASH_BEGIN: + rc = dsi_splash_on(pdata); + break; + case MDSS_EVENT_PANEL_CLK_CTRL: + rc = dsi_clk_ctrl(pdata, + (int)(((struct dsi_panel_clk_ctrl *)arg)->state)); + break; + case MDSS_EVENT_DSI_UPDATE_PANEL_DATA: + rc = dsi_update_pconfig(pdata, (int)(unsigned long) arg); + break; + default: + pr_debug("%s: unhandled event=%d\n", __func__, event); + break; + } + return rc; +} + +static int dsi_parse_gpio(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + struct device_node *np = pdev->dev.of_node; + + ctrl_pdata->disp_en_gpio = of_get_named_gpio(np, + "qcom,platform-enable-gpio", 0); + + if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) + pr_err("%s:%d, Disp_en gpio not specified\n", + __func__, __LINE__); + + ctrl_pdata->rst_gpio = of_get_named_gpio(np, + "qcom,platform-reset-gpio", 0); + if (!gpio_is_valid(ctrl_pdata->rst_gpio)) + pr_err("%s:%d, reset gpio not specified\n", + __func__, __LINE__); + + ctrl_pdata->mode_gpio = -1; + if (ctrl_pdata->panel_data.panel_info.mode_gpio_state != + MODE_GPIO_NOT_VALID) { + ctrl_pdata->mode_gpio = of_get_named_gpio(np, + "qcom,platform-mode-gpio", 0); + if (!gpio_is_valid(ctrl_pdata->mode_gpio)) + pr_info("%s:%d, reset gpio not specified\n", + __func__, __LINE__); + } + + ctrl_pdata->bklt_en_gpio = of_get_named_gpio(np, + "qcom,platform-bklight-en-gpio", 0); + if (!gpio_is_valid(ctrl_pdata->bklt_en_gpio)) + pr_err("%s:%d, bklt_en gpio not specified\n", + __func__, __LINE__); + + return 0; +} + +static void mdss_dsi_put_dt_vreg_data(struct device *dev, + struct mdss_module_power *module_power) +{ + if (!module_power) { + pr_err("%s: invalid input\n", __func__); + return; + } + + if (module_power->vreg_config) { + devm_kfree(dev, module_power->vreg_config); + module_power->vreg_config = NULL; + } + module_power->num_vreg = 0; +} + +static int mdss_dsi_get_dt_vreg_data(struct device *dev, + struct mdss_module_power *mp, enum dsi_pm_type module) +{ + int i = 0, rc = 0; + u32 tmp = 0; + struct device_node *of_node = NULL, *supply_node = 
NULL; + const char *pm_supply_name = NULL; + struct device_node *supply_root_node = NULL; + + if (!dev || !mp) { + pr_err("%s: invalid input\n", __func__); + rc = -EINVAL; + return rc; + } + + of_node = dev->of_node; + + mp->num_vreg = 0; + pm_supply_name = __mdss_dsi_pm_supply_node_name(module); + supply_root_node = of_get_child_by_name(of_node, pm_supply_name); + if (!supply_root_node) { + pr_err("no supply entry present\n"); + goto novreg; + } + + for_each_child_of_node(supply_root_node, supply_node) { + mp->num_vreg++; + } + + if (mp->num_vreg == 0) { + pr_debug("%s: no vreg\n", __func__); + goto novreg; + } else { + pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg); + } + + mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) * + mp->num_vreg, GFP_KERNEL); + if (!mp->vreg_config) { + rc = -ENOMEM; + goto error; + } + + for_each_child_of_node(supply_root_node, supply_node) { + const char *st = NULL; + /* vreg-name */ + rc = of_property_read_string(supply_node, + "qcom,supply-name", &st); + if (rc) { + pr_err("%s: error reading name. rc=%d\n", + __func__, rc); + goto error; + } + snprintf(mp->vreg_config[i].vreg_name, + ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st); + /* vreg-min-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-min-voltage", &tmp); + if (rc) { + pr_err("%s: error reading min volt. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].min_voltage = tmp; + + /* vreg-max-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-max-voltage", &tmp); + if (rc) { + pr_err("%s: error reading max volt. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].max_voltage = tmp; + + /* enable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-enable-load", &tmp); + if (rc) { + pr_err("%s: error reading enable load. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp; + + /* disable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-disable-load", &tmp); + if (rc) { + pr_err("%s: error reading disable load. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp; + + /* ulp-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-ulp-load", &tmp); + if (rc) + pr_warn("%s: error reading ulp load. rc=%d\n", + __func__, rc); + + mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp : + mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]); + + /* pre-sleep */ + rc = of_property_read_u32(supply_node, + "qcom,supply-pre-on-sleep", &tmp); + if (rc) { + pr_debug("%s: error reading supply pre sleep value. rc=%d\n", + __func__, rc); + rc = 0; + } else { + mp->vreg_config[i].pre_on_sleep = tmp; + } + + rc = of_property_read_u32(supply_node, + "qcom,supply-pre-off-sleep", &tmp); + if (rc) { + pr_debug("%s: error reading supply pre sleep value. rc=%d\n", + __func__, rc); + rc = 0; + } else { + mp->vreg_config[i].pre_off_sleep = tmp; + } + + /* post-sleep */ + rc = of_property_read_u32(supply_node, + "qcom,supply-post-on-sleep", &tmp); + if (rc) { + pr_debug("%s: error reading supply post sleep value. rc=%d\n", + __func__, rc); + rc = 0; + } else { + mp->vreg_config[i].post_on_sleep = tmp; + } + + rc = of_property_read_u32(supply_node, + "qcom,supply-post-off-sleep", &tmp); + if (rc) { + pr_debug("%s: error reading supply post sleep value. 
rc=%d\n", + __func__, rc); + rc = 0; + } else { + mp->vreg_config[i].post_off_sleep = tmp; + } + + pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, ulp=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n", + __func__, + mp->vreg_config[i].vreg_name, + mp->vreg_config[i].min_voltage, + mp->vreg_config[i].max_voltage, + mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] + mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] + mp->vreg_config[i].load[DSS_REG_MODE_ULP] + mp->vreg_config[i].pre_on_sleep, + mp->vreg_config[i].post_on_sleep, + mp->vreg_config[i].pre_off_sleep, + mp->vreg_config[i].post_off_sleep + ); + ++i; + } + + return rc; + +error: + if (mp->vreg_config) { + devm_kfree(dev, mp->vreg_config); + mp->vreg_config = NULL; + } +novreg: + mp->num_vreg = 0; + + return rc; +} + +static int dsi_parse_phy(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + struct device_node *np = pdev->dev.of_node; + int i, len; + const char *data; + struct mdss_dsi_phy_ctrl *phy_db + = &(ctrl_pdata->panel_data.panel_info.mipi.dsi_phy_db); + + data = of_get_property(np, "qcom,platform-regulator-settings", &len); + if ((!data) || (len != 6)) { + pr_err("%s:%d, Unable to read Phy regulator settings", + __func__, __LINE__); + return -EINVAL; + } + for (i = 0; i < len; i++) + phy_db->regulator[i] = data[i]; + + data = of_get_property(np, "qcom,platform-strength-ctrl", &len); + if ((!data) || (len != 2)) { + pr_err("%s:%d, Unable to read Phy Strength ctrl settings", + __func__, __LINE__); + return -EINVAL; + } + phy_db->strength[0] = data[0]; + phy_db->strength[1] = data[1]; + + data = of_get_property(np, "qcom,platform-bist-ctrl", &len); + if ((!data) || (len != 6)) { + pr_err("%s:%d, Unable to read Phy Bist Ctrl settings", + __func__, __LINE__); + return -EINVAL; + } + for (i = 0; i < len; i++) + phy_db->bistctrl[i] = data[i]; + + data = of_get_property(np, "qcom,platform-lane-config", &len); + if ((!data) || (len != 30)) { + pr_err("%s:%d, Unable to read Phy lane configure settings", + __func__, __LINE__); + return -EINVAL; + } + for (i = 0; i < len; i++) + phy_db->lanecfg[i] = data[i]; + + return 0; +} + +void dsi_ctrl_config_deinit(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + int i; + + for (i = DSI_MAX_PM - 1; i >= 0; i--) { + mdss_dsi_put_dt_vreg_data(&pdev->dev, + &ctrl_pdata->power_data[i]); + } +} + +int dsi_ctrl_config_init(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + int rc = 0, i; + + for (i = 0; i < DSI_MAX_PM; i++) { + rc = mdss_dsi_get_dt_vreg_data(&pdev->dev, + &ctrl_pdata->power_data[i], i); + if (rc) { + DEV_ERR("%s: '%s' get_dt_vreg_data failed.rc=%d\n", + __func__, __mdss_dsi_pm_name(i), rc); + return rc; + } + } + + rc = dsi_parse_gpio(pdev, ctrl_pdata); + if (rc) { + pr_err("fail to parse panel GPIOs\n"); + return rc; + } + + rc = dsi_parse_phy(pdev, ctrl_pdata); + if (rc) { + pr_err("fail to parse DSI PHY settings\n"); + return rc; + } + + return 0; +} +int dsi_panel_device_register_v2(struct platform_device *dev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + struct mipi_panel_info *mipi; + int rc; + u8 lanes = 0, bpp; + u32 h_period, v_period; + struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info); + + h_period = ((pinfo->lcdc.h_pulse_width) + + (pinfo->lcdc.h_back_porch) + + (pinfo->xres) + + (pinfo->lcdc.h_front_porch)); + + v_period = ((pinfo->lcdc.v_pulse_width) + + (pinfo->lcdc.v_back_porch) + + (pinfo->yres) + + (pinfo->lcdc.v_front_porch)); + + mipi = 
&pinfo->mipi; + + pinfo->type = + ((mipi->mode == DSI_VIDEO_MODE) + ? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL); + + if (mipi->data_lane3) + lanes += 1; + if (mipi->data_lane2) + lanes += 1; + if (mipi->data_lane1) + lanes += 1; + if (mipi->data_lane0) + lanes += 1; + + if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888) + || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888) + || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE)) + bpp = 3; + else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565) + || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565)) + bpp = 2; + else + bpp = 3; /* Default format set to RGB888 */ + + if (pinfo->type == MIPI_VIDEO_PANEL && + !pinfo->clk_rate) { + h_period += pinfo->lcdc.xres_pad; + v_period += pinfo->lcdc.yres_pad; + + if (lanes > 0) { + pinfo->clk_rate = + ((h_period * v_period * (mipi->frame_rate) * bpp * 8) + / lanes); + } else { + pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__); + pinfo->clk_rate = + (h_period * v_period + * (mipi->frame_rate) * bpp * 8); + } + } + + ctrl_pdata->panel_data.event_handler = dsi_event_handler; + + /* + * register in mdp driver + */ + rc = mdss_register_panel(dev, &(ctrl_pdata->panel_data)); + if (rc) { + dev_err(&dev->dev, "unable to register MIPI DSI panel\n"); + return rc; + } + + pr_debug("%s: Panal data initialized\n", __func__); + return 0; +} + +void dsi_register_interface(struct dsi_interface *intf) +{ + dsi_intf = *intf; +} + +int dsi_buf_alloc(struct dsi_buf *dp, int size) +{ + dp->start = kzalloc(size, GFP_KERNEL); + if (!dp->start) + return -ENOMEM; + + dp->end = dp->start + size; + dp->size = size; + + if ((int)dp->start & 0x07) { + pr_err("%s: buf NOT 8 bytes aligned\n", __func__); + return -EINVAL; + } + + dp->data = dp->start; + dp->len = 0; + return 0; +} + diff --git a/drivers/video/fbdev/msm/dsi_v2.h b/drivers/video/fbdev/msm/dsi_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..2f6f4043cddcf98a0d7e45c5a11b88a59740a82b --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_v2.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef DSI_V2_H +#define DSI_V2_H + +#include +#include + +#include "mdss_dsi.h" +#include "mdss_panel.h" + +#define DSI_BUF_SIZE 1024 +#define DSI_MRPS 0x04 /* Maximum Return Packet Size */ + +struct dsi_interface { + int (*on)(struct mdss_panel_data *pdata); + int (*off)(struct mdss_panel_data *pdata); + int (*cont_on)(struct mdss_panel_data *pdata); + int (*clk_ctrl)(struct mdss_panel_data *pdata, int enable); + void (*op_mode_config)(int mode, struct mdss_panel_data *pdata); + int index; + void *private; +}; + +int dsi_panel_device_register_v2(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata); + +void dsi_register_interface(struct dsi_interface *intf); + +int dsi_buf_alloc(struct dsi_buf *dp, int size); + +void dsi_set_tx_power_mode(int mode); + +void dsi_ctrl_config_deinit(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata); + +int dsi_ctrl_config_init(struct platform_device *pdev, + struct mdss_dsi_ctrl_pdata *ctrl_pdata); + +struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val); + +int mdp3_panel_get_boot_cfg(void); + +void msm_dsi_sw_reset(void); +#endif /* DSI_V2_H */ diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c new file mode 100644 index 0000000000000000000000000000000000000000..f85880dd8d76656c199f02f42cf87b36a5d6d611 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3.c @@ -0,0 +1,2661 @@ +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2007 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "mdp3.h" +#include "mdss_fb.h" +#include "mdp3_hwio.h" +#include "mdp3_ctrl.h" +#include "mdp3_ppp.h" +#include "mdss_debug.h" +#include "mdss_smmu.h" +#include "mdss.h" + +#ifndef EXPORT_COMPAT +#define EXPORT_COMPAT(x) +#endif + +#define AUTOSUSPEND_TIMEOUT_MS 100 +#define MISR_POLL_SLEEP 2000 +#define MISR_POLL_TIMEOUT 32000 +#define MDP3_REG_CAPTURED_DSI_PCLK_MASK 1 + +#define MDP_CORE_HW_VERSION 0x03050306 +struct mdp3_hw_resource *mdp3_res; + +#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ + { \ + .src = MSM_BUS_MASTER_MDP_PORT0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = (ab_val), \ + .ib = (ib_val), \ + } + +#define SET_BIT(value, bit_num) \ +{ \ + value[bit_num >> 3] |= (1 << (bit_num & 7)); \ +} + +#define MAX_BPP_SUPPORTED 4 + +static struct msm_bus_vectors mdp_bus_vectors[] = { + MDP_BUS_VECTOR_ENTRY(0, 0), + MDP_BUS_VECTOR_ENTRY(SZ_128M, SZ_256M), + MDP_BUS_VECTOR_ENTRY(SZ_256M, SZ_512M), +}; +static struct msm_bus_paths + mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)]; +static struct msm_bus_scale_pdata mdp_bus_scale_table = { + .usecase = mdp_bus_usecases, + .num_usecases = ARRAY_SIZE(mdp_bus_usecases), + .name = "mdp3", +}; + +struct mdp3_bus_handle_map mdp3_bus_handle[MDP3_BUS_HANDLE_MAX] = { + [MDP3_BUS_HANDLE] = { + .bus_vector = mdp_bus_vectors, + .usecases = mdp_bus_usecases, + .scale_pdata = &mdp_bus_scale_table, + .current_bus_idx = 0, + .handle = 0, + }, +}; + +static struct mdss_panel_intf pan_types[] = { + {"dsi", MDSS_PANEL_INTF_DSI}, +}; +static char mdss_mdp3_panel[MDSS_MAX_PANEL_LEN]; + +struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = { + [MDP3_IOMMU_DOMAIN_UNSECURE] = { + .domain_type = MDP3_IOMMU_DOMAIN_UNSECURE, + .client_name = "mdp_ns", + .npartitions = 1, + .domain_idx = MDP3_IOMMU_DOMAIN_UNSECURE, + }, + [MDP3_IOMMU_DOMAIN_SECURE] = { + .domain_type = MDP3_IOMMU_DOMAIN_SECURE, + .client_name = "mdp_secure", + .npartitions = 1, + .domain_idx = MDP3_IOMMU_DOMAIN_SECURE, + }, +}; + +static irqreturn_t mdp3_irq_handler(int irq, void *ptr) +{ + int i = 0; + struct mdp3_hw_resource *mdata = (struct mdp3_hw_resource *)ptr; + u32 mdp_interrupt = 0; + u32 mdp_status = 0; + + spin_lock(&mdata->irq_lock); + if (!mdata->irq_mask) { + pr_err("spurious interrupt\n"); + spin_unlock(&mdata->irq_lock); + return IRQ_HANDLED; + } + mdp_status = MDP3_REG_READ(MDP3_REG_INTR_STATUS); + mdp_interrupt = mdp_status; + pr_debug("%s irq=%d\n", __func__, mdp_interrupt); + + mdp_interrupt &= mdata->irq_mask; + + while (mdp_interrupt && i < MDP3_MAX_INTR) { + if ((mdp_interrupt & 0x1) && mdata->callbacks[i].cb) + mdata->callbacks[i].cb(i, mdata->callbacks[i].data); + mdp_interrupt = mdp_interrupt >> 1; + i++; + } + MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_status); + + spin_unlock(&mdata->irq_lock); + + return IRQ_HANDLED; +} + +void mdp3_irq_enable(int type) +{ + unsigned long flag; + + pr_debug("%s type=%d\n", __func__, type); + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + if (mdp3_res->irq_ref_count[type] > 0) { + pr_debug("interrupt %d already enabled\n", type); + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); + return; + } + + mdp3_res->irq_mask |= BIT(type); + 
MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask); + + mdp3_res->irq_ref_count[type] += 1; + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); +} + +void mdp3_irq_disable(int type) +{ + unsigned long flag; + + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + mdp3_irq_disable_nosync(type); + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); +} + +void mdp3_irq_disable_nosync(int type) +{ + if (mdp3_res->irq_ref_count[type] <= 0) { + pr_debug("interrupt %d not enabled\n", type); + return; + } + mdp3_res->irq_ref_count[type] -= 1; + if (mdp3_res->irq_ref_count[type] == 0) { + mdp3_res->irq_mask &= ~BIT(type); + MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask); + } +} + +int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb) +{ + unsigned long flag; + + pr_debug("interrupt %d callback\n", type); + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + if (cb) + mdp3_res->callbacks[type] = *cb; + else + mdp3_res->callbacks[type].cb = NULL; + + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); + return 0; +} + +void mdp3_irq_register(void) +{ + unsigned long flag; + struct mdss_hw *mdp3_hw; + + pr_debug("%s\n", __func__); + mdp3_hw = &mdp3_res->mdp3_hw; + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + mdp3_res->irq_ref_cnt++; + if (mdp3_res->irq_ref_cnt == 1) { + MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask); + mdp3_res->mdss_util->enable_irq(&mdp3_res->mdp3_hw); + } + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); +} + +void mdp3_irq_deregister(void) +{ + unsigned long flag; + bool irq_enabled = true; + struct mdss_hw *mdp3_hw; + + pr_debug("%s\n", __func__); + mdp3_hw = &mdp3_res->mdp3_hw; + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + memset(mdp3_res->irq_ref_count, 0, sizeof(u32) * MDP3_MAX_INTR); + mdp3_res->irq_mask = 0; + MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0); + mdp3_res->irq_ref_cnt--; + /* This can happen if suspend is called first */ + if (mdp3_res->irq_ref_cnt < 0) { + irq_enabled = false; + mdp3_res->irq_ref_cnt = 0; + } + if (mdp3_res->irq_ref_cnt == 0 && irq_enabled) + mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw); + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); +} + +void mdp3_irq_suspend(void) +{ + unsigned long flag; + bool irq_enabled = true; + struct mdss_hw *mdp3_hw; + + pr_debug("%s\n", __func__); + mdp3_hw = &mdp3_res->mdp3_hw; + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + mdp3_res->irq_ref_cnt--; + if (mdp3_res->irq_ref_cnt < 0) { + irq_enabled = false; + mdp3_res->irq_ref_cnt = 0; + } + if (mdp3_res->irq_ref_cnt == 0 && irq_enabled) { + MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0); + mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw); + } + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); +} + +static int mdp3_bus_scale_register(void) +{ + int i, j; + + if (!mdp3_res->bus_handle) { + pr_err("No bus handle\n"); + return -EINVAL; + } + for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) { + struct mdp3_bus_handle_map *bus_handle = + &mdp3_res->bus_handle[i]; + + if (!bus_handle->handle) { + int j; + struct msm_bus_scale_pdata *bus_pdata = + bus_handle->scale_pdata; + + for (j = 0; j < bus_pdata->num_usecases; j++) { + bus_handle->usecases[j].num_paths = 1; + bus_handle->usecases[j].vectors = + &bus_handle->bus_vector[j]; + } + + bus_handle->handle = + msm_bus_scale_register_client(bus_pdata); + if (!bus_handle->handle) { + pr_err("not able to get bus scale i=%d\n", i); + return -ENOMEM; + } + pr_debug("register bus_hdl=%x\n", + bus_handle->handle); + } + + for (j = 0; j < MDP3_CLIENT_MAX; j++) { + bus_handle->ab[j] = 
0; + bus_handle->ib[j] = 0; + } + } + return 0; +} + +static void mdp3_bus_scale_unregister(void) +{ + int i; + + if (!mdp3_res->bus_handle) + return; + + for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) { + pr_debug("unregister index=%d bus_handle=%x\n", + i, mdp3_res->bus_handle[i].handle); + if (mdp3_res->bus_handle[i].handle) { + msm_bus_scale_unregister_client( + mdp3_res->bus_handle[i].handle); + mdp3_res->bus_handle[i].handle = 0; + } + } +} + +int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota) +{ + struct mdp3_bus_handle_map *bus_handle; + int cur_bus_idx; + int bus_idx; + int client_idx; + u64 total_ib = 0, total_ab = 0; + int i, rc; + + client_idx = MDP3_BUS_HANDLE; + + bus_handle = &mdp3_res->bus_handle[client_idx]; + cur_bus_idx = bus_handle->current_bus_idx; + + if (bus_handle->handle < 1) { + pr_err("invalid bus handle %d\n", bus_handle->handle); + return -EINVAL; + } + + bus_handle->ab[client] = ab_quota; + bus_handle->ib[client] = ib_quota; + + for (i = 0; i < MDP3_CLIENT_MAX; i++) { + total_ab += bus_handle->ab[i]; + total_ib += bus_handle->ib[i]; + } + + if ((total_ab | total_ib) == 0) { + bus_idx = 0; + } else { + int num_cases = bus_handle->scale_pdata->num_usecases; + struct msm_bus_vectors *vect = NULL; + + bus_idx = (cur_bus_idx % (num_cases - 1)) + 1; + + /* aligning to avoid performing updates for small changes */ + total_ab = ALIGN(total_ab, SZ_64M); + total_ib = ALIGN(total_ib, SZ_64M); + + vect = bus_handle->scale_pdata->usecase[cur_bus_idx].vectors; + if ((total_ab == vect->ab) && (total_ib == vect->ib)) { + pr_debug("skip bus scaling, no change in vectors\n"); + return 0; + } + + vect = bus_handle->scale_pdata->usecase[bus_idx].vectors; + vect->ab = total_ab; + vect->ib = total_ib; + + pr_debug("bus scale idx=%d ab=%llu ib=%llu\n", bus_idx, + vect->ab, vect->ib); + } + bus_handle->current_bus_idx = bus_idx; + rc = msm_bus_scale_client_update_request(bus_handle->handle, bus_idx); + + if (!rc && ab_quota != 0 && ib_quota != 0) { + bus_handle->restore_ab[client] = ab_quota; + bus_handle->restore_ib[client] = ib_quota; + } + + return rc; +} + +static int mdp3_clk_update(u32 clk_idx, u32 enable) +{ + int ret = 0; + struct clk *clk; + int count = 0; + + if (clk_idx >= MDP3_MAX_CLK || !mdp3_res->clocks[clk_idx]) + return -ENODEV; + + clk = mdp3_res->clocks[clk_idx]; + + if (enable) + mdp3_res->clock_ref_count[clk_idx]++; + else + mdp3_res->clock_ref_count[clk_idx]--; + + count = mdp3_res->clock_ref_count[clk_idx]; + if (count == 1 && enable) { + pr_debug("clk=%d en=%d\n", clk_idx, enable); + ret = clk_prepare(clk); + if (ret) { + pr_err("%s: Failed to prepare clock %d", + __func__, clk_idx); + mdp3_res->clock_ref_count[clk_idx]--; + return ret; + } + if (clk_idx == MDP3_CLK_MDP_CORE) + MDSS_XLOG(enable); + ret = clk_enable(clk); + if (ret) + pr_err("%s: clock enable failed %d\n", __func__, + clk_idx); + } else if (count == 0) { + pr_debug("clk=%d disable\n", clk_idx); + if (clk_idx == MDP3_CLK_MDP_CORE) + MDSS_XLOG(enable); + clk_disable(clk); + clk_unprepare(clk); + ret = 0; + } else if (count < 0) { + pr_err("clk=%d count=%d\n", clk_idx, count); + ret = -EINVAL; + } + return ret; +} + + + +int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, + int client) +{ + int ret = 0; + unsigned long rounded_rate; + struct clk *clk = mdp3_res->clocks[clk_type]; + + if (clk) { + mutex_lock(&mdp3_res->res_mutex); + rounded_rate = clk_round_rate(clk, clk_rate); + if (IS_ERR_VALUE(rounded_rate)) { + pr_err("unable to round rate err=%ld\n", rounded_rate); + 
mutex_unlock(&mdp3_res->res_mutex); + return -EINVAL; + } + if (clk_type == MDP3_CLK_MDP_SRC) { + if (client == MDP3_CLIENT_DMA_P) { + mdp3_res->dma_core_clk_request = rounded_rate; + } else if (client == MDP3_CLIENT_PPP) { + mdp3_res->ppp_core_clk_request = rounded_rate; + } else { + pr_err("unrecognized client=%d\n", client); + mutex_unlock(&mdp3_res->res_mutex); + return -EINVAL; + } + rounded_rate = max(mdp3_res->dma_core_clk_request, + mdp3_res->ppp_core_clk_request); + } + if (rounded_rate != clk_get_rate(clk)) { + ret = clk_set_rate(clk, rounded_rate); + if (ret) + pr_err("clk_set_rate failed ret=%d\n", ret); + else + pr_debug("mdp clk rate=%lu, client = %d\n", + rounded_rate, client); + } + mutex_unlock(&mdp3_res->res_mutex); + } else { + pr_err("mdp src clk not setup properly\n"); + ret = -EINVAL; + } + return ret; +} + +unsigned long mdp3_get_clk_rate(u32 clk_idx) +{ + unsigned long clk_rate = 0; + struct clk *clk; + + if (clk_idx >= MDP3_MAX_CLK) + return -ENODEV; + + clk = mdp3_res->clocks[clk_idx]; + + if (clk) { + mutex_lock(&mdp3_res->res_mutex); + clk_rate = clk_get_rate(clk); + mutex_unlock(&mdp3_res->res_mutex); + } + return clk_rate; +} + +static int mdp3_clk_register(char *clk_name, int clk_idx) +{ + struct clk *tmp; + + if (clk_idx >= MDP3_MAX_CLK) { + pr_err("invalid clk index %d\n", clk_idx); + return -EINVAL; + } + + tmp = devm_clk_get(&mdp3_res->pdev->dev, clk_name); + if (IS_ERR(tmp)) { + pr_err("unable to get clk: %s\n", clk_name); + return PTR_ERR(tmp); + } + + mdp3_res->clocks[clk_idx] = tmp; + + return 0; +} + +static int mdp3_clk_setup(void) +{ + int rc; + + rc = mdp3_clk_register("iface_clk", MDP3_CLK_AHB); + if (rc) + return rc; + + rc = mdp3_clk_register("bus_clk", MDP3_CLK_AXI); + if (rc) + return rc; + + rc = mdp3_clk_register("core_clk_src", MDP3_CLK_MDP_SRC); + if (rc) + return rc; + + rc = mdp3_clk_register("core_clk", MDP3_CLK_MDP_CORE); + if (rc) + return rc; + + rc = mdp3_clk_register("vsync_clk", MDP3_CLK_VSYNC); + if (rc) + return rc; + + rc = mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, MDP_CORE_CLK_RATE_SVS, + MDP3_CLIENT_DMA_P); + if (rc) + pr_err("%s: Error setting max clock during probe\n", __func__); + return rc; +} + +static void mdp3_clk_remove(void) +{ + if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AHB])) + clk_put(mdp3_res->clocks[MDP3_CLK_AHB]); + + if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AXI])) + clk_put(mdp3_res->clocks[MDP3_CLK_AXI]); + + if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_MDP_SRC])) + clk_put(mdp3_res->clocks[MDP3_CLK_MDP_SRC]); + + if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_MDP_CORE])) + clk_put(mdp3_res->clocks[MDP3_CLK_MDP_CORE]); + + if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_VSYNC])) + clk_put(mdp3_res->clocks[MDP3_CLK_VSYNC]); + +} + +u64 mdp3_clk_round_off(u64 clk_rate) +{ + u64 clk_round_off = 0; + + if (clk_rate <= MDP_CORE_CLK_RATE_SVS) + clk_round_off = MDP_CORE_CLK_RATE_SVS; + else if (clk_rate <= MDP_CORE_CLK_RATE_SUPER_SVS) + clk_round_off = MDP_CORE_CLK_RATE_SUPER_SVS; + else + clk_round_off = MDP_CORE_CLK_RATE_MAX; + + pr_debug("clk = %llu rounded to = %llu\n", + clk_rate, clk_round_off); + return clk_round_off; +} + +int mdp3_clk_enable(int enable, int dsi_clk) +{ + int rc = 0; + int changed = 0; + + pr_debug("MDP CLKS %s\n", (enable ? 
"Enable" : "Disable")); + + mutex_lock(&mdp3_res->res_mutex); + + if (enable) { + if (mdp3_res->clk_ena == 0) + changed++; + mdp3_res->clk_ena++; + } else { + if (mdp3_res->clk_ena) { + mdp3_res->clk_ena--; + if (mdp3_res->clk_ena == 0) + changed++; + } else { + pr_err("Can not be turned off\n"); + } + } + pr_debug("%s: clk_ena=%d changed=%d enable=%d\n", + __func__, mdp3_res->clk_ena, changed, enable); + + if (changed) { + if (enable) + pm_runtime_get_sync(&mdp3_res->pdev->dev); + + rc = mdp3_clk_update(MDP3_CLK_AHB, enable); + rc |= mdp3_clk_update(MDP3_CLK_AXI, enable); + rc |= mdp3_clk_update(MDP3_CLK_MDP_SRC, enable); + rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, enable); + rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable); + + if (!enable) { + pm_runtime_mark_last_busy(&mdp3_res->pdev->dev); + pm_runtime_put_autosuspend(&mdp3_res->pdev->dev); + } + } + + mutex_unlock(&mdp3_res->res_mutex); + return rc; +} + +void mdp3_bus_bw_iommu_enable(int enable, int client) +{ + struct mdp3_bus_handle_map *bus_handle; + int client_idx; + u64 ab = 0, ib = 0; + int ref_cnt; + + client_idx = MDP3_BUS_HANDLE; + + bus_handle = &mdp3_res->bus_handle[client_idx]; + if (bus_handle->handle < 1) { + pr_err("invalid bus handle %d\n", bus_handle->handle); + return; + } + mutex_lock(&mdp3_res->res_mutex); + if (enable) + bus_handle->ref_cnt++; + else + if (bus_handle->ref_cnt) + bus_handle->ref_cnt--; + ref_cnt = bus_handle->ref_cnt; + mutex_unlock(&mdp3_res->res_mutex); + + if (enable) { + if (mdp3_res->allow_iommu_update) + mdp3_iommu_enable(client); + if (ref_cnt == 1) { + pm_runtime_get_sync(&mdp3_res->pdev->dev); + ab = bus_handle->restore_ab[client]; + ib = bus_handle->restore_ib[client]; + mdp3_bus_scale_set_quota(client, ab, ib); + } + } else { + if (ref_cnt == 0) { + mdp3_bus_scale_set_quota(client, 0, 0); + pm_runtime_mark_last_busy(&mdp3_res->pdev->dev); + pm_runtime_put_autosuspend(&mdp3_res->pdev->dev); + } + mdp3_iommu_disable(client); + } + + if (ref_cnt < 0) { + pr_err("Ref count < 0, bus client=%d, ref_cnt=%d", + client_idx, ref_cnt); + } +} + +void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate, + u64 *ab, u64 *ib, uint32_t bpp) +{ + u32 vtotal = mdss_panel_get_vtotal(panel_info); + u32 htotal = mdss_panel_get_htotal(panel_info, 0); + u64 clk = htotal * vtotal * panel_info->mipi.frame_rate; + + pr_debug("clk_rate for dma = %llu, bpp = %d\n", clk, bpp); + if (clk_rate) + *clk_rate = mdp3_clk_round_off(clk); + + /* ab and ib vote should be same for honest voting */ + if (ab || ib) { + *ab = clk * bpp; + *ib = *ab; + } +} + +int mdp3_res_update(int enable, int dsi_clk, int client) +{ + int rc = 0; + + if (enable) { + rc = mdp3_clk_enable(enable, dsi_clk); + if (rc < 0) { + pr_err("mdp3_clk_enable failed, enable=%d, dsi_clk=%d\n", + enable, dsi_clk); + goto done; + } + mdp3_irq_register(); + mdp3_bus_bw_iommu_enable(enable, client); + } else { + mdp3_bus_bw_iommu_enable(enable, client); + mdp3_irq_suspend(); + rc = mdp3_clk_enable(enable, dsi_clk); + if (rc < 0) { + pr_err("mdp3_clk_enable failed, enable=%d, dsi_clk=%d\n", + enable, dsi_clk); + goto done; + } + } + +done: + return rc; +} + +int mdp3_get_mdp_dsi_clk(void) +{ + int rc; + + mutex_lock(&mdp3_res->res_mutex); + rc = mdp3_clk_update(MDP3_CLK_DSI, 1); + mutex_unlock(&mdp3_res->res_mutex); + return rc; +} + +int mdp3_put_mdp_dsi_clk(void) +{ + int rc; + + mutex_lock(&mdp3_res->res_mutex); + rc = mdp3_clk_update(MDP3_CLK_DSI, 0); + mutex_unlock(&mdp3_res->res_mutex); + return rc; +} + +static int mdp3_irq_setup(void) +{ + 
int ret; + struct mdss_hw *mdp3_hw; + + mdp3_hw = &mdp3_res->mdp3_hw; + ret = devm_request_irq(&mdp3_res->pdev->dev, + mdp3_hw->irq_info->irq, + mdp3_irq_handler, + 0, "MDP", mdp3_res); + if (ret) { + pr_err("mdp request_irq() failed!\n"); + return ret; + } + disable_irq_nosync(mdp3_hw->irq_info->irq); + mdp3_res->irq_registered = true; + return 0; +} + +static int mdp3_get_iommu_domain(u32 type) +{ + if (type >= MDSS_IOMMU_MAX_DOMAIN) + return -EINVAL; + + if (!mdp3_res) + return -ENODEV; + + return mdp3_res->domains[type].domain_idx; +} + +static int mdp3_check_version(void) +{ + int rc; + + rc = mdp3_clk_enable(1, 0); + if (rc) { + pr_err("fail to turn on MDP core clks\n"); + return rc; + } + + mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION); + + if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) { + pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev); + rc = -ENODEV; + } + + rc = mdp3_clk_enable(0, 0); + if (rc) + pr_err("fail to turn off MDP core clks\n"); + + return rc; +} + +static int mdp3_hw_init(void) +{ + int i; + + for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) { + mdp3_res->dma[i].dma_sel = i; + mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL; + mdp3_res->dma[i].in_use = 0; + mdp3_res->dma[i].available = 1; + mdp3_res->dma[i].cc_vect_sel = 0; + mdp3_res->dma[i].lut_sts = 0; + mdp3_res->dma[i].hist_cmap = NULL; + mdp3_res->dma[i].gc_cmap = NULL; + mutex_init(&mdp3_res->dma[i].pp_lock); + } + mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER; + mdp3_res->dma[MDP3_DMA_E].available = 0; + + for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) { + mdp3_res->intf[i].cfg.type = i; + mdp3_res->intf[i].active = 0; + mdp3_res->intf[i].in_use = 0; + mdp3_res->intf[i].available = 1; + } + mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0; + mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0; + mdp3_res->smart_blit_en = SMART_BLIT_RGB_EN | SMART_BLIT_YUV_EN; + mdp3_res->solid_fill_vote_en = false; + return 0; +} + +int mdp3_dynamic_clock_gating_ctrl(int enable) +{ + int rc = 0; + int cgc_cfg = 0; + /*Disable dynamic auto clock gating*/ + pr_debug("%s Status %s\n", __func__, (enable ? "ON":"OFF")); + rc = mdp3_clk_enable(1, 0); + if (rc) { + pr_err("fail to turn on MDP core clks\n"); + return rc; + } + cgc_cfg = MDP3_REG_READ(MDP3_REG_CGC_EN); + if (enable) { + cgc_cfg |= (BIT(10)); + cgc_cfg |= (BIT(18)); + MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg); + VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x0); + } else { + cgc_cfg &= ~(BIT(10)); + cgc_cfg &= ~(BIT(18)); + MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg); + VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x3); + } + + rc = mdp3_clk_enable(0, 0); + if (rc) + pr_err("fail to turn off MDP core clks\n"); + + return rc; +} + +/** + * mdp3_get_panic_lut_cfg() - calculate panic and robust lut mask + * @panel_width: Panel width + * + * DMA buffer has 16 fill levels. Which needs to configured as safe + * and panic levels based on panel resolutions. + * No. of fill levels used = ((panel active width * 8) / 512). + * Roundoff the fill levels if needed. + * half of the total fill levels used will be treated as panic levels. + * Roundoff panic levels if total used fill levels are odd. + * + * Sample calculation for 720p display: + * Fill levels used = (720 * 8) / 512 = 12.5 after round off 13. + * panic levels = 13 / 2 = 6.5 after roundoff 7. 
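+ * In general: the panic mask sets two bits per panic level, the robust
+ * mask sets one bit per panic level, and the robust LUT is programmed
+ * with the bitwise inverse of the robust mask.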
+ * Panic mask = 0x3FFF (2 bits per level) + * Robust mask = 0xFF80 (1 bit per level) + */ +u64 mdp3_get_panic_lut_cfg(u32 panel_width) +{ + u32 fill_levels = (((panel_width * 8) / 512) + 1); + u32 panic_mask = 0; + u32 robust_mask = 0; + u32 i = 0; + u64 panic_config = 0; + u32 panic_levels = 0; + + panic_levels = fill_levels / 2; + if (fill_levels % 2) + panic_levels++; + + for (i = 0; i < panic_levels; i++) { + panic_mask |= (BIT((i * 2) + 1) | BIT(i * 2)); + robust_mask |= BIT(i); + } + panic_config = ~robust_mask; + panic_config = panic_config << 32; + panic_config |= panic_mask; + return panic_config; +} + +int mdp3_enable_panic_ctrl(void) +{ + int rc = 0; + + if (MDP3_REG_READ(MDP3_PANIC_ROBUST_CTRL) == 0) { + pr_err("%s: Enable Panic Control\n", __func__); + MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, BIT(0)); + } + return rc; +} + +int mdp3_qos_remapper_setup(struct mdss_panel_data *panel) +{ + int rc = 0; + u64 panic_config = mdp3_get_panic_lut_cfg(panel->panel_info.xres); + + rc = mdp3_clk_update(MDP3_CLK_AHB, 1); + rc |= mdp3_clk_update(MDP3_CLK_AXI, 1); + rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 1); + if (rc) { + pr_err("fail to turn on MDP core clks\n"); + return rc; + } + + if (!panel) + return -EINVAL; + /* Program MDP QOS Remapper */ + MDP3_REG_WRITE(MDP3_DMA_P_QOS_REMAPPER, 0x1A9); + MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_0, 0x0); + MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_1, 0x0); + MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_2, 0x0); + /* PANIC setting depends on panel width*/ + MDP3_REG_WRITE(MDP3_PANIC_LUT0, (panic_config & 0xFFFF)); + MDP3_REG_WRITE(MDP3_PANIC_LUT1, ((panic_config >> 16) & 0xFFFF)); + MDP3_REG_WRITE(MDP3_ROBUST_LUT, ((panic_config >> 32) & 0xFFFF)); + MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0x1); + pr_debug("Panel width %d Panic Lut0 %x Lut1 %x Robust %x\n", + panel->panel_info.xres, + MDP3_REG_READ(MDP3_PANIC_LUT0), + MDP3_REG_READ(MDP3_PANIC_LUT1), + MDP3_REG_READ(MDP3_ROBUST_LUT)); + + rc = mdp3_clk_update(MDP3_CLK_AHB, 0); + rc |= mdp3_clk_update(MDP3_CLK_AXI, 0); + rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 0); + if (rc) + pr_err("fail to turn off MDP core clks\n"); + return rc; +} + +static int mdp3_res_init(void) +{ + int rc = 0; + + rc = mdp3_irq_setup(); + if (rc) + return rc; + + rc = mdp3_clk_setup(); + if (rc) + return rc; + + mdp3_res->ion_client = msm_ion_client_create(mdp3_res->pdev->name); + if (IS_ERR_OR_NULL(mdp3_res->ion_client)) { + pr_err("msm_ion_client_create() return error (%pK)\n", + mdp3_res->ion_client); + mdp3_res->ion_client = NULL; + return -EINVAL; + } + mutex_init(&mdp3_res->iommu_lock); + + mdp3_res->domains = mdp3_iommu_domains; + mdp3_res->bus_handle = mdp3_bus_handle; + rc = mdp3_bus_scale_register(); + if (rc) { + pr_err("unable to register bus scaling\n"); + return rc; + } + + rc = mdp3_hw_init(); + + return rc; +} + +static void mdp3_res_deinit(void) +{ + struct mdss_hw *mdp3_hw; + int rc = 0; + + mdp3_hw = &mdp3_res->mdp3_hw; + mdp3_bus_scale_unregister(); + mutex_lock(&mdp3_res->iommu_lock); + if (mdp3_res->iommu_ref_cnt) { + mdp3_res->iommu_ref_cnt--; + if (mdp3_res->iommu_ref_cnt == 0) + rc = mdss_smmu_detach(mdss_res); + } else { + pr_err("iommu ref count %d\n", mdp3_res->iommu_ref_cnt); + } + mutex_unlock(&mdp3_res->iommu_lock); + + if (!IS_ERR_OR_NULL(mdp3_res->ion_client)) + ion_client_destroy(mdp3_res->ion_client); + + mdp3_clk_remove(); + + if (mdp3_res->irq_registered) + devm_free_irq(&mdp3_res->pdev->dev, + mdp3_hw->irq_info->irq, mdp3_res); +} + +static int mdp3_get_pan_intf(const char *pan_intf) +{ + int i, rc = 
MDSS_PANEL_INTF_INVALID; + + if (!pan_intf) + return rc; + + for (i = 0; i < ARRAY_SIZE(pan_types); i++) { + if (!strcmp(pan_intf, pan_types[i].name)) { + rc = pan_types[i].type; + break; + } + } + + return rc; +} + +static int mdp3_parse_dt_pan_intf(struct platform_device *pdev) +{ + int rc; + struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev); + const char *prim_intf = NULL; + + rc = of_property_read_string(pdev->dev.of_node, + "qcom,mdss-pref-prim-intf", &prim_intf); + if (rc) + return -ENODEV; + + rc = mdp3_get_pan_intf(prim_intf); + if (rc < 0) { + mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID; + } else { + mdata->pan_cfg.pan_intf = rc; + rc = 0; + } + return rc; +} + +static int mdp3_get_pan_cfg(struct mdss_panel_cfg *pan_cfg) +{ + char *t = NULL; + char pan_intf_str[MDSS_MAX_PANEL_LEN]; + int rc, i, panel_len; + char pan_name[MDSS_MAX_PANEL_LEN]; + + if (!pan_cfg) + return -EINVAL; + + if (mdss_mdp3_panel[0] == '0') { + pan_cfg->lk_cfg = false; + } else if (mdss_mdp3_panel[0] == '1') { + pan_cfg->lk_cfg = true; + } else { + /* read from dt */ + pan_cfg->lk_cfg = true; + pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID; + return -EINVAL; + } + + /* skip lk cfg and delimiter; ex: "0:" */ + strlcpy(pan_name, &mdss_mdp3_panel[2], MDSS_MAX_PANEL_LEN); + t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN); + if (!t) { + pr_err("%s: pan_name=[%s] invalid\n", + __func__, pan_name); + pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID; + return -EINVAL; + } + + for (i = 0; ((pan_name + i) < t) && (i < 4); i++) + pan_intf_str[i] = *(pan_name + i); + pan_intf_str[i] = 0; + pr_debug("%s:%d panel intf %s\n", __func__, __LINE__, pan_intf_str); + /* point to the start of panel name */ + t = t + 1; + strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg)); + pr_debug("%s:%d: t=[%s] panel name=[%s]\n", __func__, __LINE__, + t, pan_cfg->arg_cfg); + + panel_len = strlen(pan_cfg->arg_cfg); + if (!panel_len) { + pr_err("%s: Panel name is invalid\n", __func__); + pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID; + return -EINVAL; + } + + rc = mdp3_get_pan_intf(pan_intf_str); + pan_cfg->pan_intf = (rc < 0) ? 
MDSS_PANEL_INTF_INVALID : rc; + return 0; +} + +static int mdp3_get_cmdline_config(struct platform_device *pdev) +{ + int rc, len = 0; + int *intf_type; + char *panel_name; + struct mdss_panel_cfg *pan_cfg; + struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev); + + mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0; + pan_cfg = &mdata->pan_cfg; + panel_name = &pan_cfg->arg_cfg[0]; + intf_type = &pan_cfg->pan_intf; + + /* reads from dt by default */ + pan_cfg->lk_cfg = true; + + len = strlen(mdss_mdp3_panel); + + if (len > 0) { + rc = mdp3_get_pan_cfg(pan_cfg); + if (!rc) { + pan_cfg->init_done = true; + return rc; + } + } + + rc = mdp3_parse_dt_pan_intf(pdev); + /* if pref pan intf is not present */ + if (rc) + pr_err("%s:unable to parse device tree for pan intf\n", + __func__); + else + pan_cfg->init_done = true; + + return rc; +} + + +int mdp3_irq_init(u32 irq_start) +{ + struct mdss_hw *mdp3_hw; + + mdp3_hw = &mdp3_res->mdp3_hw; + + mdp3_hw->irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL); + if (!mdp3_hw->irq_info) + return -ENOMEM; + + mdp3_hw->hw_ndx = MDSS_HW_MDP; + mdp3_hw->irq_info->irq = irq_start; + mdp3_hw->irq_info->irq_mask = 0; + mdp3_hw->irq_info->irq_ena = false; + mdp3_hw->irq_info->irq_buzy = false; + + mdp3_res->mdss_util->register_irq(&mdp3_res->mdp3_hw); + return 0; +} + +static int mdp3_parse_dt(struct platform_device *pdev) +{ + struct resource *res; + struct property *prop = NULL; + bool panic_ctrl; + int rc; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys"); + if (!res) { + pr_err("unable to get MDP base address\n"); + return -EINVAL; + } + + mdp3_res->mdp_reg_size = resource_size(res); + mdp3_res->mdp_base = devm_ioremap(&pdev->dev, res->start, + mdp3_res->mdp_reg_size); + if (unlikely(!mdp3_res->mdp_base)) { + pr_err("unable to map MDP base\n"); + return -ENOMEM; + } + + pr_debug("MDP HW Base phy_Address=0x%x virt=0x%x\n", + (int) res->start, + (int) mdp3_res->mdp_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys"); + if (!res) { + pr_err("unable to get VBIF base address\n"); + return -EINVAL; + } + + mdp3_res->vbif_reg_size = resource_size(res); + mdp3_res->vbif_base = devm_ioremap(&pdev->dev, res->start, + mdp3_res->vbif_reg_size); + if (unlikely(!mdp3_res->vbif_base)) { + pr_err("unable to map VBIF base\n"); + return -ENOMEM; + } + + pr_debug("VBIF HW Base phy_Address=0x%x virt=0x%x\n", + (int) res->start, + (int) mdp3_res->vbif_base); + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + pr_err("unable to get MDSS irq\n"); + return -EINVAL; + } + rc = mdp3_irq_init(res->start); + if (rc) { + pr_err("%s: Error in irq initialization:rc=[%d]\n", + __func__, rc); + return rc; + } + + rc = mdp3_get_cmdline_config(pdev); + if (rc) { + pr_err("%s: Error in panel override:rc=[%d]\n", + __func__, rc); + kfree(mdp3_res->mdp3_hw.irq_info); + return rc; + } + + prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL); + mdp3_res->batfet_required = prop ? true : false; + + panic_ctrl = of_property_read_bool( + pdev->dev.of_node, "qcom,mdss-has-panic-ctrl"); + mdp3_res->dma[MDP3_DMA_P].has_panic_ctrl = panic_ctrl; + + mdp3_res->idle_pc_enabled = of_property_read_bool( + pdev->dev.of_node, "qcom,mdss-idle-power-collapse-enabled"); + + return 0; +} + +void msm_mdp3_cx_ctrl(int enable) +{ + int rc; + + if (!mdp3_res->vdd_cx) { + mdp3_res->vdd_cx = devm_regulator_get(&mdp3_res->pdev->dev, + "vdd-cx"); + if (IS_ERR_OR_NULL(mdp3_res->vdd_cx)) { + pr_debug("unable to get CX reg. 
rc=%d\n", + PTR_RET(mdp3_res->vdd_cx)); + mdp3_res->vdd_cx = NULL; + return; + } + } + + if (enable) { + rc = regulator_set_voltage( + mdp3_res->vdd_cx, + RPM_REGULATOR_CORNER_SVS_SOC, + RPM_REGULATOR_CORNER_SUPER_TURBO); + if (rc < 0) + goto vreg_set_voltage_fail; + + rc = regulator_enable(mdp3_res->vdd_cx); + if (rc) { + pr_err("Failed to enable regulator vdd_cx.\n"); + return; + } + } else { + rc = regulator_disable(mdp3_res->vdd_cx); + if (rc) { + pr_err("Failed to disable regulator vdd_cx.\n"); + return; + } + rc = regulator_set_voltage( + mdp3_res->vdd_cx, + RPM_REGULATOR_CORNER_NONE, + RPM_REGULATOR_CORNER_SUPER_TURBO); + if (rc < 0) + goto vreg_set_voltage_fail; + } + + return; +vreg_set_voltage_fail: + pr_err("Set vltg failed\n"); +} + +void mdp3_batfet_ctrl(int enable) +{ + int rc; + + if (!mdp3_res->batfet_required) + return; + + if (!mdp3_res->batfet) { + if (enable) { + mdp3_res->batfet = + devm_regulator_get(&mdp3_res->pdev->dev, + "batfet"); + if (IS_ERR_OR_NULL(mdp3_res->batfet)) { + pr_debug("unable to get batfet reg. rc=%d\n", + PTR_RET(mdp3_res->batfet)); + mdp3_res->batfet = NULL; + return; + } + } else { + pr_debug("Batfet regulator disable w/o enable\n"); + return; + } + } + + if (enable) + rc = regulator_enable(mdp3_res->batfet); + else + rc = regulator_disable(mdp3_res->batfet); + + if (rc < 0) + pr_err("%s: reg enable/disable failed", __func__); +} + +void mdp3_enable_regulator(int enable) +{ + mdp3_batfet_ctrl(enable); +} + +int mdp3_put_img(struct mdp3_img_data *data, int client) +{ + struct ion_client *iclient = mdp3_res->ion_client; + int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx; + int dir = DMA_BIDIRECTIONAL; + + if (data->flags & MDP_MEMORY_ID_TYPE_FB) { + pr_info("%s fb mem buf=0x%pa\n", __func__, &data->addr); + fdput(data->srcp_f); + memset(&data->srcp_f, 0, sizeof(struct fd)); + } else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) { + pr_debug("ion hdl = %pK buf=0x%pa\n", data->srcp_dma_buf, + &data->addr); + if (!iclient) { + pr_err("invalid ion client\n"); + return -ENOMEM; + } + if (data->mapped) { + if (client == MDP3_CLIENT_PPP || + client == MDP3_CLIENT_DMA_P) + mdss_smmu_unmap_dma_buf(data->tab_clone, + dom, dir, data->srcp_dma_buf); + else + mdss_smmu_unmap_dma_buf(data->srcp_table, + dom, dir, data->srcp_dma_buf); + data->mapped = false; + } + if (!data->skip_detach) { + dma_buf_unmap_attachment(data->srcp_attachment, + data->srcp_table, + mdss_smmu_dma_data_direction(dir)); + dma_buf_detach(data->srcp_dma_buf, + data->srcp_attachment); + dma_buf_put(data->srcp_dma_buf); + data->srcp_dma_buf = NULL; + } + } else { + return -EINVAL; + } + if (client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) { + kfree(data->tab_clone->sgl); + kfree(data->tab_clone); + } + return 0; +} + +int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data, int client) +{ + struct fd f; + int ret = -EINVAL; + int fb_num; + struct ion_client *iclient = mdp3_res->ion_client; + int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx; + + data->flags = img->flags; + + if (img->flags & MDP_MEMORY_ID_TYPE_FB) { + f = fdget(img->memory_id); + if (f.file == NULL) { + pr_err("invalid framebuffer file (%d)\n", + img->memory_id); + return -EINVAL; + } + if (MAJOR(f.file->f_path.dentry->d_inode->i_rdev) == FB_MAJOR) { + fb_num = MINOR(f.file->f_path.dentry->d_inode->i_rdev); + ret = mdss_fb_get_phys_info(&data->addr, + &data->len, fb_num); + if (ret) { + pr_err("mdss_fb_get_phys_info() failed\n"); + fdput(f); + memset(&f, 0, 
sizeof(struct fd)); + } + } else { + pr_err("invalid FB_MAJOR\n"); + fdput(f); + ret = -EINVAL; + } + data->srcp_f = f; + if (!ret) + goto done; + } else if (iclient) { + data->srcp_dma_buf = dma_buf_get(img->memory_id); + if (IS_ERR(data->srcp_dma_buf)) { + pr_err("DMA : error on ion_import_fd\n"); + ret = PTR_ERR(data->srcp_dma_buf); + data->srcp_dma_buf = NULL; + return ret; + } + + data->srcp_attachment = + mdss_smmu_dma_buf_attach(data->srcp_dma_buf, + &mdp3_res->pdev->dev, dom); + if (IS_ERR(data->srcp_attachment)) { + ret = PTR_ERR(data->srcp_attachment); + goto err_put; + } + + data->srcp_table = + dma_buf_map_attachment(data->srcp_attachment, + mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL)); + if (IS_ERR(data->srcp_table)) { + ret = PTR_ERR(data->srcp_table); + goto err_detach; + } + + if (client == MDP3_CLIENT_PPP || + client == MDP3_CLIENT_DMA_P) { + data->tab_clone = + mdss_smmu_sg_table_clone(data->srcp_table, + GFP_KERNEL, true); + if (IS_ERR_OR_NULL(data->tab_clone)) { + if (!(data->tab_clone)) + ret = -EINVAL; + else + ret = PTR_ERR(data->tab_clone); + goto clone_err; + } + ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf, + data->tab_clone, dom, + &data->addr, &data->len, + DMA_BIDIRECTIONAL); + } else { + ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf, + data->srcp_table, dom, &data->addr, + &data->len, DMA_BIDIRECTIONAL); + } + + if (IS_ERR_VALUE(ret)) { + pr_err("smmu map dma buf failed: (%d)\n", ret); + goto err_unmap; + } + + data->mapped = true; + data->skip_detach = false; + } +done: + if (client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) { + data->addr += data->tab_clone->sgl->length; + data->len -= data->tab_clone->sgl->length; + } + if (!ret && (img->offset < data->len)) { + data->addr += img->offset; + data->len -= img->offset; + + pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n", + img->memory_id, data->srcp_dma_buf, + &data->addr, data->len); + + } else { + mdp3_put_img(data, client); + return -EINVAL; + } + return ret; + +clone_err: + dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, + mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL)); +err_detach: + dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment); +err_put: + dma_buf_put(data->srcp_dma_buf); + return ret; +err_unmap: + dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, + mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL)); + dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment); + dma_buf_put(data->srcp_dma_buf); + + if (client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) { + kfree(data->tab_clone->sgl); + kfree(data->tab_clone); + } + return ret; + +} + +int mdp3_iommu_enable(int client) +{ + int rc = 0; + + mutex_lock(&mdp3_res->iommu_lock); + + if (mdp3_res->iommu_ref_cnt == 0) { + rc = mdss_smmu_attach(mdss_res); + if (rc) + rc = mdss_smmu_detach(mdss_res); + } + + if (!rc) + mdp3_res->iommu_ref_cnt++; + mutex_unlock(&mdp3_res->iommu_lock); + + pr_debug("client :%d total_ref_cnt: %d\n", + client, mdp3_res->iommu_ref_cnt); + return rc; +} + +int mdp3_iommu_disable(int client) +{ + int rc = 0; + + mutex_lock(&mdp3_res->iommu_lock); + if (mdp3_res->iommu_ref_cnt) { + mdp3_res->iommu_ref_cnt--; + + pr_debug("client :%d total_ref_cnt: %d\n", + client, mdp3_res->iommu_ref_cnt); + if (mdp3_res->iommu_ref_cnt == 0) + rc = mdss_smmu_detach(mdss_res); + } else { + pr_err("iommu ref count unbalanced for client %d\n", client); + } + mutex_unlock(&mdp3_res->iommu_lock); + + return rc; +} + +int mdp3_iommu_ctrl(int enable) +{ + int rc; + + if 
(mdp3_res->allow_iommu_update == false) + return 0; + + if (enable) + rc = mdp3_iommu_enable(MDP3_CLIENT_DSI); + else + rc = mdp3_iommu_disable(MDP3_CLIENT_DSI); + return rc; +} + +static int mdp3_init(struct msm_fb_data_type *mfd) +{ + int rc; + + rc = mdp3_ctrl_init(mfd); + if (rc) { + pr_err("mdp3 ctl init fail\n"); + return rc; + } + + rc = mdp3_ppp_res_init(mfd); + if (rc) + pr_err("mdp3 ppp res init fail\n"); + + return rc; +} + +u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp) +{ + /* + * The adreno GPU hardware requires that the pitch be aligned to + * 32 pixels for color buffers, so for the cases where the GPU + * is writing directly to fb0, the framebuffer pitch + * also needs to be 32 pixel aligned + */ + + if (fb_index == 0) + return ALIGN(xres, 32) * bpp; + else + return xres * bpp; +} + +__ref int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd) +{ + struct platform_device *pdev = mfd->pdev; + int len = 0, rc = 0; + u32 offsets[2]; + struct device_node *pnode, *child_node; + struct property *prop = NULL; + + mfd->splash_info.splash_logo_enabled = + of_property_read_bool(pdev->dev.of_node, + "qcom,mdss-fb-splash-logo-enabled"); + + prop = of_find_property(pdev->dev.of_node, "qcom,memblock-reserve", + &len); + if (!prop) { + pr_debug("Read memblock reserve settings for fb failed\n"); + pr_debug("Read cont-splash-memory settings\n"); + } + + if (len) { + len = len / sizeof(u32); + + rc = of_property_read_u32_array(pdev->dev.of_node, + "qcom,memblock-reserve", offsets, len); + if (rc) { + pr_err("error reading mem reserve settings for fb\n"); + rc = -EINVAL; + goto error; + } + } else { + child_node = of_get_child_by_name(pdev->dev.of_node, + "qcom,cont-splash-memory"); + if (!child_node) { + pr_err("splash mem child node is not present\n"); + rc = -EINVAL; + goto error; + } + + pnode = of_parse_phandle(child_node, "linux,contiguous-region", + 0); + if (pnode != NULL) { + const u32 *addr; + u64 size; + + addr = of_get_address(pnode, 0, &size, NULL); + if (!addr) { + pr_err("failed to parse the splash memory address\n"); + of_node_put(pnode); + rc = -EINVAL; + goto error; + } + offsets[0] = (u32) of_read_ulong(addr, 2); + offsets[1] = (u32) size; + of_node_put(pnode); + } else { + pr_err("mem reservation for splash screen fb not present\n"); + rc = -EINVAL; + goto error; + } + } + + if (!memblock_is_reserved(offsets[0])) { + pr_debug("failed to reserve memory for fb splash\n"); + rc = -EINVAL; + goto error; + } + + mdp3_res->splash_mem_addr = offsets[0]; + mdp3_res->splash_mem_size = offsets[1]; +error: + if (rc && mfd->panel_info->cont_splash_enabled) + pr_err("no rsvd mem found in DT for splash screen\n"); + else + rc = 0; + + return rc; +} + +void mdp3_release_splash_memory(struct msm_fb_data_type *mfd) +{ + /* Give back the reserved memory to the system */ + if (mdp3_res->splash_mem_addr) { + if ((mfd->panel.type == MIPI_VIDEO_PANEL) && + (mdp3_res->cont_splash_en)) { + mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE, + mdp3_res->splash_mem_addr, + mdp3_res->splash_mem_size); + } + pr_debug("%s\n", __func__); + memblock_free(mdp3_res->splash_mem_addr, + mdp3_res->splash_mem_size); + free_bootmem_late(mdp3_res->splash_mem_addr, + mdp3_res->splash_mem_size); + mdp3_res->splash_mem_addr = 0; + } +} + +struct mdp3_dma *mdp3_get_dma_pipe(int capability) +{ + int i; + + for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) { + if (!mdp3_res->dma[i].in_use && mdp3_res->dma[i].available && + mdp3_res->dma[i].capability & capability) { + mdp3_res->dma[i].in_use = true; + return 
&mdp3_res->dma[i]; + } + } + return NULL; +} + +struct mdp3_intf *mdp3_get_display_intf(int type) +{ + int i; + + for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) { + if (!mdp3_res->intf[i].in_use && mdp3_res->intf[i].available && + mdp3_res->intf[i].cfg.type == type) { + mdp3_res->intf[i].in_use = true; + return &mdp3_res->intf[i]; + } + } + return NULL; +} + +static int mdp3_fb_mem_get_iommu_domain(void) +{ + if (!mdp3_res) + return -ENODEV; + return mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx; +} + +int mdp3_get_cont_spash_en(void) +{ + return mdp3_res->cont_splash_en; +} + +static int mdp3_is_display_on(struct mdss_panel_data *pdata) +{ + int rc = 0; + u32 status; + + rc = mdp3_clk_enable(1, 0); + if (rc) { + pr_err("fail to turn on MDP core clks\n"); + return rc; + } + if (pdata->panel_info.type == MIPI_VIDEO_PANEL) { + status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN); + rc = status & 0x1; + } else { + status = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG); + status &= 0x180000; + rc = (status == 0x080000); + } + + mdp3_res->splash_mem_addr = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_ADDR); + + if (mdp3_clk_enable(0, 0)) + pr_err("fail to turn off MDP core clks\n"); + return rc; +} + +static int mdp3_continuous_splash_on(struct mdss_panel_data *pdata) +{ + struct mdss_panel_info *panel_info = &pdata->panel_info; + struct mdp3_bus_handle_map *bus_handle; + u64 ab = 0; + u64 ib = 0; + u64 mdp_clk_rate = 0; + int rc = 0; + + pr_debug("mdp3__continuous_splash_on\n"); + + bus_handle = &mdp3_res->bus_handle[MDP3_BUS_HANDLE]; + if (bus_handle->handle < 1) { + pr_err("invalid bus handle %d\n", bus_handle->handle); + return -EINVAL; + } + mdp3_calc_dma_res(panel_info, &mdp_clk_rate, &ab, &ib, panel_info->bpp); + + mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE, + MDP3_CLIENT_DMA_P); + mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate, + MDP3_CLIENT_DMA_P); + + rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib); + bus_handle->restore_ab[MDP3_CLIENT_DMA_P] = ab; + bus_handle->restore_ib[MDP3_CLIENT_DMA_P] = ib; + + rc = mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("fail to enable clk\n"); + return rc; + } + + rc = mdp3_ppp_init(); + if (rc) { + pr_err("ppp init failed\n"); + goto splash_on_err; + } + + if (panel_info->type == MIPI_VIDEO_PANEL) + mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1; + else + mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1; + + mdp3_enable_regulator(true); + mdp3_res->cont_splash_en = 1; + return 0; + +splash_on_err: + if (mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P)) + pr_err("%s: Unable to disable mdp3 clocks\n", __func__); + + return rc; +} + +static int mdp3_panel_register_done(struct mdss_panel_data *pdata) +{ + int rc = 0; + u64 ab = 0; u64 ib = 0; + u64 mdp_clk_rate = 0; + + /* Store max bandwidth supported in mdp res */ + mdp3_calc_dma_res(&pdata->panel_info, &mdp_clk_rate, &ab, &ib, + MAX_BPP_SUPPORTED); + do_div(ab, 1024); + mdp3_res->max_bw = ab+1; + + /* + * If idle pc feature is not enabled, then get a reference to the + * runtime device which will be released when device is turned off + */ + if (!mdp3_res->idle_pc_enabled || + pdata->panel_info.type != MIPI_CMD_PANEL) { + pm_runtime_get_sync(&mdp3_res->pdev->dev); + } + + if (pdata->panel_info.cont_splash_enabled) { + if (!mdp3_is_display_on(pdata)) { + pr_err("continuous splash, but bootloader is not\n"); + return 0; + } + rc = mdp3_continuous_splash_on(pdata); + } else { + if (mdp3_is_display_on(pdata)) { + pr_err("lk continuous splash, but kerenl 
not\n"); + rc = mdp3_continuous_splash_on(pdata); + } + } + /* + * We want to prevent iommu from being enabled if there is + * continue splash screen. This would have happened in + * res_update in continuous_splash_on without this flag. + */ + if (pdata->panel_info.cont_splash_enabled == false) + mdp3_res->allow_iommu_update = true; + + mdss_res->pdata = pdata; + return rc; +} + +/* mdp3_clear_irq() - Clear interrupt + * @ interrupt_mask : interrupt mask + * + * This function clear sync irq for command mode panel. + * When system is entering in idle screen state. + */ +void mdp3_clear_irq(u32 interrupt_mask) +{ + unsigned long flag; + u32 irq_status = 0; + + spin_lock_irqsave(&mdp3_res->irq_lock, flag); + irq_status = interrupt_mask & + MDP3_REG_READ(MDP3_REG_INTR_STATUS); + if (irq_status) + MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, irq_status); + spin_unlock_irqrestore(&mdp3_res->irq_lock, flag); + +} + +/* mdp3_autorefresh_disable() - Disable Auto refresh + * @ panel_info : pointer to panel configuration structure + * + * This function disable Auto refresh block for command mode panel. + */ +int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info) +{ + if ((panel_info->type == MIPI_CMD_PANEL) && + (MDP3_REG_READ(MDP3_REG_AUTOREFRESH_CONFIG_P))) + MDP3_REG_WRITE(MDP3_REG_AUTOREFRESH_CONFIG_P, 0); + return 0; +} + +int mdp3_splash_done(struct mdss_panel_info *panel_info) +{ + if (panel_info->cont_splash_enabled) { + pr_err("continuous splash is on and splash done called\n"); + return -EINVAL; + } + mdp3_res->allow_iommu_update = true; + return 0; +} + +static int mdp3_debug_dump_stats_show(struct seq_file *s, void *v) +{ + struct mdp3_hw_resource *res = (struct mdp3_hw_resource *)s->private; + + seq_printf(s, "underrun: %08u\n", res->underrun_cnt); + + return 0; +} +DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdp3_debug_dump_stats); + +static void mdp3_debug_enable_clock(int on) +{ + if (on) + mdp3_clk_enable(1, 0); + else + mdp3_clk_enable(0, 0); +} + +static int mdp3_debug_init(struct platform_device *pdev) +{ + int rc; + struct mdss_data_type *mdata; + struct mdss_debug_data *mdd; + + mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL); + if (!mdata) + return -ENOMEM; + + mdss_res = mdata; + mutex_init(&mdata->reg_lock); + mutex_init(&mdata->reg_bus_lock); + mutex_init(&mdata->bus_lock); + INIT_LIST_HEAD(&mdata->reg_bus_clist); + atomic_set(&mdata->sd_client_count, 0); + atomic_set(&mdata->active_intf_cnt, 0); + mdss_res->mdss_util = mdp3_res->mdss_util; + + mdata->debug_inf.debug_enable_clock = mdp3_debug_enable_clock; + mdata->mdp_rev = mdp3_res->mdp_rev; + + rc = mdss_debugfs_init(mdata); + if (rc) + return rc; + + mdd = mdata->debug_inf.debug_data; + if (!mdd) + return -EINVAL; + + debugfs_create_file("stat", 0644, mdd->root, mdp3_res, + &mdp3_debug_dump_stats_fops); + + rc = mdss_debug_register_base(NULL, mdp3_res->mdp_base, + mdp3_res->mdp_reg_size, NULL); + + return rc; +} + +static void mdp3_debug_deinit(struct platform_device *pdev) +{ + if (mdss_res) { + mdss_debugfs_remove(mdss_res); + devm_kfree(&pdev->dev, mdss_res); + mdss_res = NULL; + } +} + +static void mdp3_dma_underrun_intr_handler(int type, void *arg) +{ + struct mdp3_dma *dma = &mdp3_res->dma[MDP3_DMA_P]; + + mdp3_res->underrun_cnt++; + pr_err_ratelimited("display underrun detected count=%d\n", + mdp3_res->underrun_cnt); + ATRACE_INT("mdp3_dma_underrun_intr_handler", mdp3_res->underrun_cnt); + + if (dma->ccs_config.ccs_enable && !dma->ccs_config.ccs_dirty) { + dma->ccs_config.ccs_dirty = true; + 
schedule_work(&dma->underrun_work); + } +} + +uint32_t ppp_formats_supported[] = { + MDP_RGB_565, + MDP_BGR_565, + MDP_RGB_888, + MDP_BGR_888, + MDP_XRGB_8888, + MDP_ARGB_8888, + MDP_RGBA_8888, + MDP_BGRA_8888, + MDP_RGBX_8888, + MDP_Y_CBCR_H2V1, + MDP_Y_CBCR_H2V2, + MDP_Y_CBCR_H2V2_ADRENO, + MDP_Y_CBCR_H2V2_VENUS, + MDP_Y_CRCB_H2V1, + MDP_Y_CRCB_H2V2, + MDP_YCRYCB_H2V1, + MDP_BGRX_8888, +}; + +uint32_t dma_formats_supported[] = { + MDP_RGB_565, + MDP_RGB_888, + MDP_XRGB_8888, +}; + +static void __mdp3_set_supported_formats(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ppp_formats_supported); i++) + SET_BIT(mdp3_res->ppp_formats, ppp_formats_supported[i]); + + for (i = 0; i < ARRAY_SIZE(dma_formats_supported); i++) + SET_BIT(mdp3_res->dma_formats, dma_formats_supported[i]); +} + +static void __update_format_supported_info(char *buf, int *cnt) +{ + int j; + size_t len = PAGE_SIZE; + int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1); +#define SPRINT(fmt, ...) \ + (*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__)) + + SPRINT("ppp_input_fmts="); + for (j = 0; j < num_bytes; j++) + SPRINT("%d,", mdp3_res->ppp_formats[j]); + SPRINT("\ndma_output_fmts="); + for (j = 0; j < num_bytes; j++) + SPRINT("%d,", mdp3_res->dma_formats[j]); + SPRINT("\n"); +#undef SPRINT +} + +static ssize_t mdp3_show_capabilities(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t len = PAGE_SIZE; + int cnt = 0; + +#define SPRINT(fmt, ...) \ + (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__)) + + SPRINT("dma_pipes=%d\n", 1); + SPRINT("mdp_version=3\n"); + SPRINT("hw_rev=%d\n", 305); + SPRINT("pipe_count:%d\n", 1); + SPRINT("pipe_num:%d pipe_type:dma pipe_ndx:%d rects:%d ", 0, 1, 1); + SPRINT("pipe_is_handoff:%d display_id:%d\n", 0, 0); + __update_format_supported_info(buf, &cnt); + SPRINT("rgb_pipes=%d\n", 0); + SPRINT("vig_pipes=%d\n", 0); + SPRINT("dma_pipes=%d\n", 1); + SPRINT("blending_stages=%d\n", 1); + SPRINT("cursor_pipes=%d\n", 0); + SPRINT("max_cursor_size=%d\n", 0); + SPRINT("smp_count=%d\n", 0); + SPRINT("smp_size=%d\n", 0); + SPRINT("smp_mb_per_pipe=%d\n", 0); + SPRINT("max_downscale_ratio=%d\n", PPP_DOWNSCALE_MAX); + SPRINT("max_upscale_ratio=%d\n", PPP_UPSCALE_MAX); + SPRINT("max_pipe_bw=%u\n", mdp3_res->max_bw); + SPRINT("max_bandwidth_low=%u\n", mdp3_res->max_bw); + SPRINT("max_bandwidth_high=%u\n", mdp3_res->max_bw); + SPRINT("max_mdp_clk=%u\n", MDP_CORE_CLK_RATE_MAX); + SPRINT("clk_fudge_factor=%u,%u\n", CLK_FUDGE_NUM, CLK_FUDGE_DEN); + SPRINT("features=has_ppp\n"); + +#undef SPRINT + + return cnt; +} + +static DEVICE_ATTR(caps, 0444, mdp3_show_capabilities, NULL); + +static ssize_t mdp3_store_smart_blit(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + u32 data = -1; + ssize_t rc = 0; + + rc = kstrtoint(buf, 10, &data); + if (rc) { + pr_err("kstrtoint failed. rc=%d\n", rc); + return rc; + } + mdp3_res->smart_blit_en = data; + pr_debug("mdp3 smart blit RGB %s YUV %s\n", + (mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ? + "ENABLED" : "DISABLED", + (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ? + "ENABLED" : "DISABLED"); + return len; +} + +static ssize_t mdp3_show_smart_blit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + pr_debug("mdp3 smart blit RGB %s YUV %s\n", + (mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ? + "ENABLED" : "DISABLED", + (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ? 
+ "ENABLED" : "DISABLED"); + ret = snprintf(buf, PAGE_SIZE, "%d\n", mdp3_res->smart_blit_en); + return ret; +} + +static DEVICE_ATTR(smart_blit, 0664, + mdp3_show_smart_blit, mdp3_store_smart_blit); + +static struct attribute *mdp3_fs_attrs[] = { + &dev_attr_caps.attr, + &dev_attr_smart_blit.attr, + NULL +}; + +static struct attribute_group mdp3_fs_attr_group = { + .attrs = mdp3_fs_attrs +}; + +static int mdp3_register_sysfs(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int rc; + + rc = sysfs_create_group(&dev->kobj, &mdp3_fs_attr_group); + + return rc; +} + +int mdp3_create_sysfs_link(struct device *dev) +{ + int rc; + + rc = sysfs_create_link_nowarn(&dev->kobj, + &mdp3_res->pdev->dev.kobj, "mdp"); + + return rc; +} + +int mdp3_misr_get(struct mdp_misr *misr_resp) +{ + int result = 0, ret = -1; + int crc = 0; + + pr_debug("%s CRC Capture on DSI\n", __func__); + switch (misr_resp->block_id) { + case DISPLAY_MISR_DSI0: + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0); + /* Sleep for one vsync after DSI video engine is disabled */ + msleep(20); + /* Enable DSI_VIDEO_0 MISR Block */ + MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20); + /* Reset MISR Block */ + MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1); + /* Clear MISR capture done bit */ + MDP3_REG_WRITE(MDP3_REG_CAPTURED_DSI_PCLK, 0); + /* Enable MDP DSI interface */ + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 1); + ret = readl_poll_timeout(mdp3_res->mdp_base + + MDP3_REG_CAPTURED_DSI_PCLK, result, + result & MDP3_REG_CAPTURED_DSI_PCLK_MASK, + MISR_POLL_SLEEP, MISR_POLL_TIMEOUT); + MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0); + if (ret == 0) { + /* Disable DSI MISR interface */ + MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x0); + crc = MDP3_REG_READ(MDP3_REG_MISR_CAPT_VAL_DSI_PCLK); + pr_debug("CRC Val %d\n", crc); + } else { + pr_err("CRC Read Timed Out\n"); + } + break; + + case DISPLAY_MISR_DSI_CMD: + /* Select DSI PCLK Domain */ + MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 0x004); + /* Select Block id DSI_CMD */ + MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10); + /* Reset MISR Block */ + MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1); + /* Drive Data on Test Bus */ + MDP3_REG_WRITE(MDP3_REG_EXPORT_MISR_DSI_PCLK, 0); + /* Kikk off DMA_P */ + MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 0x11); + /* Wait for DMA_P Done */ + ret = readl_poll_timeout(mdp3_res->mdp_base + + MDP3_REG_INTR_STATUS, result, + result & MDP3_INTR_DMA_P_DONE_BIT, + MISR_POLL_SLEEP, MISR_POLL_TIMEOUT); + if (ret == 0) { + crc = MDP3_REG_READ(MDP3_REG_MISR_CURR_VAL_DSI_PCLK); + pr_debug("CRC Val %d\n", crc); + } else { + pr_err("CRC Read Timed Out\n"); + } + break; + + default: + pr_err("%s CRC Capture not supported\n", __func__); + ret = -EINVAL; + break; + } + + misr_resp->crc_value[0] = crc; + pr_debug("%s, CRC Capture on DSI Param Block = 0x%x, CRC 0x%x\n", + __func__, misr_resp->block_id, misr_resp->crc_value[0]); + return ret; +} + +int mdp3_misr_set(struct mdp_misr *misr_req) +{ + int ret = 0; + + pr_debug("%s Parameters Block = %d Cframe Count = %d CRC = %d\n", + __func__, misr_req->block_id, misr_req->frame_count, + misr_req->crc_value[0]); + + switch (misr_req->block_id) { + case DISPLAY_MISR_DSI0: + pr_debug("In the case DISPLAY_MISR_DSI0\n"); + MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1); + MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20); + MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1); + break; + + case DISPLAY_MISR_DSI_CMD: + pr_debug("In the case DISPLAY_MISR_DSI_CMD\n"); + MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1); + 
MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10); + MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1); + break; + + default: + pr_err("%s CRC Capture not supported\n", __func__); + ret = -EINVAL; + break; + } + return ret; +} + +struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val) +{ + if (!mdp3_res || !mdp3_res->pan_cfg.init_done) + return ERR_PTR(-EPROBE_DEFER); + + if (mdp3_res->pan_cfg.pan_intf == intf_val) + return &mdp3_res->pan_cfg; + else + return NULL; +} +EXPORT_SYMBOL(mdp3_panel_intf_type); + +int mdp3_footswitch_ctrl(int enable) +{ + int rc = 0; + int active_cnt = 0; + + mutex_lock(&mdp3_res->fs_idle_pc_lock); + MDSS_XLOG(enable); + if (!mdp3_res->fs_ena && enable) { + rc = regulator_enable(mdp3_res->fs); + if (rc) { + pr_err("mdp footswitch ctrl enable failed\n"); + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + return -EINVAL; + } + pr_debug("mdp footswitch ctrl enable success\n"); + mdp3_enable_regulator(true); + mdp3_res->fs_ena = true; + } else if (!enable && mdp3_res->fs_ena) { + active_cnt = atomic_read(&mdp3_res->active_intf_cnt); + if (active_cnt != 0) { + /* + * Turning off GDSC while overlays are still + * active. + */ + mdp3_res->idle_pc = true; + pr_debug("idle pc. active overlays=%d\n", + active_cnt); + } + mdp3_enable_regulator(false); + rc = regulator_disable(mdp3_res->fs); + if (rc) { + pr_err("mdp footswitch ctrl disable failed\n"); + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + return -EINVAL; + } + mdp3_res->fs_ena = false; + pr_debug("mdp3 footswitch ctrl disable configured\n"); + } else { + pr_debug("mdp3 footswitch ctrl already configured\n"); + } + + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + return rc; +} + +int mdp3_panel_get_intf_status(u32 disp_num, u32 intf_type) +{ + int rc = 0, status = 0; + + if (intf_type != MDSS_PANEL_INTF_DSI) + return 0; + + rc = mdp3_clk_enable(1, 0); + if (rc) { + pr_err("fail to turn on MDP core clks\n"); + return rc; + } + + status = (MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG) & 0x180000); + /* DSI video mode or command mode */ + rc = (status == 0x180000) || (status == 0x080000); + + if (mdp3_clk_enable(0, 0)) + pr_err("fail to turn off MDP core clks\n"); + return rc; +} + +static int mdp3_probe(struct platform_device *pdev) +{ + int rc; + static struct msm_mdp_interface mdp3_interface = { + .init_fnc = mdp3_init, + .fb_mem_get_iommu_domain = mdp3_fb_mem_get_iommu_domain, + .panel_register_done = mdp3_panel_register_done, + .fb_stride = mdp3_fb_stride, + .check_dsi_status = mdp3_check_dsi_ctrl_status, + }; + + struct mdp3_intr_cb underrun_cb = { + .cb = mdp3_dma_underrun_intr_handler, + .data = NULL, + }; + + pr_debug("%s: START\n", __func__); + if (!pdev->dev.of_node) { + pr_err("MDP driver only supports device tree probe\n"); + return -ENOTSUPP; + } + + if (mdp3_res) { + pr_err("MDP already initialized\n"); + return -EINVAL; + } + + mdp3_res = devm_kzalloc(&pdev->dev, sizeof(struct mdp3_hw_resource), + GFP_KERNEL); + if (mdp3_res == NULL) + return -ENOMEM; + + pdev->id = 0; + mdp3_res->pdev = pdev; + mutex_init(&mdp3_res->res_mutex); + mutex_init(&mdp3_res->fs_idle_pc_lock); + spin_lock_init(&mdp3_res->irq_lock); + platform_set_drvdata(pdev, mdp3_res); + atomic_set(&mdp3_res->active_intf_cnt, 0); + mutex_init(&mdp3_res->reg_bus_lock); + INIT_LIST_HEAD(&mdp3_res->reg_bus_clist); + + mdp3_res->mdss_util = mdss_get_util_intf(); + if (mdp3_res->mdss_util == NULL) { + pr_err("Failed to get mdss utility functions\n"); + rc = -ENODEV; + goto get_util_fail; + } + mdp3_res->mdss_util->get_iommu_domain = mdp3_get_iommu_domain; + 
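+ /* Hook the MDP3 implementations into the shared mdss utility interface */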
mdp3_res->mdss_util->iommu_attached = is_mdss_iommu_attached; + mdp3_res->mdss_util->iommu_ctrl = mdp3_iommu_ctrl; + mdp3_res->mdss_util->bus_scale_set_quota = mdp3_bus_scale_set_quota; + mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type; + mdp3_res->mdss_util->dyn_clk_gating_ctrl = + mdp3_dynamic_clock_gating_ctrl; + mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type; + mdp3_res->mdss_util->panel_intf_status = mdp3_panel_get_intf_status; + + if (mdp3_res->mdss_util->param_check(mdss_mdp3_panel)) { + mdp3_res->mdss_util->display_disabled = true; + mdp3_res->mdss_util->mdp_probe_done = true; + return 0; + } + + rc = mdp3_parse_dt(pdev); + if (rc) + goto probe_done; + + rc = mdp3_res_init(); + if (rc) { + pr_err("unable to initialize mdp3 resources\n"); + goto probe_done; + } + + mdp3_res->fs_ena = false; + mdp3_res->fs = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR_OR_NULL(mdp3_res->fs)) { + pr_err("unable to get mdss gdsc regulator\n"); + return -EINVAL; + } + + rc = mdp3_debug_init(pdev); + if (rc) { + pr_err("unable to initialize mdp debugging\n"); + goto probe_done; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS); + if (mdp3_res->idle_pc_enabled) { + pr_debug("%s: Enabling autosuspend\n", __func__); + pm_runtime_use_autosuspend(&pdev->dev); + } + /* Enable PM runtime */ + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + if (!pm_runtime_enabled(&pdev->dev)) { + rc = mdp3_footswitch_ctrl(1); + if (rc) { + pr_err("unable to turn on FS\n"); + goto probe_done; + } + } + + rc = mdp3_check_version(); + if (rc) { + pr_err("mdp3 check version failed\n"); + goto probe_done; + } + rc = mdp3_register_sysfs(pdev); + if (rc) + pr_err("unable to register mdp sysfs nodes\n"); + + rc = mdss_fb_register_mdp_instance(&mdp3_interface); + if (rc) + pr_err("unable to register mdp instance\n"); + + rc = mdp3_set_intr_callback(MDP3_INTR_LCDC_UNDERFLOW, + &underrun_cb); + if (rc) + pr_err("unable to configure interrupt callback\n"); + + rc = mdss_smmu_init(mdss_res, &pdev->dev); + if (rc) + pr_err("mdss smmu init failed\n"); + + __mdp3_set_supported_formats(); + + mdp3_res->mdss_util->mdp_probe_done = true; + pr_debug("%s: END\n", __func__); + +probe_done: + if (IS_ERR_VALUE(rc)) + kfree(mdp3_res->mdp3_hw.irq_info); +get_util_fail: + if (IS_ERR_VALUE(rc)) { + mdp3_res_deinit(); + + if (mdp3_res->mdp_base) + devm_iounmap(&pdev->dev, mdp3_res->mdp_base); + + devm_kfree(&pdev->dev, mdp3_res); + mdp3_res = NULL; + + if (mdss_res) { + devm_kfree(&pdev->dev, mdss_res); + mdss_res = NULL; + } + } + + return rc; +} + +int mdp3_panel_get_boot_cfg(void) +{ + int rc; + + if (!mdp3_res || !mdp3_res->pan_cfg.init_done) + rc = -EPROBE_DEFER; + else if (mdp3_res->pan_cfg.lk_cfg) + rc = 1; + else + rc = 0; + return rc; +} + +static int mdp3_suspend_sub(void) +{ + mdp3_footswitch_ctrl(0); + return 0; +} + +static int mdp3_resume_sub(void) +{ + mdp3_footswitch_ctrl(1); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mdp3_pm_suspend(struct device *dev) +{ + dev_dbg(dev, "Display pm suspend\n"); + MDSS_XLOG(XLOG_FUNC_ENTRY); + return mdp3_suspend_sub(); +} + +static int mdp3_pm_resume(struct device *dev) +{ + dev_dbg(dev, "Display pm resume\n"); + + /* + * It is possible that the runtime status of the mdp device may + * have been active when the system was suspended. Reset the runtime + * status to suspended state after a complete system resume. 
+ */ + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_enable(dev); + + MDSS_XLOG(XLOG_FUNC_ENTRY); + return mdp3_resume_sub(); +} +#endif + +#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) +static int mdp3_suspend(struct platform_device *pdev, pm_message_t state) +{ + pr_debug("Display suspend\n"); + + MDSS_XLOG(XLOG_FUNC_ENTRY); + return mdp3_suspend_sub(); +} + +static int mdp3_resume(struct platform_device *pdev) +{ + pr_debug("Display resume\n"); + + MDSS_XLOG(XLOG_FUNC_ENTRY); + return mdp3_resume_sub(); +} +#else +#define mdp3_suspend NULL +#define mdp3_resume NULL +#endif + +#ifdef CONFIG_PM +static int mdp3_runtime_resume(struct device *dev) +{ + bool device_on = true; + + dev_dbg(dev, "Display pm runtime resume, active overlay cnt=%d\n", + atomic_read(&mdp3_res->active_intf_cnt)); + + /* do not resume panels when coming out of idle power collapse */ + if (!mdp3_res->idle_pc) + device_for_each_child(dev, &device_on, mdss_fb_suspres_panel); + + MDSS_XLOG(XLOG_FUNC_ENTRY); + mdp3_footswitch_ctrl(1); + + return 0; +} + +static int mdp3_runtime_idle(struct device *dev) +{ + dev_dbg(dev, "Display pm runtime idle\n"); + + return 0; +} + +static int mdp3_runtime_suspend(struct device *dev) +{ + bool device_on = false; + + dev_dbg(dev, "Display pm runtime suspend, active overlay cnt=%d\n", + atomic_read(&mdp3_res->active_intf_cnt)); + + if (mdp3_res->clk_ena) { + pr_debug("Clk turned on...MDP suspend failed\n"); + return -EBUSY; + } + + MDSS_XLOG(XLOG_FUNC_ENTRY); + mdp3_footswitch_ctrl(0); + + /* do not suspend panels when going in to idle power collapse */ + if (!mdp3_res->idle_pc) + device_for_each_child(dev, &device_on, mdss_fb_suspres_panel); + + return 0; +} +#endif + +static const struct dev_pm_ops mdp3_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mdp3_pm_suspend, + mdp3_pm_resume) + SET_RUNTIME_PM_OPS(mdp3_runtime_suspend, + mdp3_runtime_resume, + mdp3_runtime_idle) +}; + + +static int mdp3_remove(struct platform_device *pdev) +{ + struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev); + + if (!mdata) + return -ENODEV; + pm_runtime_disable(&pdev->dev); + mdp3_bus_scale_unregister(); + mdp3_clk_remove(); + mdp3_debug_deinit(pdev); + return 0; +} + +static const struct of_device_id mdp3_dt_match[] = { + { .compatible = "qcom,mdss_mdp3",}, + {} +}; +MODULE_DEVICE_TABLE(of, mdp3_dt_match); +EXPORT_COMPAT("qcom,mdss_mdp3"); + +static struct platform_driver mdp3_driver = { + .probe = mdp3_probe, + .remove = mdp3_remove, + .suspend = mdp3_suspend, + .resume = mdp3_resume, + .shutdown = NULL, + .driver = { + .name = "mdp3", + .of_match_table = mdp3_dt_match, + .pm = &mdp3_pm_ops, + }, +}; + +static int __init mdp3_driver_init(void) +{ + int ret; + + ret = platform_driver_register(&mdp3_driver); + if (ret) { + pr_err("register mdp3 driver failed!\n"); + return ret; + } + + return 0; +} + +module_param_string(panel, mdss_mdp3_panel, MDSS_MAX_PANEL_LEN, 0600); +/* + * panel=:: + * where is "1"-lk/gcdb config or "0" non-lk/non-gcdb + * config; is dsi:0 + * is panel interface specific string + * Ex: This string is panel's device node name from DT + * for DSI interface + */ +MODULE_PARM_DESC(panel, "lk supplied panel selection string"); +module_init(mdp3_driver_init); diff --git a/drivers/video/fbdev/msm/mdp3.h b/drivers/video/fbdev/msm/mdp3.h new file mode 100644 index 0000000000000000000000000000000000000000..6fb39a73649dd5bc011829beda2bda836017fb89 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3.h @@ -0,0 +1,292 @@ +/* Copyright (c) 2013-2014, 2016-2018, The Linux 
Foundation. All rights reserved. + * Copyright (C) 2007 Google Incorporated + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef MDP3_H +#define MDP3_H + +#include +#include +#include +#include + +#include "mdss_dsi_clk.h" +#include "mdp3_dma.h" +#include "mdss_fb.h" +#include "mdss.h" + +#define MDP_VSYNC_CLK_RATE 19200000 +#define MDP_CORE_CLK_RATE_SVS 160000000 +#define MDP_CORE_CLK_RATE_SUPER_SVS 200000000 +#define MDP_CORE_CLK_RATE_MAX 307200000 + +#define CLK_FUDGE_NUM 12 +#define CLK_FUDGE_DEN 10 + +/* PPP cant work at SVS for panel res above qHD */ +#define SVS_MAX_PIXEL (540 * 960) + +#define KOFF_TIMEOUT_MS 84 +#define KOFF_TIMEOUT msecs_to_jiffies(KOFF_TIMEOUT_MS) +#define WAIT_DMA_TIMEOUT msecs_to_jiffies(84) + +/* + * MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3 + * so using them together for MDP_SMART_BLIT. + */ +#define MDP_SMART_BLIT 0xC0000000 + +#define BITS_PER_BYTE 8 +#define MDP_IMGTYPE_LIMIT1 0x100 +#define BITS_TO_BYTES(x) DIV_ROUND_UP(x, BITS_PER_BYTE) + +enum { + MDP3_CLK_AHB, + MDP3_CLK_AXI, + MDP3_CLK_MDP_SRC, + MDP3_CLK_MDP_CORE, + MDP3_CLK_VSYNC, + MDP3_CLK_DSI, + MDP3_MAX_CLK +}; + +enum { + MDP3_BUS_HANDLE, + MDP3_BUS_HANDLE_MAX, +}; + +enum { + MDP3_IOMMU_DOMAIN_UNSECURE, + MDP3_IOMMU_DOMAIN_SECURE, + MDP3_IOMMU_DOMAIN_MAX, +}; + +enum { + MDP3_IOMMU_CTX_MDP_0, + MDP3_IOMMU_CTX_MDP_1, + MDP3_IOMMU_CTX_MAX +}; + +/* Keep DSI entry in sync with mdss + * which is being used by DSI 6G + */ +enum { + MDP3_CLIENT_DMA_P, + MDP3_CLIENT_DSI = 1, + MDP3_CLIENT_PPP, + MDP3_CLIENT_IOMMU, + MDP3_CLIENT_MAX, +}; + +enum { + DI_PARTITION_NUM = 0, + DI_DOMAIN_NUM = 1, + DI_MAX, +}; + +struct mdp3_bus_handle_map { + struct msm_bus_vectors *bus_vector; + struct msm_bus_paths *usecases; + struct msm_bus_scale_pdata *scale_pdata; + int current_bus_idx; + int ref_cnt; + u64 restore_ab[MDP3_CLIENT_MAX]; + u64 restore_ib[MDP3_CLIENT_MAX]; + u64 ab[MDP3_CLIENT_MAX]; + u64 ib[MDP3_CLIENT_MAX]; + u32 handle; +}; + +struct mdp3_iommu_domain_map { + u32 domain_type; + char *client_name; + int npartitions; + int domain_idx; + struct iommu_domain *domain; +}; + +struct mdp3_iommu_ctx_map { + u32 ctx_type; + struct mdp3_iommu_domain_map *domain; + char *ctx_name; + struct device *ctx; + int attached; +}; + +struct mdp3_iommu_meta { + struct rb_node node; + struct ion_handle *handle; + struct rb_root iommu_maps; + struct kref ref; + struct sg_table *table; + struct dma_buf *dbuf; + int mapped_size; + unsigned long size; + dma_addr_t iova_addr; + unsigned long flags; +}; + +#define MDP3_MAX_INTR 28 + +struct mdp3_intr_cb { + void (*cb)(int type, void *); + void *data; +}; + +#define SMART_BLIT_RGB_EN 1 +#define SMART_BLIT_YUV_EN 2 + +struct mdp3_hw_resource { + struct platform_device *pdev; + u32 mdp_rev; + + struct mutex res_mutex; + + struct clk *clocks[MDP3_MAX_CLK]; + int clock_ref_count[MDP3_MAX_CLK]; + unsigned long dma_core_clk_request; + unsigned long ppp_core_clk_request; + struct mdss_hw mdp3_hw; + struct mdss_util_intf *mdss_util; + + char __iomem *mdp_base; + size_t mdp_reg_size; + + char __iomem *vbif_base; + size_t vbif_reg_size; + + 
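/* bus scaling handle and ION/IOMMU buffer management state */ +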
struct mdp3_bus_handle_map *bus_handle; + + struct ion_client *ion_client; + struct mdp3_iommu_domain_map *domains; + struct mdp3_iommu_ctx_map *iommu_contexts; + unsigned int iommu_ref_cnt; + bool allow_iommu_update; + struct ion_handle *ion_handle; + struct mutex iommu_lock; + struct mutex fs_idle_pc_lock; + + struct mdp3_dma dma[MDP3_DMA_MAX]; + struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX]; + + struct rb_root iommu_root; + spinlock_t irq_lock; + u32 irq_ref_count[MDP3_MAX_INTR]; + u32 irq_mask; + int irq_ref_cnt; + struct mdp3_intr_cb callbacks[MDP3_MAX_INTR]; + u32 underrun_cnt; + + int irq_registered; + + unsigned long splash_mem_addr; + u32 splash_mem_size; + struct mdss_panel_cfg pan_cfg; + + int clk_prepare_count; + int cont_splash_en; + + bool batfet_required; + struct regulator *batfet; + struct regulator *vdd_cx; + struct regulator *fs; + bool fs_ena; + int clk_ena; + bool idle_pc_enabled; + bool idle_pc; + atomic_t active_intf_cnt; + u8 smart_blit_en; + bool solid_fill_vote_en; + struct list_head reg_bus_clist; + struct mutex reg_bus_lock; + + u32 max_bw; + + u8 ppp_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)]; + u8 dma_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)]; +}; + +struct mdp3_img_data { + dma_addr_t addr; + unsigned long len; + u32 offset; + u32 flags; + u32 padding; + int p_need; + struct ion_handle *srcp_ihdl; + u32 dir; + u32 domain; + bool mapped; + bool skip_detach; + struct fd srcp_f; + struct dma_buf *srcp_dma_buf; + struct dma_buf_attachment *srcp_attachment; + struct sg_table *srcp_table; + struct sg_table *tab_clone; +}; + +extern struct mdp3_hw_resource *mdp3_res; + +struct mdp3_dma *mdp3_get_dma_pipe(int capability); +struct mdp3_intf *mdp3_get_display_intf(int type); +void mdp3_irq_enable(int type); +void mdp3_irq_disable(int type); +void mdp3_irq_disable_nosync(int type); +int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb); +void mdp3_irq_register(void); +void mdp3_irq_deregister(void); +int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, int client); +int mdp3_clk_enable(int enable, int dsi_clk); +int mdp3_res_update(int enable, int dsi_clk, int client); +int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota); +int mdp3_put_img(struct mdp3_img_data *data, int client); +int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data, + int client); +int mdp3_iommu_enable(int client); +int mdp3_iommu_disable(int client); +int mdp3_iommu_is_attached(void); +void mdp3_free(struct msm_fb_data_type *mfd); +int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd); +void mdp3_release_splash_memory(struct msm_fb_data_type *mfd); +int mdp3_create_sysfs_link(struct device *dev); +int mdp3_get_cont_spash_en(void); +int mdp3_get_mdp_dsi_clk(void); +int mdp3_put_mdp_dsi_clk(void); + +int mdp3_misr_set(struct mdp_misr *misr_req); +int mdp3_misr_get(struct mdp_misr *misr_resp); +void mdp3_enable_regulator(int enable); +void mdp3_check_dsi_ctrl_status(struct work_struct *work, + uint32_t interval); +int mdp3_dynamic_clock_gating_ctrl(int enable); +int mdp3_footswitch_ctrl(int enable); +int mdp3_qos_remapper_setup(struct mdss_panel_data *panel); +int mdp3_splash_done(struct mdss_panel_info *panel_info); +int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info); +u64 mdp3_clk_round_off(u64 clk_rate); + +void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate, + u64 *ab, u64 *ib, uint32_t bpp); +void mdp3_clear_irq(u32 interrupt_mask); +int mdp3_enable_panic_ctrl(void); + +int mdp3_layer_pre_commit(struct 
msm_fb_data_type *mfd, + struct file *file, struct mdp_layer_commit_v1 *commit); +int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd, + struct file *file, struct mdp_layer_commit_v1 *commit); + +#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr) +#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr) +#define VBIF_REG_WRITE(off, val) writel_relaxed(val, mdp3_res->vbif_base + off) +#define VBIF_REG_READ(off) readl_relaxed(mdp3_res->vbif_base + off) + +#endif /* MDP3_H */ diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..19c9a2bd4adba59a3b180c20bea9818b7197dbf1 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_ctrl.c @@ -0,0 +1,3024 @@ +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdp3_ctrl.h" +#include "mdp3.h" +#include "mdp3_ppp.h" +#include "mdss_smmu.h" +#include "mdss_sync.h" + +#define VSYNC_EXPIRE_TICK 4 + +static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd); +static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx); +static int mdp3_histogram_stop(struct mdp3_session_data *session, + u32 block); +static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable); +static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable); +static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd); +static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd, + struct mdp_rgb_lut_data *cfg); +static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd, + struct mdp_rgb_lut_data *cfg); +static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd); + +u32 mdp_lut_inverse16[MDP_LUT_SIZE] = { +0, 65536, 32768, 21845, 16384, 13107, 10923, 9362, 8192, 7282, 6554, 5958, +5461, 5041, 4681, 4369, 4096, 3855, 3641, 3449, 3277, 3121, 2979, 2849, 2731, +2621, 2521, 2427, 2341, 2260, 2185, 2114, 2048, 1986, 1928, 1872, 1820, 1771, +1725, 1680, 1638, 1598, 1560, 1524, 1489, 1456, 1425, 1394, 1365, 1337, 1311, +1285, 1260, 1237, 1214, 1192, 1170, 1150, 1130, 1111, 1092, 1074, 1057, 1040, +1024, 1008, 993, 978, 964, 950, 936, 923, 910, 898, 886, 874, 862, 851, 840, +830, 819, 809, 799, 790, 780, 771, 762, 753, 745, 736, 728, 720, 712, 705, 697, +690, 683, 676, 669, 662, 655, 649, 643, 636, 630, 624, 618, 612, 607, 601, 596, +590, 585, 580, 575, 570, 565, 560, 555, 551, 546, 542, 537, 533, 529, 524, 520, +516, 512, 508, 504, 500, 496, 493, 489, 485, 482, 478, 475, 471, 468, 465, 462, +458, 455, 452, 449, 446, 443, 440, 437, 434, 431, 428, 426, 423, 420, 417, 415, +412, 410, 407, 405, 402, 400, 397, 395, 392, 390, 388, 386, 383, 381, 379, 377, +374, 372, 370, 368, 366, 364, 362, 360, 358, 356, 354, 352, 350, 349, 347, 345, +343, 341, 340, 338, 336, 334, 333, 331, 329, 328, 326, 324, 323, 321, 320, 318, +317, 315, 314, 312, 311, 309, 308, 306, 305, 303, 302, 
301, 299, 298, 297, 295, +294, 293, 291, 290, 289, 287, 286, 285, 284, 282, 281, 280, 279, 278, 277, 275, +274, 273, 272, 271, 270, 269, 267, 266, 265, 264, 263, 262, 261, 260, 259, 258, +257}; + +static void mdp3_bufq_init(struct mdp3_buffer_queue *bufq) +{ + bufq->count = 0; + bufq->push_idx = 0; + bufq->pop_idx = 0; +} + +void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq) +{ + int count = bufq->count; + + if (!count) + return; + + while (count-- && (bufq->pop_idx >= 0)) { + struct mdp3_img_data *data = &bufq->img_data[bufq->pop_idx]; + + bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE; + mdp3_put_img(data, MDP3_CLIENT_DMA_P); + } + bufq->count = 0; + bufq->push_idx = 0; + bufq->pop_idx = 0; +} + +int mdp3_bufq_push(struct mdp3_buffer_queue *bufq, + struct mdp3_img_data *data) +{ + if (bufq->count >= MDP3_MAX_BUF_QUEUE) { + pr_err("bufq full\n"); + return -EPERM; + } + + bufq->img_data[bufq->push_idx] = *data; + bufq->push_idx = (bufq->push_idx + 1) % MDP3_MAX_BUF_QUEUE; + bufq->count++; + return 0; +} + +static struct mdp3_img_data *mdp3_bufq_pop(struct mdp3_buffer_queue *bufq) +{ + struct mdp3_img_data *data; + + if (bufq->count == 0) + return NULL; + + data = &bufq->img_data[bufq->pop_idx]; + bufq->count--; + bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE; + return data; +} + +static int mdp3_bufq_count(struct mdp3_buffer_queue *bufq) +{ + return bufq->count; +} + +void mdp3_ctrl_notifier_register(struct mdp3_session_data *ses, + struct notifier_block *notifier) +{ + blocking_notifier_chain_register(&ses->notifier_head, notifier); +} + +void mdp3_ctrl_notifier_unregister(struct mdp3_session_data *ses, + struct notifier_block *notifier) +{ + blocking_notifier_chain_unregister(&ses->notifier_head, notifier); +} + +int mdp3_ctrl_notify(struct mdp3_session_data *ses, int event) +{ + return blocking_notifier_call_chain(&ses->notifier_head, event, ses); +} + +static void mdp3_dispatch_dma_done(struct kthread_work *work) +{ + struct mdp3_session_data *session; + int cnt = 0; + + pr_debug("%s\n", __func__); + session = container_of(work, struct mdp3_session_data, + dma_done_work); + if (!session) + return; + + cnt = atomic_read(&session->dma_done_cnt); + MDSS_XLOG(cnt); + while (cnt > 0) { + mdp3_ctrl_notify(session, MDP_NOTIFY_FRAME_DONE); + atomic_dec(&session->dma_done_cnt); + cnt--; + } +} + +static void mdp3_dispatch_clk_off(struct work_struct *work) +{ + struct mdp3_session_data *session; + int rc; + bool dmap_busy; + int retry_count = 2; + + pr_debug("%s\n", __func__); + MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__); + session = container_of(work, struct mdp3_session_data, + clk_off_work); + if (!session) + return; + + mutex_lock(&session->lock); + if (session->vsync_enabled || + atomic_read(&session->vsync_countdown) > 0) { + mutex_unlock(&session->lock); + pr_debug("%s: Ignoring clk shut down\n", __func__); + MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__); + return; + } + + if (session->intf->active) { +retry_dma_done: + rc = wait_for_completion_timeout(&session->dma_completion, + WAIT_DMA_TIMEOUT); + if (rc <= 0) { + struct mdss_panel_data *panel; + + panel = session->panel; + pr_debug("cmd kickoff timed out (%d)\n", rc); + dmap_busy = session->dma->busy(); + if (dmap_busy) { + if (--retry_count) { + pr_err("dmap is busy, retry %d\n", + retry_count); + goto retry_dma_done; + } + pr_err("dmap is still busy, bug_on\n"); + WARN_ON(1); + } else { + pr_debug("dmap is not busy, continue\n"); + } + } + } + mdp3_ctrl_vsync_enable(session->mfd, 0); + mdp3_ctrl_clk_enable(session->mfd, 0); 
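+ /* vsync notification and session clocks are now off */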
+ MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__); + mutex_unlock(&session->lock); +} + +static void mdp3_vsync_retire_handle_vsync(void *arg) +{ + struct mdp3_session_data *mdp3_session; + + mdp3_session = (struct mdp3_session_data *)arg; + + if (!mdp3_session) { + pr_warn("Invalid handle for vsync\n"); + return; + } + + schedule_work(&mdp3_session->retire_work); +} + +static void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val) +{ + struct mdp3_session_data *mdp3_session; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + + mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex); + if (mdp3_session->retire_cnt > 0) { + mdss_inc_timeline(mfd->mdp_sync_pt_data.timeline_retire, val); + mdp3_session->retire_cnt -= min(val, mdp3_session->retire_cnt); + } + mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex); +} + +static void mdp3_vsync_retire_work_handler(struct work_struct *work) +{ + struct mdp3_session_data *mdp3_session = + container_of(work, struct mdp3_session_data, retire_work); + + if (!mdp3_session) + return; + + mdp3_vsync_retire_signal(mdp3_session->mfd, 1); +} + +void mdp3_hist_intr_notify(struct mdp3_dma *dma) +{ + dma->hist_events++; + sysfs_notify_dirent(dma->hist_event_sd); + pr_debug("%s:: hist_events = %u\n", __func__, dma->hist_events); +} + +void vsync_notify_handler(void *arg) +{ + struct mdp3_session_data *session = (struct mdp3_session_data *)arg; + + session->vsync_time = ktime_get(); + MDSS_XLOG(ktime_to_ms(session->vsync_time)); + sysfs_notify_dirent(session->vsync_event_sd); +} + +void dma_done_notify_handler(void *arg) +{ + struct mdp3_session_data *session = (struct mdp3_session_data *)arg; + + atomic_inc(&session->dma_done_cnt); + kthread_queue_work(&session->worker, &session->dma_done_work); + complete_all(&session->dma_completion); +} + +void vsync_count_down(void *arg) +{ + struct mdp3_session_data *session = (struct mdp3_session_data *)arg; + + /* We are counting down to turn off clocks */ + if (atomic_read(&session->vsync_countdown) > 0) + atomic_dec(&session->vsync_countdown); + if (atomic_read(&session->vsync_countdown) == 0) + schedule_work(&session->clk_off_work); +} + +void mdp3_ctrl_reset_countdown(struct mdp3_session_data *session, + struct msm_fb_data_type *mfd) +{ + if (mdp3_ctrl_get_intf_type(mfd) == MDP3_DMA_OUTPUT_SEL_DSI_CMD) + atomic_set(&session->vsync_countdown, VSYNC_EXPIRE_TICK); +} + +static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable) +{ + struct mdp3_session_data *mdp3_session; + struct mdp3_notification vsync_client; + struct mdp3_notification *arg = NULL; + bool mod_vsync_timer = false; + + pr_debug("%s =%d\n", __func__, enable); + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma || + !mdp3_session->intf) + return -ENODEV; + + if (!mdp3_session->status) { + pr_debug("fb%d is not on yet", mfd->index); + return -EINVAL; + } + if (enable) { + vsync_client.handler = vsync_notify_handler; + vsync_client.arg = mdp3_session; + arg = &vsync_client; + } else if (atomic_read(&mdp3_session->vsync_countdown) > 0) { + /* + * Now that vsync is no longer needed we will + * shutdown dsi clocks as soon as cnt down == 0 + * for cmd mode panels + */ + vsync_client.handler = vsync_count_down; + vsync_client.arg = mdp3_session; + arg = &vsync_client; + enable = 1; + } + + if (enable) { + if (mdp3_session->status == 1 && + (mdp3_session->vsync_before_commit || + !mdp3_session->intf->active)) { + mod_vsync_timer = true; + } else if 
(!mdp3_session->clk_on) { + /* Enable clocks before enabling the vsync interrupt */ + mdp3_ctrl_reset_countdown(mdp3_session, mfd); + mdp3_ctrl_clk_enable(mfd, 1); + } + } + + mdp3_clk_enable(1, 0); + mdp3_session->dma->vsync_enable(mdp3_session->dma, arg); + mdp3_clk_enable(0, 0); + + /* + * Need to fake vsync whenever dsi interface is not + * active or when dsi clocks are currently off + */ + if (mod_vsync_timer) { + mod_timer(&mdp3_session->vsync_timer, + jiffies + msecs_to_jiffies(mdp3_session->vsync_period)); + } else if (!enable) { + del_timer(&mdp3_session->vsync_timer); + } + + return 0; +} + +void mdp3_vsync_timer_func(unsigned long arg) +{ + struct mdp3_session_data *session = (struct mdp3_session_data *)arg; + + if (session->status == 1 && (session->vsync_before_commit || + !session->intf->active)) { + pr_debug("%s trigger\n", __func__); + vsync_notify_handler(session); + mod_timer(&session->vsync_timer, + jiffies + msecs_to_jiffies(session->vsync_period)); + } +} + +static int mdp3_ctrl_async_blit_req(struct msm_fb_data_type *mfd, + void __user *p) +{ + struct mdp_async_blit_req_list req_list_header; + int rc, count; + void __user *p_req; + + if (copy_from_user(&req_list_header, p, sizeof(req_list_header))) + return -EFAULT; + p_req = p + sizeof(req_list_header); + count = req_list_header.count; + if (count < 0 || count >= MAX_BLIT_REQ) + return -EINVAL; + rc = mdp3_ppp_parse_req(p_req, &req_list_header, 1); + if (!rc) + rc = copy_to_user(p, &req_list_header, sizeof(req_list_header)); + return rc; +} + +static int mdp3_ctrl_blit_req(struct msm_fb_data_type *mfd, void __user *p) +{ + struct mdp_async_blit_req_list req_list_header; + int rc, count; + void __user *p_req; + + if (copy_from_user(&(req_list_header.count), p, + sizeof(struct mdp_blit_req_list))) + return -EFAULT; + p_req = p + sizeof(struct mdp_blit_req_list); + count = req_list_header.count; + if (count < 0 || count >= MAX_BLIT_REQ) + return -EINVAL; + req_list_header.sync.acq_fen_fd_cnt = 0; + rc = mdp3_ppp_parse_req(p_req, &req_list_header, 0); + return rc; +} + +static ssize_t mdp3_bl_show_event(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + struct mdp3_session_data *mdp3_session = NULL; + int ret; + + if (!mfd || !mfd->mdp.private1) + return -EAGAIN; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp3_session->bl_events); + return ret; +} + +static ssize_t mdp3_hist_show_event(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + struct mdp3_session_data *mdp3_session = NULL; + struct mdp3_dma *dma = NULL; + int ret; + + if (!mfd || !mfd->mdp.private1) + return -EAGAIN; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + dma = (struct mdp3_dma *)mdp3_session->dma; + ret = scnprintf(buf, PAGE_SIZE, "%d\n", dma->hist_events); + return ret; +} + +static ssize_t mdp3_vsync_show_event(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + struct mdp3_session_data *mdp3_session = NULL; + u64 vsync_ticks; + int rc; + + if (!mfd || !mfd->mdp.private1) + return -EAGAIN; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + + vsync_ticks = 
ktime_to_ns(mdp3_session->vsync_time); + + pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks); + rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks); + return rc; +} + +static ssize_t mdp3_packpattern_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + struct mdp3_session_data *mdp3_session = NULL; + int rc; + u32 pattern = 0; + + if (!mfd || !mfd->mdp.private1) + return -EAGAIN; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + + pattern = mdp3_session->dma->output_config.pack_pattern; + + /* If pattern was found to be 0 then get pattern for fb imagetype */ + if (!pattern) + pattern = mdp3_ctrl_get_pack_pattern(mfd->fb_imgType); + + pr_debug("fb%d pack_pattern c= %d.", mfd->index, pattern); + rc = scnprintf(buf, PAGE_SIZE, "packpattern=%d\n", pattern); + return rc; +} + +static ssize_t mdp3_dyn_pu_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = fbi->par; + struct mdp3_session_data *mdp3_session = NULL; + int ret, state; + + if (!mfd || !mfd->mdp.private1) + return -EAGAIN; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + state = (mdp3_session->dyn_pu_state >= 0) ? + mdp3_session->dyn_pu_state : -1; + ret = scnprintf(buf, PAGE_SIZE, "%d", state); + return ret; +} + +static ssize_t mdp3_dyn_pu_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = fbi->par; + struct mdp3_session_data *mdp3_session = NULL; + int ret, dyn_pu; + + if (!mfd || !mfd->mdp.private1) + return -EAGAIN; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + ret = kstrtoint(buf, 10, &dyn_pu); + if (ret) { + pr_err("Invalid input for partial update: ret = %d\n", ret); + return ret; + } + + mdp3_session->dyn_pu_state = dyn_pu; + sysfs_notify(&dev->kobj, NULL, "dyn_pu"); + return count; +} + +static DEVICE_ATTR(hist_event, 0444, mdp3_hist_show_event, NULL); +static DEVICE_ATTR(bl_event, 0444, mdp3_bl_show_event, NULL); +static DEVICE_ATTR(vsync_event, 0444, mdp3_vsync_show_event, NULL); +static DEVICE_ATTR(packpattern, 0444, mdp3_packpattern_show, NULL); +static DEVICE_ATTR(dyn_pu, 0664, mdp3_dyn_pu_show, + mdp3_dyn_pu_store); + +static struct attribute *generic_attrs[] = { + &dev_attr_packpattern.attr, + &dev_attr_dyn_pu.attr, + &dev_attr_hist_event.attr, + &dev_attr_bl_event.attr, + NULL, +}; + +static struct attribute *vsync_fs_attrs[] = { + &dev_attr_vsync_event.attr, + NULL, +}; + +static struct attribute_group vsync_fs_attr_group = { + .attrs = vsync_fs_attrs, +}; + +static struct attribute_group generic_attr_group = { + .attrs = generic_attrs, +}; + +static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable) +{ + struct mdp3_session_data *session; + struct mdss_panel_data *panel; + struct dsi_panel_clk_ctrl clk_ctrl; + int rc = 0; + + pr_debug("%s %d\n", __func__, enable); + + session = mfd->mdp.private1; + panel = session->panel; + + if (!panel->event_handler) + return 0; + + if ((enable && session->clk_on == 0) || + (!enable && session->clk_on == 1)) { + clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT; + clk_ctrl.state = enable; + rc = panel->event_handler(panel, + MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl); + rc |= mdp3_res_update(enable, 1, MDP3_CLIENT_DMA_P); + } else { + 
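/* clock state already matches the request; nothing to do */ +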
pr_debug("enable = %d, clk_on=%d\n", enable, session->clk_on); + } + + session->clk_on = enable; + return rc; +} + +static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status) +{ + int rc = 0; + + if (status) { + u64 ab = 0; + u64 ib = 0; + + mdp3_calc_dma_res(mfd->panel_info, NULL, &ab, &ib, + ppp_bpp(mfd->fb_imgType)); + rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib); + } else { + rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0); + } + return rc; +} + +static int mdp3_ctrl_res_req_clk(struct msm_fb_data_type *mfd, int status) +{ + int rc = 0; + + if (status) { + u64 mdp_clk_rate = 0; + + mdp3_calc_dma_res(mfd->panel_info, &mdp_clk_rate, + NULL, NULL, 0); + + mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate, + MDP3_CLIENT_DMA_P); + mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE, + MDP3_CLIENT_DMA_P); + + rc = mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("mdp3 clk enable fail\n"); + return rc; + } + } else { + rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P); + if (rc) + pr_err("mdp3 clk disable fail\n"); + } + return rc; +} + +static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd) +{ + int type; + + switch (mfd->panel.type) { + case MIPI_VIDEO_PANEL: + type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO; + break; + case MIPI_CMD_PANEL: + type = MDP3_DMA_OUTPUT_SEL_DSI_CMD; + break; + case LCDC_PANEL: + type = MDP3_DMA_OUTPUT_SEL_LCDC; + break; + default: + type = MDP3_DMA_OUTPUT_SEL_MAX; + } + return type; +} + +int mdp3_ctrl_get_source_format(u32 imgType) +{ + int format; + + switch (imgType) { + case MDP_RGB_565: + format = MDP3_DMA_IBUF_FORMAT_RGB565; + break; + case MDP_RGB_888: + format = MDP3_DMA_IBUF_FORMAT_RGB888; + break; + case MDP_ARGB_8888: + case MDP_RGBA_8888: + format = MDP3_DMA_IBUF_FORMAT_XRGB8888; + break; + default: + format = MDP3_DMA_IBUF_FORMAT_UNDEFINED; + } + return format; +} + +int mdp3_ctrl_get_pack_pattern(u32 imgType) +{ + int packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_RGB; + + if (imgType == MDP_RGBA_8888 || imgType == MDP_RGB_888) + packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_BGR; + return packPattern; +} + +static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd, + struct mdp3_intf *intf) +{ + int rc = 0; + struct mdp3_intf_cfg cfg; + struct mdp3_video_intf_cfg *video = &cfg.video; + struct mdss_panel_info *p = mfd->panel_info; + int h_back_porch = p->lcdc.h_back_porch; + int h_front_porch = p->lcdc.h_front_porch; + int w = p->xres; + int v_back_porch = p->lcdc.v_back_porch; + int v_front_porch = p->lcdc.v_front_porch; + int h = p->yres; + int h_sync_skew = p->lcdc.hsync_skew; + int h_pulse_width = p->lcdc.h_pulse_width; + int v_pulse_width = p->lcdc.v_pulse_width; + int hsync_period = h_front_porch + h_back_porch + w + h_pulse_width; + int vsync_period = v_front_porch + v_back_porch + h + v_pulse_width; + struct mdp3_session_data *mdp3_session; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + vsync_period *= hsync_period; + + cfg.type = mdp3_ctrl_get_intf_type(mfd); + if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO || + cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) { + video->hsync_period = hsync_period; + video->hsync_pulse_width = h_pulse_width; + video->vsync_period = vsync_period; + video->vsync_pulse_width = v_pulse_width * hsync_period; + video->display_start_x = h_back_porch + h_pulse_width; + video->display_end_x = hsync_period - h_front_porch - 1; + video->display_start_y = + (v_back_porch + v_pulse_width) * hsync_period; + video->display_end_y = + vsync_period - v_front_porch * 
hsync_period - 1; + video->active_start_x = video->display_start_x; + video->active_end_x = video->display_end_x; + video->active_h_enable = true; + video->active_start_y = video->display_start_y; + video->active_end_y = video->display_end_y; + video->active_v_enable = true; + video->hsync_skew = h_sync_skew; + video->hsync_polarity = 1; + video->vsync_polarity = 1; + video->de_polarity = 1; + video->underflow_color = p->lcdc.underflow_clr; + } else if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + cfg.dsi_cmd.primary_dsi_cmd_id = 0; + cfg.dsi_cmd.secondary_dsi_cmd_id = 1; + cfg.dsi_cmd.dsi_cmd_tg_intf_sel = 0; + } else + return -EINVAL; + + if (!(mdp3_session->in_splash_screen)) { + if (intf->config) + rc = intf->config(intf, &cfg); + else + rc = -EINVAL; + } + return rc; +} + +static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd, + struct mdp3_dma *dma) +{ + int rc; + struct mdss_panel_info *panel_info = mfd->panel_info; + struct fb_info *fbi = mfd->fbi; + struct fb_fix_screeninfo *fix; + struct fb_var_screeninfo *var; + struct mdp3_dma_output_config outputConfig; + struct mdp3_dma_source sourceConfig; + int frame_rate = mfd->panel_info->mipi.frame_rate; + int vbp, vfp, vspw; + int vtotal, vporch; + struct mdp3_notification dma_done_callback; + struct mdp3_tear_check te; + struct mdp3_session_data *mdp3_session; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + + vbp = panel_info->lcdc.v_back_porch; + vfp = panel_info->lcdc.v_front_porch; + vspw = panel_info->lcdc.v_pulse_width; + vporch = vbp + vfp + vspw; + vtotal = vporch + panel_info->yres; + + fix = &fbi->fix; + var = &fbi->var; + + sourceConfig.width = panel_info->xres; + sourceConfig.height = panel_info->yres; + sourceConfig.x = 0; + sourceConfig.y = 0; + sourceConfig.buf = mfd->iova; + sourceConfig.vporch = vporch; + sourceConfig.vsync_count = + MDP_VSYNC_CLK_RATE / (frame_rate * vtotal); + + outputConfig.dither_en = 0; + outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd); + outputConfig.bit_mask_polarity = 0; + outputConfig.color_components_flip = 0; + outputConfig.pack_align = MDP3_DMA_OUTPUT_PACK_ALIGN_LSB; + outputConfig.color_comp_out_bits = (MDP3_DMA_OUTPUT_COMP_BITS_8 << 4) | + (MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)| + MDP3_DMA_OUTPUT_COMP_BITS_8; + + if (dma->update_src_cfg) { + /* configuration has been updated through PREPARE call */ + sourceConfig.format = dma->source_config.format; + sourceConfig.stride = dma->source_config.stride; + outputConfig.pack_pattern = dma->output_config.pack_pattern; + } else { + sourceConfig.format = + mdp3_ctrl_get_source_format(mfd->fb_imgType); + outputConfig.pack_pattern = + mdp3_ctrl_get_pack_pattern(mfd->fb_imgType); + sourceConfig.stride = fix->line_length; + } + + te.frame_rate = panel_info->mipi.frame_rate; + te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode; + te.tear_check_en = panel_info->te.tear_check_en; + te.sync_cfg_height = panel_info->te.sync_cfg_height; + te.vsync_init_val = panel_info->te.vsync_init_val; + te.sync_threshold_start = panel_info->te.sync_threshold_start; + te.sync_threshold_continue = panel_info->te.sync_threshold_continue; + te.start_pos = panel_info->te.start_pos; + te.rd_ptr_irq = panel_info->te.rd_ptr_irq; + te.refx100 = panel_info->te.refx100; + + if (dma->dma_config) { + if (!panel_info->partial_update_enabled) { + dma->roi.w = sourceConfig.width; + dma->roi.h = sourceConfig.height; + dma->roi.x = sourceConfig.x; + dma->roi.y = sourceConfig.y; + } + rc = dma->dma_config(dma, &sourceConfig, &outputConfig, + 
mdp3_session->in_splash_screen); + } else { + pr_err("%s: dma config failed\n", __func__); + rc = -EINVAL; + } + + if (outputConfig.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + if (dma->dma_sync_config) + rc = dma->dma_sync_config(dma, + &sourceConfig, &te); + else + rc = -EINVAL; + dma_done_callback.handler = dma_done_notify_handler; + dma_done_callback.arg = mfd->mdp.private1; + dma->dma_done_notifier(dma, &dma_done_callback); + } + + return rc; +} + +static int mdp3_ctrl_on(struct msm_fb_data_type *mfd) +{ + int rc = 0; + struct mdp3_session_data *mdp3_session; + struct mdss_panel_data *panel; + + pr_debug("%s\n", __func__); + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma || + !mdp3_session->intf) { + pr_err("%s no device\n", __func__); + return -ENODEV; + } + mutex_lock(&mdp3_session->lock); + + MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mfd->panel_power_state); + panel = mdp3_session->panel; + /* make sure DSI host is initialized properly */ + if (panel) { + pr_debug("%s : dsi host init, power state = %d Splash %d\n", + __func__, mfd->panel_power_state, + mdp3_session->in_splash_screen); + if (mdss_fb_is_power_on_lp(mfd) || + mdp3_session->in_splash_screen) { + /* Turn on panel so that it can exit low power mode */ + mdp3_clk_enable(1, 0); + rc = panel->event_handler(panel, + MDSS_EVENT_LINK_READY, NULL); + rc |= panel->event_handler(panel, + MDSS_EVENT_UNBLANK, NULL); + rc |= panel->event_handler(panel, + MDSS_EVENT_PANEL_ON, NULL); + if (mdss_fb_is_power_on_ulp(mfd)) + rc |= mdp3_enable_panic_ctrl(); + mdp3_clk_enable(0, 0); + } + } + + if (mdp3_session->status) { + pr_debug("fb%d is on already\n", mfd->index); + MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state); + goto end; + } + + if (mdp3_session->intf->active) { + pr_debug("continuous splash screen, initialized already\n"); + mdp3_session->status = 1; + goto end; + } + + /* + * Get a reference to the runtime pm device. + * If idle pc feature is enabled, it will be released + * at end of this routine else, when device is turned off. 
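+ * The matching pm_runtime_put() is issued in the on_error path below or when the display is turned off.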
+ */ + pm_runtime_get_sync(&mdp3_res->pdev->dev); + + /* Increment the overlay active count */ + atomic_inc(&mdp3_res->active_intf_cnt); + mdp3_ctrl_notifier_register(mdp3_session, + &mdp3_session->mfd->mdp_sync_pt_data.notifier); + + /* request bus bandwidth before DSI DMA traffic */ + rc = mdp3_ctrl_res_req_bus(mfd, 1); + if (rc) { + pr_err("fail to request bus resource\n"); + goto on_error; + } + + rc = mdp3_dynamic_clock_gating_ctrl(0); + if (rc) { + pr_err("fail to disable dynamic clock gating\n"); + goto on_error; + } + mdp3_qos_remapper_setup(panel); + + rc = mdp3_ctrl_res_req_clk(mfd, 1); + if (rc) { + pr_err("fail to request mdp clk resource\n"); + goto on_error; + } + + if (panel->event_handler) { + rc = panel->event_handler(panel, MDSS_EVENT_LINK_READY, NULL); + rc |= panel->event_handler(panel, MDSS_EVENT_UNBLANK, NULL); + rc |= panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL); + if (panel->panel_info.type == MIPI_CMD_PANEL) { + struct dsi_panel_clk_ctrl clk_ctrl; + + clk_ctrl.state = MDSS_DSI_CLK_ON; + clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT; + rc |= panel->event_handler(panel, + MDSS_EVENT_PANEL_CLK_CTRL, + (void *)&clk_ctrl); + } + } + if (rc) { + pr_err("fail to turn on the panel\n"); + goto on_error; + } + + rc = mdp3_ctrl_dma_init(mfd, mdp3_session->dma); + if (rc) { + pr_err("dma init failed\n"); + goto on_error; + } + + rc = mdp3_ppp_init(); + if (rc) { + pr_err("ppp init failed\n"); + goto on_error; + } + + rc = mdp3_ctrl_intf_init(mfd, mdp3_session->intf); + if (rc) { + pr_err("display interface init failed\n"); + goto on_error; + } + mdp3_session->clk_on = 1; + + mdp3_session->first_commit = true; + if (mfd->panel_info->panel_dead) + mdp3_session->esd_recovery = true; + + mdp3_session->status = 1; + + mdp3_ctrl_pp_resume(mfd); + MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state); +on_error: + if (rc || (mdp3_res->idle_pc_enabled && + (mfd->panel_info->type == MIPI_CMD_PANEL))) { + if (rc) { + pr_err("Failed to turn on fb%d\n", mfd->index); + atomic_dec(&mdp3_res->active_intf_cnt); + } + pm_runtime_put(&mdp3_res->pdev->dev); + } +end: + mutex_unlock(&mdp3_session->lock); + return rc; +} + +static int mdp3_ctrl_off(struct msm_fb_data_type *mfd) +{ + int rc = 0; + bool intf_stopped = true; + struct mdp3_session_data *mdp3_session; + struct mdss_panel_data *panel; + + pr_debug("%s\n", __func__); + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma || + !mdp3_session->intf) { + pr_err("mdp3_ctrl_on no device"); + return -ENODEV; + } + + /* + * Keep a reference to the runtime pm until the overlay is turned + * off, and then release this last reference at the end. This will + * help in distinguishing between idle power collapse versus suspend + * power collapse + */ + pm_runtime_get_sync(&mdp3_res->pdev->dev); + + MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd), + mfd->panel_power_state); + panel = mdp3_session->panel; + mutex_lock(&mdp3_session->lock); + + pr_debug("Requested power state = %d\n", mfd->panel_power_state); + if (mdss_fb_is_power_on_lp(mfd)) { + /* + * Transition to low power + * As display updates are expected in low power mode, + * keep the interface and clocks on. 
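+ * (intf_stopped stays false, so the DMA stop and clock-off paths below are skipped)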
+ */ + intf_stopped = false; + } else { + /* Transition to display off */ + if (!mdp3_session->status) { + pr_debug("fb%d is off already", mfd->index); + goto off_error; + } + if (panel && panel->set_backlight) + panel->set_backlight(panel, 0); + } + + /* + * While transitioning from interactive to low power, + * events need to be sent to the interface so that the + * panel can be configured in low power mode + */ + if (panel->event_handler) + rc = panel->event_handler(panel, MDSS_EVENT_BLANK, + (void *) (long int)mfd->panel_power_state); + if (rc) + pr_err("EVENT_BLANK error (%d)\n", rc); + + if (intf_stopped) { + if (!mdp3_session->clk_on) + mdp3_ctrl_clk_enable(mfd, 1); + /* PP related programming for ctrl off */ + mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P); + mutex_lock(&mdp3_session->dma->pp_lock); + mdp3_session->dma->ccs_config.ccs_dirty = false; + mdp3_session->dma->lut_config.lut_dirty = false; + mutex_unlock(&mdp3_session->dma->pp_lock); + + rc = mdp3_session->dma->stop(mdp3_session->dma, + mdp3_session->intf); + if (rc) + pr_debug("fail to stop the MDP3 dma\n"); + /* Wait to ensure TG to turn off */ + msleep(20); + mfd->panel_info->cont_splash_enabled = 0; + + /* Disable Auto refresh once continuous splash disabled */ + mdp3_autorefresh_disable(mfd->panel_info); + mdp3_splash_done(mfd->panel_info); + + mdp3_irq_deregister(); + } + + if (panel->event_handler) + rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, + (void *) (long int)mfd->panel_power_state); + if (rc) + pr_err("EVENT_PANEL_OFF error (%d)\n", rc); + + if (intf_stopped) { + if (mdp3_session->clk_on) { + pr_debug("%s stop clock\n", __func__); + if (panel->event_handler && + (panel->panel_info.type == MIPI_CMD_PANEL)) { + struct dsi_panel_clk_ctrl clk_ctrl; + + clk_ctrl.state = MDSS_DSI_CLK_OFF; + clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT; + rc |= panel->event_handler(panel, + MDSS_EVENT_PANEL_CLK_CTRL, + (void *)&clk_ctrl); + } + + rc = mdp3_dynamic_clock_gating_ctrl(1); + rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P); + if (rc) + pr_err("mdp clock resource release failed\n"); + } + + mdp3_ctrl_notifier_unregister(mdp3_session, + &mdp3_session->mfd->mdp_sync_pt_data.notifier); + + mdp3_session->vsync_enabled = 0; + atomic_set(&mdp3_session->vsync_countdown, 0); + atomic_set(&mdp3_session->dma_done_cnt, 0); + mdp3_session->clk_on = 0; + mdp3_session->in_splash_screen = 0; + mdp3_res->solid_fill_vote_en = false; + mdp3_session->status = 0; + if (atomic_dec_return(&mdp3_res->active_intf_cnt) != 0) { + pr_warn("active_intf_cnt unbalanced\n"); + atomic_set(&mdp3_res->active_intf_cnt, 0); + } + /* + * Release the pm runtime reference held when + * idle pc feature is not enabled + */ + if (!mdp3_res->idle_pc_enabled || + (mfd->panel_info->type != MIPI_CMD_PANEL)) { + rc = pm_runtime_put(&mdp3_res->pdev->dev); + if (rc) + pr_err("%s: pm_runtime_put failed (rc %d)\n", + __func__, rc); + } + mdp3_bufq_deinit(&mdp3_session->bufq_out); + if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) { + mdp3_session->overlay.id = MSMFB_NEW_REQUEST; + mdp3_bufq_deinit(&mdp3_session->bufq_in); + } + } + + if (mdss_fb_is_power_on_ulp(mfd) && + (mfd->panel.type == MIPI_CMD_PANEL)) { + pr_debug("%s: Disable MDP3 clocks in ULP\n", __func__); + if (!mdp3_session->clk_on) + mdp3_ctrl_clk_enable(mfd, 1); + /* + * STOP DMA transfer first and signal vsync notification + * Before releasing the resource in ULP state. 
+ */ + rc = mdp3_session->dma->stop(mdp3_session->dma, + mdp3_session->intf); + if (rc) + pr_warn("fail to stop the MDP3 dma in ULP\n"); + /* Wait to ensure TG to turn off */ + msleep(20); + /* + * Handle ULP request initiated from fb_pm_suspend. + * For ULP panel power state disabling vsync and set + * vsync_count to zero and Turn off MDP3 clocks + */ + atomic_set(&mdp3_session->vsync_countdown, 0); + mdp3_session->vsync_enabled = 0; + mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0); + mdp3_ctrl_clk_enable(mdp3_session->mfd, 0); + } +off_error: + MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__); + mutex_unlock(&mdp3_session->lock); + /* Release the last reference to the runtime device */ + pm_runtime_put(&mdp3_res->pdev->dev); + + return 0; +} + +int mdp3_ctrl_reset(struct msm_fb_data_type *mfd) +{ + int rc = 0; + struct mdp3_session_data *mdp3_session; + struct mdp3_dma *mdp3_dma; + struct mdss_panel_data *panel; + struct mdp3_notification vsync_client; + + pr_debug("%s\n", __func__); + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma || + !mdp3_session->intf) { + pr_err("%s no device\n", __func__); + return -ENODEV; + } + + panel = mdp3_session->panel; + mdp3_dma = mdp3_session->dma; + mutex_lock(&mdp3_session->lock); + pr_debug("%s idle_pc %s FS_EN %s\n", + __func__, + mdp3_res->idle_pc ? "True":"False", + mdp3_res->fs_ena ? "True":"False"); + if (mdp3_res->idle_pc) { + mdp3_clk_enable(1, 0); + mdp3_dynamic_clock_gating_ctrl(0); + mdp3_qos_remapper_setup(panel); + } + + /*Map the splash addr for VIDEO mode panel before smmu attach*/ + if ((mfd->panel.type == MIPI_VIDEO_PANEL) && + (mdp3_session->in_splash_screen)) { + rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE, + mdp3_res->splash_mem_addr, + mdp3_res->splash_mem_addr, + mdp3_res->splash_mem_size, + IOMMU_READ | IOMMU_NOEXEC); + } + + rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("fail to attach dma iommu\n"); + if (mdp3_res->idle_pc) + mdp3_clk_enable(0, 0); + goto reset_error; + } + + vsync_client = mdp3_dma->vsync_client; + + mdp3_ctrl_intf_init(mfd, mdp3_session->intf); + mdp3_ctrl_dma_init(mfd, mdp3_dma); + mdp3_ppp_init(); + mdp3_ctrl_pp_resume(mfd); + if (vsync_client.handler) + mdp3_dma->vsync_enable(mdp3_dma, &vsync_client); + + if (!mdp3_res->idle_pc) { + mdp3_session->first_commit = true; + mfd->panel_info->cont_splash_enabled = 0; + mdp3_session->in_splash_screen = 0; + mdp3_splash_done(mfd->panel_info); + /* Disable Auto refresh */ + mdp3_autorefresh_disable(mfd->panel_info); + } else { + mdp3_res->idle_pc = false; + mdp3_clk_enable(0, 0); + mdp3_iommu_disable(MDP3_CLIENT_DMA_P); + } + +reset_error: + mutex_unlock(&mdp3_session->lock); + return rc; +} + +static int mdp3_overlay_get(struct msm_fb_data_type *mfd, + struct mdp_overlay *req) +{ + int rc = 0; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + + mutex_lock(&mdp3_session->lock); + + if (mdp3_session->overlay.id == req->id) + *req = mdp3_session->overlay; + else + rc = -EINVAL; + + mutex_unlock(&mdp3_session->lock); + + return rc; +} + +static int mdp3_overlay_set(struct msm_fb_data_type *mfd, + struct mdp_overlay *req) +{ + int rc = 0; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct mdp3_dma *dma = mdp3_session->dma; + struct fb_fix_screeninfo *fix; + struct fb_info *fbi = mfd->fbi; + int stride; + int format; + + fix = &fbi->fix; + stride = req->src.width * ppp_bpp(req->src.format); + format = mdp3_ctrl_get_source_format(req->src.format); + + + 
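/* a mismatched overlay id means the previous overlay was never unset */ +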
if (mdp3_session->overlay.id != req->id) + pr_err("overlay was not released, continue to recover\n"); + /* + * A change in overlay structure will always come with + * MSMFB_NEW_REQUEST for MDP3 + */ + if (req->id == MSMFB_NEW_REQUEST) { + mutex_lock(&mdp3_session->lock); + if (dma->source_config.stride != stride || + dma->source_config.format != format) { + dma->source_config.format = format; + dma->source_config.stride = stride; + dma->output_config.pack_pattern = + mdp3_ctrl_get_pack_pattern(req->src.format); + dma->update_src_cfg = true; + } + mdp3_session->overlay = *req; + mdp3_session->overlay.id = 1; + req->id = 1; + mutex_unlock(&mdp3_session->lock); + } + + return rc; +} + +static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx) +{ + int rc = 0; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct fb_info *fbi = mfd->fbi; + struct fb_fix_screeninfo *fix; + int format; + + fix = &fbi->fix; + format = mdp3_ctrl_get_source_format(mfd->fb_imgType); + mutex_lock(&mdp3_session->lock); + + if (mdp3_session->overlay.id == ndx && ndx == 1) { + mdp3_session->overlay.id = MSMFB_NEW_REQUEST; + mdp3_bufq_deinit(&mdp3_session->bufq_in); + } else { + rc = -EINVAL; + } + + mutex_unlock(&mdp3_session->lock); + + return rc; +} + +static int mdp3_overlay_queue_buffer(struct msm_fb_data_type *mfd, + struct msmfb_overlay_data *req) +{ + int rc; + bool is_panel_type_cmd = false; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct msmfb_data *img = &req->data; + struct mdp3_img_data data; + struct mdp3_dma *dma = mdp3_session->dma; + + memset(&data, 0, sizeof(struct mdp3_img_data)); + if (mfd->panel.type == MIPI_CMD_PANEL) + is_panel_type_cmd = true; + if (is_panel_type_cmd) { + rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("fail to enable iommu\n"); + return rc; + } + } + rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("fail to get overlay buffer\n"); + goto err; + } + + if (data.len < dma->source_config.stride * dma->source_config.height) { + pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n", + data.len, (dma->source_config.stride * + dma->source_config.height)); + mdp3_put_img(&data, MDP3_CLIENT_DMA_P); + rc = -EINVAL; + goto err; + } + rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data); + if (rc) { + pr_err("fail to queue the overlay buffer, buffer drop\n"); + mdp3_put_img(&data, MDP3_CLIENT_DMA_P); + goto err; + } + rc = 0; +err: + if (is_panel_type_cmd) + mdp3_iommu_disable(MDP3_CLIENT_DMA_P); + return rc; +} + +static int mdp3_overlay_play(struct msm_fb_data_type *mfd, + struct msmfb_overlay_data *req) +{ + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + int rc = 0; + + pr_debug("%s req id=%x mem_id=%d\n", + __func__, req->id, req->data.memory_id); + + mutex_lock(&mdp3_session->lock); + + if (mdp3_session->overlay.id == MSMFB_NEW_REQUEST) { + pr_err("overlay play without overlay set first\n"); + mutex_unlock(&mdp3_session->lock); + return -EINVAL; + } + + if (mdss_fb_is_power_on(mfd)) + rc = mdp3_overlay_queue_buffer(mfd, req); + else + rc = -EPERM; + + mutex_unlock(&mdp3_session->lock); + + return rc; +} + +bool update_roi(struct mdp3_rect oldROI, struct mdp_rect newROI) +{ + return ((newROI.x != oldROI.x) || (newROI.y != oldROI.y) || + (newROI.w != oldROI.w) || (newROI.h != oldROI.h)); +} + +bool is_roi_valid(struct mdp3_dma_source source_config, struct mdp_rect roi) +{ + return (roi.w > 0) && (roi.h > 0) && + (roi.x >= source_config.x) && + ((roi.x + roi.w) <= source_config.width) 
&& + (roi.y >= source_config.y) && + ((roi.y + roi.h) <= source_config.height); +} + +static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd, + struct mdp_display_commit *cmt_data) +{ + struct mdp3_session_data *mdp3_session; + struct mdp3_img_data *data; + struct mdss_panel_info *panel_info; + int rc = 0; + static bool splash_done; + struct mdss_panel_data *panel; + + if (!mfd || !mfd->mdp.private1) + return -EINVAL; + + panel_info = mfd->panel_info; + mdp3_session = mfd->mdp.private1; + if (!mdp3_session || !mdp3_session->dma) + return -EINVAL; + + if (mdp3_bufq_count(&mdp3_session->bufq_in) == 0) { + pr_debug("no buffer in queue yet\n"); + return -EPERM; + } + + if (panel_info->partial_update_enabled && + is_roi_valid(mdp3_session->dma->source_config, + cmt_data->l_roi) && + update_roi(mdp3_session->dma->roi, cmt_data->l_roi)) { + mdp3_session->dma->roi.x = cmt_data->l_roi.x; + mdp3_session->dma->roi.y = cmt_data->l_roi.y; + mdp3_session->dma->roi.w = cmt_data->l_roi.w; + mdp3_session->dma->roi.h = cmt_data->l_roi.h; + mdp3_session->dma->update_src_cfg = true; + pr_debug("%s: ROI: x=%d y=%d w=%d h=%d\n", __func__, + mdp3_session->dma->roi.x, + mdp3_session->dma->roi.y, + mdp3_session->dma->roi.w, + mdp3_session->dma->roi.h); + } + + panel = mdp3_session->panel; + mutex_lock(&mdp3_res->fs_idle_pc_lock); + if (mdp3_session->in_splash_screen || + mdp3_res->idle_pc) { + pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__, + mdp3_session->in_splash_screen, mdp3_res->idle_pc); + rc = mdp3_ctrl_reset(mfd); + if (rc) { + pr_err("fail to reset display\n"); + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + return -EINVAL; + } + } + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + + mutex_lock(&mdp3_session->lock); + + if (!mdp3_session->status) { + pr_err("%s, display off!\n", __func__); + mutex_unlock(&mdp3_session->lock); + return -EPERM; + } + + mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN); + data = mdp3_bufq_pop(&mdp3_session->bufq_in); + if (data) { + mdp3_ctrl_reset_countdown(mdp3_session, mfd); + mdp3_ctrl_clk_enable(mfd, 1); + if (mdp3_session->dma->update_src_cfg && + panel_info->partial_update_enabled) { + panel->panel_info.roi.x = mdp3_session->dma->roi.x; + panel->panel_info.roi.y = mdp3_session->dma->roi.y; + panel->panel_info.roi.w = mdp3_session->dma->roi.w; + panel->panel_info.roi.h = mdp3_session->dma->roi.h; + rc = mdp3_session->dma->update(mdp3_session->dma, + (void *)(int)data->addr, + mdp3_session->intf, (void *)panel); + } else { + rc = mdp3_session->dma->update(mdp3_session->dma, + (void *)(int)data->addr, + mdp3_session->intf, NULL); + } + /* This is for the previous frame */ + if (rc < 0) { + mdp3_ctrl_notify(mdp3_session, + MDP_NOTIFY_FRAME_TIMEOUT); + } else { + if (mdp3_ctrl_get_intf_type(mfd) == + MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) { + mdp3_ctrl_notify(mdp3_session, + MDP_NOTIFY_FRAME_DONE); + } + } + mdp3_session->dma_active = 1; + init_completion(&mdp3_session->dma_completion); + mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED); + mdp3_bufq_push(&mdp3_session->bufq_out, data); + } + + if (mdp3_bufq_count(&mdp3_session->bufq_out) > 1) { + mdp3_release_splash_memory(mfd); + data = mdp3_bufq_pop(&mdp3_session->bufq_out); + if (data) + mdp3_put_img(data, MDP3_CLIENT_DMA_P); + } + + if (mdp3_session->first_commit) { + /*wait to ensure frame is sent to panel*/ + if (panel_info->mipi.post_init_delay) + msleep(((1000 / panel_info->mipi.frame_rate) + 1) * + panel_info->mipi.post_init_delay); + else + msleep(1000 / panel_info->mipi.frame_rate); 
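+ /* first frame has been pushed out; clear the flag and send MDSS_EVENT_POST_PANEL_ON */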
+ mdp3_session->first_commit = false; + if (panel) + rc |= panel->event_handler(panel, + MDSS_EVENT_POST_PANEL_ON, NULL); + } + + mdp3_session->vsync_before_commit = 0; + if (!splash_done || mdp3_session->esd_recovery == true) { + if (panel && panel->set_backlight) + panel->set_backlight(panel, panel->panel_info.bl_max); + splash_done = true; + mdp3_session->esd_recovery = false; + } + + /* start vsync tick countdown for cmd mode if vsync isn't enabled */ + if (mfd->panel.type == MIPI_CMD_PANEL && !mdp3_session->vsync_enabled) + mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0); + + mutex_unlock(&mdp3_session->lock); + + mdss_fb_update_notify_update(mfd); + + return 0; +} + +static int mdp3_map_pan_buff_immediate(struct msm_fb_data_type *mfd) +{ + int rc = 0; + unsigned long length; + dma_addr_t addr; + int domain = mfd->mdp.fb_mem_get_iommu_domain(); + + rc = mdss_smmu_map_dma_buf(mfd->fbmem_buf, mfd->fb_table, domain, + &addr, &length, DMA_BIDIRECTIONAL); + if (IS_ERR_VALUE(rc)) + goto err_unmap; + else + mfd->iova = addr; + + pr_debug("%s : smmu map dma buf VA: (%llx) MFD->iova %llx\n", + __func__, (u64) addr, (u64) mfd->iova); + return rc; + +err_unmap: + pr_err("smmu map dma buf failed: (%d)\n", rc); + dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table, + mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL)); + dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment); + dma_buf_put(mfd->fbmem_buf); + return rc; +} + +static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd) +{ + struct fb_info *fbi; + struct mdp3_session_data *mdp3_session; + u32 offset; + int bpp; + struct mdss_panel_info *panel_info; + static bool splash_done; + struct mdss_panel_data *panel; + + int rc; + + pr_debug("%s\n", __func__); + if (!mfd || !mfd->mdp.private1) + return; + + panel_info = mfd->panel_info; + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + if (!mdp3_session || !mdp3_session->dma) + return; + + mutex_lock(&mdp3_res->fs_idle_pc_lock); + if (mdp3_session->in_splash_screen || + mdp3_res->idle_pc) { + pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__, + mdp3_session->in_splash_screen, mdp3_res->idle_pc); + rc = mdp3_ctrl_reset(mfd); + if (rc) { + pr_err("fail to reset display\n"); + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + return; + } + } + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + + mutex_lock(&mdp3_session->lock); + + if (!mdp3_session->status) { + pr_err("%s, display off!\n", __func__); + goto pan_error; + } + + fbi = mfd->fbi; + + bpp = fbi->var.bits_per_pixel / 8; + offset = fbi->var.xoffset * bpp + + fbi->var.yoffset * fbi->fix.line_length; + + if (offset > fbi->fix.smem_len) { + pr_err("invalid fb offset=%u total length=%u\n", + offset, fbi->fix.smem_len); + goto pan_error; + } + + if (mfd->fbi->screen_base) { + mdp3_ctrl_reset_countdown(mdp3_session, mfd); + mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN); + mdp3_ctrl_clk_enable(mfd, 1); + if (mdp3_session->first_commit) { + rc = mdp3_map_pan_buff_immediate(mfd); + if (IS_ERR_VALUE(rc)) + goto pan_error; + } + rc = mdp3_session->dma->update(mdp3_session->dma, + (void *)(int)(mfd->iova + offset), + mdp3_session->intf, NULL); + /* This is for the previous frame */ + if (rc < 0) { + mdp3_ctrl_notify(mdp3_session, + MDP_NOTIFY_FRAME_TIMEOUT); + } else { + if (mdp3_ctrl_get_intf_type(mfd) == + MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) { + mdp3_ctrl_notify(mdp3_session, + MDP_NOTIFY_FRAME_DONE); + } + } + mdp3_session->dma_active = 1; + init_completion(&mdp3_session->dma_completion); + mdp3_ctrl_notify(mdp3_session, 
MDP_NOTIFY_FRAME_FLUSHED); + } else { + pr_debug("%s no memory, stop interface", __func__); + mdp3_clk_enable(1, 0); + mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf); + mdp3_clk_enable(0, 0); + } + + panel = mdp3_session->panel; + if (mdp3_session->first_commit) { + /*wait to ensure frame is sent to panel*/ + if (panel_info->mipi.post_init_delay) + msleep(((1000 / panel_info->mipi.frame_rate) + 1) * + panel_info->mipi.post_init_delay); + else + msleep(1000 / panel_info->mipi.frame_rate); + mdp3_session->first_commit = false; + if (panel) + panel->event_handler(panel, MDSS_EVENT_POST_PANEL_ON, + NULL); + } + + mdp3_session->vsync_before_commit = 0; + if (!splash_done || mdp3_session->esd_recovery == true) { + if (panel && panel->set_backlight) + panel->set_backlight(panel, panel->panel_info.bl_max); + splash_done = true; + mdp3_session->esd_recovery = false; + } + + +pan_error: + mutex_unlock(&mdp3_session->lock); +} + +static int mdp3_set_metadata(struct msm_fb_data_type *mfd, + struct msmfb_metadata *metadata_ptr) +{ + int ret = 0; + + switch (metadata_ptr->op) { + case metadata_op_crc: + ret = mdp3_ctrl_res_req_clk(mfd, 1); + if (ret) { + pr_err("failed to turn on mdp clks\n"); + return ret; + } + ret = mdp3_misr_set(&metadata_ptr->data.misr_request); + ret = mdp3_ctrl_res_req_clk(mfd, 0); + if (ret) { + pr_err("failed to release mdp clks\n"); + return ret; + } + break; + default: + pr_warn("Unsupported request to MDP SET META IOCTL.\n"); + ret = -EINVAL; + break; + } + return ret; +} + +static int mdp3_get_metadata(struct msm_fb_data_type *mfd, + struct msmfb_metadata *metadata) +{ + int ret = 0; + + switch (metadata->op) { + case metadata_op_frame_rate: + metadata->data.panel_frame_rate = + mfd->panel_info->mipi.frame_rate; + break; + case metadata_op_get_caps: + metadata->data.caps.mdp_rev = 305; + metadata->data.caps.rgb_pipes = 0; + metadata->data.caps.vig_pipes = 0; + metadata->data.caps.dma_pipes = 1; + break; + case metadata_op_crc: + ret = mdp3_ctrl_res_req_clk(mfd, 1); + if (ret) { + pr_err("failed to turn on mdp clks\n"); + return ret; + } + ret = mdp3_misr_get(&metadata->data.misr_request); + ret = mdp3_ctrl_res_req_clk(mfd, 0); + if (ret) { + pr_err("failed to release mdp clks\n"); + return ret; + } + break; + case metadata_op_get_ion_fd: + if (mfd->fb_ion_handle && mfd->fb_ion_client) { + get_dma_buf(mfd->fbmem_buf); + metadata->data.fbmem_ionfd = + ion_share_dma_buf_fd(mfd->fb_ion_client, + mfd->fb_ion_handle); + if (metadata->data.fbmem_ionfd < 0) { + dma_buf_put(mfd->fbmem_buf); + pr_err("fd allocation failed. 
fd = %d\n", + metadata->data.fbmem_ionfd); + } + } + break; + default: + pr_warn("Unsupported request to MDP GET META IOCTL.\n"); + ret = -EINVAL; + break; + } + return ret; +} + +int mdp3_validate_start_req(struct mdp_histogram_start_req *req) +{ + if (req->frame_cnt >= MDP_HISTOGRAM_FRAME_COUNT_MAX) { + pr_err("%s invalid req frame_cnt\n", __func__); + return -EINVAL; + } + if (req->bit_mask >= MDP_HISTOGRAM_BIT_MASK_MAX) { + pr_err("%s invalid req bit mask\n", __func__); + return -EINVAL; + } + if (req->block != MDP_BLOCK_DMA_P || + req->num_bins != MDP_HISTOGRAM_BIN_NUM) { + pr_err("mdp3_histogram_start invalid request\n"); + return -EINVAL; + } + return 0; +} + +int mdp3_validate_scale_config(struct mdp_bl_scale_data *data) +{ + if (data->scale > MDP_HISTOGRAM_BL_SCALE_MAX) { + pr_err("%s invalid bl_scale\n", __func__); + return -EINVAL; + } + if (data->min_lvl > MDP_HISTOGRAM_BL_LEVEL_MAX) { + pr_err("%s invalid bl_min_lvl\n", __func__); + return -EINVAL; + } + return 0; +} + +int mdp3_validate_csc_data(struct mdp_csc_cfg_data *data) +{ + int i; + bool mv_valid = false; + + for (i = 0; i < 9; i++) { + if (data->csc_data.csc_mv[i] >= + MDP_HISTOGRAM_CSC_MATRIX_MAX) + return -EINVAL; + if ((!mv_valid) && (data->csc_data.csc_mv[i] != 0)) + mv_valid = true; + } + if (!mv_valid) { + pr_err("%s: black screen data! csc_mv is all 0s\n", __func__); + return -EINVAL; + } + for (i = 0; i < 3; i++) { + if (data->csc_data.csc_pre_bv[i] >= + MDP_HISTOGRAM_CSC_VECTOR_MAX) + return -EINVAL; + if (data->csc_data.csc_post_bv[i] >= + MDP_HISTOGRAM_CSC_VECTOR_MAX) + return -EINVAL; + } + for (i = 0; i < 6; i++) { + if (data->csc_data.csc_pre_lv[i] >= + MDP_HISTOGRAM_CSC_VECTOR_MAX) + return -EINVAL; + if (data->csc_data.csc_post_lv[i] >= + MDP_HISTOGRAM_CSC_VECTOR_MAX) + return -EINVAL; + } + return 0; +} + +static int mdp3_histogram_start(struct mdp3_session_data *session, + struct mdp_histogram_start_req *req) +{ + int ret; + struct mdp3_dma_histogram_config histo_config; + + mutex_lock(&session->lock); + if (!session->status) { + mutex_unlock(&session->lock); + return -EPERM; + } + + pr_debug("%s\n", __func__); + + ret = mdp3_validate_start_req(req); + if (ret) { + mutex_unlock(&session->lock); + return ret; + } + + if (!session->dma->histo_op || + !session->dma->config_histo) { + pr_err("%s not supported\n", __func__); + mutex_unlock(&session->lock); + return -EINVAL; + } + + mutex_lock(&session->histo_lock); + + if (session->histo_status) { + pr_info("%s already started\n", __func__); + mutex_unlock(&session->histo_lock); + mutex_unlock(&session->lock); + return 0; + } + + mdp3_res_update(1, 0, MDP3_CLIENT_DMA_P); + ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_RESET); + if (ret) { + pr_err("%s reset error\n", __func__); + goto histogram_start_err; + } + + histo_config.frame_count = req->frame_cnt; + histo_config.bit_mask = req->bit_mask; + histo_config.auto_clear_en = 1; + histo_config.bit_mask_polarity = 0; + ret = session->dma->config_histo(session->dma, &histo_config); + if (ret) { + pr_err("%s error\n", __func__); + goto histogram_start_err; + } + + ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_START); + if (ret) { + pr_err("%s config error\n", __func__); + goto histogram_start_err; + } + + session->histo_status = 1; + +histogram_start_err: + mdp3_res_update(0, 0, MDP3_CLIENT_DMA_P); + mutex_unlock(&session->histo_lock); + mutex_unlock(&session->lock); + return ret; +} + +static int mdp3_histogram_stop(struct mdp3_session_data *session, + u32 block) +{ + int ret; 
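+
+ /*
+ * Stopping is only supported for the DMA_P histogram block: the
+ * engine is cancelled with the core clock held and histo_status is
+ * cleared under histo_lock so a later collect call sees it as
+ * stopped.
+ */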
+ + pr_debug("%s\n", __func__); + + if (!session->dma->histo_op || block != MDP_BLOCK_DMA_P) { + pr_err("%s not supported\n", __func__); + return -EINVAL; + } + + mutex_lock(&session->histo_lock); + + if (!session->histo_status) { + pr_debug("%s already stopped!", __func__); + ret = 0; + goto histogram_stop_err; + } + + mdp3_clk_enable(1, 0); + ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_CANCEL); + mdp3_clk_enable(0, 0); + if (ret) + pr_err("%s error\n", __func__); + + session->histo_status = 0; + +histogram_stop_err: + mutex_unlock(&session->histo_lock); + return ret; +} + +static int mdp3_histogram_collect(struct mdp3_session_data *session, + struct mdp_histogram_data *hist) +{ + int ret; + struct mdp3_dma_histogram_data *mdp3_histo; + + pr_debug("%s\n", __func__); + if (!session->dma->get_histo) { + pr_err("%s not supported\n", __func__); + return -EINVAL; + } + + mutex_lock(&session->histo_lock); + + if (!session->histo_status) { + pr_debug("%s not started\n", __func__); + mutex_unlock(&session->histo_lock); + return -EPROTO; + } + + mutex_unlock(&session->histo_lock); + + if (!session->clk_on) { + pr_debug("mdp/dsi clock off currently\n"); + return -EPERM; + } + + mdp3_clk_enable(1, 0); + ret = session->dma->get_histo(session->dma); + mdp3_clk_enable(0, 0); + if (ret) { + pr_debug("%s error = %d\n", __func__, ret); + return ret; + } + + mdp3_histo = &session->dma->histo_data; + + ret = copy_to_user(hist->c0, mdp3_histo->r_data, + sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM); + if (ret) + return ret; + + ret = copy_to_user(hist->c1, mdp3_histo->g_data, + sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM); + if (ret) + return ret; + + ret = copy_to_user(hist->c2, mdp3_histo->b_data, + sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM); + if (ret) + return ret; + + ret = copy_to_user(hist->extra_info, mdp3_histo->extra, + sizeof(uint32_t) * 2); + if (ret) + return ret; + + hist->bin_cnt = MDP_HISTOGRAM_BIN_NUM; + hist->block = MDP_BLOCK_DMA_P; + return ret; +} + +static int mdp3_bl_scale_config(struct msm_fb_data_type *mfd, + struct mdp_bl_scale_data *data) +{ + int ret = 0; + int curr_bl; + + mutex_lock(&mfd->bl_lock); + curr_bl = mfd->bl_level; + mfd->bl_scale = data->scale; + mfd->bl_min_lvl = data->min_lvl; + pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale, + mfd->bl_min_lvl); + + /* update current backlight to use new scaling*/ + mdss_fb_set_backlight(mfd, curr_bl); + mutex_unlock(&mfd->bl_lock); + return ret; +} + +static int mdp3_csc_config(struct mdp3_session_data *session, + struct mdp_csc_cfg_data *data) +{ + struct mdp3_dma_color_correct_config config; + struct mdp3_dma_ccs ccs; + int ret = -EINVAL; + + if (!data->csc_data.csc_mv || !data->csc_data.csc_pre_bv || + !data->csc_data.csc_post_bv || !data->csc_data.csc_pre_lv || + !data->csc_data.csc_post_lv) { + pr_err("%s : Invalid csc vectors", __func__); + return -EINVAL; + } + + mutex_lock(&session->lock); + mutex_lock(&session->dma->pp_lock); + session->dma->cc_vect_sel = (session->dma->cc_vect_sel + 1) % 2; + + config.ccs_enable = 1; + config.ccs_sel = session->dma->cc_vect_sel; + config.pre_limit_sel = session->dma->cc_vect_sel; + config.post_limit_sel = session->dma->cc_vect_sel; + config.pre_bias_sel = session->dma->cc_vect_sel; + config.post_bias_sel = session->dma->cc_vect_sel; + config.ccs_dirty = true; + + ccs.mv = data->csc_data.csc_mv; + ccs.pre_bv = data->csc_data.csc_pre_bv; + ccs.post_bv = data->csc_data.csc_post_bv; + ccs.pre_lv = data->csc_data.csc_pre_lv; + ccs.post_lv = data->csc_data.csc_post_lv; + + 
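+ /*
+ * cc_vect_sel was flipped above, so successive calls alternate
+ * between the two CSC register banks; config_ccs() programs the
+ * vectors into the selected bank below with the core clock enabled.
+ */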
/* cache one copy of setting for suspend/resume reconfiguring */ + session->dma->ccs_cache = *data; + + mdp3_clk_enable(1, 0); + ret = session->dma->config_ccs(session->dma, &config, &ccs); + mdp3_clk_enable(0, 0); + mutex_unlock(&session->dma->pp_lock); + mutex_unlock(&session->lock); + return ret; +} + +static int mdp3_pp_ioctl(struct msm_fb_data_type *mfd, + void __user *argp) +{ + int ret = -EINVAL; + struct msmfb_mdp_pp mdp_pp; + struct mdp_lut_cfg_data *lut; + struct mdp3_session_data *mdp3_session; + + if (!mfd || !mfd->mdp.private1) + return -EINVAL; + + mdp3_session = mfd->mdp.private1; + + ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp)); + if (ret) + return ret; + + switch (mdp_pp.op) { + case mdp_bl_scale_cfg: + ret = mdp3_validate_scale_config(&mdp_pp.data.bl_scale_data); + if (ret) { + pr_err("%s: invalid scale config\n", __func__); + break; + } + ret = mdp3_bl_scale_config(mfd, (struct mdp_bl_scale_data *) + &mdp_pp.data.bl_scale_data); + break; + case mdp_op_csc_cfg: + /* Checking state of dyn_pu before programming CSC block */ + if (mdp3_session->dyn_pu_state) { + pr_debug("Partial update feature is enabled.\n"); + return -EPERM; + } + ret = mdp3_validate_csc_data(&(mdp_pp.data.csc_cfg_data)); + if (ret) { + pr_err("%s: invalid csc data\n", __func__); + break; + } + ret = mdp3_csc_config(mdp3_session, + &(mdp_pp.data.csc_cfg_data)); + break; + case mdp_op_lut_cfg: + lut = &mdp_pp.data.lut_cfg_data; + if (lut->lut_type != mdp_lut_rgb) { + pr_err("Lut type %d is not supported", lut->lut_type); + return -EINVAL; + } + if (lut->data.rgb_lut_data.flags & MDP_PP_OPS_READ) + ret = mdp3_ctrl_lut_read(mfd, + &(lut->data.rgb_lut_data)); + else + ret = mdp3_ctrl_lut_config(mfd, + &(lut->data.rgb_lut_data)); + if (ret) + pr_err("RGB LUT ioctl failed\n"); + else + ret = copy_to_user(argp, &mdp_pp, sizeof(mdp_pp)); + break; + + default: + pr_err("Unsupported request to MDP_PP IOCTL.\n"); + ret = -EINVAL; + break; + } + if (!ret) + ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp)); + return ret; +} + +static int mdp3_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd, + void __user *argp) +{ + int ret = -ENOTSUPP; + struct mdp_histogram_data hist; + struct mdp_histogram_start_req hist_req; + u32 block; + struct mdp3_session_data *mdp3_session; + + if (!mfd || !mfd->mdp.private1) + return -EINVAL; + + mdp3_session = mfd->mdp.private1; + + switch (cmd) { + case MSMFB_HISTOGRAM_START: + ret = copy_from_user(&hist_req, argp, sizeof(hist_req)); + if (ret) + return ret; + + ret = mdp3_histogram_start(mdp3_session, &hist_req); + break; + + case MSMFB_HISTOGRAM_STOP: + ret = copy_from_user(&block, argp, sizeof(int)); + if (ret) + return ret; + + ret = mdp3_histogram_stop(mdp3_session, block); + break; + + case MSMFB_HISTOGRAM: + ret = copy_from_user(&hist, argp, sizeof(hist)); + if (ret) + return ret; + + ret = mdp3_histogram_collect(mdp3_session, &hist); + if (!ret) + ret = copy_to_user(argp, &hist, sizeof(hist)); + break; + default: + break; + } + return ret; +} + +static int mdp3_validate_lut_data(struct fb_cmap *cmap) +{ + u32 i = 0; + + if (!cmap || !cmap->red || !cmap->green || !cmap->blue) { + pr_err("Invalid arguments!\n"); + return -EINVAL; + } + + for (i = 0; i < MDP_LUT_SIZE; i++) { + if (cmap->red[i] > 0xFF || cmap->green[i] > 0xFF || + cmap->blue[i] > 0xFF) { + pr_err("LUT value over 255 (limit) at %d index\n", i); + return -EINVAL; + } + } + + return 0; +} + +static inline int mdp3_copy_lut_buffer(struct fb_cmap *dst, struct fb_cmap *src) +{ + if (!dst || !src || 
!dst->red || !dst->blue || !dst->green || + !src->red || !src->green || !src->blue) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + dst->start = src->start; + dst->len = src->len; + + memcpy(dst->red, src->red, MDP_LUT_SIZE * sizeof(u16)); + memcpy(dst->green, src->green, MDP_LUT_SIZE * sizeof(u16)); + memcpy(dst->blue, src->blue, MDP_LUT_SIZE * sizeof(u16)); + return 0; +} + +static int mdp3_alloc_lut_buffer(struct platform_device *pdev, void **cmap) +{ + struct fb_cmap *map; + + map = devm_kzalloc(&pdev->dev, sizeof(struct fb_cmap), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + memset(map, 0, sizeof(struct fb_cmap)); + + map->red = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16), + GFP_KERNEL); + if (map->red == NULL) + goto exit_red; + + memset(map->red, 0, sizeof(u16) * MDP_LUT_SIZE); + + map->green = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16), + GFP_KERNEL); + if (map->green == NULL) + goto exit_green; + + memset(map->green, 0, sizeof(u16) * MDP_LUT_SIZE); + + map->blue = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16), + GFP_KERNEL); + if (map->blue == NULL) + goto exit_blue; + + memset(map->blue, 0, sizeof(u16) * MDP_LUT_SIZE); + + *cmap = map; + return 0; +exit_blue: + devm_kfree(&pdev->dev, map->green); +exit_green: + devm_kfree(&pdev->dev, map->red); +exit_red: + devm_kfree(&pdev->dev, map); + return -ENOMEM; +} + +static void mdp3_free_lut_buffer(struct platform_device *pdev, void **cmap) +{ + struct fb_cmap *map = (struct fb_cmap *)(*cmap); + + if (map == NULL) + return; + + devm_kfree(&pdev->dev, map->blue); + map->blue = NULL; + devm_kfree(&pdev->dev, map->green); + map->green = NULL; + devm_kfree(&pdev->dev, map->red); + map->red = NULL; + devm_kfree(&pdev->dev, map); + map = NULL; +} + +static int mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma) +{ + int i = 0; + u32 r = 0, g = 0, b = 0; + + if (!cmap || !dma || !dma->gc_cmap || !dma->hist_cmap || + !dma->gc_cmap->red || !dma->gc_cmap->green || + !dma->gc_cmap->blue || !dma->hist_cmap->red || + !dma->hist_cmap->green || !dma->hist_cmap->blue) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + for (i = 1; i < MDP_LUT_SIZE; i++) { + r = MIN(dma->gc_cmap->red[i] * dma->hist_cmap->red[i] * + mdp_lut_inverse16[i], 0xFF0000); + g = MIN(dma->gc_cmap->green[i] * dma->hist_cmap->green[i] * + mdp_lut_inverse16[i], 0xFF0000); + b = MIN(dma->gc_cmap->blue[i] * dma->hist_cmap->blue[i] * + mdp_lut_inverse16[i], 0xFF0000); + + cmap->red[i] = (r >> 16) & 0xFF; + cmap->green[i] = (g >> 16) & 0xFF; + cmap->blue[i] = (b >> 16) & 0xFF; + } + return 0; +} + +/* Called from within pp_lock and session lock locked context */ +static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd, + struct fb_cmap *cmap) +{ + int rc = 0; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct mdp3_dma *dma; + struct mdp3_dma_lut_config lut_config; + + dma = mdp3_session->dma; + + if (!dma->config_lut) { + pr_err("Config LUT not defined!\n"); + return -EINVAL; + } + + lut_config.lut_enable = 7; + lut_config.lut_sel = mdp3_session->lut_sel; + lut_config.lut_position = 1; + lut_config.lut_dirty = true; + + if (!mdp3_session->status) { + pr_err("display off!\n"); + return -EPERM; + } + + mdp3_clk_enable(1, 0); + rc = dma->config_lut(dma, &lut_config, cmap); + mdp3_clk_enable(0, 0); + if (rc) + pr_err("%s failed\n", __func__); + + mdp3_session->lut_sel = (mdp3_session->lut_sel + 1) % 2; + return rc; +} + +static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd, + struct 
mdp_rgb_lut_data *cfg) +{ + int rc = 0; + bool data_validated = false; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct mdp3_dma *dma; + struct fb_cmap *cmap; + + dma = mdp3_session->dma; + + if ((cfg->cmap.start > MDP_LUT_SIZE) || + (cfg->cmap.len > MDP_LUT_SIZE) || + (cfg->cmap.start + cfg->cmap.len > MDP_LUT_SIZE)) { + pr_err("Invalid arguments.\n"); + return -EINVAL; + } + + rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **) &cmap); + if (rc) { + pr_err("No memory\n"); + return -ENOMEM; + } + + mutex_lock(&mdp3_session->lock); + mutex_lock(&dma->pp_lock); + rc = copy_from_user(cmap->red + cfg->cmap.start, + cfg->cmap.red, sizeof(u16) * cfg->cmap.len); + rc |= copy_from_user(cmap->green + cfg->cmap.start, + cfg->cmap.green, sizeof(u16) * cfg->cmap.len); + rc |= copy_from_user(cmap->blue + cfg->cmap.start, + cfg->cmap.blue, sizeof(u16) * cfg->cmap.len); + if (rc) { + pr_err("Copying user data failed!\n"); + goto exit_err; + } + + switch (cfg->lut_type) { + case mdp_rgb_lut_gc: + if (cfg->flags & MDP_PP_OPS_DISABLE) { + if (dma->lut_sts & MDP3_LUT_GC_EN) + /* Free GC cmap cache since disabled */ + mdp3_free_lut_buffer(mfd->pdev, + (void **)&dma->gc_cmap); + dma->lut_sts &= ~MDP3_LUT_GC_EN; + } else if (!(dma->lut_sts & MDP3_LUT_GC_EN)) { + /* Check if values sent are valid */ + rc = mdp3_validate_lut_data(cmap); + if (rc) { + pr_err("Invalid GC LUT data\n"); + goto exit_err; + } + data_validated = true; + + /* Allocate GC cmap cache to store values */ + rc = mdp3_alloc_lut_buffer(mfd->pdev, + (void **)&dma->gc_cmap); + if (rc) { + pr_err("GC LUT config failed\n"); + goto exit_err; + } + dma->lut_sts |= MDP3_LUT_GC_EN; + } + /* + * Copy the GC values from userspace to maintain the + * correct values user intended to program in cache. + * The values programmed in HW might factor in presence + * of other LUT modifying features hence can be + * different from these user given values. + */ + if (dma->lut_sts & MDP3_LUT_GC_EN) { + /* Validate LUT data if not yet validated */ + if (!data_validated) { + rc = mdp3_validate_lut_data(cmap); + if (rc) { + pr_err("Invalid GC LUT data\n"); + goto exit_err; + } + } + rc = mdp3_copy_lut_buffer(dma->gc_cmap, cmap); + if (rc) { + pr_err("Could not store GC to cache\n"); + goto exit_err; + } + } + break; + case mdp_rgb_lut_hist: + if (cfg->flags & MDP_PP_OPS_DISABLE) { + if (dma->lut_sts & MDP3_LUT_HIST_EN) + /* Free HIST cmap cache since disabled */ + mdp3_free_lut_buffer(mfd->pdev, + (void **)&dma->hist_cmap); + dma->lut_sts &= ~MDP3_LUT_HIST_EN; + } else if (!(dma->lut_sts & MDP3_LUT_HIST_EN)) { + /* Check if values sent are valid */ + rc = mdp3_validate_lut_data(cmap); + if (rc) { + pr_err("Invalid HIST LUT data\n"); + goto exit_err; + } + data_validated = true; + + /* Allocate HIST cmap cache to store values */ + rc = mdp3_alloc_lut_buffer(mfd->pdev, + (void **)&dma->hist_cmap); + if (rc) { + pr_err("HIST LUT config failed\n"); + goto exit_err; + } + dma->lut_sts |= MDP3_LUT_HIST_EN; + } + /* + * Copy the HIST LUT values from userspace to maintain + * correct values user intended to program in cache. + * The values programmed in HW might factor in presence + * of other LUT modifying features hence can be + * different from these user given values. 
+ */ + if (dma->lut_sts & MDP3_LUT_HIST_EN) { + /* Validate LUT data if not yet validated */ + if (!data_validated) { + rc = mdp3_validate_lut_data(cmap); + if (rc) { + pr_err("Invalid H LUT data\n"); + goto exit_err; + } + } + rc = mdp3_copy_lut_buffer(dma->hist_cmap, cmap); + if (rc) { + pr_err("Could not cache Hist LUT\n"); + goto exit_err; + } + } + break; + default: + pr_err("Invalid lut type: %u\n", cfg->lut_type); + rc = -EINVAL; + goto exit_err; + } + + /* + * In case both GC LUT and HIST LUT need to be programmed the gains + * of each the individual LUTs need to be applied onto a single LUT + * and applied in HW + */ + if ((dma->lut_sts & MDP3_LUT_HIST_EN) && + (dma->lut_sts & MDP3_LUT_GC_EN)) { + rc = mdp3_lut_combine_gain(cmap, dma); + if (rc) { + pr_err("Combining gains failed rc = %d\n", rc); + goto exit_err; + } + } + + rc = mdp3_ctrl_lut_update(mfd, cmap); + if (rc) + pr_err("Updating LUT failed! rc = %d\n", rc); +exit_err: + mutex_unlock(&dma->pp_lock); + mutex_unlock(&mdp3_session->lock); + mdp3_free_lut_buffer(mfd->pdev, (void **) &cmap); + return rc; +} + +static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd, + struct mdp_rgb_lut_data *cfg) +{ + int rc = 0; + struct fb_cmap *cmap; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct mdp3_dma *dma = mdp3_session->dma; + + switch (cfg->lut_type) { + case mdp_rgb_lut_gc: + if (!dma->gc_cmap) { + pr_err("GC not programmed\n"); + return -EPERM; + } + cmap = dma->gc_cmap; + break; + case mdp_rgb_lut_hist: + if (!dma->hist_cmap) { + pr_err("Hist LUT not programmed\n"); + return -EPERM; + } + cmap = dma->hist_cmap; + break; + default: + pr_err("Invalid lut type %u\n", cfg->lut_type); + return -EINVAL; + } + + cfg->cmap.start = cmap->start; + cfg->cmap.len = cmap->len; + + mutex_lock(&dma->pp_lock); + rc = copy_to_user(cfg->cmap.red, cmap->red, sizeof(u16) * + MDP_LUT_SIZE); + rc |= copy_to_user(cfg->cmap.green, cmap->green, sizeof(u16) * + MDP_LUT_SIZE); + rc |= copy_to_user(cfg->cmap.blue, cmap->blue, sizeof(u16) * + MDP_LUT_SIZE); + mutex_unlock(&dma->pp_lock); + return rc; +} + +/* Invoked from ctrl_on with session lock locked context */ +static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd) +{ + struct mdp3_session_data *mdp3_session; + struct mdp3_dma *dma; + struct fb_cmap *cmap; + int rc = 0; + + mdp3_session = mfd->mdp.private1; + dma = mdp3_session->dma; + + mutex_lock(&dma->pp_lock); + /* + * if dma->ccs_config.ccs_enable is set then DMA PP block was enabled + * via user space IOCTL. + * Then set dma->ccs_config.ccs_dirty flag + * Then PP block will be reconfigured when next kickoff comes. + */ + if (dma->ccs_config.ccs_enable) + dma->ccs_config.ccs_dirty = true; + + /* + * If gamma correction was enabled then we program the LUT registers + * with the last configuration data before suspend. If gamma correction + * is not enabled then we do not program anything. The LUT from + * histogram processing algorithms will program hardware based on new + * frame data if they are enabled. 
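+ *
+ * When both the GC and HIST LUTs are enabled, their gains are
+ * combined through mdp3_lut_combine_gain() before the single
+ * hardware LUT update.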
+ */ + if (dma->lut_sts & MDP3_LUT_GC_EN) { + + rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **)&cmap); + if (rc) { + pr_err("No memory for GC LUT, rc = %d\n", rc); + goto exit_err; + } + + if (dma->lut_sts & MDP3_LUT_HIST_EN) { + rc = mdp3_lut_combine_gain(cmap, dma); + if (rc) { + pr_err("Combining the gain failed rc=%d\n", rc); + goto exit_err; + } + } else { + rc = mdp3_copy_lut_buffer(cmap, dma->gc_cmap); + if (rc) { + pr_err("Updating GC failed rc = %d\n", rc); + goto exit_err; + } + } + + rc = mdp3_ctrl_lut_update(mfd, cmap); + if (rc) + pr_err("GC Lut update failed rc=%d\n", rc); +exit_err: + mdp3_free_lut_buffer(mfd->pdev, (void **)&cmap); + } + + mutex_unlock(&dma->pp_lock); +} + +static int mdp3_overlay_prepare(struct msm_fb_data_type *mfd, + struct mdp_overlay_list __user *user_ovlist) +{ + struct mdp_overlay_list ovlist; + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct mdp_overlay *req_list; + struct mdp_overlay *req; + int rc; + + if (!mdp3_session) + return -ENODEV; + + req = &mdp3_session->req_overlay; + + if (copy_from_user(&ovlist, user_ovlist, sizeof(ovlist))) + return -EFAULT; + + if (ovlist.num_overlays != 1) { + pr_err("OV_PREPARE failed: only 1 overlay allowed\n"); + return -EINVAL; + } + + if (copy_from_user(&req_list, ovlist.overlay_list, + sizeof(struct mdp_overlay *))) + return -EFAULT; + + if (copy_from_user(req, req_list, sizeof(*req))) + return -EFAULT; + + rc = mdp3_overlay_set(mfd, req); + if (!IS_ERR_VALUE(rc)) { + if (copy_to_user(req_list, req, sizeof(*req))) + return -EFAULT; + } + + if (put_user(IS_ERR_VALUE(rc) ? 0 : 1, + &user_ovlist->processed_overlays)) + return -EFAULT; + + return rc; +} + +static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd, + u32 cmd, void __user *argp) +{ + int rc = -EINVAL; + struct mdp3_session_data *mdp3_session; + struct msmfb_metadata metadata; + struct mdp_overlay *req = NULL; + struct msmfb_overlay_data ov_data; + int val; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + if (!mdp3_session) + return -ENODEV; + + req = &mdp3_session->req_overlay; + + if (!mdp3_session->status && cmd != MSMFB_METADATA_GET && + cmd != MSMFB_HISTOGRAM_STOP && cmd != MSMFB_HISTOGRAM) { + pr_err("%s, display off!\n", __func__); + return -EPERM; + } + + switch (cmd) { + case MSMFB_MDP_PP: + rc = mdp3_pp_ioctl(mfd, argp); + break; + case MSMFB_HISTOGRAM_START: + case MSMFB_HISTOGRAM_STOP: + case MSMFB_HISTOGRAM: + rc = mdp3_histo_ioctl(mfd, cmd, argp); + break; + + case MSMFB_VSYNC_CTRL: + case MSMFB_OVERLAY_VSYNC_CTRL: + if (!copy_from_user(&val, argp, sizeof(val))) { + mutex_lock(&mdp3_session->lock); + mdp3_session->vsync_enabled = val; + rc = mdp3_ctrl_vsync_enable(mfd, val); + mutex_unlock(&mdp3_session->lock); + } else { + pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n"); + rc = -EFAULT; + } + break; + case MSMFB_ASYNC_BLIT: + mutex_lock(&mdp3_res->fs_idle_pc_lock); + if (mdp3_session->in_splash_screen || mdp3_res->idle_pc) { + pr_debug("%s: reset- in_splash = %d, idle_pc = %d", + __func__, mdp3_session->in_splash_screen, + mdp3_res->idle_pc); + mdp3_ctrl_reset(mfd); + } + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + rc = mdp3_ctrl_async_blit_req(mfd, argp); + break; + case MSMFB_BLIT: + mutex_lock(&mdp3_res->fs_idle_pc_lock); + if (mdp3_session->in_splash_screen) + mdp3_ctrl_reset(mfd); + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + rc = mdp3_ctrl_blit_req(mfd, argp); + break; + case MSMFB_METADATA_GET: + rc = copy_from_user(&metadata, argp, sizeof(metadata)); + if (!rc) + rc = 
mdp3_get_metadata(mfd, &metadata); + if (!rc) + rc = copy_to_user(argp, &metadata, sizeof(metadata)); + if (rc) + pr_err("mdp3_get_metadata failed (%d)\n", rc); + break; + case MSMFB_METADATA_SET: + rc = copy_from_user(&metadata, argp, sizeof(metadata)); + if (!rc) + rc = mdp3_set_metadata(mfd, &metadata); + if (rc) + pr_err("mdp3_set_metadata failed (%d)\n", rc); + break; + case MSMFB_OVERLAY_GET: + rc = copy_from_user(req, argp, sizeof(*req)); + if (!rc) { + rc = mdp3_overlay_get(mfd, req); + + if (!IS_ERR_VALUE(rc)) + rc = copy_to_user(argp, req, sizeof(*req)); + } + if (rc) + pr_err("OVERLAY_GET failed (%d)\n", rc); + break; + case MSMFB_OVERLAY_SET: + rc = copy_from_user(req, argp, sizeof(*req)); + if (!rc) { + rc = mdp3_overlay_set(mfd, req); + + if (!IS_ERR_VALUE(rc)) + rc = copy_to_user(argp, req, sizeof(*req)); + } + if (rc) + pr_err("OVERLAY_SET failed (%d)\n", rc); + break; + case MSMFB_OVERLAY_UNSET: + if (!IS_ERR_VALUE(copy_from_user(&val, argp, sizeof(val)))) + rc = mdp3_overlay_unset(mfd, val); + break; + case MSMFB_OVERLAY_PLAY: + rc = copy_from_user(&ov_data, argp, sizeof(ov_data)); + mutex_lock(&mdp3_res->fs_idle_pc_lock); + if (mdp3_session->in_splash_screen) + mdp3_ctrl_reset(mfd); + mutex_unlock(&mdp3_res->fs_idle_pc_lock); + if (!rc) + rc = mdp3_overlay_play(mfd, &ov_data); + if (rc) + pr_err("OVERLAY_PLAY failed (%d)\n", rc); + break; + case MSMFB_OVERLAY_PREPARE: + rc = mdp3_overlay_prepare(mfd, argp); + break; + default: + break; + } + return rc; +} + +int mdp3_wait_for_dma_done(struct mdp3_session_data *session) +{ + int rc = 0; + + if (session->dma_active) { + rc = wait_for_completion_timeout(&session->dma_completion, + KOFF_TIMEOUT); + if (rc > 0) { + session->dma_active = 0; + rc = 0; + } else if (rc == 0) { + rc = -ETIME; + } + } + return rc; +} + +static int mdp3_update_panel_info(struct msm_fb_data_type *mfd, int mode, + int dest_ctrl) +{ + int ret = 0; + struct mdp3_session_data *mdp3_session; + struct mdss_panel_data *panel; + u32 intf_type = 0; + + if (!mfd || !mfd->mdp.private1) + return -EINVAL; + + mdp3_session = mfd->mdp.private1; + panel = mdp3_session->panel; + + if (!panel->event_handler) + return 0; + ret = panel->event_handler(panel, MDSS_EVENT_DSI_UPDATE_PANEL_DATA, + (void *)(unsigned long)mode); + if (ret) + pr_err("Dynamic switch to %s mode failed!\n", + mode ? 
"command" : "video"); + if (mode == 1) + mfd->panel.type = MIPI_CMD_PANEL; + else + mfd->panel.type = MIPI_VIDEO_PANEL; + + if (mfd->panel.type != MIPI_VIDEO_PANEL) + mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done; + + intf_type = mdp3_ctrl_get_intf_type(mfd); + mdp3_session->intf->cfg.type = intf_type; + mdp3_session->intf->available = 1; + mdp3_session->intf->in_use = 1; + mdp3_res->intf[intf_type].in_use = 1; + + mdp3_intf_init(mdp3_session->intf); + + mdp3_session->dma->output_config.out_sel = intf_type; + mdp3_session->status = mdp3_session->intf->active; + + return 0; +} + +static int mdp3_vsync_retire_setup(struct msm_fb_data_type *mfd) +{ + struct mdp3_session_data *mdp3_session; + struct mdp3_notification retire_client; + char name[24]; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + + snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index); + mfd->mdp_sync_pt_data.timeline_retire = mdss_create_timeline(name); + if (mfd->mdp_sync_pt_data.timeline_retire == NULL) { + pr_err("cannot vsync create time line"); + return -ENOMEM; + } + + /* Add retire vsync handler */ + retire_client.handler = mdp3_vsync_retire_handle_vsync; + retire_client.arg = mdp3_session; + + if (mdp3_session->dma) + mdp3_session->dma->retire_client = retire_client; + + INIT_WORK(&mdp3_session->retire_work, mdp3_vsync_retire_work_handler); + + return 0; +} + +int mdp3_ctrl_init(struct msm_fb_data_type *mfd) +{ + struct device *dev = mfd->fbi->dev; + struct msm_mdp_interface *mdp3_interface = &mfd->mdp; + struct mdp3_session_data *mdp3_session = NULL; + u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO; + int rc; + int splash_mismatch = 0; + struct sched_param sched = { .sched_priority = 16 }; + + pr_info("%s\n", __func__); + rc = mdp3_parse_dt_splash(mfd); + if (rc) + splash_mismatch = 1; + + mdp3_interface->on_fnc = mdp3_ctrl_on; + mdp3_interface->off_fnc = mdp3_ctrl_off; + mdp3_interface->do_histogram = NULL; + mdp3_interface->cursor_update = NULL; + mdp3_interface->dma_fnc = mdp3_ctrl_pan_display; + mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler; + mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff; + mdp3_interface->pre_commit = mdp3_layer_pre_commit; + mdp3_interface->atomic_validate = mdp3_layer_atomic_validate; + mdp3_interface->lut_update = NULL; + mdp3_interface->configure_panel = mdp3_update_panel_info; + mdp3_interface->input_event_handler = NULL; + mdp3_interface->signal_retire_fence = NULL; + + mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL); + if (!mdp3_session) + return -ENOMEM; + + mutex_init(&mdp3_session->lock); + INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off); + + kthread_init_worker(&mdp3_session->worker); + kthread_init_work(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done); + + + mdp3_session->thread = kthread_run(kthread_worker_fn, + &mdp3_session->worker, + "mdp3_dispatch_dma_done"); + + if (IS_ERR(mdp3_session->thread)) { + pr_err("Can't initialize mdp3_dispatch_dma_done thread\n"); + rc = -ENODEV; + goto init_done; + } + + sched_setscheduler(mdp3_session->thread, SCHED_FIFO, &sched); + + atomic_set(&mdp3_session->vsync_countdown, 0); + mutex_init(&mdp3_session->histo_lock); + mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL); + if (!mdp3_session->dma) { + rc = -ENODEV; + goto init_done; + } + + rc = mdp3_dma_init(mdp3_session->dma); + if (rc) { + pr_err("fail to init dma\n"); + goto init_done; + } + + intf_type = mdp3_ctrl_get_intf_type(mfd); + mdp3_session->intf = 
mdp3_get_display_intf(intf_type); + if (!mdp3_session->intf) { + rc = -ENODEV; + goto init_done; + } + rc = mdp3_intf_init(mdp3_session->intf); + if (rc) { + pr_err("fail to init interface\n"); + goto init_done; + } + + mdp3_session->dma->output_config.out_sel = intf_type; + mdp3_session->mfd = mfd; + mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev); + mdp3_session->status = mdp3_session->intf->active; + mdp3_session->overlay.id = MSMFB_NEW_REQUEST; + mdp3_bufq_init(&mdp3_session->bufq_in); + mdp3_bufq_init(&mdp3_session->bufq_out); + mdp3_session->histo_status = 0; + mdp3_session->lut_sel = 0; + BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head); + + init_timer(&mdp3_session->vsync_timer); + mdp3_session->vsync_timer.function = mdp3_vsync_timer_func; + mdp3_session->vsync_timer.data = (u32)mdp3_session; + mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate; + mfd->mdp.private1 = mdp3_session; + init_completion(&mdp3_session->dma_completion); + if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) + mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done; + + rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group); + if (rc) { + pr_err("vsync sysfs group creation failed, ret=%d\n", rc); + goto init_done; + } + rc = sysfs_create_group(&dev->kobj, &generic_attr_group); + if (rc) { + pr_err("generic sysfs group creation failed, ret=%d\n", rc); + goto init_done; + } + + mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd, + "vsync_event"); + if (!mdp3_session->vsync_event_sd) { + pr_err("vsync_event sysfs lookup failed\n"); + rc = -ENODEV; + goto init_done; + } + + mdp3_session->dma->hist_event_sd = sysfs_get_dirent(dev->kobj.sd, + "hist_event"); + if (!mdp3_session->dma->hist_event_sd) { + pr_err("hist_event sysfs lookup failed\n"); + rc = -ENODEV; + goto init_done; + } + + mdp3_session->bl_event_sd = sysfs_get_dirent(dev->kobj.sd, + "bl_event"); + if (!mdp3_session->bl_event_sd) { + pr_err("bl_event sysfs lookup failed\n"); + rc = -ENODEV; + goto init_done; + } + + rc = mdp3_create_sysfs_link(dev); + if (rc) + pr_warn("problem creating link to mdp sysfs\n"); + + /* Enable PM runtime */ + pm_runtime_set_suspended(&mdp3_res->pdev->dev); + pm_runtime_enable(&mdp3_res->pdev->dev); + + kobject_uevent(&dev->kobj, KOBJ_ADD); + pr_debug("vsync kobject_uevent(KOBJ_ADD)\n"); + + if (mdp3_get_cont_spash_en()) { + mdp3_session->clk_on = 1; + mdp3_session->in_splash_screen = 1; + mdp3_ctrl_notifier_register(mdp3_session, + &mdp3_session->mfd->mdp_sync_pt_data.notifier); + } + + /* + * Increment the overlay active count. + * This is needed to ensure that if idle power collapse kicks in + * right away, it would be handled correctly. 
+ */ + atomic_inc(&mdp3_res->active_intf_cnt); + if (splash_mismatch) { + pr_err("splash memory mismatch, stop splash\n"); + mdp3_ctrl_off(mfd); + } + + mdp3_session->vsync_before_commit = true; + mdp3_session->dyn_pu_state = mfd->panel_info->partial_update_enabled; + + if (mfd->panel_info->mipi.dms_mode || + mfd->panel_info->type == MIPI_CMD_PANEL) { + rc = mdp3_vsync_retire_setup(mfd); + if (IS_ERR_VALUE(rc)) { + pr_err("unable to create vsync timeline\n"); + goto init_done; + } + } +init_done: + if (IS_ERR_VALUE(rc)) + kfree(mdp3_session); + + return rc; +} diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..b7b667b69a9ae13740eef83c7289c21780696ca2 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_ctrl.h @@ -0,0 +1,95 @@ +/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef MDP3_CTRL_H +#define MDP3_CTRL_H + +#include +#include +#include +#include +#include + +#include "mdp3.h" +#include "mdp3_dma.h" +#include "mdss_fb.h" +#include "mdss_panel.h" + +#define MDP3_MAX_BUF_QUEUE 8 +#define MDP3_LUT_HIST_EN 0x001 +#define MDP3_LUT_GC_EN 0x002 + +struct mdp3_buffer_queue { + struct mdp3_img_data img_data[MDP3_MAX_BUF_QUEUE]; + int count; + int push_idx; + int pop_idx; +}; + +struct mdp3_session_data { + struct mutex lock; + int status; + struct mdp3_dma *dma; + struct mdss_panel_data *panel; + struct mdp3_intf *intf; + struct msm_fb_data_type *mfd; + ktime_t vsync_time; + struct timer_list vsync_timer; + int vsync_period; + struct kernfs_node *vsync_event_sd; + struct kernfs_node *bl_event_sd; + struct mdp_overlay overlay; + struct mdp_overlay req_overlay; + struct mdp3_buffer_queue bufq_in; + struct mdp3_buffer_queue bufq_out; + struct work_struct clk_off_work; + + struct kthread_work dma_done_work; + struct kthread_worker worker; + struct task_struct *thread; + + atomic_t dma_done_cnt; + int histo_status; + struct mutex histo_lock; + int lut_sel; + bool vsync_before_commit; + bool first_commit; + int clk_on; + struct blocking_notifier_head notifier_head; + + int vsync_enabled; + atomic_t vsync_countdown; /* Used to count down */ + bool in_splash_screen; + bool esd_recovery; + int dyn_pu_state; /* dynamic partial update status */ + u32 bl_events; + + bool dma_active; + struct completion dma_completion; + int (*wait_for_dma_done)(struct mdp3_session_data *session); + + /* For retire fence */ + struct mdss_timeline *vsync_timeline; + int retire_cnt; + struct work_struct retire_work; +}; + +void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq); +int mdp3_ctrl_init(struct msm_fb_data_type *mfd); +int mdp3_bufq_push(struct mdp3_buffer_queue *bufq, + struct mdp3_img_data *data); +int mdp3_ctrl_get_source_format(u32 imgType); +int mdp3_ctrl_get_pack_pattern(u32 imgType); +int mdp3_ctrl_reset(struct msm_fb_data_type *mfd); + +#endif /* MDP3_CTRL_H */ diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c new file mode 100644 index 
0000000000000000000000000000000000000000..8c36ad65694ba2d6b1ea347d914d6a57ad7c3b08 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_dma.c @@ -0,0 +1,1290 @@ +/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include + +#include "mdp3.h" +#include "mdp3_dma.h" +#include "mdp3_hwio.h" +#include "mdss_debug.h" + +#define DMA_STOP_POLL_SLEEP_US 1000 +#define DMA_STOP_POLL_TIMEOUT_US 200000 +#define DMA_HISTO_RESET_TIMEOUT_MS 40 +#define DMA_LUT_CONFIG_MASK 0xfffffbe8 +#define DMA_CCS_CONFIG_MASK 0xfffffc17 +#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000) + +#define VSYNC_SELECT 0x024 +#define VSYNC_TOTAL_LINES_SHIFT 21 +#define VSYNC_COUNT_MASK 0x7ffff +#define VSYNC_THRESH_CONT_SHIFT 16 + +static void mdp3_vsync_intr_handler(int type, void *arg) +{ + struct mdp3_dma *dma = (struct mdp3_dma *)arg; + struct mdp3_notification vsync_client; + struct mdp3_notification retire_client; + unsigned int wait_for_next_vs; + + pr_debug("%s\n", __func__); + spin_lock(&dma->dma_lock); + vsync_client = dma->vsync_client; + retire_client = dma->retire_client; + wait_for_next_vs = !dma->vsync_status; + dma->vsync_status = 0; + if (wait_for_next_vs) + complete(&dma->vsync_comp); + spin_unlock(&dma->dma_lock); + if (vsync_client.handler) { + vsync_client.handler(vsync_client.arg); + } else { + if (wait_for_next_vs) + mdp3_irq_disable_nosync(type); + } + + if (retire_client.handler) + retire_client.handler(retire_client.arg); +} + +static void mdp3_dma_done_intr_handler(int type, void *arg) +{ + struct mdp3_dma *dma = (struct mdp3_dma *)arg; + struct mdp3_notification dma_client; + + pr_debug("%s\n", __func__); + spin_lock(&dma->dma_lock); + dma_client = dma->dma_notifier_client; + complete(&dma->dma_comp); + spin_unlock(&dma->dma_lock); + mdp3_irq_disable_nosync(type); + if (dma_client.handler) + dma_client.handler(dma_client.arg); +} + +static void mdp3_hist_done_intr_handler(int type, void *arg) +{ + struct mdp3_dma *dma = (struct mdp3_dma *)arg; + u32 isr, mask; + + isr = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_STATUS); + mask = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_ENABLE); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_CLEAR, isr); + + isr &= mask; + if (isr == 0) + return; + + if (isr & MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT) { + spin_lock(&dma->histo_lock); + dma->histo_state = MDP3_DMA_HISTO_STATE_READY; + complete(&dma->histo_comp); + spin_unlock(&dma->histo_lock); + mdp3_hist_intr_notify(dma); + } + if (isr & MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT) { + spin_lock(&dma->histo_lock); + dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE; + complete(&dma->histo_comp); + spin_unlock(&dma->histo_lock); + } +} + +void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type) +{ + int irq_bit; + + pr_debug("%s type=%d\n", __func__, type); + + if (dma->dma_sel == MDP3_DMA_P) { + if (type & MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE) + mdp3_irq_enable(MDP3_INTR_DMA_P_HISTO); + + if (type & MDP3_DMA_CALLBACK_TYPE_HIST_DONE) + mdp3_irq_enable(MDP3_INTR_DMA_P_HISTO); + } + + if (dma->output_config.out_sel == 
MDP3_DMA_OUTPUT_SEL_DSI_VIDEO || + dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) { + if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) + mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME); + } else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) { + irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE; + irq_bit += dma->dma_sel; + mdp3_irq_enable(irq_bit); + } + + if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) { + irq_bit = MDP3_INTR_DMA_P_DONE; + if (dma->dma_sel == MDP3_DMA_S) + irq_bit = MDP3_INTR_DMA_S_DONE; + mdp3_irq_enable(irq_bit); + } + } else { + pr_err("%s not supported interface\n", __func__); + } +} + +void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type) +{ + int irq_bit; + + pr_debug("%s type=%d\n", __func__, type); + + if (dma->dma_sel == MDP3_DMA_P) { + if (type & MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE) + mdp3_irq_disable(MDP3_INTR_DMA_P_HISTO); + + if (type & MDP3_DMA_CALLBACK_TYPE_HIST_DONE) + mdp3_irq_disable(MDP3_INTR_DMA_P_HISTO); + } + + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO || + dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) { + if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) + mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME); + } else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) { + irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE; + irq_bit += dma->dma_sel; + mdp3_irq_disable(irq_bit); + /* + * Clear read pointer interrupt before disabling clocks. + * Else pending ISR handling will result in NOC error + * since the clock will be disable after this point. + */ + mdp3_clear_irq(irq_bit); + } + + if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) { + irq_bit = MDP3_INTR_DMA_P_DONE; + if (dma->dma_sel == MDP3_DMA_S) + irq_bit = MDP3_INTR_DMA_S_DONE; + mdp3_irq_disable(irq_bit); + } + } +} + +static int mdp3_dma_callback_setup(struct mdp3_dma *dma) +{ + int rc = 0; + struct mdp3_intr_cb vsync_cb = { + .cb = mdp3_vsync_intr_handler, + .data = dma, + }; + + struct mdp3_intr_cb dma_cb = { + .cb = mdp3_dma_done_intr_handler, + .data = dma, + }; + + + struct mdp3_intr_cb hist_cb = { + .cb = mdp3_hist_done_intr_handler, + .data = dma, + }; + + if (dma->dma_sel == MDP3_DMA_P) + rc = mdp3_set_intr_callback(MDP3_INTR_DMA_P_HISTO, &hist_cb); + + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO || + dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) + rc |= mdp3_set_intr_callback(MDP3_INTR_LCDC_START_OF_FRAME, + &vsync_cb); + else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + int irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE; + + irq_bit += dma->dma_sel; + rc |= mdp3_set_intr_callback(irq_bit, &vsync_cb); + irq_bit = MDP3_INTR_DMA_P_DONE; + if (dma->dma_sel == MDP3_DMA_S) + irq_bit = MDP3_INTR_DMA_S_DONE; + rc |= mdp3_set_intr_callback(irq_bit, &dma_cb); + } else { + pr_err("%s not supported interface\n", __func__); + rc = -ENODEV; + } + + return rc; +} + +static void mdp3_dma_vsync_enable(struct mdp3_dma *dma, + struct mdp3_notification *vsync_client) +{ + unsigned long flag; + int updated = 0; + int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC; + + pr_debug("%s\n", __func__); + + spin_lock_irqsave(&dma->dma_lock, flag); + if (vsync_client) { + if (dma->vsync_client.handler != vsync_client->handler) { + dma->vsync_client = *vsync_client; + updated = 1; + } + } else { + if (dma->vsync_client.handler) { + dma->vsync_client.handler = NULL; + dma->vsync_client.arg = NULL; + updated = 1; + } + } + spin_unlock_irqrestore(&dma->dma_lock, 
flag); + + if (updated) { + if (vsync_client && vsync_client->handler) + mdp3_dma_callback_enable(dma, cb_type); + else + mdp3_dma_callback_disable(dma, cb_type); + } +} + +static void mdp3_dma_done_notifier(struct mdp3_dma *dma, + struct mdp3_notification *dma_client) +{ + unsigned long flag; + + spin_lock_irqsave(&dma->dma_lock, flag); + if (dma_client) { + dma->dma_notifier_client = *dma_client; + } else { + dma->dma_notifier_client.handler = NULL; + dma->dma_notifier_client.arg = NULL; + } + spin_unlock_irqrestore(&dma->dma_lock, flag); +} + +int mdp3_dma_sync_config(struct mdp3_dma *dma, + struct mdp3_dma_source *source_config, struct mdp3_tear_check *te) +{ + u32 vsync_clk_speed_hz, vclks_line, cfg; + int porch = source_config->vporch; + int height = source_config->height; + int total_lines = height + porch; + int dma_sel = dma->dma_sel; + + vsync_clk_speed_hz = MDP_VSYNC_CLK_RATE; + + cfg = total_lines << VSYNC_TOTAL_LINES_SHIFT; + total_lines *= te->frame_rate; + + vclks_line = (total_lines) ? vsync_clk_speed_hz / total_lines : 0; + + cfg |= BIT(19); + if (te->hw_vsync_mode) + cfg |= BIT(20); + + if (te->refx100) { + vclks_line = vclks_line * te->frame_rate * + 100 / te->refx100; + } else { + pr_warn("refx100 cannot be zero! Use 6000 as default\n"); + vclks_line = vclks_line * te->frame_rate * + 100 / 6000; + } + + cfg |= (vclks_line & VSYNC_COUNT_MASK); + + MDP3_REG_WRITE(MDP3_REG_SYNC_CONFIG_0 + dma_sel, cfg); + MDP3_REG_WRITE(MDP3_REG_VSYNC_SEL, VSYNC_SELECT); + MDP3_REG_WRITE(MDP3_REG_PRIMARY_VSYNC_INIT_VAL + dma_sel, + te->vsync_init_val); + MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, te->rd_ptr_irq); + MDP3_REG_WRITE(MDP3_REG_SYNC_THRESH_0 + dma_sel, + ((te->sync_threshold_continue << VSYNC_THRESH_CONT_SHIFT) | + te->sync_threshold_start)); + MDP3_REG_WRITE(MDP3_REG_PRIMARY_START_P0S + dma_sel, te->start_pos); + MDP3_REG_WRITE(MDP3_REG_TEAR_CHECK_EN, te->tear_check_en); + return 0; +} + +static int mdp3_dmap_config(struct mdp3_dma *dma, + struct mdp3_dma_source *source_config, + struct mdp3_dma_output_config *output_config, + bool splash_screen_active) +{ + u32 dma_p_cfg_reg, dma_p_size, dma_p_out_xy; + + dma_p_cfg_reg = source_config->format << 25; + if (output_config->dither_en) + dma_p_cfg_reg |= BIT(24); + dma_p_cfg_reg |= output_config->out_sel << 19; + dma_p_cfg_reg |= output_config->bit_mask_polarity << 18; + dma_p_cfg_reg |= output_config->color_components_flip << 14; + dma_p_cfg_reg |= output_config->pack_pattern << 8; + dma_p_cfg_reg |= output_config->pack_align << 7; + dma_p_cfg_reg |= output_config->color_comp_out_bits; + + dma_p_size = source_config->width | (source_config->height << 16); + dma_p_out_xy = source_config->x | (source_config->y << 16); + if (!splash_screen_active) { + MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg); + MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size); + MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, + (u32)source_config->buf); + MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, + source_config->stride); + MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy); + MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x40); + } + + dma->source_config = *source_config; + dma->output_config = *output_config; + + if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD) + mdp3_irq_enable(MDP3_INTR_LCDC_UNDERFLOW); + + mdp3_dma_callback_setup(dma); + return 0; +} + +static void mdp3_dmap_config_source(struct mdp3_dma *dma) +{ + struct mdp3_dma_source *source_config = &dma->source_config; + u32 dma_p_cfg_reg, dma_p_size; + + dma_p_cfg_reg = 
MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG); + dma_p_cfg_reg &= ~MDP3_DMA_IBUF_FORMAT_MASK; + dma_p_cfg_reg |= source_config->format << 25; + dma_p_cfg_reg &= ~MDP3_DMA_PACK_PATTERN_MASK; + dma_p_cfg_reg |= dma->output_config.pack_pattern << 8; + + dma_p_size = dma->roi.w | (dma->roi.h << 16); + + MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg); + MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size); + MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, source_config->stride); +} + +static int mdp3_dmas_config(struct mdp3_dma *dma, + struct mdp3_dma_source *source_config, + struct mdp3_dma_output_config *output_config, + bool splash_screen_active) +{ + u32 dma_s_cfg_reg, dma_s_size, dma_s_out_xy; + + dma_s_cfg_reg = source_config->format << 25; + if (output_config->dither_en) + dma_s_cfg_reg |= BIT(24); + dma_s_cfg_reg |= output_config->out_sel << 19; + dma_s_cfg_reg |= output_config->bit_mask_polarity << 18; + dma_s_cfg_reg |= output_config->color_components_flip << 14; + dma_s_cfg_reg |= output_config->pack_pattern << 8; + dma_s_cfg_reg |= output_config->pack_align << 7; + dma_s_cfg_reg |= output_config->color_comp_out_bits; + + dma_s_size = source_config->width | (source_config->height << 16); + dma_s_out_xy = source_config->x | (source_config->y << 16); + + if (!splash_screen_active) { + MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg); + MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size); + MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, + (u32)source_config->buf); + MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, + source_config->stride); + MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy); + MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10); + } + dma->source_config = *source_config; + dma->output_config = *output_config; + + mdp3_dma_callback_setup(dma); + return 0; +} + +static void mdp3_dmas_config_source(struct mdp3_dma *dma) +{ + struct mdp3_dma_source *source_config = &dma->source_config; + u32 dma_s_cfg_reg, dma_s_size; + + dma_s_cfg_reg = MDP3_REG_READ(MDP3_REG_DMA_S_CONFIG); + dma_s_cfg_reg &= ~MDP3_DMA_IBUF_FORMAT_MASK; + dma_s_cfg_reg |= source_config->format << 25; + + dma_s_size = source_config->width | (source_config->height << 16); + + MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg); + MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size); + MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, source_config->stride); +} + +static int mdp3_dmap_cursor_config(struct mdp3_dma *dma, + struct mdp3_dma_cursor *cursor) +{ + u32 cursor_size, cursor_pos, blend_param, trans_mask; + + cursor_size = cursor->width | (cursor->height << 16); + cursor_pos = cursor->x | (cursor->y << 16); + trans_mask = 0; + if (cursor->blend_config.mode == MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA) { + blend_param = cursor->blend_config.constant_alpha << 24; + } else if (cursor->blend_config.mode == + MDP3_DMA_CURSOR_BLEND_COLOR_KEYING) { + blend_param = cursor->blend_config.transparent_color; + trans_mask = cursor->blend_config.transparency_mask; + } else { + blend_param = 0; + } + + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_FORMAT, cursor->format); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_SIZE, cursor_size); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BUF_ADDR, (u32)cursor->buf); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG, + cursor->blend_config.mode); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_PARAM, blend_param); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK, trans_mask); + dma->cursor = *cursor; + return 0; +} + +static int mdp3_dmap_ccs_config_internal(struct 
mdp3_dma *dma, + struct mdp3_dma_color_correct_config *config, + struct mdp3_dma_ccs *ccs) +{ + int i; + u32 addr; + + if (!ccs) + return -EINVAL; + + if (config->ccs_enable) { + addr = MDP3_REG_DMA_P_CSC_MV1; + if (config->ccs_sel) + addr = MDP3_REG_DMA_P_CSC_MV2; + for (i = 0; i < 9; i++) { + MDP3_REG_WRITE(addr, ccs->mv[i]); + addr += 4; + } + + addr = MDP3_REG_DMA_P_CSC_PRE_BV1; + if (config->pre_bias_sel) + addr = MDP3_REG_DMA_P_CSC_PRE_BV2; + for (i = 0; i < 3; i++) { + MDP3_REG_WRITE(addr, ccs->pre_bv[i]); + addr += 4; + } + + addr = MDP3_REG_DMA_P_CSC_POST_BV1; + if (config->post_bias_sel) + addr = MDP3_REG_DMA_P_CSC_POST_BV2; + for (i = 0; i < 3; i++) { + MDP3_REG_WRITE(addr, ccs->post_bv[i]); + addr += 4; + } + + addr = MDP3_REG_DMA_P_CSC_PRE_LV1; + if (config->pre_limit_sel) + addr = MDP3_REG_DMA_P_CSC_PRE_LV2; + for (i = 0; i < 6; i++) { + MDP3_REG_WRITE(addr, ccs->pre_lv[i]); + addr += 4; + } + + addr = MDP3_REG_DMA_P_CSC_POST_LV1; + if (config->post_limit_sel) + addr = MDP3_REG_DMA_P_CSC_POST_LV2; + for (i = 0; i < 6; i++) { + MDP3_REG_WRITE(addr, ccs->post_lv[i]); + addr += 4; + } + } + return 0; +} + +static void mdp3_ccs_update(struct mdp3_dma *dma, bool from_kickoff) +{ + u32 cc_config; + bool ccs_updated = false, lut_updated = false; + struct mdp3_dma_ccs ccs; + + cc_config = MDP3_REG_READ(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG); + + if (dma->ccs_config.ccs_dirty) { + cc_config &= DMA_CCS_CONFIG_MASK; + if (dma->ccs_config.ccs_enable) + cc_config |= BIT(3); + else + cc_config &= ~BIT(3); + cc_config |= dma->ccs_config.ccs_sel << 5; + cc_config |= dma->ccs_config.pre_bias_sel << 6; + cc_config |= dma->ccs_config.post_bias_sel << 7; + cc_config |= dma->ccs_config.pre_limit_sel << 8; + cc_config |= dma->ccs_config.post_limit_sel << 9; + /* + * CCS dirty flag should be reset when call is made from frame + * kickoff, or else upon resume the flag would be dirty and LUT + * config could call this function thereby causing no register + * programming for CCS, which will cause screen to go dark + */ + if (from_kickoff) + dma->ccs_config.ccs_dirty = false; + ccs_updated = true; + } + + if (dma->lut_config.lut_dirty) { + cc_config &= DMA_LUT_CONFIG_MASK; + cc_config |= dma->lut_config.lut_enable; + cc_config |= dma->lut_config.lut_position << 4; + cc_config |= dma->lut_config.lut_sel << 10; + dma->lut_config.lut_dirty = false; + lut_updated = true; + } + + if (ccs_updated && from_kickoff) { + ccs.mv = dma->ccs_cache.csc_data.csc_mv; + ccs.pre_bv = dma->ccs_cache.csc_data.csc_pre_bv; + ccs.post_bv = dma->ccs_cache.csc_data.csc_post_bv; + ccs.pre_lv = dma->ccs_cache.csc_data.csc_pre_lv; + ccs.post_lv = dma->ccs_cache.csc_data.csc_post_lv; + mdp3_dmap_ccs_config_internal(dma, &dma->ccs_config, &ccs); + } + + if (lut_updated || ccs_updated) { + MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config); + /* + * Make sure ccs configuration update is done before continuing + * with the DMA transfer + */ + wmb(); /* ensure write is finished before progressing */ + } +} + +static int mdp3_dmap_ccs_config(struct mdp3_dma *dma, + struct mdp3_dma_color_correct_config *config, + struct mdp3_dma_ccs *ccs) +{ + mdp3_dmap_ccs_config_internal(dma, config, ccs); + + dma->ccs_config = *config; + + if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD) + mdp3_ccs_update(dma, false); + + return 0; +} + +static int mdp3_dmap_lut_config(struct mdp3_dma *dma, + struct mdp3_dma_lut_config *config, + struct fb_cmap *cmap) +{ + u32 addr, color; + int i; + + if (config->lut_enable && cmap) { + addr 
= MDP3_REG_DMA_P_CSC_LUT1; + if (config->lut_sel) + addr = MDP3_REG_DMA_P_CSC_LUT2; + + for (i = 0; i < MDP_LUT_SIZE; i++) { + color = cmap->green[i] & 0xff; + color |= (cmap->red[i] & 0xff) << 8; + color |= (cmap->blue[i] & 0xff) << 16; + MDP3_REG_WRITE(addr, color); + addr += 4; + } + } + + dma->lut_config = *config; + + if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD) + mdp3_ccs_update(dma, false); + + return 0; +} + +static int mdp3_dmap_histo_config(struct mdp3_dma *dma, + struct mdp3_dma_histogram_config *histo_config) +{ + unsigned long flag; + u32 histo_bit_mask = 0, histo_control = 0; + u32 histo_isr_mask = MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT | + MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT; + + spin_lock_irqsave(&dma->histo_lock, flag); + + if (histo_config->bit_mask_polarity) + histo_bit_mask = BIT(31); + histo_bit_mask |= histo_config->bit_mask; + + if (histo_config->auto_clear_en) + histo_control = BIT(0); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_FRAME_CNT, + histo_config->frame_count); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_BIT_MASK, histo_bit_mask); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CONTROL, histo_control); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, histo_isr_mask); + + spin_unlock_irqrestore(&dma->histo_lock, flag); + + dma->histogram_config = *histo_config; + return 0; +} + +int dma_bpp(int format) +{ + int bpp; + + switch (format) { + case MDP3_DMA_IBUF_FORMAT_RGB888: + bpp = 3; + break; + case MDP3_DMA_IBUF_FORMAT_RGB565: + bpp = 2; + break; + case MDP3_DMA_IBUF_FORMAT_XRGB8888: + bpp = 4; + break; + default: + bpp = 0; + } + return bpp; +} + +static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf, + struct mdp3_intf *intf, void *data) +{ + unsigned long flag; + int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC; + struct mdss_panel_data *panel; + int rc = 0; + int retry_count = 2; + + ATRACE_BEGIN(__func__); + pr_debug("%s\n", __func__); + + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE; + if (intf->active) { + ATRACE_BEGIN("mdp3_wait_for_dma_comp"); +retry_dma_done: + rc = wait_for_completion_timeout(&dma->dma_comp, + KOFF_TIMEOUT); + if (rc <= 0 && --retry_count) { + int vsync_status; + + vsync_status = (1 << MDP3_INTR_DMA_P_DONE) & + MDP3_REG_READ(MDP3_REG_INTR_STATUS); + if (!vsync_status) { + pr_err("%s: cmd timeout retry cnt %d\n", + __func__, retry_count); + goto retry_dma_done; + } + rc = -1; + } + ATRACE_END("mdp3_wait_for_dma_comp"); + } + } + if (dma->update_src_cfg) { + if (dma->output_config.out_sel == + MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active) + pr_err("configuring dma source while it is active\n"); + dma->dma_config_source(dma); + if (data) { + panel = (struct mdss_panel_data *)data; + if (panel->event_handler) { + panel->event_handler(panel, + MDSS_EVENT_ENABLE_PARTIAL_ROI, NULL); + panel->event_handler(panel, + MDSS_EVENT_DSI_STREAM_SIZE, NULL); + } + } + dma->update_src_cfg = false; + } + mutex_lock(&dma->pp_lock); + if (dma->ccs_config.ccs_dirty) + mdp3_ccs_update(dma, true); + mutex_unlock(&dma->pp_lock); + spin_lock_irqsave(&dma->dma_lock, flag); + MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)(buf + + dma->roi.y * dma->source_config.stride + + dma->roi.x * dma_bpp(dma->source_config.format))); + dma->source_config.buf = (int)buf; + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) + MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 1); + + if (!intf->active) { + pr_debug("%s start interface\n", __func__); + intf->start(intf); + } + + mb(); /* make sure everything is written 
before enable */ + dma->vsync_status = MDP3_REG_READ(MDP3_REG_INTR_STATUS) & + (1 << MDP3_INTR_LCDC_START_OF_FRAME); + init_completion(&dma->vsync_comp); + spin_unlock_irqrestore(&dma->dma_lock, flag); + + mdp3_dma_callback_enable(dma, cb_type); + pr_debug("%s wait for vsync_comp\n", __func__); + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) { + ATRACE_BEGIN("mdp3_wait_for_vsync_comp"); +retry_vsync: + rc = wait_for_completion_timeout(&dma->vsync_comp, + KOFF_TIMEOUT); + if (rc <= 0 && --retry_count) { + int vsync = MDP3_REG_READ(MDP3_REG_INTR_STATUS) & + (1 << MDP3_INTR_LCDC_START_OF_FRAME); + + if (!vsync) { + pr_err("%s trying again count = %d\n", + __func__, retry_count); + goto retry_vsync; + } + rc = -1; + } + ATRACE_END("mdp3_wait_for_vsync_comp"); + } + pr_debug("%s wait for vsync_comp out\n", __func__); + ATRACE_END(__func__); + return rc; +} + +static int mdp3_dmas_update(struct mdp3_dma *dma, void *buf, + struct mdp3_intf *intf, void *data) +{ + unsigned long flag; + int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC; + + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE; + if (intf->active) + wait_for_completion_killable(&dma->dma_comp); + } + + spin_lock_irqsave(&dma->dma_lock, flag); + MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)buf); + dma->source_config.buf = (int)buf; + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) + MDP3_REG_WRITE(MDP3_REG_DMA_S_START, 1); + + if (!intf->active) { + pr_debug("%s start interface\n", __func__); + intf->start(intf); + } + + wmb(); /* ensure write is finished before progressing */ + init_completion(&dma->vsync_comp); + spin_unlock_irqrestore(&dma->dma_lock, flag); + + mdp3_dma_callback_enable(dma, cb_type); + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) + wait_for_completion_killable(&dma->vsync_comp); + return 0; +} + +static int mdp3_dmap_cursor_update(struct mdp3_dma *dma, int x, int y) +{ + u32 cursor_pos; + + cursor_pos = x | (y << 16); + MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos); + dma->cursor.x = x; + dma->cursor.y = y; + return 0; +} + +static int mdp3_dmap_histo_get(struct mdp3_dma *dma) +{ + int i, state, timeout, ret; + u32 addr; + unsigned long flag; + + spin_lock_irqsave(&dma->histo_lock, flag); + state = dma->histo_state; + spin_unlock_irqrestore(&dma->histo_lock, flag); + + if (state != MDP3_DMA_HISTO_STATE_START && + state != MDP3_DMA_HISTO_STATE_READY) { + pr_err("%s invalid state %d\n", __func__, state); + return -EINVAL; + } + + timeout = HIST_WAIT_TIMEOUT(dma->histogram_config.frame_count); + ret = wait_for_completion_killable_timeout(&dma->histo_comp, timeout); + + if (ret == 0) { + pr_debug("%s time out\n", __func__); + ret = -ETIMEDOUT; + } else if (ret < 0) { + pr_err("%s interrupted\n", __func__); + } + + if (ret < 0) + return ret; + + if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) { + pr_debug("%s dma shut down\n", __func__); + return -EPERM; + } + + addr = MDP3_REG_DMA_P_HIST_R_DATA; + for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) { + dma->histo_data.r_data[i] = MDP3_REG_READ(addr); + addr += 4; + } + + addr = MDP3_REG_DMA_P_HIST_G_DATA; + for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) { + dma->histo_data.g_data[i] = MDP3_REG_READ(addr); + addr += 4; + } + + addr = MDP3_REG_DMA_P_HIST_B_DATA; + for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) { + dma->histo_data.b_data[i] = MDP3_REG_READ(addr); + addr += 4; + } + + dma->histo_data.extra[0] = + MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0); + 
dma->histo_data.extra[1] = + MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1); + + spin_lock_irqsave(&dma->histo_lock, flag); + init_completion(&dma->histo_comp); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1); + wmb(); /* ensure write is finished before progressing */ + dma->histo_state = MDP3_DMA_HISTO_STATE_START; + spin_unlock_irqrestore(&dma->histo_lock, flag); + + return 0; +} + +static int mdp3_dmap_histo_start(struct mdp3_dma *dma) +{ + unsigned long flag; + + if (dma->histo_state != MDP3_DMA_HISTO_STATE_IDLE) + return -EINVAL; + + spin_lock_irqsave(&dma->histo_lock, flag); + + init_completion(&dma->histo_comp); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1); + wmb(); /* ensure write is finished before progressing */ + dma->histo_state = MDP3_DMA_HISTO_STATE_START; + + spin_unlock_irqrestore(&dma->histo_lock, flag); + + mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_DONE); + return 0; + +} + +static int mdp3_dmap_histo_reset(struct mdp3_dma *dma) +{ + unsigned long flag; + int ret; + + spin_lock_irqsave(&dma->histo_lock, flag); + + init_completion(&dma->histo_comp); + + + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, BIT(0)|BIT(1)); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1); + wmb(); /* ensure write is finished before progressing */ + dma->histo_state = MDP3_DMA_HISTO_STATE_RESET; + + spin_unlock_irqrestore(&dma->histo_lock, flag); + + mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE); + ret = wait_for_completion_killable_timeout(&dma->histo_comp, + msecs_to_jiffies(DMA_HISTO_RESET_TIMEOUT_MS)); + + if (ret == 0) { + pr_err("%s timed out\n", __func__); + ret = -ETIMEDOUT; + } else if (ret < 0) { + pr_err("%s interrupted\n", __func__); + } else { + ret = 0; + } + mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE); + + return ret; +} + +static int mdp3_dmap_histo_stop(struct mdp3_dma *dma) +{ + unsigned long flag; + int cb_type = MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE | + MDP3_DMA_CALLBACK_TYPE_HIST_DONE; + + spin_lock_irqsave(&dma->histo_lock, flag); + + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CANCEL_REQ, 1); + MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, 0); + wmb(); /* ensure write is finished before progressing */ + dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE; + complete(&dma->histo_comp); + + spin_unlock_irqrestore(&dma->histo_lock, flag); + + mdp3_dma_callback_disable(dma, cb_type); + return 0; +} + +static int mdp3_dmap_histo_op(struct mdp3_dma *dma, u32 op) +{ + int ret; + + switch (op) { + case MDP3_DMA_HISTO_OP_START: + ret = mdp3_dmap_histo_start(dma); + break; + case MDP3_DMA_HISTO_OP_STOP: + case MDP3_DMA_HISTO_OP_CANCEL: + ret = mdp3_dmap_histo_stop(dma); + break; + case MDP3_DMA_HISTO_OP_RESET: + ret = mdp3_dmap_histo_reset(dma); + break; + default: + ret = -EINVAL; + } + return ret; +} + +bool mdp3_dmap_busy(void) +{ + u32 val; + + val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS); + pr_err("%s DMAP Status %s\n", __func__, + (val & MDP3_DMA_P_BUSY_BIT) ? "BUSY":"IDLE"); + return val & MDP3_DMA_P_BUSY_BIT; +} + +/* + * During underrun DMA_P registers are reset. 
Reprogramming CSC to prevent + * black screen + */ +static void mdp3_dmap_underrun_worker(struct work_struct *work) +{ + struct mdp3_dma *dma; + + dma = container_of(work, struct mdp3_dma, underrun_work); + mutex_lock(&dma->pp_lock); + if (dma->ccs_config.ccs_enable && dma->ccs_config.ccs_dirty) { + dma->cc_vect_sel = (dma->cc_vect_sel + 1) % 2; + dma->ccs_config.ccs_sel = dma->cc_vect_sel; + dma->ccs_config.pre_limit_sel = dma->cc_vect_sel; + dma->ccs_config.post_limit_sel = dma->cc_vect_sel; + dma->ccs_config.pre_bias_sel = dma->cc_vect_sel; + dma->ccs_config.post_bias_sel = dma->cc_vect_sel; + mdp3_ccs_update(dma, true); + } + mutex_unlock(&dma->pp_lock); +} + +static int mdp3_dma_start(struct mdp3_dma *dma, struct mdp3_intf *intf) +{ + unsigned long flag; + int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC; + u32 dma_start_offset = MDP3_REG_DMA_P_START; + + if (dma->dma_sel == MDP3_DMA_P) + dma_start_offset = MDP3_REG_DMA_P_START; + else if (dma->dma_sel == MDP3_DMA_S) + dma_start_offset = MDP3_REG_DMA_S_START; + else + return -EINVAL; + + spin_lock_irqsave(&dma->dma_lock, flag); + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) { + cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE; + MDP3_REG_WRITE(dma_start_offset, 1); + } + + intf->start(intf); + wmb(); /* ensure write is finished before progressing */ + init_completion(&dma->vsync_comp); + spin_unlock_irqrestore(&dma->dma_lock, flag); + + if (dma->dma_sel == MDP3_DMA_P && dma->has_panic_ctrl) + MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, BIT(0)); + + mdp3_dma_callback_enable(dma, cb_type); + pr_debug("%s wait for vsync in\n", __func__); + wait_for_completion_killable(&dma->vsync_comp); + pr_debug("%s wait for vsync out\n", __func__); + return 0; +} + +static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf) +{ + int ret = 0; + u32 status, display_status_bit; + + if (dma->dma_sel == MDP3_DMA_P) + display_status_bit = BIT(6); + else if (dma->dma_sel == MDP3_DMA_S) + display_status_bit = BIT(7); + else + return -EINVAL; + + if (dma->dma_sel == MDP3_DMA_P && dma->has_panic_ctrl) + MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0); + + if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) + display_status_bit |= BIT(11); + + intf->stop(intf); + ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS), + status, + ((status & display_status_bit) == 0), + DMA_STOP_POLL_SLEEP_US, + DMA_STOP_POLL_TIMEOUT_US); + + mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC | + MDP3_DMA_CALLBACK_TYPE_DMA_DONE); + mdp3_irq_disable(MDP3_INTR_LCDC_UNDERFLOW); + + MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0); + MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff); + + init_completion(&dma->dma_comp); + dma->vsync_client.handler = NULL; + return ret; +} + +int mdp3_dma_init(struct mdp3_dma *dma) +{ + int ret = 0; + + pr_debug("%s\n", __func__); + switch (dma->dma_sel) { + case MDP3_DMA_P: + dma->dma_config = mdp3_dmap_config; + dma->dma_sync_config = mdp3_dma_sync_config; + dma->dma_config_source = mdp3_dmap_config_source; + dma->config_cursor = mdp3_dmap_cursor_config; + dma->config_ccs = mdp3_dmap_ccs_config; + dma->config_histo = mdp3_dmap_histo_config; + dma->config_lut = mdp3_dmap_lut_config; + dma->update = mdp3_dmap_update; + dma->update_cursor = mdp3_dmap_cursor_update; + dma->get_histo = mdp3_dmap_histo_get; + dma->histo_op = mdp3_dmap_histo_op; + dma->vsync_enable = mdp3_dma_vsync_enable; + dma->dma_done_notifier = mdp3_dma_done_notifier; + dma->start = mdp3_dma_start; + dma->stop = mdp3_dma_stop; + dma->busy = 
mdp3_dmap_busy; + INIT_WORK(&dma->underrun_work, mdp3_dmap_underrun_worker); + break; + case MDP3_DMA_S: + dma->dma_config = mdp3_dmas_config; + dma->dma_sync_config = mdp3_dma_sync_config; + dma->dma_config_source = mdp3_dmas_config_source; + dma->config_cursor = NULL; + dma->config_ccs = NULL; + dma->config_histo = NULL; + dma->config_lut = NULL; + dma->update = mdp3_dmas_update; + dma->update_cursor = NULL; + dma->get_histo = NULL; + dma->histo_op = NULL; + dma->vsync_enable = mdp3_dma_vsync_enable; + dma->start = mdp3_dma_start; + dma->stop = mdp3_dma_stop; + break; + case MDP3_DMA_E: + default: + ret = -ENODEV; + break; + } + + spin_lock_init(&dma->dma_lock); + spin_lock_init(&dma->histo_lock); + init_completion(&dma->vsync_comp); + init_completion(&dma->dma_comp); + init_completion(&dma->histo_comp); + dma->vsync_client.handler = NULL; + dma->vsync_client.arg = NULL; + dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE; + dma->update_src_cfg = false; + + memset(&dma->cursor, 0, sizeof(dma->cursor)); + memset(&dma->ccs_config, 0, sizeof(dma->ccs_config)); + memset(&dma->histogram_config, 0, sizeof(dma->histogram_config)); + + return ret; +} + +int lcdc_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg) +{ + u32 temp; + struct mdp3_video_intf_cfg *v = &cfg->video; + + temp = v->hsync_pulse_width | (v->hsync_period << 16); + MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_CTL, temp); + MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PERIOD, v->vsync_period); + MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PULSE_WIDTH, v->vsync_pulse_width); + temp = v->display_start_x | (v->display_end_x << 16); + MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_HCTL, temp); + MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_START, v->display_start_y); + MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_END, v->display_end_y); + temp = v->active_start_x | (v->active_end_x); + if (v->active_h_enable) + temp |= BIT(31); + MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_HCTL, temp); + MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_START, v->active_start_y); + MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_END, v->active_end_y); + MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_SKEW, v->hsync_skew); + temp = 0; + if (!v->hsync_polarity) + temp = BIT(0); + if (!v->vsync_polarity) + temp = BIT(1); + if (!v->de_polarity) + temp = BIT(2); + MDP3_REG_WRITE(MDP3_REG_LCDC_CTL_POLARITY, temp); + + return 0; +} + +int lcdc_start(struct mdp3_intf *intf) +{ + MDP3_REG_WRITE(MDP3_REG_LCDC_EN, BIT(0)); + wmb(); /* ensure write is finished before progressing */ + intf->active = true; + return 0; +} + +int lcdc_stop(struct mdp3_intf *intf) +{ + MDP3_REG_WRITE(MDP3_REG_LCDC_EN, 0); + wmb(); /* ensure write is finished before progressing */ + intf->active = false; + return 0; +} + +int dsi_video_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg) +{ + u32 temp; + struct mdp3_video_intf_cfg *v = &cfg->video; + + pr_debug("%s\n", __func__); + + temp = v->hsync_pulse_width | (v->hsync_period << 16); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_CTL, temp); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PERIOD, v->vsync_period); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH, + v->vsync_pulse_width); + temp = v->display_start_x | (v->display_end_x << 16); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_HCTL, temp); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_START, v->display_start_y); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_END, v->display_end_y); + temp = v->active_start_x | (v->active_end_x << 16); + if (v->active_h_enable) + temp |= BIT(31); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_HCTL, temp); + + temp = v->active_start_y; 
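+	/* ACTIVE_V_START carries the start line; BIT(31) is set when the active-V window is enabled */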
+ if (v->active_v_enable) + temp |= BIT(31); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_START, temp); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_END, v->active_end_y); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_SKEW, v->hsync_skew); + temp = 0; + if (!v->hsync_polarity) + temp |= BIT(0); + if (!v->vsync_polarity) + temp |= BIT(1); + if (!v->de_polarity) + temp |= BIT(2); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_CTL_POLARITY, temp); + + v->underflow_color |= 0x80000000; + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL, v->underflow_color); + + return 0; +} + +int dsi_video_start(struct mdp3_intf *intf) +{ + pr_debug("%s\n", __func__); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, BIT(0)); + wmb(); /* ensure write is finished before progressing */ + intf->active = true; + return 0; +} + +int dsi_video_stop(struct mdp3_intf *intf) +{ + pr_debug("%s\n", __func__); + MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0); + wmb(); /* ensure write is finished before progressing */ + intf->active = false; + return 0; +} + +int dsi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg) +{ + u32 id_map = 0; + u32 trigger_en = 0; + + if (cfg->dsi_cmd.primary_dsi_cmd_id) + id_map = BIT(0); + if (cfg->dsi_cmd.secondary_dsi_cmd_id) + id_map = BIT(4); + + if (cfg->dsi_cmd.dsi_cmd_tg_intf_sel) + trigger_en = BIT(4); + + MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_ID_MAP, id_map); + MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_TRIGGER_EN, trigger_en); + + return 0; +} + +int dsi_cmd_start(struct mdp3_intf *intf) +{ + intf->active = true; + return 0; +} + +int dsi_cmd_stop(struct mdp3_intf *intf) +{ + intf->active = false; + return 0; +} + +int mdp3_intf_init(struct mdp3_intf *intf) +{ + switch (intf->cfg.type) { + case MDP3_DMA_OUTPUT_SEL_LCDC: + intf->config = lcdc_config; + intf->start = lcdc_start; + intf->stop = lcdc_stop; + break; + case MDP3_DMA_OUTPUT_SEL_DSI_VIDEO: + intf->config = dsi_video_config; + intf->start = dsi_video_start; + intf->stop = dsi_video_stop; + break; + case MDP3_DMA_OUTPUT_SEL_DSI_CMD: + intf->config = dsi_cmd_config; + intf->start = dsi_cmd_start; + intf->stop = dsi_cmd_stop; + break; + + default: + return -EINVAL; + } + return 0; +} diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h new file mode 100644 index 0000000000000000000000000000000000000000..24caedb931f72253825d1bf6dd1b184ee20b8fb0 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_dma.h @@ -0,0 +1,395 @@ +/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef MDP3_DMA_H +#define MDP3_DMA_H + +#include +#include +#include + +#define MDP_HISTOGRAM_BL_SCALE_MAX 1024 +#define MDP_HISTOGRAM_BL_LEVEL_MAX 255 +#define MDP_HISTOGRAM_FRAME_COUNT_MAX 0x20 +#define MDP_HISTOGRAM_BIT_MASK_MAX 0x4 +#define MDP_HISTOGRAM_CSC_MATRIX_MAX 0x2000 +#define MDP_HISTOGRAM_CSC_VECTOR_MAX 0x200 +#define MDP_HISTOGRAM_BIN_NUM 32 +#define MDP_LUT_SIZE 256 + +enum { + MDP3_DMA_P, + MDP3_DMA_S, + MDP3_DMA_E, + MDP3_DMA_MAX +}; + +enum { + MDP3_DMA_CAP_CURSOR = 0x1, + MDP3_DMA_CAP_COLOR_CORRECTION = 0x2, + MDP3_DMA_CAP_HISTOGRAM = 0x4, + MDP3_DMA_CAP_GAMMA_CORRECTION = 0x8, + MDP3_DMA_CAP_DITHER = 0x10, + MDP3_DMA_CAP_ALL = 0x1F +}; + +enum { + MDP3_DMA_OUTPUT_SEL_AHB, + MDP3_DMA_OUTPUT_SEL_DSI_CMD, + MDP3_DMA_OUTPUT_SEL_LCDC, + MDP3_DMA_OUTPUT_SEL_DSI_VIDEO, + MDP3_DMA_OUTPUT_SEL_MAX +}; + +enum { + MDP3_DMA_IBUF_FORMAT_RGB888, + MDP3_DMA_IBUF_FORMAT_RGB565, + MDP3_DMA_IBUF_FORMAT_XRGB8888, + MDP3_DMA_IBUF_FORMAT_UNDEFINED +}; + +enum { + MDP3_DMA_OUTPUT_PACK_PATTERN_RGB = 0x21, + MDP3_DMA_OUTPUT_PACK_PATTERN_RBG = 0x24, + MDP3_DMA_OUTPUT_PACK_PATTERN_BGR = 0x12, + MDP3_DMA_OUTPUT_PACK_PATTERN_BRG = 0x18, + MDP3_DMA_OUTPUT_PACK_PATTERN_GBR = 0x06, + MDP3_DMA_OUTPUT_PACK_PATTERN_GRB = 0x09, +}; + +enum { + MDP3_DMA_OUTPUT_PACK_ALIGN_LSB, + MDP3_DMA_OUTPUT_PACK_ALIGN_MSB +}; + +enum { + MDP3_DMA_OUTPUT_COMP_BITS_4, /*4 bits per color component*/ + MDP3_DMA_OUTPUT_COMP_BITS_5, + MDP3_DMA_OUTPUT_COMP_BITS_6, + MDP3_DMA_OUTPUT_COMP_BITS_8, +}; + +enum { + MDP3_DMA_CURSOR_FORMAT_ARGB888, +}; + +enum { + MDP3_DMA_COLOR_CORRECT_SET_1, + MDP3_DMA_COLOR_CORRECT_SET_2 +}; + +enum { + MDP3_DMA_LUT_POSITION_PRE, + MDP3_DMA_LUT_POSITION_POST +}; + +enum { + MDP3_DMA_LUT_DISABLE = 0x0, + MDP3_DMA_LUT_ENABLE_C0 = 0x01, + MDP3_DMA_LUT_ENABLE_C1 = 0x02, + MDP3_DMA_LUT_ENABLE_C2 = 0x04, + MDP3_DMA_LUT_ENABLE_ALL = 0x07, +}; + +enum { + MDP3_DMA_HISTOGRAM_BIT_MASK_NONE = 0X0, + MDP3_DMA_HISTOGRAM_BIT_MASK_ONE_MSB = 0x1, + MDP3_DMA_HISTOGRAM_BIT_MASK_TWO_MSB = 0x2, + MDP3_DMA_HISTOGRAM_BIT_MASK_THREE_MSB = 0x3 +}; + +enum { + MDP3_DMA_COLOR_FLIP_NONE, + MDP3_DMA_COLOR_FLIP_COMP1 = 0x1, + MDP3_DMA_COLOR_FLIP_COMP2 = 0x2, + MDP3_DMA_COLOR_FLIP_COMP3 = 0x4, +}; + +enum { + MDP3_DMA_CURSOR_BLEND_NONE = 0x0, + MDP3_DMA_CURSOR_BLEND_PER_PIXEL_ALPHA = 0x3, + MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA = 0x5, + MDP3_DMA_CURSOR_BLEND_COLOR_KEYING = 0x9 +}; + +enum { + MDP3_DMA_HISTO_OP_START, + MDP3_DMA_HISTO_OP_STOP, + MDP3_DMA_HISTO_OP_CANCEL, + MDP3_DMA_HISTO_OP_RESET +}; + +enum { + MDP3_DMA_HISTO_STATE_UNKNOWN, + MDP3_DMA_HISTO_STATE_IDLE, + MDP3_DMA_HISTO_STATE_RESET, + MDP3_DMA_HISTO_STATE_START, + MDP3_DMA_HISTO_STATE_READY, +}; + +enum { + MDP3_DMA_CALLBACK_TYPE_VSYNC = 0x01, + MDP3_DMA_CALLBACK_TYPE_DMA_DONE = 0x02, + MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE = 0x04, + MDP3_DMA_CALLBACK_TYPE_HIST_DONE = 0x08, +}; + +struct mdp3_dma_source { + u32 format; + int width; + int height; + int x; + int y; + dma_addr_t buf; + int stride; + int vsync_count; + int vporch; +}; + +struct mdp3_dma_output_config { + int dither_en; + u32 out_sel; + u32 bit_mask_polarity; + u32 color_components_flip; + u32 pack_pattern; + u32 pack_align; + u32 color_comp_out_bits; +}; + +struct mdp3_dma_cursor_blend_config { + u32 mode; + u32 transparent_color; /*color keying*/ + u32 transparency_mask; + u32 constant_alpha; +}; + +struct mdp3_dma_cursor { + int enable; /* enable cursor or not*/ + u32 format; + int width; + int height; + int x; + int y; + void *buf; + struct mdp3_dma_cursor_blend_config 
blend_config; +}; + +struct mdp3_dma_ccs { + u32 *mv; /*set1 matrix vector, 3x3 */ + u32 *pre_bv; /*pre-bias vector for set1, 1x3*/ + u32 *post_bv; /*post-bias vecotr for set1, */ + u32 *pre_lv; /*pre-limit vector for set 1, 1x6*/ + u32 *post_lv; +}; + +struct mdp3_dma_lut_config { + int lut_enable; + u32 lut_sel; + u32 lut_position; + bool lut_dirty; +}; + +struct mdp3_dma_color_correct_config { + int ccs_enable; + u32 post_limit_sel; + u32 pre_limit_sel; + u32 post_bias_sel; + u32 pre_bias_sel; + u32 ccs_sel; + bool ccs_dirty; +}; + +struct mdp3_dma_histogram_config { + int frame_count; + u32 bit_mask_polarity; + u32 bit_mask; + int auto_clear_en; +}; + +struct mdp3_dma_histogram_data { + u32 r_data[MDP_HISTOGRAM_BIN_NUM]; + u32 g_data[MDP_HISTOGRAM_BIN_NUM]; + u32 b_data[MDP_HISTOGRAM_BIN_NUM]; + u32 extra[2]; +}; + +struct mdp3_notification { + void (*handler)(void *arg); + void *arg; +}; + +struct mdp3_tear_check { + int frame_rate; + bool hw_vsync_mode; + u32 tear_check_en; + u32 sync_cfg_height; + u32 vsync_init_val; + u32 sync_threshold_start; + u32 sync_threshold_continue; + u32 start_pos; + u32 rd_ptr_irq; + u32 refx100; +}; + +struct mdp3_rect { + u32 x; + u32 y; + u32 w; + u32 h; +}; + +struct mdp3_intf; + +struct mdp3_dma { + u32 dma_sel; + u32 capability; + int in_use; + int available; + + spinlock_t dma_lock; + spinlock_t histo_lock; + struct completion vsync_comp; + struct completion dma_comp; + struct completion histo_comp; + struct kernfs_node *hist_event_sd; + struct mdp3_notification vsync_client; + struct mdp3_notification dma_notifier_client; + struct mdp3_notification retire_client; + + struct mdp3_dma_output_config output_config; + struct mdp3_dma_source source_config; + + struct mdp3_dma_cursor cursor; + struct mdp3_dma_color_correct_config ccs_config; + struct mdp_csc_cfg_data ccs_cache; + int cc_vect_sel; + + struct work_struct underrun_work; + struct mutex pp_lock; + + struct mdp3_dma_lut_config lut_config; + struct mdp3_dma_histogram_config histogram_config; + int histo_state; + struct mdp3_dma_histogram_data histo_data; + unsigned int vsync_status; + bool update_src_cfg; + bool has_panic_ctrl; + struct mdp3_rect roi; + + u32 lut_sts; + u32 hist_events; + struct fb_cmap *gc_cmap; + struct fb_cmap *hist_cmap; + + bool (*busy)(void); + + int (*dma_config)(struct mdp3_dma *dma, + struct mdp3_dma_source *source_config, + struct mdp3_dma_output_config *output_config, + bool splash_screen_active); + + int (*dma_sync_config)(struct mdp3_dma *dma, struct mdp3_dma_source + *source_config, struct mdp3_tear_check *te); + + void (*dma_config_source)(struct mdp3_dma *dma); + + int (*start)(struct mdp3_dma *dma, struct mdp3_intf *intf); + + int (*stop)(struct mdp3_dma *dma, struct mdp3_intf *intf); + + int (*config_cursor)(struct mdp3_dma *dma, + struct mdp3_dma_cursor *cursor); + + int (*config_ccs)(struct mdp3_dma *dma, + struct mdp3_dma_color_correct_config *config, + struct mdp3_dma_ccs *ccs); + + int (*config_lut)(struct mdp3_dma *dma, + struct mdp3_dma_lut_config *config, + struct fb_cmap *cmap); + + int (*update)(struct mdp3_dma *dma, + void *buf, struct mdp3_intf *intf, void *data); + + int (*update_cursor)(struct mdp3_dma *dma, int x, int y); + + int (*get_histo)(struct mdp3_dma *dma); + + int (*config_histo)(struct mdp3_dma *dma, + struct mdp3_dma_histogram_config *histo_config); + + int (*histo_op)(struct mdp3_dma *dma, u32 op); + + void (*vsync_enable)(struct mdp3_dma *dma, + struct mdp3_notification *vsync_client); + + void (*retire_enable)(struct mdp3_dma *dma, 
+ struct mdp3_notification *retire_client); + + void (*dma_done_notifier)(struct mdp3_dma *dma, + struct mdp3_notification *dma_client); +}; + +struct mdp3_video_intf_cfg { + int hsync_period; + int hsync_pulse_width; + int vsync_period; + int vsync_pulse_width; + int display_start_x; + int display_end_x; + int display_start_y; + int display_end_y; + int active_start_x; + int active_end_x; + int active_h_enable; + int active_start_y; + int active_end_y; + int active_v_enable; + int hsync_skew; + int hsync_polarity; + int vsync_polarity; + int de_polarity; + int underflow_color; +}; + +struct mdp3_dsi_cmd_intf_cfg { + int primary_dsi_cmd_id; + int secondary_dsi_cmd_id; + int dsi_cmd_tg_intf_sel; +}; + +struct mdp3_intf_cfg { + u32 type; + struct mdp3_video_intf_cfg video; + struct mdp3_dsi_cmd_intf_cfg dsi_cmd; +}; + +struct mdp3_intf { + struct mdp3_intf_cfg cfg; + int active; + int available; + int in_use; + int (*config)(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg); + int (*start)(struct mdp3_intf *intf); + int (*stop)(struct mdp3_intf *intf); +}; + +int mdp3_dma_init(struct mdp3_dma *dma); + +int mdp3_intf_init(struct mdp3_intf *intf); + +void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type); + +void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type); + +void mdp3_hist_intr_notify(struct mdp3_dma *dma); +#endif /* MDP3_DMA_H */ diff --git a/drivers/video/fbdev/msm/mdp3_hwio.h b/drivers/video/fbdev/msm/mdp3_hwio.h new file mode 100644 index 0000000000000000000000000000000000000000..2e3d358cd19c4122d315b96e6e2d6d682c6c8757 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_hwio.h @@ -0,0 +1,361 @@ +/* Copyright (c) 2013-2014, 2016, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef MDP3_HWIO_H +#define MDP3_HWIO_H + +#include + +/*synchronization*/ +#define MDP3_REG_SYNC_CONFIG_0 0x0300 +#define MDP3_REG_SYNC_CONFIG_1 0x0304 +#define MDP3_REG_SYNC_CONFIG_2 0x0308 +#define MDP3_REG_SYNC_STATUS_0 0x030c +#define MDP3_REG_SYNC_STATUS_1 0x0310 +#define MDP3_REG_SYNC_STATUS_2 0x0314 +#define MDP3_REG_PRIMARY_VSYNC_OUT_CTRL 0x0318 +#define MDP3_REG_SECONDARY_VSYNC_OUT_CTRL 0x031c +#define MDP3_REG_EXTERNAL_VSYNC_OUT_CTRL 0x0320 +#define MDP3_REG_VSYNC_SEL 0x0324 +#define MDP3_REG_PRIMARY_VSYNC_INIT_VAL 0x0328 +#define MDP3_REG_SECONDARY_VSYNC_INIT_VAL 0x032c +#define MDP3_REG_EXTERNAL_VSYNC_INIT_VAL 0x0330 +#define MDP3_REG_AUTOREFRESH_CONFIG_P 0x034C +#define MDP3_REG_SYNC_THRESH_0 0x0200 +#define MDP3_REG_SYNC_THRESH_1 0x0204 +#define MDP3_REG_SYNC_THRESH_2 0x0208 +#define MDP3_REG_TEAR_CHECK_EN 0x020C +#define MDP3_REG_PRIMARY_START_P0S 0x0210 +#define MDP3_REG_SECONDARY_START_POS 0x0214 +#define MDP3_REG_EXTERNAL_START_POS 0x0218 + +/*interrupt*/ +#define MDP3_REG_INTR_ENABLE 0x0020 +#define MDP3_REG_INTR_STATUS 0x0024 +#define MDP3_REG_INTR_CLEAR 0x0028 + +#define MDP3_REG_PRIMARY_RD_PTR_IRQ 0x021C +#define MDP3_REG_SECONDARY_RD_PTR_IRQ 0x0220 + +/*operation control*/ +#define MDP3_REG_DMA_P_START 0x0044 +#define MDP3_REG_DMA_S_START 0x0048 +#define MDP3_REG_DMA_E_START 0x004c + +#define MDP3_REG_DISPLAY_STATUS 0x0038 + +#define MDP3_REG_HW_VERSION 0x0070 +#define MDP3_REG_SW_RESET 0x0074 +#define MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS 0x007C + +/*EBI*/ +#define MDP3_REG_EBI2_LCD0 0x003c +#define MDP3_REG_EBI2_LCD0_YSTRIDE 0x0050 + +/*clock control*/ +#define MDP3_REG_CGC_EN 0x0100 +#define MDP3_VBIF_REG_FORCE_EN 0x0004 + +/* QOS Remapper */ +#define MDP3_DMA_P_QOS_REMAPPER 0x90090 +#define MDP3_DMA_P_WATERMARK_0 0x90094 +#define MDP3_DMA_P_WATERMARK_1 0x90098 +#define MDP3_DMA_P_WATERMARK_2 0x9009C +#define MDP3_PANIC_ROBUST_CTRL 0x900A0 +#define MDP3_PANIC_LUT0 0x900A4 +#define MDP3_PANIC_LUT1 0x900A8 +#define MDP3_ROBUST_LUT 0x900AC + +/*danger safe*/ +#define MDP3_PANIC_ROBUST_CTRL 0x900A0 + +/*DMA_P*/ +#define MDP3_REG_DMA_P_CONFIG 0x90000 +#define MDP3_REG_DMA_P_SIZE 0x90004 +#define MDP3_REG_DMA_P_IBUF_ADDR 0x90008 +#define MDP3_REG_DMA_P_IBUF_Y_STRIDE 0x9000C +#define MDP3_REG_DMA_P_PROFILE_EN 0x90020 +#define MDP3_REG_DMA_P_OUT_XY 0x90010 +#define MDP3_REG_DMA_P_CURSOR_FORMAT 0x90040 +#define MDP3_REG_DMA_P_CURSOR_SIZE 0x90044 +#define MDP3_REG_DMA_P_CURSOR_BUF_ADDR 0x90048 +#define MDP3_REG_DMA_P_CURSOR_POS 0x9004c +#define MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG 0x90060 +#define MDP3_REG_DMA_P_CURSOR_BLEND_PARAM 0x90064 +#define MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK 0x90068 +#define MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG 0x90070 +#define MDP3_REG_DMA_P_CSC_BYPASS 0X93004 +#define MDP3_REG_DMA_P_CSC_MV1 0x93400 +#define MDP3_REG_DMA_P_CSC_MV2 0x93440 +#define MDP3_REG_DMA_P_CSC_PRE_BV1 0x93500 +#define MDP3_REG_DMA_P_CSC_PRE_BV2 0x93540 +#define MDP3_REG_DMA_P_CSC_POST_BV1 0x93580 +#define MDP3_REG_DMA_P_CSC_POST_BV2 0x935c0 +#define MDP3_REG_DMA_P_CSC_PRE_LV1 0x93600 +#define MDP3_REG_DMA_P_CSC_PRE_LV2 0x93640 +#define MDP3_REG_DMA_P_CSC_POST_LV1 0x93680 +#define MDP3_REG_DMA_P_CSC_POST_LV2 0x936c0 +#define MDP3_REG_DMA_P_CSC_LUT1 0x93800 +#define MDP3_REG_DMA_P_CSC_LUT2 0x93c00 +#define MDP3_REG_DMA_P_HIST_START 0x94000 +#define MDP3_REG_DMA_P_HIST_FRAME_CNT 0x94004 +#define MDP3_REG_DMA_P_HIST_BIT_MASK 0x94008 +#define MDP3_REG_DMA_P_HIST_RESET_SEQ_START 0x9400c +#define MDP3_REG_DMA_P_HIST_CONTROL 0x94010 +#define MDP3_REG_DMA_P_HIST_INTR_STATUS 
0x94014 +#define MDP3_REG_DMA_P_HIST_INTR_CLEAR 0x94018 +#define MDP3_REG_DMA_P_HIST_INTR_ENABLE 0x9401c +#define MDP3_REG_DMA_P_HIST_STOP_REQ 0x94020 +#define MDP3_REG_DMA_P_HIST_CANCEL_REQ 0x94024 +#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_0 0x94028 +#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_1 0x9402c +#define MDP3_REG_DMA_P_HIST_R_DATA 0x94100 +#define MDP3_REG_DMA_P_HIST_G_DATA 0x94200 +#define MDP3_REG_DMA_P_HIST_B_DATA 0x94300 +#define MDP3_REG_DMA_P_FETCH_CFG 0x90074 +#define MDP3_REG_DMA_P_DCVS_CTRL 0x90080 +#define MDP3_REG_DMA_P_DCVS_STATUS 0x90084 + +/*DMA_S*/ +#define MDP3_REG_DMA_S_CONFIG 0xA0000 +#define MDP3_REG_DMA_S_SIZE 0xA0004 +#define MDP3_REG_DMA_S_IBUF_ADDR 0xA0008 +#define MDP3_REG_DMA_S_IBUF_Y_STRIDE 0xA000C +#define MDP3_REG_DMA_S_OUT_XY 0xA0010 + +/*DMA MASK*/ +#define MDP3_DMA_IBUF_FORMAT_MASK 0x06000000 +#define MDP3_DMA_PACK_PATTERN_MASK 0x00003f00 + +/*MISR*/ +#define MDP3_REG_MODE_CLK 0x000D0000 +#define MDP3_REG_MISR_RESET_CLK 0x000D0004 +#define MDP3_REG_EXPORT_MISR_CLK 0x000D0008 +#define MDP3_REG_MISR_CURR_VAL_CLK 0x000D000C +#define MDP3_REG_MODE_HCLK 0x000D0100 +#define MDP3_REG_MISR_RESET_HCLK 0x000D0104 +#define MDP3_REG_EXPORT_MISR_HCLK 0x000D0108 +#define MDP3_REG_MISR_CURR_VAL_HCLK 0x000D010C +#define MDP3_REG_MODE_DCLK 0x000D0200 +#define MDP3_REG_MISR_RESET_DCLK 0x000D0204 +#define MDP3_REG_EXPORT_MISR_DCLK 0x000D0208 +#define MDP3_REG_MISR_CURR_VAL_DCLK 0x000D020C +#define MDP3_REG_CAPTURED_DCLK 0x000D0210 +#define MDP3_REG_MISR_CAPT_VAL_DCLK 0x000D0214 +#define MDP3_REG_MODE_TVCLK 0x000D0300 +#define MDP3_REG_MISR_RESET_TVCLK 0x000D0304 +#define MDP3_REG_EXPORT_MISR_TVCLK 0x000D0308 +#define MDP3_REG_MISR_CURR_VAL_TVCLK 0x000D030C +#define MDP3_REG_CAPTURED_TVCLK 0x000D0310 +#define MDP3_REG_MISR_CAPT_VAL_TVCLK 0x000D0314 + +/* Select DSI operation type(CMD/VIDEO) */ +#define MDP3_REG_MODE_DSI_PCLK 0x000D0400 +#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_CMD 0x10 +#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO1 0x20 +#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO2 0x30 +/* RESET DSI MISR STATE */ +#define MDP3_REG_MISR_RESET_DSI_PCLK 0x000D0404 + +/* For reading MISR State(1) and driving data on test bus(0) */ +#define MDP3_REG_EXPORT_MISR_DSI_PCLK 0x000D0408 +/* Read MISR signature */ +#define MDP3_REG_MISR_CURR_VAL_DSI_PCLK 0x000D040C + +/* MISR status Bit0 (1) Capture Done */ +#define MDP3_REG_CAPTURED_DSI_PCLK 0x000D0410 +#define MDP3_REG_MISR_CAPT_VAL_DSI_PCLK 0x000D0414 +#define MDP3_REG_MISR_TESTBUS_CAPT_VAL 0x000D0600 + +/*interface*/ +#define MDP3_REG_LCDC_EN 0xE0000 +#define MDP3_REG_LCDC_HSYNC_CTL 0xE0004 +#define MDP3_REG_LCDC_VSYNC_PERIOD 0xE0008 +#define MDP3_REG_LCDC_VSYNC_PULSE_WIDTH 0xE000C +#define MDP3_REG_LCDC_DISPLAY_HCTL 0xE0010 +#define MDP3_REG_LCDC_DISPLAY_V_START 0xE0014 +#define MDP3_REG_LCDC_DISPLAY_V_END 0xE0018 +#define MDP3_REG_LCDC_ACTIVE_HCTL 0xE001C +#define MDP3_REG_LCDC_ACTIVE_V_START 0xE0020 +#define MDP3_REG_LCDC_ACTIVE_V_END 0xE0024 +#define MDP3_REG_LCDC_BORDER_COLOR 0xE0028 +#define MDP3_REG_LCDC_UNDERFLOW_CTL 0xE002C +#define MDP3_REG_LCDC_HSYNC_SKEW 0xE0030 +#define MDP3_REG_LCDC_TEST_CTL 0xE0034 +#define MDP3_REG_LCDC_CTL_POLARITY 0xE0038 +#define MDP3_REG_LCDC_TEST_COL_VAR1 0xE003C +#define MDP3_REG_LCDC_TEST_COL_VAR2 0xE0040 +#define MDP3_REG_LCDC_UFLOW_HIDING_CTL 0xE0044 +#define MDP3_REG_LCDC_LOST_PIXEL_CNT_VALUE 0xE0048 + +#define MDP3_REG_DSI_VIDEO_EN 0xF0000 +#define MDP3_REG_DSI_VIDEO_HSYNC_CTL 0xF0004 +#define MDP3_REG_DSI_VIDEO_VSYNC_PERIOD 0xF0008 +#define MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH 0xF000C 
+#define MDP3_REG_DSI_VIDEO_DISPLAY_HCTL 0xF0010 +#define MDP3_REG_DSI_VIDEO_DISPLAY_V_START 0xF0014 +#define MDP3_REG_DSI_VIDEO_DISPLAY_V_END 0xF0018 +#define MDP3_REG_DSI_VIDEO_ACTIVE_HCTL 0xF001C +#define MDP3_REG_DSI_VIDEO_ACTIVE_V_START 0xF0020 +#define MDP3_REG_DSI_VIDEO_ACTIVE_V_END 0xF0024 +#define MDP3_REG_DSI_VIDEO_BORDER_COLOR 0xF0028 +#define MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL 0xF002C +#define MDP3_REG_DSI_VIDEO_HSYNC_SKEW 0xF0030 +#define MDP3_REG_DSI_VIDEO_TEST_CTL 0xF0034 +#define MDP3_REG_DSI_VIDEO_CTL_POLARITY 0xF0038 +#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR1 0xF003C +#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR2 0xF0040 +#define MDP3_REG_DSI_VIDEO_UFLOW_HIDING_CTL 0xF0044 +#define MDP3_REG_DSI_VIDEO_LOST_PIXEL_CNT_VALUE 0xF0048 + +#define MDP3_REG_DSI_CMD_MODE_ID_MAP 0xF1000 +#define MDP3_REG_DSI_CMD_MODE_TRIGGER_EN 0xF1004 + +#define MDP3_PPP_CSC_PFMVn(n) (0x40400 + (4 * (n))) +#define MDP3_PPP_CSC_PRMVn(n) (0x40440 + (4 * (n))) +#define MDP3_PPP_CSC_PBVn(n) (0x40500 + (4 * (n))) +#define MDP3_PPP_CSC_PLVn(n) (0x40580 + (4 * (n))) + +#define MDP3_PPP_CSC_SFMVn(n) (0x40480 + (4 * (n))) +#define MDP3_PPP_CSC_SRMVn(n) (0x404C0 + (4 * (n))) +#define MDP3_PPP_CSC_SBVn(n) (0x40540 + (4 * (n))) +#define MDP3_PPP_CSC_SLVn(n) (0x405C0 + (4 * (n))) + +#define MDP3_PPP_SCALE_PHASEX_INIT 0x1013C +#define MDP3_PPP_SCALE_PHASEY_INIT 0x10140 +#define MDP3_PPP_SCALE_PHASEX_STEP 0x10144 +#define MDP3_PPP_SCALE_PHASEY_STEP 0x10148 + +#define MDP3_PPP_OP_MODE 0x10138 + +#define MDP3_PPP_PRE_LUT 0x40800 +#define MDP3_PPP_POST_LUT 0x40C00 +#define MDP3_PPP_LUTn(n) ((4 * (n))) + +#define MDP3_PPP_BG_EDGE_REP 0x101BC +#define MDP3_PPP_SRC_EDGE_REP 0x101B8 + +#define MDP3_PPP_STRIDE_MASK 0x3FFF +#define MDP3_PPP_STRIDE1_OFFSET 16 + +#define MDP3_PPP_XY_MASK 0x0FFF +#define MDP3_PPP_XY_OFFSET 16 + +#define MDP3_PPP_SRC_SIZE 0x10108 +#define MDP3_PPP_SRCP0_ADDR 0x1010C +#define MDP3_PPP_SRCP1_ADDR 0x10110 +#define MDP3_PPP_SRCP3_ADDR 0x10118 +#define MDP3_PPP_SRC_YSTRIDE1_ADDR 0x1011C +#define MDP3_PPP_SRC_YSTRIDE2_ADDR 0x10120 +#define MDP3_PPP_SRC_FORMAT 0x10124 +#define MDP3_PPP_SRC_UNPACK_PATTERN1 0x10128 +#define MDP3_PPP_SRC_UNPACK_PATTERN2 0x1012C + +#define MDP3_PPP_OUT_FORMAT 0x10150 +#define MDP3_PPP_OUT_PACK_PATTERN1 0x10154 +#define MDP3_PPP_OUT_PACK_PATTERN2 0x10158 +#define MDP3_PPP_OUT_SIZE 0x10164 +#define MDP3_PPP_OUTP0_ADDR 0x10168 +#define MDP3_PPP_OUTP1_ADDR 0x1016C +#define MDP3_PPP_OUTP3_ADDR 0x10174 +#define MDP3_PPP_OUT_YSTRIDE1_ADDR 0x10178 +#define MDP3_PPP_OUT_YSTRIDE2_ADDR 0x1017C +#define MDP3_PPP_OUT_XY 0x1019C + +#define MDP3_PPP_BGP0_ADDR 0x101C0 +#define MDP3_PPP_BGP1_ADDR 0x101C4 +#define MDP3_PPP_BGP3_ADDR 0x101C8 +#define MDP3_PPP_BG_YSTRIDE1_ADDR 0x101CC +#define MDP3_PPP_BG_YSTRIDE2_ADDR 0x101D0 +#define MDP3_PPP_BG_FORMAT 0x101D4 +#define MDP3_PPP_BG_UNPACK_PATTERN1 0x101D8 +#define MDP3_PPP_BG_UNPACK_PATTERN2 0x101DC + +#define MDP3_TFETCH_SOLID_FILL 0x20004 +#define MDP3_TFETCH_FILL_COLOR 0x20040 + +#define MDP3_PPP_BLEND_PARAM 0x1014C + +#define MDP3_PPP_BLEND_BG_ALPHA_SEL 0x70010 + +#define MDP3_PPP_ACTIVE BIT(0) + +/*interrupt mask*/ + +#define MDP3_INTR_DP0_ROI_DONE_BIT BIT(0) +#define MDP3_INTR_DP1_ROI_DONE_BIT BIT(1) +#define MDP3_INTR_DMA_S_DONE_BIT BIT(2) +#define MDP3_INTR_DMA_E_DONE_BIT BIT(3) +#define MDP3_INTR_DP0_TERMINAL_FRAME_DONE_BIT BIT(4) +#define MDP3_INTR_DP1_TERMINAL_FRAME_DONE_BIT BIT(5) +#define MDP3_INTR_DMA_TV_DONE_BIT BIT(6) +#define MDP3_INTR_TV_ENCODER_UNDER_RUN_BIT BIT(7) +#define MDP3_INTR_SYNC_PRIMARY_LINE_BIT BIT(8) +#define 
MDP3_INTR_SYNC_SECONDARY_LINE_BIT BIT(9) +#define MDP3_INTR_SYNC_EXTERNAL_LINE_BIT BIT(10) +#define MDP3_INTR_DP0_FETCH_DONE_BIT BIT(11) +#define MDP3_INTR_DP1_FETCH_DONE_BIT BIT(12) +#define MDP3_INTR_TV_OUT_FRAME_START_BIT BIT(13) +#define MDP3_INTR_DMA_P_DONE_BIT BIT(14) +#define MDP3_INTR_LCDC_START_OF_FRAME_BIT BIT(15) +#define MDP3_INTR_LCDC_UNDERFLOW_BIT BIT(16) +#define MDP3_INTR_DMA_P_LINE_BIT BIT(17) +#define MDP3_INTR_DMA_S_LINE_BIT BIT(18) +#define MDP3_INTR_DMA_E_LINE_BIT BIT(19) +#define MDP3_INTR_DMA_P_HISTO_BIT BIT(20) +#define MDP3_INTR_DTV_OUT_DONE_BIT BIT(21) +#define MDP3_INTR_DTV_OUT_START_OF_FRAME_BIT BIT(22) +#define MDP3_INTR_DTV_OUT_UNDERFLOW_BIT BIT(23) +#define MDP3_INTR_DTV_OUT_LINE_BIT BIT(24) +#define MDP3_INTR_DMA_P_AUTO_FREFRESH_START_BIT BIT(25) +#define MDP3_INTR_DMA_S_AUTO_FREFRESH_START_BIT BIT(26) +#define MDP3_INTR_QPIC_EOF_ENABLE_BIT BIT(27) + +enum { + MDP3_INTR_DP0_ROI_DONE, + MDP3_INTR_DP1_ROI_DONE, + MDP3_INTR_DMA_S_DONE, + MDP3_INTR_DMA_E_DONE, + MDP3_INTR_DP0_TERMINAL_FRAME_DONE, + MDP3_INTR_DP1_TERMINAL_FRAME_DONE, + MDP3_INTR_DMA_TV_DONE, + MDP3_INTR_TV_ENCODER_UNDER_RUN, + MDP3_INTR_SYNC_PRIMARY_LINE, + MDP3_INTR_SYNC_SECONDARY_LINE, + MDP3_INTR_SYNC_EXTERNAL_LINE, + MDP3_INTR_DP0_FETCH_DONE, + MDP3_INTR_DP1_FETCH_DONE, + MDP3_INTR_TV_OUT_FRAME_START, + MDP3_INTR_DMA_P_DONE, + MDP3_INTR_LCDC_START_OF_FRAME, + MDP3_INTR_LCDC_UNDERFLOW, + MDP3_INTR_DMA_P_LINE, + MDP3_INTR_DMA_S_LINE, + MDP3_INTR_DMA_E_LINE, + MDP3_INTR_DMA_P_HISTO, + MDP3_INTR_DTV_OUT_DONE, + MDP3_INTR_DTV_OUT_START_OF_FRAME, + MDP3_INTR_DTV_OUT_UNDERFLOW, + MDP3_INTR_DTV_OUT_LINE, + MDP3_INTR_DMA_P_AUTO_FREFRESH_START, + MDP3_INTR_DMA_S_AUTO_FREFRESH_START, + MDP3_INTR_QPIC_EOF_ENABLE, +}; + +#define MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT BIT(0) +#define MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT BIT(1) +#define MDP3_PPP_DONE MDP3_INTR_DP0_ROI_DONE + +#define MDP3_DMA_P_BUSY_BIT BIT(6) + +#endif /* MDP3_HWIO_H */ diff --git a/drivers/video/fbdev/msm/mdp3_layer.c b/drivers/video/fbdev/msm/mdp3_layer.c new file mode 100644 index 0000000000000000000000000000000000000000..0078466dc2a4b326b98ffa59724b7b989aa09b12 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_layer.c @@ -0,0 +1,348 @@ +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "mdp3_ctrl.h" +#include "mdp3.h" +#include "mdp3_ppp.h" +#include "mdp3_ctrl.h" +#include "mdss_fb.h" +#include "mdss_sync.h" + +enum { + MDP3_RELEASE_FENCE = 0, + MDP3_RETIRE_FENCE, +}; + +static struct mdss_fence *__mdp3_create_fence(struct msm_fb_data_type *mfd, + struct msm_sync_pt_data *sync_pt_data, u32 fence_type, + int *fence_fd, int value) +{ + struct mdss_fence *sync_fence = NULL; + char fence_name[32]; + struct mdp3_session_data *mdp3_session; + + mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1; + + if (fence_type == MDP3_RETIRE_FENCE) + snprintf(fence_name, sizeof(fence_name), "fb%d_retire", + mfd->index); + else + snprintf(fence_name, sizeof(fence_name), "fb%d_release", + mfd->index); + + if ((fence_type == MDP3_RETIRE_FENCE) && + (mfd->panel.type == MIPI_CMD_PANEL)) { + if (sync_pt_data->timeline_retire) { + value = sync_pt_data->timeline_retire->value + 1 + + mdp3_session->retire_cnt++; + sync_fence = mdss_fb_sync_get_fence( + sync_pt_data->timeline_retire, + fence_name, value); + + } else { + return ERR_PTR(-EPERM); + } + } else { + if (fence_type == MDP3_RETIRE_FENCE) + sync_fence = mdss_fb_sync_get_fence( + sync_pt_data->timeline_retire, + fence_name, value); + else + sync_fence = mdss_fb_sync_get_fence( + sync_pt_data->timeline, + fence_name, value); + } + + if (IS_ERR_OR_NULL(sync_fence)) { + pr_err("%s: unable to retrieve release fence\n", fence_name); + goto end; + } + + *fence_fd = mdss_get_sync_fence_fd(sync_fence); + if (*fence_fd < 0) { + pr_err("%s: get_unused_fd_flags failed error:0x%x\n", + fence_name, *fence_fd); + mdss_put_sync_fence(sync_fence); + sync_fence = NULL; + goto end; + } + pr_debug("%s:val=%d\n", mdss_get_sync_fence_name(sync_fence), value); +end: + return sync_fence; +} + +/* + * __handle_buffer_fences() - copy sync fences and return release + * fence to caller. + * + * This function copies all input sync fences to acquire fence array and + * returns release fences to caller. It acts like buff_sync ioctl. + */ +static int __mdp3_handle_buffer_fences(struct msm_fb_data_type *mfd, + struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list) +{ + struct mdss_fence *fence, *release_fence, *retire_fence; + struct msm_sync_pt_data *sync_pt_data = NULL; + struct mdp_input_layer *layer; + int value; + + u32 acq_fen_count, i, ret = 0; + u32 layer_count = commit->input_layer_cnt; + + sync_pt_data = &mfd->mdp_sync_pt_data; + if (!sync_pt_data) { + pr_err("sync point data are NULL\n"); + return -EINVAL; + } + + i = mdss_fb_wait_for_fence(sync_pt_data); + if (i > 0) + pr_warn("%s: waited on %d active fences\n", + sync_pt_data->fence_name, i); + + mutex_lock(&sync_pt_data->sync_mutex); + for (i = 0, acq_fen_count = 0; i < layer_count; i++) { + layer = &layer_list[i]; + + if (layer->buffer.fence < 0) + continue; + + fence = mdss_get_fd_sync_fence(layer->buffer.fence); + if (!fence) { + pr_err("%s: sync fence get failed! 
fd=%d\n", + sync_pt_data->fence_name, layer->buffer.fence); + ret = -EINVAL; + goto sync_fence_err; + } else { + sync_pt_data->acq_fen[acq_fen_count++] = fence; + } + } + + sync_pt_data->acq_fen_cnt = acq_fen_count; + if (ret) + goto sync_fence_err; + + value = sync_pt_data->threshold + + atomic_read(&sync_pt_data->commit_cnt); + + release_fence = __mdp3_create_fence(mfd, sync_pt_data, + MDP3_RELEASE_FENCE, &commit->release_fence, value); + if (IS_ERR_OR_NULL(release_fence)) { + pr_err("unable to retrieve release fence\n"); + ret = PTR_ERR(release_fence); + goto release_fence_err; + } + + retire_fence = __mdp3_create_fence(mfd, sync_pt_data, + MDP3_RETIRE_FENCE, &commit->retire_fence, value); + if (IS_ERR_OR_NULL(retire_fence)) { + pr_err("unable to retrieve retire fence\n"); + ret = PTR_ERR(retire_fence); + goto retire_fence_err; + } + + mutex_unlock(&sync_pt_data->sync_mutex); + return ret; + +retire_fence_err: + put_unused_fd(commit->release_fence); + mdss_put_sync_fence(release_fence); +release_fence_err: + commit->retire_fence = -1; + commit->release_fence = -1; +sync_fence_err: + for (i = 0; i < sync_pt_data->acq_fen_cnt; i++) + mdss_put_sync_fence(sync_pt_data->acq_fen[i]); + sync_pt_data->acq_fen_cnt = 0; + + mutex_unlock(&sync_pt_data->sync_mutex); + + return ret; +} + +/* + * __map_layer_buffer() - map input layer buffer + * + */ +static int __mdp3_map_layer_buffer(struct msm_fb_data_type *mfd, + struct mdp_input_layer *input_layer) +{ + struct mdp3_session_data *mdp3_session = mfd->mdp.private1; + struct mdp3_dma *dma = mdp3_session->dma; + struct mdp_input_layer *layer = NULL; + struct mdp_layer_buffer *buffer; + struct msmfb_data img; + bool is_panel_type_cmd = false; + struct mdp3_img_data data; + int rc = 0; + + layer = &input_layer[0]; + buffer = &layer->buffer; + + /* current implementation only supports one plane mapping */ + if (buffer->planes[0].fd < 0) { + pr_err("invalid file descriptor for layer buffer\n"); + goto err; + } + + memset(&img, 0, sizeof(img)); + img.memory_id = buffer->planes[0].fd; + img.offset = buffer->planes[0].offset; + + memset(&data, 0, sizeof(struct mdp3_img_data)); + + if (mfd->panel.type == MIPI_CMD_PANEL) + is_panel_type_cmd = true; + if (is_panel_type_cmd) { + rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("fail to enable iommu\n"); + return rc; + } + } + + rc = mdp3_get_img(&img, &data, MDP3_CLIENT_DMA_P); + if (rc) { + pr_err("fail to get overlay buffer\n"); + goto err; + } + + if (data.len < dma->source_config.stride * dma->source_config.height) { + pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n", + data.len, (dma->source_config.stride * + dma->source_config.height)); + mdp3_put_img(&data, MDP3_CLIENT_DMA_P); + rc = -EINVAL; + goto err; + } + + rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data); + if (rc) { + pr_err("fail to queue the overlay buffer, buffer drop\n"); + mdp3_put_img(&data, MDP3_CLIENT_DMA_P); + goto err; + } + rc = 0; +err: + if (is_panel_type_cmd) + mdp3_iommu_disable(MDP3_CLIENT_DMA_P); + return rc; +} + +int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd, + struct file *file, struct mdp_layer_commit_v1 *commit) +{ + int ret; + struct mdp_input_layer *layer, *layer_list; + struct mdp3_session_data *mdp3_session; + struct mdp3_dma *dma; + int layer_count = commit->input_layer_cnt; + int stride, format; + + /* Handle NULL commit */ + if (!layer_count) { + pr_debug("Handle NULL commit\n"); + return 0; + } + + mdp3_session = mfd->mdp.private1; + dma = mdp3_session->dma; + + 
mutex_lock(&mdp3_session->lock); + + mdp3_bufq_deinit(&mdp3_session->bufq_in); + + layer_list = commit->input_layers; + layer = &layer_list[0]; + + stride = layer->buffer.width * ppp_bpp(layer->buffer.format); + format = mdp3_ctrl_get_source_format(layer->buffer.format); + pr_debug("stride:%d layer_width:%d", stride, layer->buffer.width); + + if ((dma->source_config.format != format) || + (dma->source_config.stride != stride)) { + dma->source_config.format = format; + dma->source_config.stride = stride; + dma->output_config.pack_pattern = + mdp3_ctrl_get_pack_pattern(layer->buffer.format); + dma->update_src_cfg = true; + } + mdp3_session->overlay.id = 1; + + ret = __mdp3_handle_buffer_fences(mfd, commit, layer_list); + if (ret) { + pr_err("Failed to handle buffer fences\n"); + mutex_unlock(&mdp3_session->lock); + return ret; + } + + ret = __mdp3_map_layer_buffer(mfd, layer); + if (ret) { + pr_err("Failed to map buffer\n"); + mutex_unlock(&mdp3_session->lock); + return ret; + } + + pr_debug("mdp3 precommit ret = %d\n", ret); + mutex_unlock(&mdp3_session->lock); + return ret; +} + +/* + * mdp3_layer_atomic_validate() - validate input layers + * @mfd: Framebuffer data structure for display + * @commit: Commit version-1 structure for display + * + * This function validates only input layers received from client. It + * does perform any validation for mdp_output_layer defined for writeback + * display. + */ +int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd, + struct file *file, struct mdp_layer_commit_v1 *commit) +{ + struct mdp3_session_data *mdp3_session; + + if (!mfd || !commit) { + pr_err("invalid input params\n"); + return -EINVAL; + } + + if (mdss_fb_is_power_off(mfd)) { + pr_err("display interface is in off state fb:%d\n", + mfd->index); + return -EPERM; + } + + mdp3_session = mfd->mdp.private1; + + if (mdp3_session->in_splash_screen) { + mdp3_ctrl_reset(mfd); + mdp3_session->in_splash_screen = 0; + } + + return 0; +} + diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c new file mode 100644 index 0000000000000000000000000000000000000000..34f08230aa51395446ef9e6ae280d1ba73c4e740 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_ppp.c @@ -0,0 +1,1721 @@ +/* Copyright (c) 2007, 2013-2014, 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2007 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/proc_fs.h" +#include + +#include "mdss_fb.h" +#include "mdp3_ppp.h" +#include "mdp3_hwio.h" +#include "mdp3.h" +#include "mdss_debug.h" +#include "mdss_sync.h" + +#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT) +#define MDP_RELEASE_BW_TIMEOUT 50 + +#define MDP_PPP_MAX_BPP 4 +#define MDP_PPP_DYNAMIC_FACTOR 3 +#define MDP_PPP_MAX_READ_WRITE 3 +#define MDP_PPP_MAX_WIDTH 0xFFF +#define ENABLE_SOLID_FILL 0x2 +#define DISABLE_SOLID_FILL 0x0 +#define BLEND_LATENCY 3 +#define CSC_LATENCY 1 + +#define YUV_BW_FUDGE_NUM 10 +#define YUV_BW_FUDGE_DEN 10 + +struct ppp_resource ppp_res; + +static const bool valid_fmt[MDP_IMGTYPE_LIMIT] = { + [MDP_RGB_565] = true, + [MDP_BGR_565] = true, + [MDP_RGB_888] = true, + [MDP_BGR_888] = true, + [MDP_BGRA_8888] = true, + [MDP_RGBA_8888] = true, + [MDP_ARGB_8888] = true, + [MDP_XRGB_8888] = true, + [MDP_RGBX_8888] = true, + [MDP_Y_CRCB_H2V2] = true, + [MDP_Y_CBCR_H2V2] = true, + [MDP_Y_CBCR_H2V2_ADRENO] = true, + [MDP_Y_CBCR_H2V2_VENUS] = true, + [MDP_YCRYCB_H2V1] = true, + [MDP_Y_CBCR_H2V1] = true, + [MDP_Y_CRCB_H2V1] = true, + [MDP_BGRX_8888] = true, +}; + +#define MAX_LIST_WINDOW 16 +#define MDP3_PPP_MAX_LIST_REQ 8 + +struct blit_req_list { + int count; + struct mdp_blit_req req_list[MAX_LIST_WINDOW]; + struct mdp3_img_data src_data[MAX_LIST_WINDOW]; + struct mdp3_img_data dst_data[MAX_LIST_WINDOW]; + struct mdss_fence *acq_fen[MDP_MAX_FENCE_FD]; + u32 acq_fen_cnt; + int cur_rel_fen_fd; + struct sync_pt *cur_rel_sync_pt; + struct mdss_fence *cur_rel_fence; + struct mdss_fence *last_rel_fence; +}; + +struct blit_req_queue { + struct blit_req_list req[MDP3_PPP_MAX_LIST_REQ]; + int count; + int push_idx; + int pop_idx; +}; + +struct ppp_status { + bool wait_for_pop; + struct completion ppp_comp; + struct completion pop_q_comp; + struct mutex req_mutex; /* Protect request queue */ + struct mutex config_ppp_mutex; /* Only one client configure register */ + struct msm_fb_data_type *mfd; + + struct kthread_work blit_work; + struct kthread_worker kworker; + struct task_struct *blit_thread; + struct blit_req_queue req_q; + + struct mdss_timeline *timeline; + + int timeline_value; + + struct timer_list free_bw_timer; + struct work_struct free_bw_work; + bool bw_update; + bool bw_on; + u32 mdp_clk; +}; + +static struct ppp_status *ppp_stat; +static bool is_blit_optimization_possible(struct blit_req_list *req, int indx); + +static inline u64 fudge_factor(u64 val, u32 numer, u32 denom) +{ + u64 result = (val * (u64)numer); + + do_div(result, denom); + return result; +} + +int ppp_get_bpp(uint32_t format, uint32_t fb_format) +{ + int bpp = -EINVAL; + + if (format == MDP_FB_FORMAT) + format = fb_format; + + bpp = ppp_bpp(format); + if (bpp <= 0) + pr_err("%s incorrect format %d\n", __func__, format); + return bpp; +} + +int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req, + struct mdp3_img_data *data) +{ + struct msmfb_data fb_data; + uint32_t stride; + int bpp = ppp_bpp(img->format); + + if (bpp <= 0) { + pr_err("%s incorrect format %d\n", __func__, img->format); + return -EINVAL; + } + + if (img->width > MDP_PPP_MAX_WIDTH) { + pr_err("%s incorrect width %d\n", __func__, img->width); + return -EINVAL; + } + + fb_data.flags = img->priv; + fb_data.memory_id = img->memory_id; + fb_data.offset = 0; + + stride = img->width * bpp; + data->padding = 16 * stride; + + return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP); +} + +/* Check format */ 
+int mdp3_ppp_verify_fmt(struct mdp_blit_req *req) +{ + if (MDP_IS_IMGTYPE_BAD(req->src.format) || + MDP_IS_IMGTYPE_BAD(req->dst.format)) { + pr_err("%s: Color format out of range\n", __func__); + return -EINVAL; + } + + if (!valid_fmt[req->src.format] || + !valid_fmt[req->dst.format]) { + pr_err("%s: Color format not supported\n", __func__); + return -EINVAL; + } + return 0; +} + +/* Check resolution */ +int mdp3_ppp_verify_res(struct mdp_blit_req *req) +{ + if ((req->src.width == 0) || (req->src.height == 0) || + (req->src_rect.w == 0) || (req->src_rect.h == 0) || + (req->dst.width == 0) || (req->dst.height == 0) || + (req->dst_rect.w == 0) || (req->dst_rect.h == 0)) { + pr_err("%s: Height/width can't be 0\n", __func__); + return -EINVAL; + } + + if (((req->src_rect.x + req->src_rect.w) > req->src.width) || + ((req->src_rect.y + req->src_rect.h) > req->src.height)) { + pr_err("%s: src roi larger than boundary\n", __func__); + return -EINVAL; + } + + if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) || + ((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) { + pr_err("%s: dst roi larger than boundary\n", __func__); + return -EINVAL; + } + return 0; +} + +/* scaling range check */ +int mdp3_ppp_verify_scale(struct mdp_blit_req *req) +{ + u32 src_width, src_height, dst_width, dst_height; + + src_width = req->src_rect.w; + src_height = req->src_rect.h; + + if (req->flags & MDP_ROT_90) { + dst_width = req->dst_rect.h; + dst_height = req->dst_rect.w; + } else { + dst_width = req->dst_rect.w; + dst_height = req->dst_rect.h; + } + + switch (req->dst.format) { + case MDP_Y_CRCB_H2V2: + case MDP_Y_CBCR_H2V2: + src_width = (src_width / 2) * 2; + src_height = (src_height / 2) * 2; + dst_width = (dst_width / 2) * 2; + dst_height = (dst_height / 2) * 2; + break; + + case MDP_Y_CRCB_H2V1: + case MDP_Y_CBCR_H2V1: + case MDP_YCRYCB_H2V1: + src_width = (src_width / 2) * 2; + dst_width = (dst_width / 2) * 2; + break; + + default: + break; + } + + if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width > + MDP_MAX_X_SCALE_FACTOR) + || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width < + MDP_MIN_X_SCALE_FACTOR)) { + pr_err("%s: x req scale factor beyond capability\n", __func__); + return -EINVAL; + } + + if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height > + MDP_MAX_Y_SCALE_FACTOR) + || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height < + MDP_MIN_Y_SCALE_FACTOR)) { + pr_err("%s: y req scale factor beyond capability\n", __func__); + return -EINVAL; + } + return 0; +} + +/* operation check */ +int mdp3_ppp_verify_op(struct mdp_blit_req *req) +{ + /* + * MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3 + * so using them together for MDP_SMART_BLIT. 
+ */ + if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT) + return 0; + if (req->flags & MDP_DEINTERLACE) { + pr_err("\n%s(): deinterlace not supported", __func__); + return -EINVAL; + } + + if (req->flags & MDP_SHARPENING) { + pr_err("\n%s(): sharpening not supported", __func__); + return -EINVAL; + } + return 0; +} + +int mdp3_ppp_verify_req(struct mdp_blit_req *req) +{ + int rc; + + if (req == NULL) { + pr_err("%s: req == null\n", __func__); + return -EINVAL; + } + + rc = mdp3_ppp_verify_fmt(req); + rc |= mdp3_ppp_verify_res(req); + rc |= mdp3_ppp_verify_scale(req); + rc |= mdp3_ppp_verify_op(req); + + return rc; +} + +int mdp3_ppp_pipe_wait(void) +{ + int ret = 1; + + /* + * wait 200 ms for ppp operation to complete before declaring + * the MDP hung + */ + ret = wait_for_completion_timeout( + &ppp_stat->ppp_comp, msecs_to_jiffies(200)); + if (!ret) + pr_err("%s: Timed out waiting for the MDP.\n", + __func__); + + return ret; +} + +uint32_t mdp3_calc_tpval(struct ppp_img_desc *img, uint32_t old_tp) +{ + uint32_t tpVal; + uint8_t plane_tp; + + tpVal = 0; + if ((img->color_fmt == MDP_RGB_565) + || (img->color_fmt == MDP_BGR_565)) { + /* transparent color conversion into 24 bpp */ + plane_tp = (uint8_t) ((old_tp & 0xF800) >> 11); + tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16; + plane_tp = (uint8_t) (old_tp & 0x1F); + tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8; + + plane_tp = (uint8_t) ((old_tp & 0x7E0) >> 5); + tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4)); + } else { + /* 24bit RGB to RBG conversion */ + tpVal = (old_tp & 0xFF00) >> 8; + tpVal |= (old_tp & 0xFF) << 8; + tpVal |= (old_tp & 0xFF0000); + } + + return tpVal; +} + +static void mdp3_ppp_intr_handler(int type, void *arg) +{ + complete(&ppp_stat->ppp_comp); +} + +static int mdp3_ppp_callback_setup(void) +{ + int rc; + struct mdp3_intr_cb ppp_done_cb = { + .cb = mdp3_ppp_intr_handler, + .data = NULL, + }; + + rc = mdp3_set_intr_callback(MDP3_PPP_DONE, &ppp_done_cb); + return rc; +} + +void mdp3_ppp_kickoff(void) +{ + init_completion(&ppp_stat->ppp_comp); + mdp3_irq_enable(MDP3_PPP_DONE); + ppp_enable(); + ATRACE_BEGIN("mdp3_wait_for_ppp_comp"); + mdp3_ppp_pipe_wait(); + ATRACE_END("mdp3_wait_for_ppp_comp"); + mdp3_irq_disable(MDP3_PPP_DONE); +} + +struct bpp_info { + int bpp_num; + int bpp_den; + int bpp_pln; +}; + +int mdp3_get_bpp_info(int format, struct bpp_info *bpp) +{ + int rc = 0; + + switch (format) { + case MDP_RGB_565: + case MDP_BGR_565: + bpp->bpp_num = 2; + bpp->bpp_den = 1; + bpp->bpp_pln = 2; + break; + case MDP_RGB_888: + case MDP_BGR_888: + bpp->bpp_num = 3; + bpp->bpp_den = 1; + bpp->bpp_pln = 3; + break; + case MDP_BGRA_8888: + case MDP_RGBA_8888: + case MDP_ARGB_8888: + case MDP_XRGB_8888: + case MDP_RGBX_8888: + case MDP_BGRX_8888: + bpp->bpp_num = 4; + bpp->bpp_den = 1; + bpp->bpp_pln = 4; + break; + case MDP_Y_CRCB_H2V2: + case MDP_Y_CBCR_H2V2: + case MDP_Y_CBCR_H2V2_ADRENO: + case MDP_Y_CBCR_H2V2_VENUS: + bpp->bpp_num = 3; + bpp->bpp_den = 2; + bpp->bpp_pln = 1; + break; + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + bpp->bpp_num = 2; + bpp->bpp_den = 1; + bpp->bpp_pln = 1; + break; + case MDP_YCRYCB_H2V1: + bpp->bpp_num = 2; + bpp->bpp_den = 1; + bpp->bpp_pln = 2; + break; + default: + rc = -EINVAL; + } + return rc; +} + +bool mdp3_is_blend(struct mdp_blit_req *req) +{ + if ((req->transp_mask != MDP_TRANSP_NOP) || + (req->alpha < MDP_ALPHA_NOP) || + (req->src.format == MDP_ARGB_8888) || + (req->src.format == MDP_BGRA_8888) || + (req->src.format == 
MDP_RGBA_8888)) + return true; + return false; +} + +bool mdp3_is_scale(struct mdp_blit_req *req) +{ + if (req->flags & MDP_ROT_90) { + if (req->src_rect.w != req->dst_rect.h || + req->src_rect.h != req->dst_rect.w) + return true; + } else { + if (req->src_rect.h != req->dst_rect.h || + req->src_rect.w != req->dst_rect.w) + return true; + } + return false; +} + +u32 mdp3_clk_calc(struct msm_fb_data_type *mfd, + struct blit_req_list *lreq, u32 fps) +{ + int i, lcount = 0; + struct mdp_blit_req *req; + u64 mdp_clk_rate = 0; + u32 scale_x = 0, scale_y = 0, scale = 0; + u32 blend_l, csc_l; + + lcount = lreq->count; + + blend_l = 100 * BLEND_LATENCY; + csc_l = 100 * CSC_LATENCY; + + for (i = 0; i < lcount; i++) { + req = &(lreq->req_list[i]); + + if (req->flags & MDP_SMART_BLIT) + continue; + + if (mdp3_is_scale(req)) { + if (req->flags & MDP_ROT_90) { + scale_x = 100 * req->src_rect.h / + req->dst_rect.w; + scale_y = 100 * req->src_rect.w / + req->dst_rect.h; + } else { + scale_x = 100 * req->src_rect.w / + req->dst_rect.w; + scale_y = 100 * req->src_rect.h / + req->dst_rect.h; + } + scale = max(scale_x, scale_y); + } + scale = scale >= 100 ? scale : 100; + if (mdp3_is_blend(req)) + scale = max(scale, blend_l); + + if (!check_if_rgb(req->src.format)) + scale = max(scale, csc_l); + + mdp_clk_rate += (req->src_rect.w * req->src_rect.h * + scale / 100) * fps; + } + mdp_clk_rate += (ppp_res.solid_fill_pixel * fps); + mdp_clk_rate = fudge_factor(mdp_clk_rate, + CLK_FUDGE_NUM, CLK_FUDGE_DEN); + pr_debug("mdp_clk_rate for ppp = %llu\n", mdp_clk_rate); + mdp_clk_rate = mdp3_clk_round_off(mdp_clk_rate); + + return mdp_clk_rate; +} + +u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp) +{ + int src_h, src_w; + int dst_h, dst_w; + + src_h = req->src_rect.h; + src_w = req->src_rect.w; + + dst_h = req->dst_rect.h; + dst_w = req->dst_rect.w; + + if ((!(req->flags & MDP_ROT_90) && src_h == dst_h && + src_w == dst_w) || ((req->flags & MDP_ROT_90) && + src_h == dst_w && src_w == dst_h)) + return bw_req; + + bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h)); + bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) + + (bw_req * dst_w) / (bpp * src_w)); + return bw_req; +} + +int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd, + struct blit_req_list *lreq) +{ + struct mdss_panel_info *panel_info = mfd->panel_info; + int i, lcount = 0; + struct mdp_blit_req *req; + struct bpp_info bpp; + u64 old_solid_fill_pixel = 0; + u64 new_solid_fill_pixel = 0; + u64 src_read_bw = 0; + u32 bg_read_bw = 0; + u32 dst_write_bw = 0; + u64 honest_ppp_ab = 0; + u32 fps = 0; + int smart_blit_fg_indx = -1; + u32 smart_blit_bg_read_bw = 0; + + ATRACE_BEGIN(__func__); + lcount = lreq->count; + if (lcount == 0) { + pr_err("Blit with request count 0, continue to recover!!!\n"); + ATRACE_END(__func__); + return 0; + } + if (lreq->req_list[0].flags & MDP_SOLID_FILL) { + req = &(lreq->req_list[0]); + mdp3_get_bpp_info(req->dst.format, &bpp); + old_solid_fill_pixel = ppp_res.solid_fill_pixel; + new_solid_fill_pixel = req->dst_rect.w * req->dst_rect.h; + ppp_res.solid_fill_pixel += new_solid_fill_pixel; + ppp_res.solid_fill_byte += req->dst_rect.w * req->dst_rect.h * + bpp.bpp_num / bpp.bpp_den; + if ((old_solid_fill_pixel >= new_solid_fill_pixel) || + (mdp3_res->solid_fill_vote_en)) { + pr_debug("Last fill pixels are higher or fill_en %d\n", + mdp3_res->solid_fill_vote_en); + ATRACE_END(__func__); + return 0; + } + } + + for (i = 0; i < lcount; i++) { + /* Set Smart blit flag before BW calculation */ + 
is_blit_optimization_possible(lreq, i); + req = &(lreq->req_list[i]); + + if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) { + if (fps == 0) + fps = req->fps; + else + fps = panel_info->mipi.frame_rate; + } + + mdp3_get_bpp_info(req->src.format, &bpp); + if (lreq->req_list[i].flags & MDP_SMART_BLIT) { + /* + * Flag for smart blit FG layer index + * If blit request at index "n" has + * MDP_SMART_BLIT flag set then it will be used as BG + * layer in smart blit and request at index "n+1" + * will be used as FG layer + */ + smart_blit_fg_indx = i + 1; + bg_read_bw = req->src_rect.w * req->src_rect.h * + bpp.bpp_num / bpp.bpp_den; + bg_read_bw = mdp3_adjust_scale_factor(req, + bg_read_bw, bpp.bpp_pln); + /* Cache read BW of smart blit BG layer */ + smart_blit_bg_read_bw = bg_read_bw; + } else { + src_read_bw = req->src_rect.w * req->src_rect.h * + bpp.bpp_num / bpp.bpp_den; + src_read_bw = mdp3_adjust_scale_factor(req, + src_read_bw, bpp.bpp_pln); + if (!(check_if_rgb(req->src.format))) { + src_read_bw = fudge_factor(src_read_bw, + YUV_BW_FUDGE_NUM, + YUV_BW_FUDGE_DEN); + } + mdp3_get_bpp_info(req->dst.format, &bpp); + + if (smart_blit_fg_indx == i) { + bg_read_bw = smart_blit_bg_read_bw; + smart_blit_fg_indx = -1; + } else { + if ((req->transp_mask != MDP_TRANSP_NOP) || + (req->alpha < MDP_ALPHA_NOP) || + (req->src.format == MDP_ARGB_8888) || + (req->src.format == MDP_BGRA_8888) || + (req->src.format == MDP_RGBA_8888)) { + bg_read_bw = req->dst_rect.w * + req->dst_rect.h * + bpp.bpp_num / bpp.bpp_den; + bg_read_bw = mdp3_adjust_scale_factor( + req, bg_read_bw, + bpp.bpp_pln); + } else { + bg_read_bw = 0; + } + } + dst_write_bw = req->dst_rect.w * req->dst_rect.h * + bpp.bpp_num / bpp.bpp_den; + honest_ppp_ab += (src_read_bw + bg_read_bw + + dst_write_bw); + } + } + + if (fps == 0) + fps = panel_info->mipi.frame_rate; + + if (lreq->req_list[0].flags & MDP_SOLID_FILL) { + honest_ppp_ab = ppp_res.solid_fill_byte * 4; + pr_debug("solid fill honest_ppp_ab %llu\n", honest_ppp_ab); + } else { + honest_ppp_ab += ppp_res.solid_fill_byte; + mdp3_res->solid_fill_vote_en = true; + } + + honest_ppp_ab = honest_ppp_ab * fps; + if (honest_ppp_ab != ppp_res.next_ab) { + ppp_res.next_ab = honest_ppp_ab; + ppp_res.next_ib = honest_ppp_ab; + ppp_stat->bw_update = true; + pr_debug("solid fill ab = %llx, total ab = %llx ", + (ppp_res.solid_fill_byte * fps), honest_ppp_ab); + pr_debug("(%d fps) Solid_fill_vote %d\n", + fps, mdp3_res->solid_fill_vote_en); + ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab); + } + ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq, fps); + ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate); + ATRACE_END(__func__); + return 0; +} + +int mdp3_ppp_turnon(struct msm_fb_data_type *mfd, int on_off) +{ + uint64_t ab = 0, ib = 0; + int rate = 0; + int rc; + + if (on_off) { + rate = ppp_res.clk_rate; + ab = ppp_res.next_ab; + ib = ppp_res.next_ib; + } + mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, rate, MDP3_CLIENT_PPP); + rc = mdp3_res_update(on_off, 0, MDP3_CLIENT_PPP); + if (rc < 0) { + pr_err("%s: mdp3_clk_enable failed\n", __func__); + return rc; + } + rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib); + if (rc < 0) { + mdp3_res_update(!on_off, 0, MDP3_CLIENT_PPP); + pr_err("%s: scale_set_quota failed\n", __func__); + return rc; + } + ppp_stat->bw_on = on_off; + ppp_stat->mdp_clk = MDP_CORE_CLK_RATE_SVS; + ppp_stat->bw_update = false; + return 0; +} + +void mdp3_start_ppp(struct ppp_blit_op *blit_op) +{ + /* Wait for the pipe to clear */ + if (MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS) & + 
MDP3_PPP_ACTIVE) { + pr_err("ppp core is hung up on previous request\n"); + return; + } + config_ppp_op_mode(blit_op); + if (blit_op->solid_fill) { + MDP3_REG_WRITE(0x10138, 0x10000000); + MDP3_REG_WRITE(0x1014c, 0xffffffff); + MDP3_REG_WRITE(0x101b8, 0); + MDP3_REG_WRITE(0x101bc, 0); + MDP3_REG_WRITE(0x1013c, 0); + MDP3_REG_WRITE(0x10140, 0); + MDP3_REG_WRITE(0x10144, 0); + MDP3_REG_WRITE(0x10148, 0); + MDP3_REG_WRITE(MDP3_TFETCH_FILL_COLOR, + blit_op->solid_fill_color); + MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL, + ENABLE_SOLID_FILL); + } else { + MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL, + DISABLE_SOLID_FILL); + } + /* Skip PPP kickoff for SMART_BLIT BG layer */ + if (blit_op->mdp_op & MDPOP_SMART_BLIT) + pr_debug("Skip mdp3_ppp_kickoff\n"); + else + mdp3_ppp_kickoff(); + + if (!(blit_op->solid_fill)) { + ppp_res.solid_fill_pixel = 0; + ppp_res.solid_fill_byte = 0; + } +} + +static int solid_fill_workaround(struct mdp_blit_req *req, + struct ppp_blit_op *blit_op) +{ + /* Make width 2 when there is a solid fill of width 1, and make + * sure width does not become zero while trying to avoid odd width + */ + if (blit_op->dst.roi.width == 1) { + if (req->dst_rect.x + 2 > req->dst.width) { + pr_err("%s: Unable to handle solid fill of width 1", + __func__); + return -EINVAL; + } + blit_op->dst.roi.width = 2; + } + if (blit_op->src.roi.width == 1) { + if (req->src_rect.x + 2 > req->src.width) { + pr_err("%s: Unable to handle solid fill of width 1", + __func__); + return -EINVAL; + } + blit_op->src.roi.width = 2; + } + + /* Avoid odd width, as it could hang ppp during solid fill */ + blit_op->dst.roi.width = (blit_op->dst.roi.width / 2) * 2; + blit_op->src.roi.width = (blit_op->src.roi.width / 2) * 2; + + /* Set src format to RGBX, to avoid ppp hang issues */ + blit_op->src.color_fmt = MDP_RGBX_8888; + + /* Avoid RGBA format, as it could hang ppp during solid fill */ + if (blit_op->dst.color_fmt == MDP_RGBA_8888) + blit_op->dst.color_fmt = MDP_RGBX_8888; + return 0; +} + +static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op, + struct mdp_blit_req *req, struct mdp3_img_data *src_data, + struct mdp3_img_data *dst_data) +{ + unsigned long srcp0_start, srcp0_len, dst_start, dst_len; + uint32_t dst_width, dst_height; + int ret = 0; + + srcp0_start = (unsigned long) src_data->addr; + srcp0_len = (unsigned long) src_data->len; + dst_start = (unsigned long) dst_data->addr; + dst_len = (unsigned long) dst_data->len; + + blit_op->dst.prop.width = req->dst.width; + blit_op->dst.prop.height = req->dst.height; + + blit_op->dst.color_fmt = req->dst.format; + blit_op->dst.p0 = (void *) dst_start; + blit_op->dst.p0 += req->dst.offset; + + blit_op->dst.roi.x = req->dst_rect.x; + blit_op->dst.roi.y = req->dst_rect.y; + blit_op->dst.roi.width = req->dst_rect.w; + blit_op->dst.roi.height = req->dst_rect.h; + + blit_op->src.roi.x = req->src_rect.x; + blit_op->src.roi.y = req->src_rect.y; + blit_op->src.roi.width = req->src_rect.w; + blit_op->src.roi.height = req->src_rect.h; + + blit_op->src.prop.width = req->src.width; + blit_op->src.prop.height = req->src.height; + blit_op->src.color_fmt = req->src.format; + + + blit_op->src.p0 = (void *) (srcp0_start + req->src.offset); + if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO) + blit_op->src.p1 = + (void *) ((uint32_t) blit_op->src.p0 + + ALIGN((ALIGN(req->src.width, 32) * + ALIGN(req->src.height, 32)), 4096)); + else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) + blit_op->src.p1 = + (void *) ((uint32_t) blit_op->src.p0 + + ALIGN((ALIGN(req->src.width, 
128) * + ALIGN(req->src.height, 32)), 4096)); + else + blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 + + req->src.width * req->src.height); + + if (req->flags & MDP_IS_FG) + blit_op->mdp_op |= MDPOP_LAYER_IS_FG; + + /* blending check */ + if (req->transp_mask != MDP_TRANSP_NOP) { + blit_op->mdp_op |= MDPOP_TRANSP; + blit_op->blend.trans_color = + mdp3_calc_tpval(&blit_op->src, req->transp_mask); + } else { + blit_op->blend.trans_color = 0; + } + + req->alpha &= 0xff; + if (req->alpha < MDP_ALPHA_NOP) { + blit_op->mdp_op |= MDPOP_ALPHAB; + blit_op->blend.const_alpha = req->alpha; + } else { + blit_op->blend.const_alpha = 0xff; + } + + /* rotation check */ + if (req->flags & MDP_FLIP_LR) + blit_op->mdp_op |= MDPOP_LR; + if (req->flags & MDP_FLIP_UD) + blit_op->mdp_op |= MDPOP_UD; + if (req->flags & MDP_ROT_90) + blit_op->mdp_op |= MDPOP_ROT90; + if (req->flags & MDP_DITHER) + blit_op->mdp_op |= MDPOP_DITHER; + + if (req->flags & MDP_BLEND_FG_PREMULT) + blit_op->mdp_op |= MDPOP_FG_PM_ALPHA; + + /* scale check */ + if (req->flags & MDP_ROT_90) { + dst_width = req->dst_rect.h; + dst_height = req->dst_rect.w; + } else { + dst_width = req->dst_rect.w; + dst_height = req->dst_rect.h; + } + + if ((blit_op->src.roi.width != dst_width) || + (blit_op->src.roi.height != dst_height)) + blit_op->mdp_op |= MDPOP_ASCALE; + + if (req->flags & MDP_BLUR) + blit_op->mdp_op |= MDPOP_ASCALE | MDPOP_BLUR; + + if (req->flags & MDP_SOLID_FILL) { + ret = solid_fill_workaround(req, blit_op); + if (ret) + return ret; + + blit_op->solid_fill_color = (req->const_color.g & 0xFF)| + (req->const_color.r & 0xFF) << 8 | + (req->const_color.b & 0xFF) << 16 | + (req->const_color.alpha & 0xFF) << 24; + blit_op->solid_fill = true; + } else { + blit_op->solid_fill = false; + } + + if (req->flags & MDP_SMART_BLIT) + blit_op->mdp_op |= MDPOP_SMART_BLIT; + + return ret; +} + +static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op, + struct mdp_blit_req *req) +{ + int dst_h, src_w, i; + uint32_t mdp_op = blit_op->mdp_op; + void *src_p0 = blit_op->src.p0; + void *src_p1 = blit_op->src.p1; + void *dst_p0 = blit_op->dst.p0; + + src_w = req->src_rect.w; + dst_h = blit_op->dst.roi.height; + /* bg tile fetching HW workaround */ + for (i = 0; i < (req->dst_rect.h / 16); i++) { + /* this tile size */ + blit_op->dst.roi.height = 16; + blit_op->src.roi.width = + (16 * req->src_rect.w) / req->dst_rect.h; + + /* if it's out of scale range... */ + if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) / + blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) + blit_op->src.roi.width = + (MDP_SCALE_Q_FACTOR * + blit_op->dst.roi.height) / + MDP_MAX_X_SCALE_FACTOR; + else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) / + blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) + blit_op->src.roi.width = + (MDP_SCALE_Q_FACTOR * + blit_op->dst.roi.height) / + MDP_MIN_X_SCALE_FACTOR; + + mdp3_start_ppp(blit_op); + + /* next tile location */ + blit_op->dst.roi.y += 16; + blit_op->src.roi.x += blit_op->src.roi.width; + + /* this is for a remainder update */ + dst_h -= 16; + src_w -= blit_op->src.roi.width; + /* restore parameters that may have been overwritten */ + blit_op->mdp_op = mdp_op; + blit_op->src.p0 = src_p0; + blit_op->src.p1 = src_p1; + blit_op->dst.p0 = dst_p0; + } + + if ((dst_h < 0) || (src_w < 0)) + pr_err("msm_fb: mdp_blt_ex() unexpected result! 
line:%d\n", + __LINE__); + + /* remainder update */ + if ((dst_h > 0) && (src_w > 0)) { + u32 tmp_v; + + blit_op->dst.roi.height = dst_h; + blit_op->src.roi.width = src_w; + + if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) / + blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) { + tmp_v = (MDP_SCALE_Q_FACTOR * + blit_op->dst.roi.height) / + MDP_MAX_X_SCALE_FACTOR + + ((MDP_SCALE_Q_FACTOR * + blit_op->dst.roi.height) % + MDP_MAX_X_SCALE_FACTOR ? 1 : 0); + + /* move x location as roi width gets bigger */ + blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width; + blit_op->src.roi.width = tmp_v; + } else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) / + blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) { + tmp_v = (MDP_SCALE_Q_FACTOR * + blit_op->dst.roi.height) / + MDP_MIN_X_SCALE_FACTOR + + ((MDP_SCALE_Q_FACTOR * + blit_op->dst.roi.height) % + MDP_MIN_X_SCALE_FACTOR ? 1 : 0); + /* + * we don't move x location for continuity of + * source image + */ + blit_op->src.roi.width = tmp_v; + } + + + mdp3_start_ppp(blit_op); + } +} + +static int mdp3_ppp_blit(struct msm_fb_data_type *mfd, + struct mdp_blit_req *req, struct mdp3_img_data *src_data, + struct mdp3_img_data *dst_data) +{ + struct ppp_blit_op blit_op; + int ret = 0; + + memset(&blit_op, 0, sizeof(blit_op)); + + if (req->dst.format == MDP_FB_FORMAT) + req->dst.format = mfd->fb_imgType; + if (req->src.format == MDP_FB_FORMAT) + req->src.format = mfd->fb_imgType; + + if (mdp3_ppp_verify_req(req)) { + pr_err("%s: invalid image!\n", __func__); + return -EINVAL; + } + + ret = mdp3_ppp_process_req(&blit_op, req, src_data, dst_data); + if (ret) { + pr_err("%s: Failed to process the blit request", __func__); + return ret; + } + + if (((blit_op.mdp_op & (MDPOP_TRANSP | MDPOP_ALPHAB)) || + (req->src.format == MDP_ARGB_8888) || + (req->src.format == MDP_BGRA_8888) || + (req->src.format == MDP_RGBA_8888)) && + (blit_op.mdp_op & MDPOP_ROT90) && (req->dst_rect.w <= 16)) { + mdp3_ppp_tile_workaround(&blit_op, req); + } else { + mdp3_start_ppp(&blit_op); + } + + return 0; +} + +static int mdp3_ppp_blit_workaround(struct msm_fb_data_type *mfd, + struct mdp_blit_req *req, unsigned int remainder, + struct mdp3_img_data *src_data, + struct mdp3_img_data *dst_data) +{ + int ret; + struct mdp_blit_req splitreq; + int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1; + int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1; + + /* make new request as provide by user */ + splitreq = *req; + + /* break dest roi at width*/ + d_y_0 = d_y_1 = req->dst_rect.y; + d_h_0 = d_h_1 = req->dst_rect.h; + d_x_0 = req->dst_rect.x; + + if (remainder == 14 || remainder == 6) + d_w_1 = req->dst_rect.w / 2; + else + d_w_1 = (req->dst_rect.w - 1) / 2 - 1; + + d_w_0 = req->dst_rect.w - d_w_1; + d_x_1 = d_x_0 + d_w_0; + /* blit first region */ + if (((splitreq.flags & 0x07) == 0x07) || + ((splitreq.flags & 0x07) == 0x05) || + ((splitreq.flags & 0x07) == 0x02) || + ((splitreq.flags & 0x07) == 0x0)) { + + if (splitreq.flags & MDP_ROT_90) { + s_x_0 = s_x_1 = req->src_rect.x; + s_w_0 = s_w_1 = req->src_rect.w; + s_y_0 = req->src_rect.y; + s_h_1 = (req->src_rect.h * d_w_1) / + req->dst_rect.w; + s_h_0 = req->src_rect.h - s_h_1; + s_y_1 = s_y_0 + s_h_0; + if (d_w_1 >= 8 * s_h_1) { + s_h_1++; + s_y_1--; + } + } else { + s_y_0 = s_y_1 = req->src_rect.y; + s_h_0 = s_h_1 = req->src_rect.h; + s_x_0 = req->src_rect.x; + s_w_1 = (req->src_rect.w * d_w_1) / + req->dst_rect.w; + s_w_0 = req->src_rect.w - s_w_1; + s_x_1 = s_x_0 + s_w_0; + if (d_w_1 >= 8 * s_w_1) { + s_w_1++; + 
s_x_1--; + } + } + + splitreq.src_rect.h = s_h_0; + splitreq.src_rect.y = s_y_0; + splitreq.dst_rect.h = d_h_0; + splitreq.dst_rect.y = d_y_0; + splitreq.src_rect.x = s_x_0; + splitreq.src_rect.w = s_w_0; + splitreq.dst_rect.x = d_x_0; + splitreq.dst_rect.w = d_w_0; + } else { + if (splitreq.flags & MDP_ROT_90) { + s_x_0 = s_x_1 = req->src_rect.x; + s_w_0 = s_w_1 = req->src_rect.w; + s_y_0 = req->src_rect.y; + s_h_1 = (req->src_rect.h * d_w_0) / + req->dst_rect.w; + s_h_0 = req->src_rect.h - s_h_1; + s_y_1 = s_y_0 + s_h_0; + if (d_w_0 >= 8 * s_h_1) { + s_h_1++; + s_y_1--; + } + } else { + s_y_0 = s_y_1 = req->src_rect.y; + s_h_0 = s_h_1 = req->src_rect.h; + s_x_0 = req->src_rect.x; + s_w_1 = (req->src_rect.w * d_w_0) / + req->dst_rect.w; + s_w_0 = req->src_rect.w - s_w_1; + s_x_1 = s_x_0 + s_w_0; + if (d_w_0 >= 8 * s_w_1) { + s_w_1++; + s_x_1--; + } + } + splitreq.src_rect.h = s_h_0; + splitreq.src_rect.y = s_y_0; + splitreq.dst_rect.h = d_h_1; + splitreq.dst_rect.y = d_y_1; + splitreq.src_rect.x = s_x_0; + splitreq.src_rect.w = s_w_0; + splitreq.dst_rect.x = d_x_1; + splitreq.dst_rect.w = d_w_1; + } + + /* No need to split in height */ + ret = mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data); + + if (ret) + return ret; + /* blit second region */ + if (((splitreq.flags & 0x07) == 0x07) || + ((splitreq.flags & 0x07) == 0x05) || + ((splitreq.flags & 0x07) == 0x02) || + ((splitreq.flags & 0x07) == 0x0)) { + splitreq.src_rect.h = s_h_1; + splitreq.src_rect.y = s_y_1; + splitreq.dst_rect.h = d_h_1; + splitreq.dst_rect.y = d_y_1; + splitreq.src_rect.x = s_x_1; + splitreq.src_rect.w = s_w_1; + splitreq.dst_rect.x = d_x_1; + splitreq.dst_rect.w = d_w_1; + } else { + splitreq.src_rect.h = s_h_1; + splitreq.src_rect.y = s_y_1; + splitreq.dst_rect.h = d_h_0; + splitreq.dst_rect.y = d_y_0; + splitreq.src_rect.x = s_x_1; + splitreq.src_rect.w = s_w_1; + splitreq.dst_rect.x = d_x_0; + splitreq.dst_rect.w = d_w_0; + } + + /* No need to split in height ... just width */ + return mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data); +} + +int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd, + struct mdp_blit_req *req, + struct mdp3_img_data *src_data, + struct mdp3_img_data *dst_data) +{ + int ret; + unsigned int remainder = 0, is_bpp_4 = 0; + + if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) { + pr_err("mdp_ppp: src img of zero size!\n"); + return -EINVAL; + } + if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0)) + return 0; + + /* MDP width split workaround */ + remainder = (req->dst_rect.w) % 16; + ret = ppp_get_bpp(req->dst.format, mfd->fb_imgType); + if (ret <= 0) { + pr_err("mdp_ppp: incorrect bpp!\n"); + return -EINVAL; + } + is_bpp_4 = (ret == 4) ? 1 : 0; + + if ((is_bpp_4 && (remainder == 6 || remainder == 14)) && + !(req->flags & MDP_SOLID_FILL)) + ret = mdp3_ppp_blit_workaround(mfd, req, remainder, + src_data, dst_data); + else + ret = mdp3_ppp_blit(mfd, req, src_data, dst_data); + return ret; +} + +void mdp3_ppp_wait_for_fence(struct blit_req_list *req) +{ + int i, ret = 0; + + ATRACE_BEGIN(__func__); + /* buf sync */ + for (i = 0; i < req->acq_fen_cnt; i++) { + ret = mdss_wait_sync_fence(req->acq_fen[i], + WAIT_FENCE_FINAL_TIMEOUT); + if (ret < 0) { + pr_err("%s: sync_fence_wait failed! 
ret = %x\n", + __func__, ret); + break; + } + mdss_put_sync_fence(req->acq_fen[i]); + } + ATRACE_END(__func__); + if (ret < 0) { + while (i < req->acq_fen_cnt) { + mdss_put_sync_fence(req->acq_fen[i]); + i++; + } + } + req->acq_fen_cnt = 0; +} + +void mdp3_ppp_signal_timeline(struct blit_req_list *req) +{ + mdss_inc_timeline(ppp_stat->timeline, 1); + MDSS_XLOG(ppp_stat->timeline->value, ppp_stat->timeline_value); + req->last_rel_fence = req->cur_rel_fence; + req->cur_rel_fence = 0; +} + + +static void mdp3_ppp_deinit_buf_sync(struct blit_req_list *req) +{ + int i; + + put_unused_fd(req->cur_rel_fen_fd); + mdss_put_sync_fence(req->cur_rel_fence); + req->cur_rel_fence = NULL; + req->cur_rel_fen_fd = 0; + ppp_stat->timeline_value--; + for (i = 0; i < req->acq_fen_cnt; i++) + mdss_put_sync_fence(req->acq_fen[i]); + req->acq_fen_cnt = 0; +} + +static int mdp3_ppp_handle_buf_sync(struct blit_req_list *req, + struct mdp_buf_sync *buf_sync) +{ + int i, fence_cnt = 0, ret = 0; + int acq_fen_fd[MDP_MAX_FENCE_FD]; + struct mdss_fence *fence; + + if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) || + (ppp_stat->timeline == NULL)) + return -EINVAL; + + if (buf_sync->acq_fen_fd_cnt) + ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd, + buf_sync->acq_fen_fd_cnt * sizeof(int)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) { + fence = mdss_get_fd_sync_fence(acq_fen_fd[i]); + if (fence == NULL) { + pr_info("%s: null fence! i=%d fd=%d\n", __func__, i, + acq_fen_fd[i]); + ret = -EINVAL; + break; + } + req->acq_fen[i] = fence; + } + fence_cnt = i; + if (ret) + goto buf_sync_err_1; + req->acq_fen_cnt = fence_cnt; + if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT) + mdp3_ppp_wait_for_fence(req); + + MDSS_XLOG(ppp_stat->timeline_value); + + /* create fence */ + req->cur_rel_fence = mdss_get_sync_fence(ppp_stat->timeline, + "ppp_fence", NULL, ppp_stat->timeline_value++); + if (req->cur_rel_fence == NULL) { + req->cur_rel_sync_pt = NULL; + pr_err("%s: cannot create fence\n", __func__); + ret = -ENOMEM; + goto buf_sync_err_2; + } + /* create fd */ + return ret; +buf_sync_err_2: + ppp_stat->timeline_value--; +buf_sync_err_1: + for (i = 0; i < fence_cnt; i++) + mdss_put_sync_fence(req->acq_fen[i]); + req->acq_fen_cnt = 0; + return ret; +} + +void mdp3_ppp_req_push(struct blit_req_queue *req_q, struct blit_req_list *req) +{ + int idx = req_q->push_idx; + + req_q->req[idx] = *req; + req_q->count++; + req_q->push_idx = (req_q->push_idx + 1) % MDP3_PPP_MAX_LIST_REQ; +} + +struct blit_req_list *mdp3_ppp_next_req(struct blit_req_queue *req_q) +{ + struct blit_req_list *req; + + if (req_q->count == 0) + return NULL; + req = &req_q->req[req_q->pop_idx]; + return req; +} + +void mdp3_ppp_req_pop(struct blit_req_queue *req_q) +{ + req_q->count--; + req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ; +} + +void mdp3_free_fw_timer_func(unsigned long arg) +{ + mdp3_res->solid_fill_vote_en = false; + schedule_work(&ppp_stat->free_bw_work); +} + +static void mdp3_free_bw_wq_handler(struct work_struct *work) +{ + struct msm_fb_data_type *mfd = ppp_stat->mfd; + + mutex_lock(&ppp_stat->config_ppp_mutex); + if (ppp_stat->bw_on) + mdp3_ppp_turnon(mfd, 0); + mutex_unlock(&ppp_stat->config_ppp_mutex); +} + +static bool is_hw_workaround_needed(struct mdp_blit_req req) +{ + bool result = false; + bool is_bpp_4 = false; + uint32_t remainder = 0; + uint32_t bpp = ppp_get_bpp(req.dst.format, ppp_stat->mfd->fb_imgType); + + /* MDP width split 
workaround */ + remainder = (req.dst_rect.w) % 16; + is_bpp_4 = (bpp == 4) ? 1 : 0; + if ((is_bpp_4 && (remainder == 6 || remainder == 14)) && + !(req.flags & MDP_SOLID_FILL)) + result = true; + + /* bg tile fetching HW workaround */ + if (((req.alpha < MDP_ALPHA_NOP) || + (req.transp_mask != MDP_TRANSP_NOP) || + (req.src.format == MDP_ARGB_8888) || + (req.src.format == MDP_BGRA_8888) || + (req.src.format == MDP_RGBA_8888)) && + (req.flags & MDP_ROT_90) && (req.dst_rect.w <= 16)) + result = true; + + return result; +} + +static bool is_roi_equal(struct mdp_blit_req req0, + struct mdp_blit_req req1) +{ + bool result = false; + struct mdss_panel_info *panel_info = ppp_stat->mfd->panel_info; + + /* + * Check req0 and req1 layer destination ROI and return true if + * they are equal. + */ + if ((req0.dst_rect.x == req1.dst_rect.x) && + (req0.dst_rect.y == req1.dst_rect.y) && + (req0.dst_rect.w == req1.dst_rect.w) && + (req0.dst_rect.h == req1.dst_rect.h)) + result = true; + /* + * Layers are source cropped and cropped layer width and hight are + * same panel width and height + */ + else if ((req0.dst_rect.w == req1.dst_rect.w) && + (req0.dst_rect.h == req1.dst_rect.h) && + (req0.dst_rect.w == panel_info->xres) && + (req0.dst_rect.h == panel_info->yres)) + result = true; + + return result; +} + +static bool is_scaling_needed(struct mdp_blit_req req) +{ + bool result = true; + + /* Return true if layer need scaling else return false */ + if ((req.src_rect.w == req.dst_rect.w) && + (req.src_rect.h == req.dst_rect.h)) + result = false; + return result; +} + +static bool is_blit_optimization_possible(struct blit_req_list *req, int indx) +{ + int next = indx + 1; + bool status = false; + struct mdp3_img_data tmp_data; + bool dst_roi_equal = false; + bool hw_woraround_active = false; + struct mdp_blit_req bg_req; + struct mdp_blit_req fg_req; + + if (!(mdp3_res->smart_blit_en)) { + pr_debug("Smart BLIT disabled from sysfs\n"); + return status; + } + if (next < req->count) { + bg_req = req->req_list[indx]; + fg_req = req->req_list[next]; + hw_woraround_active = is_hw_workaround_needed(bg_req); + dst_roi_equal = is_roi_equal(bg_req, fg_req); + /* + * Check userspace Smart BLIT Flag for current and next + * request Flag for smart blit FG layer index If blit + * request at index "n" has MDP_SMART_BLIT flag set then + * it will be used as BG layer in smart blit + * and request at index "n+1" will be used as FG layer + */ + if ((bg_req.flags & MDP_SMART_BLIT) && + (!(fg_req.flags & MDP_SMART_BLIT)) && + (!(hw_woraround_active))) + status = true; + /* + * Enable SMART blit between request 0(BG) & request 1(FG) when + * destination ROI of BG and FG layer are same, + * No scaling on BG layer + * No rotation on BG Layer. + * BG Layer color format is RGB and marked as MDP_IS_FG. + */ + else if ((mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) && + (indx == 0) && (dst_roi_equal) && + (bg_req.flags & MDP_IS_FG) && + (!(is_scaling_needed(bg_req))) && + (!(bg_req.flags & (MDP_ROT_90))) && + (check_if_rgb(bg_req.src.format)) && + (!(hw_woraround_active))) { + status = true; + req->req_list[indx].flags |= MDP_SMART_BLIT; + pr_debug("Optimize RGB Blit for Req Indx %d\n", indx); + } + /* + * Swap BG and FG layer to enable SMART blit between request + * 0(BG) & request 1(FG) when destination ROI of BG and FG + * layer are same, No scaling on FG and BG layer + * No rotation on FG Layer. 
BG Layer color format is YUV + */ + else if ((indx == 0) && + (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) && + (!(fg_req.flags & (MDP_ROT_90))) && (dst_roi_equal) && + (!(check_if_rgb(bg_req.src.format))) && + (!(hw_woraround_active))) { + /* + * swap blit requests at index 0 and 1. YUV layer at + * index 0 is replaced with UI layer request present + * at index 1. Since UI layer will be in background + * set IS_FG flag and clear it from YUV layer flags + */ + if (!(is_scaling_needed(req->req_list[next]))) { + if (bg_req.flags & MDP_IS_FG) { + req->req_list[indx].flags &= + ~MDP_IS_FG; + req->req_list[next].flags |= MDP_IS_FG; + } + bg_req = req->req_list[next]; + req->req_list[next] = req->req_list[indx]; + req->req_list[indx] = bg_req; + + tmp_data = req->src_data[next]; + req->src_data[next] = req->src_data[indx]; + req->src_data[indx] = tmp_data; + + tmp_data = req->dst_data[next]; + req->dst_data[next] = req->dst_data[indx]; + req->dst_data[indx] = tmp_data; + status = true; + req->req_list[indx].flags |= MDP_SMART_BLIT; + pr_debug("Optimize YUV Blit for Req Indx %d\n", + indx); + } + } + } + return status; +} + +static void mdp3_ppp_blit_handler(struct kthread_work *work) +{ + struct msm_fb_data_type *mfd = ppp_stat->mfd; + struct blit_req_list *req; + int i, rc = 0; + bool smart_blit = false; + int smart_blit_fg_index = -1; + + mutex_lock(&ppp_stat->config_ppp_mutex); + req = mdp3_ppp_next_req(&ppp_stat->req_q); + if (!req) { + mutex_unlock(&ppp_stat->config_ppp_mutex); + return; + } + + if (!ppp_stat->bw_on) { + mdp3_ppp_turnon(mfd, 1); + if (rc < 0) { + mutex_unlock(&ppp_stat->config_ppp_mutex); + pr_err("%s: Enable ppp resources failed\n", __func__); + return; + } + } + while (req) { + mdp3_ppp_wait_for_fence(req); + mdp3_calc_ppp_res(mfd, req); + if (ppp_res.clk_rate != ppp_stat->mdp_clk) { + ppp_stat->mdp_clk = ppp_res.clk_rate; + mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, + ppp_stat->mdp_clk, MDP3_CLIENT_PPP); + } + if (ppp_stat->bw_update) { + rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, + ppp_res.next_ab, ppp_res.next_ib); + if (rc < 0) { + pr_err("%s: bw set quota failed\n", __func__); + return; + } + ppp_stat->bw_update = false; + } + ATRACE_BEGIN("mpd3_ppp_start"); + for (i = 0; i < req->count; i++) { + smart_blit = is_blit_optimization_possible(req, i); + if (smart_blit) + /* + * Blit request index of FG layer in + * smart blit + */ + smart_blit_fg_index = i + 1; + if (!(req->req_list[i].flags & MDP_NO_BLIT)) { + /* Do the actual blit. 
*/ + if (!rc) { + rc = mdp3_ppp_start_blit(mfd, + &(req->req_list[i]), + &req->src_data[i], + &req->dst_data[i]); + } + /* Unmap blit source buffer */ + if (smart_blit == false) { + mdp3_put_img(&req->src_data[i], + MDP3_CLIENT_PPP); + } + if (smart_blit_fg_index == i) { + /* Unmap smart blit BG buffer */ + mdp3_put_img(&req->src_data[i - 1], + MDP3_CLIENT_PPP); + smart_blit_fg_index = -1; + } + mdp3_put_img(&req->dst_data[i], + MDP3_CLIENT_PPP); + smart_blit = false; + } + } + ATRACE_END("mdp3_ppp_start"); + /* Signal to release fence */ + mutex_lock(&ppp_stat->req_mutex); + mdp3_ppp_signal_timeline(req); + mdp3_ppp_req_pop(&ppp_stat->req_q); + req = mdp3_ppp_next_req(&ppp_stat->req_q); + if (ppp_stat->wait_for_pop) + complete(&ppp_stat->pop_q_comp); + mutex_unlock(&ppp_stat->req_mutex); + } + mod_timer(&ppp_stat->free_bw_timer, jiffies + + msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT)); + mutex_unlock(&ppp_stat->config_ppp_mutex); +} + +int mdp3_ppp_parse_req(void __user *p, + struct mdp_async_blit_req_list *req_list_header, + int async) +{ + struct blit_req_list *req; + struct blit_req_queue *req_q = &ppp_stat->req_q; + struct mdss_fence *fence = NULL; + int count, rc, idx, i; + + count = req_list_header->count; + + mutex_lock(&ppp_stat->req_mutex); + while (req_q->count >= MDP3_PPP_MAX_LIST_REQ) { + ppp_stat->wait_for_pop = true; + mutex_unlock(&ppp_stat->req_mutex); + rc = wait_for_completion_timeout( + &ppp_stat->pop_q_comp, 5 * HZ); + if (rc == 0) { + /* This will only occur if there is serious problem */ + pr_err("%s: timeout exiting queuing request\n", + __func__); + return -EBUSY; + } + mutex_lock(&ppp_stat->req_mutex); + ppp_stat->wait_for_pop = false; + } + idx = req_q->push_idx; + req = &req_q->req[idx]; + + if (copy_from_user(&req->req_list, p, + sizeof(struct mdp_blit_req) * count)) { + mutex_unlock(&ppp_stat->req_mutex); + return -EFAULT; + } + + rc = mdp3_ppp_handle_buf_sync(req, &req_list_header->sync); + if (rc < 0) { + pr_err("%s: Failed create sync point\n", __func__); + mutex_unlock(&ppp_stat->req_mutex); + return rc; + } + req->count = count; + + /* We need to grab ion handle while running in client thread */ + for (i = 0; i < count; i++) { + rc = mdp3_ppp_get_img(&req->req_list[i].src, + &req->req_list[i], &req->src_data[i]); + if (rc < 0 || req->src_data[i].len == 0) { + pr_err("mdp_ppp: couldn't retrieve src img from mem\n"); + goto parse_err_1; + } + + rc = mdp3_ppp_get_img(&req->req_list[i].dst, + &req->req_list[i], &req->dst_data[i]); + if (rc < 0 || req->dst_data[i].len == 0) { + mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP); + pr_err("mdp_ppp: couldn't retrieve dest img from mem\n"); + goto parse_err_1; + } + } + + if (async) { + req->cur_rel_fen_fd = mdss_get_sync_fence_fd( + req->cur_rel_fence); + rc = copy_to_user(req_list_header->sync.rel_fen_fd, + &req->cur_rel_fen_fd, sizeof(int)); + if (rc) { + pr_err("%s:copy_to_user failed\n", __func__); + goto parse_err_2; + } + } else { + fence = req->cur_rel_fence; + } + + mdp3_ppp_req_push(req_q, req); + mutex_unlock(&ppp_stat->req_mutex); + kthread_queue_work(&ppp_stat->kworker, &ppp_stat->blit_work); + if (!async) { + /* wait for release fence */ + rc = mdss_wait_sync_fence(fence, + 5 * MSEC_PER_SEC); + if (rc < 0) + pr_err("%s: sync blit! 
rc = %x\n", __func__, rc); + + mdss_put_sync_fence(fence); + fence = NULL; + } + return 0; + +parse_err_2: + put_unused_fd(req->cur_rel_fen_fd); +parse_err_1: + for (i--; i >= 0; i--) { + mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP); + mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP); + } + mdp3_ppp_deinit_buf_sync(req); + mutex_unlock(&ppp_stat->req_mutex); + return rc; +} + +int mdp3_ppp_res_init(struct msm_fb_data_type *mfd) +{ + int rc; + struct sched_param param = {.sched_priority = 16}; + const char timeline_name[] = "mdp3_ppp"; + + ppp_stat = kzalloc(sizeof(struct ppp_status), GFP_KERNEL); + if (!ppp_stat) + return -ENOMEM; + + /*Setup sync_pt timeline for ppp*/ + ppp_stat->timeline = mdss_create_timeline(timeline_name); + if (ppp_stat->timeline == NULL) { + pr_err("%s: cannot create time line\n", __func__); + return -ENOMEM; + } + ppp_stat->timeline_value = 1; + + kthread_init_worker(&ppp_stat->kworker); + kthread_init_work(&ppp_stat->blit_work, mdp3_ppp_blit_handler); + ppp_stat->blit_thread = kthread_run(kthread_worker_fn, + &ppp_stat->kworker, + "mdp3_ppp"); + + if (IS_ERR(ppp_stat->blit_thread)) { + rc = PTR_ERR(ppp_stat->blit_thread); + pr_err("ERROR: unable to start ppp blit thread,err = %d\n", + rc); + ppp_stat->blit_thread = NULL; + return rc; + } + if (sched_setscheduler(ppp_stat->blit_thread, SCHED_FIFO, ¶m)) + pr_warn("set priority failed for mdp3 blit thread\n"); + + INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler); + init_completion(&ppp_stat->pop_q_comp); + mutex_init(&ppp_stat->req_mutex); + mutex_init(&ppp_stat->config_ppp_mutex); + init_timer(&ppp_stat->free_bw_timer); + ppp_stat->free_bw_timer.function = mdp3_free_fw_timer_func; + ppp_stat->free_bw_timer.data = 0; + ppp_stat->mfd = mfd; + mdp3_ppp_callback_setup(); + return 0; +} diff --git a/drivers/video/fbdev/msm/mdp3_ppp.h b/drivers/video/fbdev/msm/mdp3_ppp.h new file mode 100644 index 0000000000000000000000000000000000000000..1f82851384368babaf4422166dc3c25067be9a6b --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_ppp.h @@ -0,0 +1,430 @@ +/* Copyright (c) 2007, 2013, 2016, 2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2007 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef MDP3_PPP_H +#define MDP3_PPP_H +#include "mdp3.h" +#include "mdss_fb.h" + +#define PPP_WRITEL(val, off) MDP3_REG_WRITE(off, val) + +#define MAX_BLIT_REQ 16 +#define PPP_UPSCALE_MAX 64 +#define PPP_BLUR_SCALE_MAX 128 +#define PPP_LUT_MAX 256 + +#define MDPOP_SMART_BLIT BIT(31) /* blit optimization flag */ + +/* MDP PPP Operations */ +#define MDPOP_NOP 0 +#define MDPOP_LR BIT(0) /* left to right flip */ +#define MDPOP_UD BIT(1) /* up and down flip */ +#define MDPOP_ROT90 BIT(2) /* rotate image to 90 degree */ +#define MDPOP_ROT180 (MDPOP_UD|MDPOP_LR) +#define MDPOP_ROT270 (MDPOP_ROT90|MDPOP_UD|MDPOP_LR) +#define MDPOP_ASCALE BIT(7) +#define MDPOP_ALPHAB BIT(8) /* enable alpha blending */ +#define MDPOP_TRANSP BIT(9) /* enable transparency */ +#define MDPOP_DITHER BIT(10) /* enable dither */ +#define MDPOP_SHARPENING BIT(11) /* enable sharpening */ +#define MDPOP_BLUR BIT(12) /* enable blur */ +#define MDPOP_FG_PM_ALPHA BIT(13) +#define MDPOP_LAYER_IS_FG BIT(14) + +#define MDPOP_ROTATION (MDPOP_ROT90|MDPOP_LR|MDPOP_UD) + +#define PPP_OP_CONVERT_YCBCR2RGB BIT(2) +#define PPP_OP_CONVERT_ON BIT(3) +#define PPP_OP_SCALE_X_ON BIT(0) +#define PPP_OP_SCALE_Y_ON BIT(1) +#define PPP_OP_ROT_ON BIT(8) +#define PPP_OP_ROT_90 BIT(9) +#define PPP_OP_FLIP_LR BIT(10) +#define PPP_OP_FLIP_UD BIT(11) +#define PPP_OP_BLEND_ON BIT(12) +#define PPP_OP_BLEND_CONSTANT_ALPHA BIT(14) +#define PPP_OP_BLEND_BG_ALPHA BIT(13) +#define PPP_OP_BLEND_EQ_REVERSE BIT(15) +#define PPP_OP_DITHER_EN BIT(16) +#define PPP_BLEND_CALPHA_TRNASP BIT(24) + +#define PPP_OP_BLEND_SRCPIXEL_ALPHA 0 +#define PPP_OP_BLEND_ALPHA_BLEND_NORMAL 0 +#define PPP_OP_BLEND_ALPHA_BLEND_REVERSE BIT(15) + +#define PPP_BLEND_BG_USE_ALPHA_SEL (1 << 0) +#define PPP_BLEND_BG_ALPHA_REVERSE (1 << 3) +#define PPP_BLEND_BG_SRCPIXEL_ALPHA (0 << 1) +#define PPP_BLEND_BG_DSTPIXEL_ALPHA (1 << 1) +#define PPP_BLEND_BG_CONSTANT_ALPHA (2 << 1) +#define PPP_BLEND_BG_CONST_ALPHA_VAL(x) ((x) << 24) +#define PPP_OP_BG_CHROMA_H2V1 BIT(25) + +#define CLR_G 0x0 +#define CLR_B 0x1 +#define CLR_R 0x2 +#define CLR_ALPHA 0x3 + +#define CLR_Y CLR_G +#define CLR_CB CLR_B +#define CLR_CR CLR_R + +/* from lsb to msb */ +#define PPP_GET_PACK_PATTERN(a, x, y, z, bit) \ + (((a)<<(bit*3))|((x)<<(bit*2))|((y)< + +#include "mdss_fb.h" +#include "mdp3_ppp.h" + +#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT) + +/* bg_config_lut not needed since it is same as src */ +const uint32_t src_cfg_lut[MDP_IMGTYPE_LIMIT] = { + [MDP_RGB_565] = MDP_RGB_565_SRC_REG, + [MDP_BGR_565] = MDP_RGB_565_SRC_REG, + [MDP_RGB_888] = MDP_RGB_888_SRC_REG, + [MDP_BGR_888] = MDP_RGB_888_SRC_REG, + [MDP_BGRA_8888] = MDP_RGBX_8888_SRC_REG, + [MDP_RGBA_8888] = MDP_RGBX_8888_SRC_REG, + [MDP_ARGB_8888] = MDP_RGBX_8888_SRC_REG, + [MDP_XRGB_8888] = MDP_RGBX_8888_SRC_REG, + [MDP_RGBX_8888] = MDP_RGBX_8888_SRC_REG, + [MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG, + [MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG, + [MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_SRC_REG, + [MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_SRC_REG, + [MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_SRC_REG, + [MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG, + [MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG, + [MDP_BGRX_8888] = MDP_RGBX_8888_SRC_REG, +}; + +const uint32_t out_cfg_lut[MDP_IMGTYPE_LIMIT] = { + [MDP_RGB_565] = MDP_RGB_565_DST_REG, + [MDP_BGR_565] = MDP_RGB_565_DST_REG, + [MDP_RGB_888] = MDP_RGB_888_DST_REG, + [MDP_BGR_888] = MDP_RGB_888_DST_REG, + [MDP_BGRA_8888] = MDP_RGBX_8888_DST_REG, + [MDP_RGBA_8888] = MDP_RGBX_8888_DST_REG, + 
[MDP_ARGB_8888] = MDP_RGBX_8888_DST_REG, + [MDP_XRGB_8888] = MDP_RGBX_8888_DST_REG, + [MDP_RGBX_8888] = MDP_RGBX_8888_DST_REG, + [MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_DST_REG, + [MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_DST_REG, + [MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_DST_REG, + [MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_DST_REG, + [MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_DST_REG, + [MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_DST_REG, + [MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_DST_REG, + [MDP_BGRX_8888] = MDP_RGBX_8888_DST_REG, +}; + +const uint32_t pack_patt_lut[MDP_IMGTYPE_LIMIT] = { + [MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8), + [MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8), + [MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8), + [MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8), + [MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, + CLR_G, CLR_R, 8), + [MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, + CLR_G, CLR_B, 8), + [MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_R, + CLR_G, CLR_B, CLR_ALPHA, 8), + [MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_R, + CLR_G, CLR_B, CLR_ALPHA, 8), + [MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, + CLR_G, CLR_B, 8), + [MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), + [MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8), + [MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, + CLR_CR, 8), + [MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, + CLR_CR, 8), + [MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y, + CLR_CR, CLR_Y, CLR_CB, 8), + [MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8), + [MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), + [MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, + CLR_G, CLR_R, 8), +}; + +const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = { + [MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8), + [MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8), + [MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8), + [MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8), + [MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, + CLR_G, CLR_B, 8), + [MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, + CLR_G, CLR_R, 8), + [MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, + CLR_G, CLR_R, 8), + [MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, + CLR_G, CLR_R, 8), + [MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, + CLR_G, CLR_R, 8), + [MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8), + [MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), + [MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, + CLR_CB, 8), + [MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, + CLR_CB, 8), + [MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y, + CLR_CB, CLR_Y, CLR_CR, 8), + [MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), + [MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8), + [MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, + CLR_G, CLR_B, 8), +}; + +const uint32_t dst_op_reg[MDP_IMGTYPE_LIMIT] = { + [MDP_Y_CRCB_H2V2] = PPP_OP_DST_CHROMA_420, + [MDP_Y_CBCR_H2V2] = PPP_OP_DST_CHROMA_420, + [MDP_Y_CBCR_H2V1] = PPP_OP_DST_CHROMA_H2V1, + [MDP_Y_CRCB_H2V1] = PPP_OP_DST_CHROMA_H2V1, + [MDP_YCRYCB_H2V1] = PPP_OP_DST_CHROMA_H2V1, +}; + +const uint32_t src_op_reg[MDP_IMGTYPE_LIMIT] = { + [MDP_Y_CRCB_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR, 
+ [MDP_Y_CBCR_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR, + [MDP_Y_CBCR_H2V2_ADRENO] = PPP_OP_SRC_CHROMA_420 | + PPP_OP_COLOR_SPACE_YCBCR, + [MDP_Y_CBCR_H2V2_VENUS] = PPP_OP_SRC_CHROMA_420 | + PPP_OP_COLOR_SPACE_YCBCR, + [MDP_Y_CBCR_H2V1] = PPP_OP_SRC_CHROMA_H2V1, + [MDP_Y_CRCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1, + [MDP_YCRYCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1, +}; + +const uint32_t bytes_per_pixel[MDP_IMGTYPE_LIMIT] = { + [MDP_RGB_565] = 2, + [MDP_BGR_565] = 2, + [MDP_RGB_888] = 3, + [MDP_BGR_888] = 3, + [MDP_XRGB_8888] = 4, + [MDP_ARGB_8888] = 4, + [MDP_RGBA_8888] = 4, + [MDP_BGRA_8888] = 4, + [MDP_RGBX_8888] = 4, + [MDP_Y_CBCR_H2V1] = 1, + [MDP_Y_CBCR_H2V2] = 1, + [MDP_Y_CBCR_H2V2_ADRENO] = 1, + [MDP_Y_CBCR_H2V2_VENUS] = 1, + [MDP_Y_CRCB_H2V1] = 1, + [MDP_Y_CRCB_H2V2] = 1, + [MDP_YCRYCB_H2V1] = 2, + [MDP_BGRX_8888] = 4, +}; + +const bool per_pixel_alpha[MDP_IMGTYPE_LIMIT] = { + [MDP_BGRA_8888] = true, + [MDP_RGBA_8888] = true, + [MDP_ARGB_8888] = true, +}; + +const bool multi_plane[MDP_IMGTYPE_LIMIT] = { + [MDP_Y_CRCB_H2V2] = true, + [MDP_Y_CBCR_H2V2] = true, + [MDP_Y_CBCR_H2V1] = true, + [MDP_Y_CRCB_H2V1] = true, +}; + +/* lut default */ +uint32_t default_pre_lut_val[PPP_LUT_MAX] = { + 0x0, + 0x151515, + 0x1d1d1d, + 0x232323, + 0x272727, + 0x2b2b2b, + 0x2f2f2f, + 0x333333, + 0x363636, + 0x393939, + 0x3b3b3b, + 0x3e3e3e, + 0x404040, + 0x434343, + 0x454545, + 0x474747, + 0x494949, + 0x4b4b4b, + 0x4d4d4d, + 0x4f4f4f, + 0x515151, + 0x535353, + 0x555555, + 0x565656, + 0x585858, + 0x5a5a5a, + 0x5b5b5b, + 0x5d5d5d, + 0x5e5e5e, + 0x606060, + 0x616161, + 0x636363, + 0x646464, + 0x666666, + 0x676767, + 0x686868, + 0x6a6a6a, + 0x6b6b6b, + 0x6c6c6c, + 0x6e6e6e, + 0x6f6f6f, + 0x707070, + 0x717171, + 0x727272, + 0x747474, + 0x757575, + 0x767676, + 0x777777, + 0x787878, + 0x797979, + 0x7a7a7a, + 0x7c7c7c, + 0x7d7d7d, + 0x7e7e7e, + 0x7f7f7f, + 0x808080, + 0x818181, + 0x828282, + 0x838383, + 0x848484, + 0x858585, + 0x868686, + 0x878787, + 0x888888, + 0x898989, + 0x8a8a8a, + 0x8b8b8b, + 0x8c8c8c, + 0x8d8d8d, + 0x8e8e8e, + 0x8f8f8f, + 0x8f8f8f, + 0x909090, + 0x919191, + 0x929292, + 0x939393, + 0x949494, + 0x959595, + 0x969696, + 0x969696, + 0x979797, + 0x989898, + 0x999999, + 0x9a9a9a, + 0x9b9b9b, + 0x9c9c9c, + 0x9c9c9c, + 0x9d9d9d, + 0x9e9e9e, + 0x9f9f9f, + 0xa0a0a0, + 0xa0a0a0, + 0xa1a1a1, + 0xa2a2a2, + 0xa3a3a3, + 0xa4a4a4, + 0xa4a4a4, + 0xa5a5a5, + 0xa6a6a6, + 0xa7a7a7, + 0xa7a7a7, + 0xa8a8a8, + 0xa9a9a9, + 0xaaaaaa, + 0xaaaaaa, + 0xababab, + 0xacacac, + 0xadadad, + 0xadadad, + 0xaeaeae, + 0xafafaf, + 0xafafaf, + 0xb0b0b0, + 0xb1b1b1, + 0xb2b2b2, + 0xb2b2b2, + 0xb3b3b3, + 0xb4b4b4, + 0xb4b4b4, + 0xb5b5b5, + 0xb6b6b6, + 0xb6b6b6, + 0xb7b7b7, + 0xb8b8b8, + 0xb8b8b8, + 0xb9b9b9, + 0xbababa, + 0xbababa, + 0xbbbbbb, + 0xbcbcbc, + 0xbcbcbc, + 0xbdbdbd, + 0xbebebe, + 0xbebebe, + 0xbfbfbf, + 0xc0c0c0, + 0xc0c0c0, + 0xc1c1c1, + 0xc1c1c1, + 0xc2c2c2, + 0xc3c3c3, + 0xc3c3c3, + 0xc4c4c4, + 0xc5c5c5, + 0xc5c5c5, + 0xc6c6c6, + 0xc6c6c6, + 0xc7c7c7, + 0xc8c8c8, + 0xc8c8c8, + 0xc9c9c9, + 0xc9c9c9, + 0xcacaca, + 0xcbcbcb, + 0xcbcbcb, + 0xcccccc, + 0xcccccc, + 0xcdcdcd, + 0xcecece, + 0xcecece, + 0xcfcfcf, + 0xcfcfcf, + 0xd0d0d0, + 0xd0d0d0, + 0xd1d1d1, + 0xd2d2d2, + 0xd2d2d2, + 0xd3d3d3, + 0xd3d3d3, + 0xd4d4d4, + 0xd4d4d4, + 0xd5d5d5, + 0xd6d6d6, + 0xd6d6d6, + 0xd7d7d7, + 0xd7d7d7, + 0xd8d8d8, + 0xd8d8d8, + 0xd9d9d9, + 0xd9d9d9, + 0xdadada, + 0xdbdbdb, + 0xdbdbdb, + 0xdcdcdc, + 0xdcdcdc, + 0xdddddd, + 0xdddddd, + 0xdedede, + 0xdedede, + 0xdfdfdf, + 0xdfdfdf, + 0xe0e0e0, + 0xe0e0e0, + 0xe1e1e1, + 0xe1e1e1, + 
0xe2e2e2, + 0xe3e3e3, + 0xe3e3e3, + 0xe4e4e4, + 0xe4e4e4, + 0xe5e5e5, + 0xe5e5e5, + 0xe6e6e6, + 0xe6e6e6, + 0xe7e7e7, + 0xe7e7e7, + 0xe8e8e8, + 0xe8e8e8, + 0xe9e9e9, + 0xe9e9e9, + 0xeaeaea, + 0xeaeaea, + 0xebebeb, + 0xebebeb, + 0xececec, + 0xececec, + 0xededed, + 0xededed, + 0xeeeeee, + 0xeeeeee, + 0xefefef, + 0xefefef, + 0xf0f0f0, + 0xf0f0f0, + 0xf1f1f1, + 0xf1f1f1, + 0xf2f2f2, + 0xf2f2f2, + 0xf2f2f2, + 0xf3f3f3, + 0xf3f3f3, + 0xf4f4f4, + 0xf4f4f4, + 0xf5f5f5, + 0xf5f5f5, + 0xf6f6f6, + 0xf6f6f6, + 0xf7f7f7, + 0xf7f7f7, + 0xf8f8f8, + 0xf8f8f8, + 0xf9f9f9, + 0xf9f9f9, + 0xfafafa, + 0xfafafa, + 0xfafafa, + 0xfbfbfb, + 0xfbfbfb, + 0xfcfcfc, + 0xfcfcfc, + 0xfdfdfd, + 0xfdfdfd, + 0xfefefe, + 0xfefefe, + 0xffffff, + 0xffffff, +}; + +uint32_t default_post_lut_val[PPP_LUT_MAX] = { + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x10101, + 0x20202, + 0x20202, + 0x20202, + 0x20202, + 0x20202, + 0x20202, + 0x30303, + 0x30303, + 0x30303, + 0x30303, + 0x30303, + 0x40404, + 0x40404, + 0x40404, + 0x40404, + 0x40404, + 0x50505, + 0x50505, + 0x50505, + 0x50505, + 0x60606, + 0x60606, + 0x60606, + 0x70707, + 0x70707, + 0x70707, + 0x70707, + 0x80808, + 0x80808, + 0x80808, + 0x90909, + 0x90909, + 0xa0a0a, + 0xa0a0a, + 0xa0a0a, + 0xb0b0b, + 0xb0b0b, + 0xb0b0b, + 0xc0c0c, + 0xc0c0c, + 0xd0d0d, + 0xd0d0d, + 0xe0e0e, + 0xe0e0e, + 0xe0e0e, + 0xf0f0f, + 0xf0f0f, + 0x101010, + 0x101010, + 0x111111, + 0x111111, + 0x121212, + 0x121212, + 0x131313, + 0x131313, + 0x141414, + 0x151515, + 0x151515, + 0x161616, + 0x161616, + 0x171717, + 0x171717, + 0x181818, + 0x191919, + 0x191919, + 0x1a1a1a, + 0x1b1b1b, + 0x1b1b1b, + 0x1c1c1c, + 0x1c1c1c, + 0x1d1d1d, + 0x1e1e1e, + 0x1f1f1f, + 0x1f1f1f, + 0x202020, + 0x212121, + 0x212121, + 0x222222, + 0x232323, + 0x242424, + 0x242424, + 0x252525, + 0x262626, + 0x272727, + 0x272727, + 0x282828, + 0x292929, + 0x2a2a2a, + 0x2b2b2b, + 0x2c2c2c, + 0x2c2c2c, + 0x2d2d2d, + 0x2e2e2e, + 0x2f2f2f, + 0x303030, + 0x313131, + 0x323232, + 0x333333, + 0x333333, + 0x343434, + 0x353535, + 0x363636, + 0x373737, + 0x383838, + 0x393939, + 0x3a3a3a, + 0x3b3b3b, + 0x3c3c3c, + 0x3d3d3d, + 0x3e3e3e, + 0x3f3f3f, + 0x404040, + 0x414141, + 0x424242, + 0x434343, + 0x444444, + 0x464646, + 0x474747, + 0x484848, + 0x494949, + 0x4a4a4a, + 0x4b4b4b, + 0x4c4c4c, + 0x4d4d4d, + 0x4f4f4f, + 0x505050, + 0x515151, + 0x525252, + 0x535353, + 0x545454, + 0x565656, + 0x575757, + 0x585858, + 0x595959, + 0x5b5b5b, + 0x5c5c5c, + 0x5d5d5d, + 0x5e5e5e, + 0x606060, + 0x616161, + 0x626262, + 0x646464, + 0x656565, + 0x666666, + 0x686868, + 0x696969, + 0x6a6a6a, + 0x6c6c6c, + 0x6d6d6d, + 0x6f6f6f, + 0x707070, + 0x717171, + 0x737373, + 0x747474, + 0x767676, + 0x777777, + 0x797979, + 0x7a7a7a, + 0x7c7c7c, + 0x7d7d7d, + 0x7f7f7f, + 0x808080, + 0x828282, + 0x838383, + 0x858585, + 0x868686, + 0x888888, + 0x898989, + 0x8b8b8b, + 0x8d8d8d, + 0x8e8e8e, + 0x909090, + 0x919191, + 0x939393, + 0x959595, + 0x969696, + 0x989898, + 0x9a9a9a, + 0x9b9b9b, + 0x9d9d9d, + 0x9f9f9f, + 0xa1a1a1, + 0xa2a2a2, + 0xa4a4a4, + 0xa6a6a6, + 0xa7a7a7, + 0xa9a9a9, + 0xababab, + 0xadadad, + 0xafafaf, + 0xb0b0b0, + 0xb2b2b2, + 0xb4b4b4, + 0xb6b6b6, + 0xb8b8b8, + 0xbababa, + 0xbbbbbb, + 0xbdbdbd, + 0xbfbfbf, + 0xc1c1c1, + 0xc3c3c3, + 0xc5c5c5, + 0xc7c7c7, + 0xc9c9c9, + 0xcbcbcb, + 0xcdcdcd, + 0xcfcfcf, + 0xd1d1d1, + 0xd3d3d3, + 0xd5d5d5, + 0xd7d7d7, + 0xd9d9d9, + 0xdbdbdb, + 0xdddddd, + 0xdfdfdf, + 0xe1e1e1, + 
0xe3e3e3, + 0xe5e5e5, + 0xe7e7e7, + 0xe9e9e9, + 0xebebeb, + 0xeeeeee, + 0xf0f0f0, + 0xf2f2f2, + 0xf4f4f4, + 0xf6f6f6, + 0xf8f8f8, + 0xfbfbfb, + 0xfdfdfd, + 0xffffff, +}; + +struct ppp_csc_table rgb2yuv = { + .fwd_matrix = { + 0x83, + 0x102, + 0x32, + 0xffb5, + 0xff6c, + 0xe1, + 0xe1, + 0xff45, + 0xffdc, + }, + .rev_matrix = { + 0x254, + 0x0, + 0x331, + 0x254, + 0xff38, + 0xfe61, + 0x254, + 0x409, + 0x0, + }, + .bv = { + 0x10, + 0x80, + 0x80, + }, + .lv = { + 0x10, + 0xeb, + 0x10, + 0xf0, + }, +}; + +struct ppp_csc_table default_table2 = { + .fwd_matrix = { + 0x5d, + 0x13a, + 0x20, + 0xffcd, + 0xff54, + 0xe1, + 0xe1, + 0xff35, + }, + .rev_matrix = { + 0x254, + 0x0, + 0x396, + 0x254, + 0xff94, + 0xfef0, + 0x254, + 0x43a, + 0x0, + }, + .bv = { + 0x10, + 0x80, + 0x80, + }, + .lv = { + 0x10, + 0xeb, + 0x10, + 0xf0, + }, +}; + +const struct ppp_table upscale_table[PPP_UPSCALE_MAX] = { + { 0x5fffc, 0x0 }, + { 0x50200, 0x7fc00000 }, + { 0x5fffc, 0xff80000d }, + { 0x50204, 0x7ec003f9 }, + { 0x5fffc, 0xfec0001c }, + { 0x50208, 0x7d4003f3 }, + { 0x5fffc, 0xfe40002b }, + { 0x5020c, 0x7b8003ed }, + { 0x5fffc, 0xfd80003c }, + { 0x50210, 0x794003e8 }, + { 0x5fffc, 0xfcc0004d }, + { 0x50214, 0x76c003e4 }, + { 0x5fffc, 0xfc40005f }, + { 0x50218, 0x73c003e0 }, + { 0x5fffc, 0xfb800071 }, + { 0x5021c, 0x708003de }, + { 0x5fffc, 0xfac00085 }, + { 0x50220, 0x6d0003db }, + { 0x5fffc, 0xfa000098 }, + { 0x50224, 0x698003d9 }, + { 0x5fffc, 0xf98000ac }, + { 0x50228, 0x654003d8 }, + { 0x5fffc, 0xf8c000c1 }, + { 0x5022c, 0x610003d7 }, + { 0x5fffc, 0xf84000d5 }, + { 0x50230, 0x5c8003d7 }, + { 0x5fffc, 0xf7c000e9 }, + { 0x50234, 0x580003d7 }, + { 0x5fffc, 0xf74000fd }, + { 0x50238, 0x534003d8 }, + { 0x5fffc, 0xf6c00112 }, + { 0x5023c, 0x4e8003d8 }, + { 0x5fffc, 0xf6800126 }, + { 0x50240, 0x494003da }, + { 0x5fffc, 0xf600013a }, + { 0x50244, 0x448003db }, + { 0x5fffc, 0xf600014d }, + { 0x50248, 0x3f4003dd }, + { 0x5fffc, 0xf5c00160 }, + { 0x5024c, 0x3a4003df }, + { 0x5fffc, 0xf5c00172 }, + { 0x50250, 0x354003e1 }, + { 0x5fffc, 0xf5c00184 }, + { 0x50254, 0x304003e3 }, + { 0x5fffc, 0xf6000195 }, + { 0x50258, 0x2b0003e6 }, + { 0x5fffc, 0xf64001a6 }, + { 0x5025c, 0x260003e8 }, + { 0x5fffc, 0xf6c001b4 }, + { 0x50260, 0x214003eb }, + { 0x5fffc, 0xf78001c2 }, + { 0x50264, 0x1c4003ee }, + { 0x5fffc, 0xf80001cf }, + { 0x50268, 0x17c003f1 }, + { 0x5fffc, 0xf90001db }, + { 0x5026c, 0x134003f3 }, + { 0x5fffc, 0xfa0001e5 }, + { 0x50270, 0xf0003f6 }, + { 0x5fffc, 0xfb4001ee }, + { 0x50274, 0xac003f9 }, + { 0x5fffc, 0xfcc001f5 }, + { 0x50278, 0x70003fb }, + { 0x5fffc, 0xfe4001fb }, + { 0x5027c, 0x34003fe }, +}; + +const struct ppp_table mdp_gaussian_blur_table[PPP_BLUR_SCALE_MAX] = { + /* max variance */ + { 0x5fffc, 0x20000080 }, + { 0x50280, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50284, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50288, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5028c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50290, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50294, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50298, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5029c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502a0, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502a4, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502a8, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502ac, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502b0, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502b4, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502b8, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 
0x502bc, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502c0, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502c4, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502c8, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502cc, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502d0, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502d4, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502d8, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502dc, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502e0, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502e4, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502e8, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502ec, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502f0, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502f4, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502f8, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x502fc, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50300, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50304, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50308, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5030c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50310, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50314, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50318, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5031c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50320, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50324, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50328, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5032c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50330, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50334, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50338, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5033c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50340, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50344, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50348, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5034c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50350, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50354, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50358, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5035c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50360, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50364, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50368, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5036c, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50370, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50374, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x50378, 0x20000080 }, + { 0x5fffc, 0x20000080 }, + { 0x5037c, 0x20000080 }, +}; + +const struct ppp_table downscale_x_table_pt2topt4[] = { + { 0x5fffc, 0x740008c }, + { 0x50280, 0x33800088 }, + { 0x5fffc, 0x800008e }, + { 0x50284, 0x33400084 }, + { 0x5fffc, 0x8400092 }, + { 0x50288, 0x33000080 }, + { 0x5fffc, 0x9000094 }, + { 0x5028c, 0x3300007b }, + { 0x5fffc, 0x9c00098 }, + { 0x50290, 0x32400077 }, + { 0x5fffc, 0xa40009b }, + { 0x50294, 0x32000073 }, + { 0x5fffc, 0xb00009d }, + { 0x50298, 0x31c0006f }, + { 0x5fffc, 0xbc000a0 }, + { 0x5029c, 0x3140006b }, + { 0x5fffc, 0xc8000a2 }, + { 0x502a0, 0x31000067 }, + { 0x5fffc, 0xd8000a5 }, + { 0x502a4, 0x30800062 }, + { 0x5fffc, 0xe4000a8 }, + { 0x502a8, 0x2fc0005f }, + { 0x5fffc, 0xec000aa }, + { 0x502ac, 0x2fc0005b }, + { 0x5fffc, 0xf8000ad }, + { 0x502b0, 0x2f400057 }, + { 0x5fffc, 0x108000b0 }, + { 0x502b4, 0x2e400054 }, + { 0x5fffc, 0x114000b2 }, + { 0x502b8, 0x2e000050 }, + { 0x5fffc, 0x124000b4 }, + { 0x502bc, 0x2d80004c }, + { 0x5fffc, 0x130000b6 
}, + { 0x502c0, 0x2d000049 }, + { 0x5fffc, 0x140000b8 }, + { 0x502c4, 0x2c800045 }, + { 0x5fffc, 0x150000b9 }, + { 0x502c8, 0x2c000042 }, + { 0x5fffc, 0x15c000bd }, + { 0x502cc, 0x2b40003e }, + { 0x5fffc, 0x16c000bf }, + { 0x502d0, 0x2a80003b }, + { 0x5fffc, 0x17c000bf }, + { 0x502d4, 0x2a000039 }, + { 0x5fffc, 0x188000c2 }, + { 0x502d8, 0x29400036 }, + { 0x5fffc, 0x19c000c4 }, + { 0x502dc, 0x28800032 }, + { 0x5fffc, 0x1ac000c5 }, + { 0x502e0, 0x2800002f }, + { 0x5fffc, 0x1bc000c7 }, + { 0x502e4, 0x2740002c }, + { 0x5fffc, 0x1cc000c8 }, + { 0x502e8, 0x26c00029 }, + { 0x5fffc, 0x1dc000c9 }, + { 0x502ec, 0x26000027 }, + { 0x5fffc, 0x1ec000cc }, + { 0x502f0, 0x25000024 }, + { 0x5fffc, 0x200000cc }, + { 0x502f4, 0x24800021 }, + { 0x5fffc, 0x210000cd }, + { 0x502f8, 0x23800020 }, + { 0x5fffc, 0x220000ce }, + { 0x502fc, 0x2300001d }, +}; + +static const struct ppp_table downscale_x_table_pt4topt6[] = { + { 0x5fffc, 0x740008c }, + { 0x50280, 0x33800088 }, + { 0x5fffc, 0x800008e }, + { 0x50284, 0x33400084 }, + { 0x5fffc, 0x8400092 }, + { 0x50288, 0x33000080 }, + { 0x5fffc, 0x9000094 }, + { 0x5028c, 0x3300007b }, + { 0x5fffc, 0x9c00098 }, + { 0x50290, 0x32400077 }, + { 0x5fffc, 0xa40009b }, + { 0x50294, 0x32000073 }, + { 0x5fffc, 0xb00009d }, + { 0x50298, 0x31c0006f }, + { 0x5fffc, 0xbc000a0 }, + { 0x5029c, 0x3140006b }, + { 0x5fffc, 0xc8000a2 }, + { 0x502a0, 0x31000067 }, + { 0x5fffc, 0xd8000a5 }, + { 0x502a4, 0x30800062 }, + { 0x5fffc, 0xe4000a8 }, + { 0x502a8, 0x2fc0005f }, + { 0x5fffc, 0xec000aa }, + { 0x502ac, 0x2fc0005b }, + { 0x5fffc, 0xf8000ad }, + { 0x502b0, 0x2f400057 }, + { 0x5fffc, 0x108000b0 }, + { 0x502b4, 0x2e400054 }, + { 0x5fffc, 0x114000b2 }, + { 0x502b8, 0x2e000050 }, + { 0x5fffc, 0x124000b4 }, + { 0x502bc, 0x2d80004c }, + { 0x5fffc, 0x130000b6 }, + { 0x502c0, 0x2d000049 }, + { 0x5fffc, 0x140000b8 }, + { 0x502c4, 0x2c800045 }, + { 0x5fffc, 0x150000b9 }, + { 0x502c8, 0x2c000042 }, + { 0x5fffc, 0x15c000bd }, + { 0x502cc, 0x2b40003e }, + { 0x5fffc, 0x16c000bf }, + { 0x502d0, 0x2a80003b }, + { 0x5fffc, 0x17c000bf }, + { 0x502d4, 0x2a000039 }, + { 0x5fffc, 0x188000c2 }, + { 0x502d8, 0x29400036 }, + { 0x5fffc, 0x19c000c4 }, + { 0x502dc, 0x28800032 }, + { 0x5fffc, 0x1ac000c5 }, + { 0x502e0, 0x2800002f }, + { 0x5fffc, 0x1bc000c7 }, + { 0x502e4, 0x2740002c }, + { 0x5fffc, 0x1cc000c8 }, + { 0x502e8, 0x26c00029 }, + { 0x5fffc, 0x1dc000c9 }, + { 0x502ec, 0x26000027 }, + { 0x5fffc, 0x1ec000cc }, + { 0x502f0, 0x25000024 }, + { 0x5fffc, 0x200000cc }, + { 0x502f4, 0x24800021 }, + { 0x5fffc, 0x210000cd }, + { 0x502f8, 0x23800020 }, + { 0x5fffc, 0x220000ce }, + { 0x502fc, 0x2300001d }, +}; + +static const struct ppp_table downscale_x_table_pt6topt8[] = { + { 0x5fffc, 0xfe000070 }, + { 0x50280, 0x4bc00068 }, + { 0x5fffc, 0xfe000078 }, + { 0x50284, 0x4bc00060 }, + { 0x5fffc, 0xfe000080 }, + { 0x50288, 0x4b800059 }, + { 0x5fffc, 0xfe000089 }, + { 0x5028c, 0x4b000052 }, + { 0x5fffc, 0xfe400091 }, + { 0x50290, 0x4a80004b }, + { 0x5fffc, 0xfe40009a }, + { 0x50294, 0x4a000044 }, + { 0x5fffc, 0xfe8000a3 }, + { 0x50298, 0x4940003d }, + { 0x5fffc, 0xfec000ac }, + { 0x5029c, 0x48400037 }, + { 0x5fffc, 0xff0000b4 }, + { 0x502a0, 0x47800031 }, + { 0x5fffc, 0xff8000bd }, + { 0x502a4, 0x4640002b }, + { 0x5fffc, 0xc5 }, + { 0x502a8, 0x45000026 }, + { 0x5fffc, 0x8000ce }, + { 0x502ac, 0x43800021 }, + { 0x5fffc, 0x10000d6 }, + { 0x502b0, 0x4240001c }, + { 0x5fffc, 0x18000df }, + { 0x502b4, 0x40800018 }, + { 0x5fffc, 0x24000e6 }, + { 0x502b8, 0x3f000014 }, + { 0x5fffc, 0x30000ee }, + { 0x502bc, 0x3d400010 }, + { 
0x5fffc, 0x40000f5 }, + { 0x502c0, 0x3b80000c }, + { 0x5fffc, 0x50000fc }, + { 0x502c4, 0x39800009 }, + { 0x5fffc, 0x6000102 }, + { 0x502c8, 0x37c00006 }, + { 0x5fffc, 0x7000109 }, + { 0x502cc, 0x35800004 }, + { 0x5fffc, 0x840010e }, + { 0x502d0, 0x33800002 }, + { 0x5fffc, 0x9800114 }, + { 0x502d4, 0x31400000 }, + { 0x5fffc, 0xac00119 }, + { 0x502d8, 0x2f4003fe }, + { 0x5fffc, 0xc40011e }, + { 0x502dc, 0x2d0003fc }, + { 0x5fffc, 0xdc00121 }, + { 0x502e0, 0x2b0003fb }, + { 0x5fffc, 0xf400125 }, + { 0x502e4, 0x28c003fa }, + { 0x5fffc, 0x11000128 }, + { 0x502e8, 0x268003f9 }, + { 0x5fffc, 0x12c0012a }, + { 0x502ec, 0x244003f9 }, + { 0x5fffc, 0x1480012c }, + { 0x502f0, 0x224003f8 }, + { 0x5fffc, 0x1640012e }, + { 0x502f4, 0x200003f8 }, + { 0x5fffc, 0x1800012f }, + { 0x502f8, 0x1e0003f8 }, + { 0x5fffc, 0x1a00012f }, + { 0x502fc, 0x1c0003f8 }, +}; + +static const struct ppp_table downscale_x_table_pt8topt1[] = { + { 0x5fffc, 0x0 }, + { 0x50280, 0x7fc00000 }, + { 0x5fffc, 0xff80000d }, + { 0x50284, 0x7ec003f9 }, + { 0x5fffc, 0xfec0001c }, + { 0x50288, 0x7d4003f3 }, + { 0x5fffc, 0xfe40002b }, + { 0x5028c, 0x7b8003ed }, + { 0x5fffc, 0xfd80003c }, + { 0x50290, 0x794003e8 }, + { 0x5fffc, 0xfcc0004d }, + { 0x50294, 0x76c003e4 }, + { 0x5fffc, 0xfc40005f }, + { 0x50298, 0x73c003e0 }, + { 0x5fffc, 0xfb800071 }, + { 0x5029c, 0x708003de }, + { 0x5fffc, 0xfac00085 }, + { 0x502a0, 0x6d0003db }, + { 0x5fffc, 0xfa000098 }, + { 0x502a4, 0x698003d9 }, + { 0x5fffc, 0xf98000ac }, + { 0x502a8, 0x654003d8 }, + { 0x5fffc, 0xf8c000c1 }, + { 0x502ac, 0x610003d7 }, + { 0x5fffc, 0xf84000d5 }, + { 0x502b0, 0x5c8003d7 }, + { 0x5fffc, 0xf7c000e9 }, + { 0x502b4, 0x580003d7 }, + { 0x5fffc, 0xf74000fd }, + { 0x502b8, 0x534003d8 }, + { 0x5fffc, 0xf6c00112 }, + { 0x502bc, 0x4e8003d8 }, + { 0x5fffc, 0xf6800126 }, + { 0x502c0, 0x494003da }, + { 0x5fffc, 0xf600013a }, + { 0x502c4, 0x448003db }, + { 0x5fffc, 0xf600014d }, + { 0x502c8, 0x3f4003dd }, + { 0x5fffc, 0xf5c00160 }, + { 0x502cc, 0x3a4003df }, + { 0x5fffc, 0xf5c00172 }, + { 0x502d0, 0x354003e1 }, + { 0x5fffc, 0xf5c00184 }, + { 0x502d4, 0x304003e3 }, + { 0x5fffc, 0xf6000195 }, + { 0x502d8, 0x2b0003e6 }, + { 0x5fffc, 0xf64001a6 }, + { 0x502dc, 0x260003e8 }, + { 0x5fffc, 0xf6c001b4 }, + { 0x502e0, 0x214003eb }, + { 0x5fffc, 0xf78001c2 }, + { 0x502e4, 0x1c4003ee }, + { 0x5fffc, 0xf80001cf }, + { 0x502e8, 0x17c003f1 }, + { 0x5fffc, 0xf90001db }, + { 0x502ec, 0x134003f3 }, + { 0x5fffc, 0xfa0001e5 }, + { 0x502f0, 0xf0003f6 }, + { 0x5fffc, 0xfb4001ee }, + { 0x502f4, 0xac003f9 }, + { 0x5fffc, 0xfcc001f5 }, + { 0x502f8, 0x70003fb }, + { 0x5fffc, 0xfe4001fb }, + { 0x502fc, 0x34003fe }, +}; + +static const struct ppp_table *downscale_x_table[PPP_DOWNSCALE_MAX] = { + [PPP_DOWNSCALE_PT2TOPT4] = downscale_x_table_pt2topt4, + [PPP_DOWNSCALE_PT4TOPT6] = downscale_x_table_pt4topt6, + [PPP_DOWNSCALE_PT6TOPT8] = downscale_x_table_pt6topt8, + [PPP_DOWNSCALE_PT8TOPT1] = downscale_x_table_pt8topt1, +}; + +static const struct ppp_table downscale_y_table_pt2topt4[] = { + { 0x5fffc, 0x740008c }, + { 0x50300, 0x33800088 }, + { 0x5fffc, 0x800008e }, + { 0x50304, 0x33400084 }, + { 0x5fffc, 0x8400092 }, + { 0x50308, 0x33000080 }, + { 0x5fffc, 0x9000094 }, + { 0x5030c, 0x3300007b }, + { 0x5fffc, 0x9c00098 }, + { 0x50310, 0x32400077 }, + { 0x5fffc, 0xa40009b }, + { 0x50314, 0x32000073 }, + { 0x5fffc, 0xb00009d }, + { 0x50318, 0x31c0006f }, + { 0x5fffc, 0xbc000a0 }, + { 0x5031c, 0x3140006b }, + { 0x5fffc, 0xc8000a2 }, + { 0x50320, 0x31000067 }, + { 0x5fffc, 0xd8000a5 }, + { 0x50324, 0x30800062 }, + { 
0x5fffc, 0xe4000a8 }, + { 0x50328, 0x2fc0005f }, + { 0x5fffc, 0xec000aa }, + { 0x5032c, 0x2fc0005b }, + { 0x5fffc, 0xf8000ad }, + { 0x50330, 0x2f400057 }, + { 0x5fffc, 0x108000b0 }, + { 0x50334, 0x2e400054 }, + { 0x5fffc, 0x114000b2 }, + { 0x50338, 0x2e000050 }, + { 0x5fffc, 0x124000b4 }, + { 0x5033c, 0x2d80004c }, + { 0x5fffc, 0x130000b6 }, + { 0x50340, 0x2d000049 }, + { 0x5fffc, 0x140000b8 }, + { 0x50344, 0x2c800045 }, + { 0x5fffc, 0x150000b9 }, + { 0x50348, 0x2c000042 }, + { 0x5fffc, 0x15c000bd }, + { 0x5034c, 0x2b40003e }, + { 0x5fffc, 0x16c000bf }, + { 0x50350, 0x2a80003b }, + { 0x5fffc, 0x17c000bf }, + { 0x50354, 0x2a000039 }, + { 0x5fffc, 0x188000c2 }, + { 0x50358, 0x29400036 }, + { 0x5fffc, 0x19c000c4 }, + { 0x5035c, 0x28800032 }, + { 0x5fffc, 0x1ac000c5 }, + { 0x50360, 0x2800002f }, + { 0x5fffc, 0x1bc000c7 }, + { 0x50364, 0x2740002c }, + { 0x5fffc, 0x1cc000c8 }, + { 0x50368, 0x26c00029 }, + { 0x5fffc, 0x1dc000c9 }, + { 0x5036c, 0x26000027 }, + { 0x5fffc, 0x1ec000cc }, + { 0x50370, 0x25000024 }, + { 0x5fffc, 0x200000cc }, + { 0x50374, 0x24800021 }, + { 0x5fffc, 0x210000cd }, + { 0x50378, 0x23800020 }, + { 0x5fffc, 0x220000ce }, + { 0x5037c, 0x2300001d }, +}; + +static const struct ppp_table downscale_y_table_pt4topt6[] = { + { 0x5fffc, 0x740008c }, + { 0x50300, 0x33800088 }, + { 0x5fffc, 0x800008e }, + { 0x50304, 0x33400084 }, + { 0x5fffc, 0x8400092 }, + { 0x50308, 0x33000080 }, + { 0x5fffc, 0x9000094 }, + { 0x5030c, 0x3300007b }, + { 0x5fffc, 0x9c00098 }, + { 0x50310, 0x32400077 }, + { 0x5fffc, 0xa40009b }, + { 0x50314, 0x32000073 }, + { 0x5fffc, 0xb00009d }, + { 0x50318, 0x31c0006f }, + { 0x5fffc, 0xbc000a0 }, + { 0x5031c, 0x3140006b }, + { 0x5fffc, 0xc8000a2 }, + { 0x50320, 0x31000067 }, + { 0x5fffc, 0xd8000a5 }, + { 0x50324, 0x30800062 }, + { 0x5fffc, 0xe4000a8 }, + { 0x50328, 0x2fc0005f }, + { 0x5fffc, 0xec000aa }, + { 0x5032c, 0x2fc0005b }, + { 0x5fffc, 0xf8000ad }, + { 0x50330, 0x2f400057 }, + { 0x5fffc, 0x108000b0 }, + { 0x50334, 0x2e400054 }, + { 0x5fffc, 0x114000b2 }, + { 0x50338, 0x2e000050 }, + { 0x5fffc, 0x124000b4 }, + { 0x5033c, 0x2d80004c }, + { 0x5fffc, 0x130000b6 }, + { 0x50340, 0x2d000049 }, + { 0x5fffc, 0x140000b8 }, + { 0x50344, 0x2c800045 }, + { 0x5fffc, 0x150000b9 }, + { 0x50348, 0x2c000042 }, + { 0x5fffc, 0x15c000bd }, + { 0x5034c, 0x2b40003e }, + { 0x5fffc, 0x16c000bf }, + { 0x50350, 0x2a80003b }, + { 0x5fffc, 0x17c000bf }, + { 0x50354, 0x2a000039 }, + { 0x5fffc, 0x188000c2 }, + { 0x50358, 0x29400036 }, + { 0x5fffc, 0x19c000c4 }, + { 0x5035c, 0x28800032 }, + { 0x5fffc, 0x1ac000c5 }, + { 0x50360, 0x2800002f }, + { 0x5fffc, 0x1bc000c7 }, + { 0x50364, 0x2740002c }, + { 0x5fffc, 0x1cc000c8 }, + { 0x50368, 0x26c00029 }, + { 0x5fffc, 0x1dc000c9 }, + { 0x5036c, 0x26000027 }, + { 0x5fffc, 0x1ec000cc }, + { 0x50370, 0x25000024 }, + { 0x5fffc, 0x200000cc }, + { 0x50374, 0x24800021 }, + { 0x5fffc, 0x210000cd }, + { 0x50378, 0x23800020 }, + { 0x5fffc, 0x220000ce }, + { 0x5037c, 0x2300001d }, +}; + +static const struct ppp_table downscale_y_table_pt6topt8[] = { + { 0x5fffc, 0xfe000070 }, + { 0x50300, 0x4bc00068 }, + { 0x5fffc, 0xfe000078 }, + { 0x50304, 0x4bc00060 }, + { 0x5fffc, 0xfe000080 }, + { 0x50308, 0x4b800059 }, + { 0x5fffc, 0xfe000089 }, + { 0x5030c, 0x4b000052 }, + { 0x5fffc, 0xfe400091 }, + { 0x50310, 0x4a80004b }, + { 0x5fffc, 0xfe40009a }, + { 0x50314, 0x4a000044 }, + { 0x5fffc, 0xfe8000a3 }, + { 0x50318, 0x4940003d }, + { 0x5fffc, 0xfec000ac }, + { 0x5031c, 0x48400037 }, + { 0x5fffc, 0xff0000b4 }, + { 0x50320, 0x47800031 }, + { 0x5fffc, 0xff8000bd }, + { 
0x50324, 0x4640002b }, + { 0x5fffc, 0xc5 }, + { 0x50328, 0x45000026 }, + { 0x5fffc, 0x8000ce }, + { 0x5032c, 0x43800021 }, + { 0x5fffc, 0x10000d6 }, + { 0x50330, 0x4240001c }, + { 0x5fffc, 0x18000df }, + { 0x50334, 0x40800018 }, + { 0x5fffc, 0x24000e6 }, + { 0x50338, 0x3f000014 }, + { 0x5fffc, 0x30000ee }, + { 0x5033c, 0x3d400010 }, + { 0x5fffc, 0x40000f5 }, + { 0x50340, 0x3b80000c }, + { 0x5fffc, 0x50000fc }, + { 0x50344, 0x39800009 }, + { 0x5fffc, 0x6000102 }, + { 0x50348, 0x37c00006 }, + { 0x5fffc, 0x7000109 }, + { 0x5034c, 0x35800004 }, + { 0x5fffc, 0x840010e }, + { 0x50350, 0x33800002 }, + { 0x5fffc, 0x9800114 }, + { 0x50354, 0x31400000 }, + { 0x5fffc, 0xac00119 }, + { 0x50358, 0x2f4003fe }, + { 0x5fffc, 0xc40011e }, + { 0x5035c, 0x2d0003fc }, + { 0x5fffc, 0xdc00121 }, + { 0x50360, 0x2b0003fb }, + { 0x5fffc, 0xf400125 }, + { 0x50364, 0x28c003fa }, + { 0x5fffc, 0x11000128 }, + { 0x50368, 0x268003f9 }, + { 0x5fffc, 0x12c0012a }, + { 0x5036c, 0x244003f9 }, + { 0x5fffc, 0x1480012c }, + { 0x50370, 0x224003f8 }, + { 0x5fffc, 0x1640012e }, + { 0x50374, 0x200003f8 }, + { 0x5fffc, 0x1800012f }, + { 0x50378, 0x1e0003f8 }, + { 0x5fffc, 0x1a00012f }, + { 0x5037c, 0x1c0003f8 }, +}; + +static const struct ppp_table downscale_y_table_pt8topt1[] = { + { 0x5fffc, 0x0 }, + { 0x50300, 0x7fc00000 }, + { 0x5fffc, 0xff80000d }, + { 0x50304, 0x7ec003f9 }, + { 0x5fffc, 0xfec0001c }, + { 0x50308, 0x7d4003f3 }, + { 0x5fffc, 0xfe40002b }, + { 0x5030c, 0x7b8003ed }, + { 0x5fffc, 0xfd80003c }, + { 0x50310, 0x794003e8 }, + { 0x5fffc, 0xfcc0004d }, + { 0x50314, 0x76c003e4 }, + { 0x5fffc, 0xfc40005f }, + { 0x50318, 0x73c003e0 }, + { 0x5fffc, 0xfb800071 }, + { 0x5031c, 0x708003de }, + { 0x5fffc, 0xfac00085 }, + { 0x50320, 0x6d0003db }, + { 0x5fffc, 0xfa000098 }, + { 0x50324, 0x698003d9 }, + { 0x5fffc, 0xf98000ac }, + { 0x50328, 0x654003d8 }, + { 0x5fffc, 0xf8c000c1 }, + { 0x5032c, 0x610003d7 }, + { 0x5fffc, 0xf84000d5 }, + { 0x50330, 0x5c8003d7 }, + { 0x5fffc, 0xf7c000e9 }, + { 0x50334, 0x580003d7 }, + { 0x5fffc, 0xf74000fd }, + { 0x50338, 0x534003d8 }, + { 0x5fffc, 0xf6c00112 }, + { 0x5033c, 0x4e8003d8 }, + { 0x5fffc, 0xf6800126 }, + { 0x50340, 0x494003da }, + { 0x5fffc, 0xf600013a }, + { 0x50344, 0x448003db }, + { 0x5fffc, 0xf600014d }, + { 0x50348, 0x3f4003dd }, + { 0x5fffc, 0xf5c00160 }, + { 0x5034c, 0x3a4003df }, + { 0x5fffc, 0xf5c00172 }, + { 0x50350, 0x354003e1 }, + { 0x5fffc, 0xf5c00184 }, + { 0x50354, 0x304003e3 }, + { 0x5fffc, 0xf6000195 }, + { 0x50358, 0x2b0003e6 }, + { 0x5fffc, 0xf64001a6 }, + { 0x5035c, 0x260003e8 }, + { 0x5fffc, 0xf6c001b4 }, + { 0x50360, 0x214003eb }, + { 0x5fffc, 0xf78001c2 }, + { 0x50364, 0x1c4003ee }, + { 0x5fffc, 0xf80001cf }, + { 0x50368, 0x17c003f1 }, + { 0x5fffc, 0xf90001db }, + { 0x5036c, 0x134003f3 }, + { 0x5fffc, 0xfa0001e5 }, + { 0x50370, 0xf0003f6 }, + { 0x5fffc, 0xfb4001ee }, + { 0x50374, 0xac003f9 }, + { 0x5fffc, 0xfcc001f5 }, + { 0x50378, 0x70003fb }, + { 0x5fffc, 0xfe4001fb }, + { 0x5037c, 0x34003fe }, +}; + +static const struct ppp_table *downscale_y_table[PPP_DOWNSCALE_MAX] = { + [PPP_DOWNSCALE_PT2TOPT4] = downscale_y_table_pt2topt4, + [PPP_DOWNSCALE_PT4TOPT6] = downscale_y_table_pt4topt6, + [PPP_DOWNSCALE_PT6TOPT8] = downscale_y_table_pt6topt8, + [PPP_DOWNSCALE_PT8TOPT1] = downscale_y_table_pt8topt1, +}; + +void ppp_load_table(const struct ppp_table *table, int len) +{ + int i; + + for (i = 0; i < len; i++) + PPP_WRITEL(table[i].val, table[i].reg); +} + +void ppp_load_up_lut(void) +{ + ppp_load_table(upscale_table, + PPP_UPSCALE_MAX); +} + +void 
ppp_load_gaussian_lut(void) +{ + ppp_load_table(mdp_gaussian_blur_table, + PPP_BLUR_SCALE_MAX); +} + +void ppp_load_x_scale_table(int idx) +{ + ppp_load_table(downscale_x_table[idx], 64); +} + +void ppp_load_y_scale_table(int idx) +{ + ppp_load_table(downscale_y_table[idx], 64); +} + +uint32_t ppp_bpp(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return bytes_per_pixel[type]; +} + +uint32_t ppp_src_config(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return src_cfg_lut[type]; +} + +uint32_t ppp_out_config(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return out_cfg_lut[type]; +} + +uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + if (yuv2rgb) + return swapped_pack_patt_lut[type]; + + return pack_patt_lut[type]; +} + +uint32_t ppp_dst_op_reg(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return dst_op_reg[type]; +} + +uint32_t ppp_src_op_reg(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return src_op_reg[type]; +} + +bool ppp_per_p_alpha(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return per_pixel_alpha[type]; +} + +bool ppp_multi_plane(uint32_t type) +{ + if (MDP_IS_IMGTYPE_BAD(type)) + return 0; + return multi_plane[type]; +} + +uint32_t *ppp_default_pre_lut(void) +{ + return default_pre_lut_val; +} + +uint32_t *ppp_default_post_lut(void) +{ + return default_post_lut_val; +} + +struct ppp_csc_table *ppp_csc_rgb2yuv(void) +{ + return &rgb2yuv; +} + +struct ppp_csc_table *ppp_csc_table2(void) +{ + return &default_table2; +} diff --git a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c new file mode 100644 index 0000000000000000000000000000000000000000..15f240989b4bbfcc3a19db2d42c099baba77f353 --- /dev/null +++ b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c @@ -0,0 +1,1365 @@ +/* Copyright (c) 2007, 2012-2013, 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2007 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "linux/proc_fs.h" + +#include "mdss_fb.h" +#include "mdp3_ppp.h" +#include "mdp3_hwio.h" +#include "mdss_debug.h" + +/* SHIM Q Factor */ +#define PHI_Q_FACTOR 29 +#define PQF_PLUS_5 (PHI_Q_FACTOR + 5) /* due to 32 phases */ +#define PQF_PLUS_4 (PHI_Q_FACTOR + 4) +#define PQF_PLUS_2 (PHI_Q_FACTOR + 2) /* to get 4.0 */ +#define PQF_MINUS_2 (PHI_Q_FACTOR - 2) /* to get 0.25 */ +#define PQF_PLUS_5_PLUS_2 (PQF_PLUS_5 + 2) +#define PQF_PLUS_5_MINUS_2 (PQF_PLUS_5 - 2) + +enum { + LAYER_FG = 0, + LAYER_BG, + LAYER_FB, + LAYER_MAX, +}; + +static long long mdp_do_div(uint64_t num, uint64_t den) +{ + do_div(num, den); + return num; +} + +static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in, + uint32_t dim_out, bool is_W, int32_t *phase_init_ptr, + uint32_t *phase_step_ptr) +{ + bool rpa_on = false; + int init_phase = 0; + uint64_t numer = 0; + uint64_t denom = 0; + int64_t point5 = 1; + int64_t one = 1; + int64_t k1, k2, k3, k4; /* linear equation coefficients */ + uint64_t int_mask; + uint64_t fract_mask; + uint64_t Os; + int64_t Osprime; + int64_t Od; + int64_t Odprime; + int64_t Oreq; + int64_t init_phase_temp; + int64_t delta; + uint32_t mult; + + /* + * The phase accumulator should really be rational for all cases in a + * general purpose polyphase scaler for a tiled architecture with + * non-zero * origin capability because there is no way to represent + * certain scale factors in fixed point regardless of precision. + * The error incurred in attempting to use fixed point is most + * eggregious for SF where 1/SF is an integral multiple of 1/3. + * + * Set the RPA flag for this dimension. + * + * In order for 1/SF (dim_in/dim_out) to be an integral multiple of + * 1/3, dim_out must be an integral multiple of 3. + */ + if (!(dim_out % 3)) { + mult = dim_out / 3; + rpa_on = (!(dim_in % mult)); + } + + numer = dim_out; + denom = dim_in; + + /* + * convert to U30.34 before division + * + * The K vectors carry 4 extra bits of precision + * and are rounded. 
+ * + * We initially go 5 bits over then round by adding + * 1 and right shifting by 1 + * so final result is U31.33 + */ + numer <<= PQF_PLUS_5; + + /* now calculate the scale factor (aka k3) */ + k3 = ((mdp_do_div(numer, denom) + 1) >> 1); + + /* check scale factor for legal range [0.25 - 4.0] */ + if (((k3 >> 4) < (1LL << PQF_MINUS_2)) || + ((k3 >> 4) > (1LL << PQF_PLUS_2))) { + return -EINVAL; + } + + /* calculate inverse scale factor (aka k1) for phase init */ + numer = dim_in; + denom = dim_out; + numer <<= PQF_PLUS_5; + k1 = ((mdp_do_div(numer, denom) + 1) >> 1); + + /* + * calculate initial phase and ROI overfetch + */ + /* convert point5 & one to S39.24 (will always be positive) */ + point5 <<= (PQF_PLUS_4 - 1); + one <<= PQF_PLUS_4; + k2 = ((k1 - one) >> 1); + init_phase = (int)(k2 >> 4); + k4 = ((k3 - one) >> 1); + if (k3 != one) { + /* calculate the masks */ + fract_mask = one - 1; + int_mask = ~fract_mask; + + if (!rpa_on) { + /* + * FIXED POINT IMPLEMENTATION + */ + if (org) { + /* + * The complicated case; ROI origin != 0 + * init_phase needs to be adjusted + * OF is also position dependent + */ + + /* map (org - .5) into destination space */ + Os = ((uint64_t) org << 1) - 1; + Od = ((k3 * Os) >> 1) + k4; + + /* take the ceiling */ + Odprime = (Od & int_mask); + if (Odprime != Od) + Odprime += one; + + /* now map that back to source space */ + Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2; + + /* then floor & decrement to calc the required + * starting coordinate + */ + Oreq = (Osprime & int_mask) - one; + + /* calculate initial phase */ + init_phase_temp = Osprime - Oreq; + delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq; + init_phase_temp -= delta; + + /* limit to valid range before left shift */ + delta = (init_phase_temp & (1LL << 63)) ? + 4 : -4; + delta <<= PQF_PLUS_4; + while (abs((int)(init_phase_temp >> + PQF_PLUS_4)) > 4) + init_phase_temp += delta; + + /* + * right shift to account for extra bits of + * precision + */ + init_phase = (int)(init_phase_temp >> 4); + + } + } else { + /* + * RPA IMPLEMENTATION + * + * init_phase needs to be calculated in all RPA_on + * cases because it's a numerator, not a fixed + * point value. + */ + + /* map (org - .5) into destination space */ + Os = ((uint64_t) org << PQF_PLUS_4) - point5; + Od = mdp_do_div((dim_out * (Os + point5)), + dim_in); + Od -= point5; + + /* take the ceiling */ + Odprime = (Od & int_mask); + if (Odprime != Od) + Odprime += one; + + /* now map that back to source space */ + Osprime = + mdp_do_div((dim_in * (Odprime + point5)), + dim_out); + Osprime -= point5; + + /* + * then floor & decrement to calculate the required + * starting coordinate + */ + Oreq = (Osprime & int_mask) - one; + + /* calculate initial phase */ + init_phase_temp = Osprime - Oreq; + delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq; + init_phase_temp -= delta; + + /* limit to valid range before the left shift */ + delta = (init_phase_temp & (1LL << 63)) ? 
4 : -4; + delta <<= PQF_PLUS_4; + while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4) + init_phase_temp += delta; + + /* + * right shift to account for extra bits of precision + */ + init_phase = (int)(init_phase_temp >> 4); + } + } + + /* return the scale parameters */ + *phase_init_ptr = init_phase; + *phase_step_ptr = (uint32_t) (k1 >> 4); + + return 0; +} + +static int scale_idx(int factor) +{ + int idx; + + if (factor > 80) + idx = PPP_DOWNSCALE_PT8TOPT1; + else if (factor > 60) + idx = PPP_DOWNSCALE_PT6TOPT8; + else if (factor > 40) + idx = PPP_DOWNSCALE_PT4TOPT6; + else + idx = PPP_DOWNSCALE_PT2TOPT4; + + return idx; +} + +inline int32_t comp_conv_rgb2yuv(int32_t comp, int32_t y_high, + int32_t y_low, int32_t c_high, int32_t c_low) +{ + if (comp < 0) + comp = 0; + if (comp > 255) + comp = 255; + + /* clamp */ + if (comp < y_low) + comp = y_low; + if (comp > y_high) + comp = y_high; + return comp; +} + +static uint32_t conv_rgb2yuv(uint32_t input_pixel, + uint16_t *matrix_vector, + uint16_t *bv, + uint16_t *clamp_vector) +{ + uint8_t input_C2, input_C0, input_C1; + uint32_t output; + int32_t comp_C2, comp_C1, comp_C0, temp; + int32_t temp1, temp2, temp3; + int32_t matrix[9]; + int32_t bias_vector[3]; + int32_t Y_low_limit, Y_high_limit, C_low_limit, C_high_limit; + int32_t i; + + input_C2 = (input_pixel >> 16) & 0xFF; + input_C1 = (input_pixel >> 8) & 0xFF; + input_C0 = (input_pixel >> 0) & 0xFF; + + comp_C0 = input_C0; + comp_C1 = input_C1; + comp_C2 = input_C2; + + for (i = 0; i < MDP_CSC_SIZE; i++) + matrix[i] = + ((int32_t) (((int32_t) matrix_vector[i]) << 20)) >> 20; + + bias_vector[0] = (int32_t) (bv[0] & 0xFF); + bias_vector[1] = (int32_t) (bv[1] & 0xFF); + bias_vector[2] = (int32_t) (bv[2] & 0xFF); + + Y_low_limit = (int32_t) clamp_vector[0]; + Y_high_limit = (int32_t) clamp_vector[1]; + C_low_limit = (int32_t) clamp_vector[2]; + C_high_limit = (int32_t) clamp_vector[3]; + + /* + * Color Conversion + * reorder input colors + */ + temp = comp_C2; + comp_C2 = comp_C1; + comp_C1 = comp_C0; + comp_C0 = temp; + + /* matrix multiplication */ + temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + + comp_C2 * matrix[2]; + temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + + comp_C2 * matrix[5]; + temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + + comp_C2 * matrix[8]; + + comp_C0 = temp1 + 0x100; + comp_C1 = temp2 + 0x100; + comp_C2 = temp3 + 0x100; + + /* take integer part */ + comp_C0 >>= 9; + comp_C1 >>= 9; + comp_C2 >>= 9; + + /* post bias (+) */ + comp_C0 += bias_vector[0]; + comp_C1 += bias_vector[1]; + comp_C2 += bias_vector[2]; + + /* limit pixel to 8-bit */ + comp_C0 = comp_conv_rgb2yuv(comp_C0, Y_high_limit, + Y_low_limit, C_high_limit, C_low_limit); + comp_C1 = comp_conv_rgb2yuv(comp_C1, Y_high_limit, + Y_low_limit, C_high_limit, C_low_limit); + comp_C2 = comp_conv_rgb2yuv(comp_C2, Y_high_limit, + Y_low_limit, C_high_limit, C_low_limit); + + output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0; + return output; +} + +inline void y_h_even_num(struct ppp_img_desc *img) +{ + img->roi.y = (img->roi.y / 2) * 2; + img->roi.height = (img->roi.height / 2) * 2; +} + +inline void x_w_even_num(struct ppp_img_desc *img) +{ + img->roi.x = (img->roi.x / 2) * 2; + img->roi.width = (img->roi.width / 2) * 2; +} + +bool check_if_rgb(int color) +{ + bool rgb = false; + + switch (color) { + case MDP_RGB_565: + case MDP_BGR_565: + case MDP_RGB_888: + case MDP_BGR_888: + case MDP_BGRA_8888: + case MDP_RGBA_8888: + case MDP_ARGB_8888: + case MDP_XRGB_8888: + case MDP_RGBX_8888: + case 
MDP_BGRX_8888: + rgb = true; + default: + break; + } + return rgb; +} + +uint8_t *mdp_adjust_rot_addr(struct ppp_blit_op *iBuf, + uint8_t *addr, uint32_t bpp, uint32_t uv, uint32_t layer) +{ + uint32_t ystride = 0; + uint32_t h_slice = 1; + uint32_t roi_width = 0; + uint32_t roi_height = 0; + uint32_t color_fmt = 0; + + if (layer == LAYER_BG) { + ystride = iBuf->bg.prop.width * bpp; + roi_width = iBuf->bg.roi.width; + roi_height = iBuf->bg.roi.height; + color_fmt = iBuf->bg.color_fmt; + } else { + ystride = iBuf->dst.prop.width * bpp; + roi_width = iBuf->dst.roi.width; + roi_height = iBuf->dst.roi.height; + color_fmt = iBuf->dst.color_fmt; + } + if (uv && ((color_fmt == MDP_Y_CBCR_H2V2) || + (color_fmt == MDP_Y_CRCB_H2V2))) + h_slice = 2; + + if (((iBuf->mdp_op & MDPOP_ROT90) == MDPOP_ROT90) ^ + ((iBuf->mdp_op & MDPOP_LR) == MDPOP_LR)) { + addr += (roi_width - MIN(16, roi_width)) * bpp; + } + if ((iBuf->mdp_op & MDPOP_UD) == MDPOP_UD) { + addr += ((roi_height - MIN(16, roi_height))/h_slice) * + ystride; + } + + return addr; +} + +void mdp_adjust_start_addr(struct ppp_blit_op *blit_op, + struct ppp_img_desc *img, int v_slice, + int h_slice, uint32_t layer) +{ + uint32_t bpp = ppp_bpp(img->color_fmt); + int x = img->roi.x; + int y = img->roi.y; + uint32_t width = img->prop.width; + + if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO && layer == 0) + img->p0 += (x + y * ALIGN(width, 32)) * bpp; + else if (img->color_fmt == MDP_Y_CBCR_H2V2_VENUS && layer == 0) + img->p0 += (x + y * ALIGN(width, 128)) * bpp; + else + img->p0 += (x + y * width) * bpp; + if (layer != LAYER_FG) + img->p0 = mdp_adjust_rot_addr(blit_op, img->p0, bpp, 0, layer); + + if (img->p1) { + /* + * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now + * we need to shift x direction same as y dir for offsite + */ + if ((img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO || + img->color_fmt == MDP_Y_CBCR_H2V2_VENUS) + && layer == 0) + img->p1 += ((x / h_slice) * h_slice + ((y == 0) ? 0 : + (((y + 1) / v_slice - 1) * (ALIGN(width/2, 32) * 2)))) + * bpp; + else + img->p1 += ((x / h_slice) * h_slice + + ((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp; + + if (layer != LAYER_FG) + img->p1 = mdp_adjust_rot_addr(blit_op, + img->p1, bpp, 0, layer); + } +} + +int load_ppp_lut(int tableType, uint32_t *lut) +{ + int i; + uint32_t base_addr; + + base_addr = tableType ? 
MDP3_PPP_POST_LUT : MDP3_PPP_PRE_LUT; + for (i = 0; i < PPP_LUT_MAX; i++) + PPP_WRITEL(lut[i], base_addr + MDP3_PPP_LUTn(i)); + + return 0; +} + +/* Configure Primary CSC Matrix */ +int load_primary_matrix(struct ppp_csc_table *csc) +{ + int i; + + for (i = 0; i < MDP_CSC_SIZE; i++) + PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_PFMVn(i)); + + for (i = 0; i < MDP_CSC_SIZE; i++) + PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_PRMVn(i)); + + for (i = 0; i < MDP_BV_SIZE; i++) + PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_PBVn(i)); + + for (i = 0; i < MDP_LV_SIZE; i++) + PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_PLVn(i)); + + return 0; +} + +/* Load Secondary CSC Matrix */ +int load_secondary_matrix(struct ppp_csc_table *csc) +{ + int i; + + for (i = 0; i < MDP_CSC_SIZE; i++) + PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_SFMVn(i)); + + for (i = 0; i < MDP_CSC_SIZE; i++) + PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_SRMVn(i)); + + for (i = 0; i < MDP_BV_SIZE; i++) + PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_SBVn(i)); + + for (i = 0; i < MDP_LV_SIZE; i++) + PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_SLVn(i)); + return 0; +} + +int load_csc_matrix(int matrix_type, struct ppp_csc_table *csc) +{ + if (matrix_type == CSC_PRIMARY_MATRIX) + return load_primary_matrix(csc); + + return load_secondary_matrix(csc); +} + +int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb) +{ + uint32_t val; + + val = ((src->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) | + (src->roi.width & MDP3_PPP_XY_MASK); + PPP_WRITEL(val, MDP3_PPP_SRC_SIZE); + + PPP_WRITEL(src->p0, MDP3_PPP_SRCP0_ADDR); + PPP_WRITEL(src->p1, MDP3_PPP_SRCP1_ADDR); + PPP_WRITEL(src->p3, MDP3_PPP_SRCP3_ADDR); + + val = (src->stride0 & MDP3_PPP_STRIDE_MASK) | + ((src->stride1 & MDP3_PPP_STRIDE_MASK) << + MDP3_PPP_STRIDE1_OFFSET); + PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE1_ADDR); + val = ((src->stride2 & MDP3_PPP_STRIDE_MASK) << + MDP3_PPP_STRIDE1_OFFSET); + PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE2_ADDR); + + val = ppp_src_config(src->color_fmt); + val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0; + val |= (src->roi.y % 2) ? 
PPP_SRC_BPP_ROI_ODD_Y : 0; + PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT); + PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb), + MDP3_PPP_SRC_UNPACK_PATTERN1); + return 0; +} + +int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb) +{ + uint32_t val; + bool pseudoplanr_output = false; + + switch (dst->color_fmt) { + case MDP_Y_CBCR_H2V2: + case MDP_Y_CRCB_H2V2: + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + pseudoplanr_output = true; + break; + default: + break; + } + val = ppp_out_config(dst->color_fmt); + if (pseudoplanr_output) + val |= PPP_DST_PLANE_PSEUDOPLN; + PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT); + PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb), + MDP3_PPP_OUT_PACK_PATTERN1); + + val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) | + (dst->roi.width & MDP3_PPP_XY_MASK); + PPP_WRITEL(val, MDP3_PPP_OUT_SIZE); + + PPP_WRITEL(dst->p0, MDP3_PPP_OUTP0_ADDR); + PPP_WRITEL(dst->p1, MDP3_PPP_OUTP1_ADDR); + PPP_WRITEL(dst->p3, MDP3_PPP_OUTP3_ADDR); + + val = (dst->stride0 & MDP3_PPP_STRIDE_MASK) | + ((dst->stride1 & MDP3_PPP_STRIDE_MASK) << + MDP3_PPP_STRIDE1_OFFSET); + PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE1_ADDR); + val = ((dst->stride2 & MDP3_PPP_STRIDE_MASK) << + MDP3_PPP_STRIDE1_OFFSET); + PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE2_ADDR); + return 0; +} + +int config_ppp_background(struct ppp_img_desc *bg, uint32_t yuv2rgb) +{ + uint32_t val; + + PPP_WRITEL(bg->p0, MDP3_PPP_BGP0_ADDR); + PPP_WRITEL(bg->p1, MDP3_PPP_BGP1_ADDR); + PPP_WRITEL(bg->p3, MDP3_PPP_BGP3_ADDR); + + val = (bg->stride0 & MDP3_PPP_STRIDE_MASK) | + ((bg->stride1 & MDP3_PPP_STRIDE_MASK) << + MDP3_PPP_STRIDE1_OFFSET); + PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE1_ADDR); + val = ((bg->stride2 & MDP3_PPP_STRIDE_MASK) << + MDP3_PPP_STRIDE1_OFFSET); + PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE2_ADDR); + + PPP_WRITEL(ppp_src_config(bg->color_fmt), + MDP3_PPP_BG_FORMAT); + PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, yuv2rgb), + MDP3_PPP_BG_UNPACK_PATTERN1); + return 0; +} + +void ppp_edge_rep_luma_pixel(struct ppp_blit_op *blit_op, + struct ppp_edge_rep *er) +{ + if (blit_op->mdp_op & MDPOP_ASCALE) { + + er->is_scale_enabled = 1; + + if (blit_op->mdp_op & MDPOP_ROT90) { + er->dst_roi_width = blit_op->dst.roi.height; + er->dst_roi_height = blit_op->dst.roi.width; + } else { + er->dst_roi_width = blit_op->dst.roi.width; + er->dst_roi_height = blit_op->dst.roi.height; + } + + /* + * Find out the luma pixels needed for scaling in the + * x direction (LEFT and RIGHT). Locations of pixels are + * relative to the ROI. Upper-left corner of ROI corresponds + * to coordinates (0,0). Also set the number of luma pixel + * to repeat. 
+ */ + if (blit_op->src.roi.width > 3 * er->dst_roi_width) { + /* scale factor < 1/3 */ + er->luma_interp_point_right = + (blit_op->src.roi.width - 1); + } else if (blit_op->src.roi.width == 3 * er->dst_roi_width) { + /* scale factor == 1/3 */ + er->luma_interp_point_right = + (blit_op->src.roi.width - 1) + 1; + er->luma_repeat_right = 1; + } else if ((blit_op->src.roi.width > er->dst_roi_width) && + (blit_op->src.roi.width < 3 * er->dst_roi_width)) { + /* 1/3 < scale factor < 1 */ + er->luma_interp_point_left = -1; + er->luma_interp_point_right = + (blit_op->src.roi.width - 1) + 1; + er->luma_repeat_left = 1; + er->luma_repeat_right = 1; + } else if (blit_op->src.roi.width == er->dst_roi_width) { + /* scale factor == 1 */ + er->luma_interp_point_left = -1; + er->luma_interp_point_right = + (blit_op->src.roi.width - 1) + 2; + er->luma_repeat_left = 1; + er->luma_repeat_right = 2; + } else { + /* scale factor > 1 */ + er->luma_interp_point_left = -2; + er->luma_interp_point_right = + (blit_op->src.roi.width - 1) + 2; + er->luma_repeat_left = 2; + er->luma_repeat_right = 2; + } + + /* + * Find out the number of pixels needed for scaling in the + * y direction (TOP and BOTTOM). Locations of pixels are + * relative to the ROI. Upper-left corner of ROI corresponds + * to coordinates (0,0). Also set the number of luma pixel + * to repeat. + */ + if (blit_op->src.roi.height > 3 * er->dst_roi_height) { + er->luma_interp_point_bottom = + (blit_op->src.roi.height - 1); + } else if (blit_op->src.roi.height == 3 * er->dst_roi_height) { + er->luma_interp_point_bottom = + (blit_op->src.roi.height - 1) + 1; + er->luma_repeat_bottom = 1; + } else if ((blit_op->src.roi.height > er->dst_roi_height) && + (blit_op->src.roi.height < 3 * er->dst_roi_height)) { + er->luma_interp_point_top = -1; + er->luma_interp_point_bottom = + (blit_op->src.roi.height - 1) + 1; + er->luma_repeat_top = 1; + er->luma_repeat_bottom = 1; + } else if (blit_op->src.roi.height == er->dst_roi_height) { + er->luma_interp_point_top = -1; + er->luma_interp_point_bottom = + (blit_op->src.roi.height - 1) + 2; + er->luma_repeat_top = 1; + er->luma_repeat_bottom = 2; + } else { + er->luma_interp_point_top = -2; + er->luma_interp_point_bottom = + (blit_op->src.roi.height - 1) + 2; + er->luma_repeat_top = 2; + er->luma_repeat_bottom = 2; + } + } else { + /* + * Since no scaling needed, Tile Fetch does not require any + * more luma pixel than what the ROI contains. + */ + er->luma_interp_point_right = + (int32_t) (blit_op->src.roi.width - 1); + er->luma_interp_point_bottom = + (int32_t) (blit_op->src.roi.height - 1); + } + /* After adding the ROI offsets, we have locations of + * luma_interp_points relative to the image. + */ + er->luma_interp_point_left += (int32_t) (blit_op->src.roi.x); + er->luma_interp_point_right += (int32_t) (blit_op->src.roi.x); + er->luma_interp_point_top += (int32_t) (blit_op->src.roi.y); + er->luma_interp_point_bottom += (int32_t) (blit_op->src.roi.y); +} + +void ppp_edge_rep_chroma_pixel(struct ppp_blit_op *blit_op, + struct ppp_edge_rep *er) +{ + bool chroma_edge_enable = true; + uint32_t is_yuv_offsite_vertical = 0; + + /* find out which chroma pixels are needed for chroma upsampling. 
*/ + switch (blit_op->src.color_fmt) { + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + case MDP_YCRYCB_H2V1: + er->chroma_interp_point_left = er->luma_interp_point_left >> 1; + er->chroma_interp_point_right = + (er->luma_interp_point_right + 1) >> 1; + er->chroma_interp_point_top = er->luma_interp_point_top; + er->chroma_interp_point_bottom = er->luma_interp_point_bottom; + break; + + case MDP_Y_CBCR_H2V2: + case MDP_Y_CBCR_H2V2_ADRENO: + case MDP_Y_CBCR_H2V2_VENUS: + case MDP_Y_CRCB_H2V2: + er->chroma_interp_point_left = er->luma_interp_point_left >> 1; + er->chroma_interp_point_right = + (er->luma_interp_point_right + 1) >> 1; + er->chroma_interp_point_top = + (er->luma_interp_point_top - 1) >> 1; + er->chroma_interp_point_bottom = + (er->luma_interp_point_bottom + 1) >> 1; + is_yuv_offsite_vertical = 1; + break; + + default: + chroma_edge_enable = false; + er->chroma_interp_point_left = er->luma_interp_point_left; + er->chroma_interp_point_right = er->luma_interp_point_right; + er->chroma_interp_point_top = er->luma_interp_point_top; + er->chroma_interp_point_bottom = er->luma_interp_point_bottom; + + break; + } + + if (chroma_edge_enable) { + /* Defines which chroma pixels belongs to the roi */ + switch (blit_op->src.color_fmt) { + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + case MDP_YCRYCB_H2V1: + er->chroma_bound_left = blit_op->src.roi.x / 2; + /* there are half as many chroma pixel as luma pixels */ + er->chroma_bound_right = + (blit_op->src.roi.width + + blit_op->src.roi.x - 1) / 2; + er->chroma_bound_top = blit_op->src.roi.y; + er->chroma_bound_bottom = + (blit_op->src.roi.height + blit_op->src.roi.y - 1); + break; + case MDP_Y_CBCR_H2V2: + case MDP_Y_CBCR_H2V2_ADRENO: + case MDP_Y_CBCR_H2V2_VENUS: + case MDP_Y_CRCB_H2V2: + /* + * cosite in horizontal dir, and offsite in vertical dir + * width of chroma ROI is 1/2 of size of luma ROI + * height of chroma ROI is 1/2 of size of luma ROI + */ + er->chroma_bound_left = blit_op->src.roi.x / 2; + er->chroma_bound_right = + (blit_op->src.roi.width + + blit_op->src.roi.x - 1) / 2; + er->chroma_bound_top = blit_op->src.roi.y / 2; + er->chroma_bound_bottom = + (blit_op->src.roi.height + + blit_op->src.roi.y - 1) / 2; + break; + + default: + /* + * If no valid chroma sub-sampling format specified, + * assume 4:4:4 ( i.e. fully sampled). + */ + er->chroma_bound_left = blit_op->src.roi.x; + er->chroma_bound_right = blit_op->src.roi.width + + blit_op->src.roi.x - 1; + er->chroma_bound_top = blit_op->src.roi.y; + er->chroma_bound_bottom = + (blit_op->src.roi.height + blit_op->src.roi.y - 1); + break; + } + + /* + * Knowing which chroma pixels are needed, and which chroma + * pixels belong to the ROI (i.e. available for fetching ), + * calculate how many chroma pixels Tile Fetch needs to + * duplicate. If any required chroma pixels falls outside + * of the ROI, Tile Fetch must obtain them by replicating + * pixels. 
+ */ + if (er->chroma_bound_left > er->chroma_interp_point_left) + er->chroma_repeat_left = + er->chroma_bound_left - + er->chroma_interp_point_left; + else + er->chroma_repeat_left = 0; + + if (er->chroma_interp_point_right > er->chroma_bound_right) + er->chroma_repeat_right = + er->chroma_interp_point_right - + er->chroma_bound_right; + else + er->chroma_repeat_right = 0; + + if (er->chroma_bound_top > er->chroma_interp_point_top) + er->chroma_repeat_top = + er->chroma_bound_top - + er->chroma_interp_point_top; + else + er->chroma_repeat_top = 0; + + if (er->chroma_interp_point_bottom > er->chroma_bound_bottom) + er->chroma_repeat_bottom = + er->chroma_interp_point_bottom - + er->chroma_bound_bottom; + else + er->chroma_repeat_bottom = 0; + + if (er->is_scale_enabled && (blit_op->src.roi.height == 1) + && is_yuv_offsite_vertical) { + er->chroma_repeat_bottom = 3; + er->chroma_repeat_top = 0; + } + } +} + +int config_ppp_edge_rep(struct ppp_blit_op *blit_op) +{ + uint32_t reg = 0; + struct ppp_edge_rep er; + + memset(&er, 0, sizeof(er)); + + ppp_edge_rep_luma_pixel(blit_op, &er); + + /* + * After adding the ROI offsets, we have locations of + * chroma_interp_points relative to the image. + */ + er.chroma_interp_point_left = er.luma_interp_point_left; + er.chroma_interp_point_right = er.luma_interp_point_right; + er.chroma_interp_point_top = er.luma_interp_point_top; + er.chroma_interp_point_bottom = er.luma_interp_point_bottom; + + ppp_edge_rep_chroma_pixel(blit_op, &er); + /* ensure repeats are >=0 and no larger than 3 pixels */ + if ((er.chroma_repeat_left < 0) || (er.chroma_repeat_right < 0) || + (er.chroma_repeat_top < 0) || (er.chroma_repeat_bottom < 0)) + return -EINVAL; + if ((er.chroma_repeat_left > 3) || (er.chroma_repeat_right > 3) || + (er.chroma_repeat_top > 3) || (er.chroma_repeat_bottom > 3)) + return -EINVAL; + if ((er.luma_repeat_left < 0) || (er.luma_repeat_right < 0) || + (er.luma_repeat_top < 0) || (er.luma_repeat_bottom < 0)) + return -EINVAL; + if ((er.luma_repeat_left > 3) || (er.luma_repeat_right > 3) || + (er.luma_repeat_top > 3) || (er.luma_repeat_bottom > 3)) + return -EINVAL; + + reg |= (er.chroma_repeat_left & 3) << MDP_LEFT_CHROMA; + reg |= (er.chroma_repeat_right & 3) << MDP_RIGHT_CHROMA; + reg |= (er.chroma_repeat_top & 3) << MDP_TOP_CHROMA; + reg |= (er.chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA; + reg |= (er.luma_repeat_left & 3) << MDP_LEFT_LUMA; + reg |= (er.luma_repeat_right & 3) << MDP_RIGHT_LUMA; + reg |= (er.luma_repeat_top & 3) << MDP_TOP_LUMA; + reg |= (er.luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA; + PPP_WRITEL(reg, MDP3_PPP_SRC_EDGE_REP); + return 0; +} + +int config_ppp_bg_edge_rep(struct ppp_blit_op *blit_op) +{ + uint32_t reg = 0; + + switch (blit_op->dst.color_fmt) { + case MDP_Y_CBCR_H2V2: + case MDP_Y_CRCB_H2V2: + if (blit_op->dst.roi.y == 0) + reg |= BIT(MDP_TOP_CHROMA); + + if ((blit_op->dst.roi.y + blit_op->dst.roi.height) == + blit_op->dst.prop.height) { + reg |= BIT(MDP_BOTTOM_CHROMA); + } + + if (((blit_op->dst.roi.x + blit_op->dst.roi.width) == + blit_op->dst.prop.width) && + ((blit_op->dst.roi.width % 2) == 0)) + reg |= BIT(MDP_RIGHT_CHROMA); + break; + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + case MDP_YCRYCB_H2V1: + if (((blit_op->dst.roi.x + blit_op->dst.roi.width) == + blit_op->dst.prop.width) && + ((blit_op->dst.roi.width % 2) == 0)) + reg |= BIT(MDP_RIGHT_CHROMA); + break; + default: + break; + } + PPP_WRITEL(reg, MDP3_PPP_BG_EDGE_REP); + return 0; +} + +int config_ppp_lut(uint32_t *pppop_reg_ptr, int lut_c0_en, + 
int lut_c1_en, int lut_c2_en) +{ + if (lut_c0_en) + *pppop_reg_ptr |= MDP_LUT_C0_EN; + if (lut_c1_en) + *pppop_reg_ptr |= MDP_LUT_C1_EN; + if (lut_c2_en) + *pppop_reg_ptr |= MDP_LUT_C2_EN; + return 0; +} + +int config_ppp_scale(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr) +{ + struct ppp_img_desc *src = &blit_op->src; + struct ppp_img_desc *dst = &blit_op->dst; + uint32_t dstW, dstH; + uint32_t x_fac, y_fac; + uint32_t mdp_blur = 0; + uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y; + int x_idx, y_idx; + + if (blit_op->mdp_op & MDPOP_ASCALE) { + if (blit_op->mdp_op & MDPOP_ROT90) { + dstW = dst->roi.height; + dstH = dst->roi.width; + } else { + dstW = dst->roi.width; + dstH = dst->roi.height; + } + *pppop_reg_ptr |= + (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON); + + mdp_blur = blit_op->mdp_op & MDPOP_BLUR; + + if ((dstW != src->roi.width) || + (dstH != src->roi.height) || mdp_blur) { + + /* + * Use source origin as 0 for computing initial + * phase and step size. Incorrect initial phase and + * step size value results in green line issue. + */ + mdp_calc_scale_params(0, + blit_op->src.roi.width, + dstW, 1, &phase_init_x, + &phase_step_x); + mdp_calc_scale_params(0, + blit_op->src.roi.height, + dstH, 0, &phase_init_y, + &phase_step_y); + + PPP_WRITEL(phase_init_x, MDP3_PPP_SCALE_PHASEX_INIT); + PPP_WRITEL(phase_init_y, MDP3_PPP_SCALE_PHASEY_INIT); + PPP_WRITEL(phase_step_x, MDP3_PPP_SCALE_PHASEX_STEP); + PPP_WRITEL(phase_step_y, MDP3_PPP_SCALE_PHASEY_STEP); + + + if (dstW > src->roi.width || dstH > src->roi.height) + ppp_load_up_lut(); + + if (mdp_blur) + ppp_load_gaussian_lut(); + + if (dstW <= src->roi.width) { + x_fac = (dstW * 100) / src->roi.width; + x_idx = scale_idx(x_fac); + ppp_load_x_scale_table(x_idx); + } + if (dstH <= src->roi.height) { + y_fac = (dstH * 100) / src->roi.height; + y_idx = scale_idx(y_fac); + ppp_load_y_scale_table(y_idx); + } + + } else { + blit_op->mdp_op &= ~(MDPOP_ASCALE); + } + } + config_ppp_edge_rep(blit_op); + config_ppp_bg_edge_rep(blit_op); + return 0; +} + +int config_ppp_csc(int src_color, int dst_color, uint32_t *pppop_reg_ptr) +{ + bool inputRGB, outputRGB; + + inputRGB = check_if_rgb(src_color); + outputRGB = check_if_rgb(dst_color); + + if ((!inputRGB) && (outputRGB)) + *pppop_reg_ptr |= PPP_OP_CONVERT_YCBCR2RGB | + PPP_OP_CONVERT_ON; + if ((inputRGB) && (!outputRGB)) + *pppop_reg_ptr |= PPP_OP_CONVERT_ON; + + return 0; +} + +int config_ppp_blend(struct ppp_blit_op *blit_op, + uint32_t *pppop_reg_ptr, + bool is_yuv_smart_blit, int smart_blit_bg_alpha) +{ + struct ppp_csc_table *csc; + uint32_t alpha, trans_color; + uint32_t val = 0; + int c_fmt = blit_op->src.color_fmt; + int bg_alpha; + + csc = ppp_csc_rgb2yuv(); + alpha = blit_op->blend.const_alpha; + trans_color = blit_op->blend.trans_color; + if (blit_op->mdp_op & MDPOP_FG_PM_ALPHA) { + if (ppp_per_p_alpha(c_fmt)) { + *pppop_reg_ptr |= PPP_OP_ROT_ON | + PPP_OP_BLEND_ON | + PPP_OP_BLEND_CONSTANT_ALPHA; + } else { + if ((blit_op->mdp_op & MDPOP_ALPHAB) + && (blit_op->blend.const_alpha == 0xff)) { + blit_op->mdp_op &= ~(MDPOP_ALPHAB); + } + + if ((blit_op->mdp_op & MDPOP_ALPHAB) + || (blit_op->mdp_op & MDPOP_TRANSP)) { + + *pppop_reg_ptr |= PPP_OP_ROT_ON | + PPP_OP_BLEND_ON | + PPP_OP_BLEND_CONSTANT_ALPHA | + PPP_OP_BLEND_ALPHA_BLEND_NORMAL; + } + } + + bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL | + PPP_BLEND_BG_ALPHA_REVERSE; + + if ((ppp_per_p_alpha(c_fmt)) && !(blit_op->mdp_op & + MDPOP_LAYER_IS_FG)) { + bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA; + } else { + bg_alpha |= 
PPP_BLEND_BG_CONSTANT_ALPHA; + bg_alpha |= blit_op->blend.const_alpha << 24; + } + PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL); + + if (blit_op->mdp_op & MDPOP_TRANSP) + *pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP; + } else if (ppp_per_p_alpha(c_fmt)) { + if (blit_op->mdp_op & MDPOP_LAYER_IS_FG) + *pppop_reg_ptr |= PPP_OP_ROT_ON | + PPP_OP_BLEND_ON | + PPP_OP_BLEND_CONSTANT_ALPHA; + else + *pppop_reg_ptr |= PPP_OP_ROT_ON | + PPP_OP_BLEND_ON | + PPP_OP_BLEND_SRCPIXEL_ALPHA; + PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL); + } else { + if ((blit_op->mdp_op & MDPOP_ALPHAB) + && (blit_op->blend.const_alpha == 0xff)) { + blit_op->mdp_op &= + ~(MDPOP_ALPHAB); + } + + if ((blit_op->mdp_op & MDPOP_ALPHAB) + || (blit_op->mdp_op & MDPOP_TRANSP)) { + *pppop_reg_ptr |= PPP_OP_ROT_ON | + PPP_OP_BLEND_ON | + PPP_OP_BLEND_CONSTANT_ALPHA | + PPP_OP_BLEND_ALPHA_BLEND_NORMAL; + } + + if (blit_op->mdp_op & MDPOP_TRANSP) + *pppop_reg_ptr |= + PPP_BLEND_CALPHA_TRNASP; + if (is_yuv_smart_blit) { + *pppop_reg_ptr |= PPP_OP_ROT_ON | + PPP_OP_BLEND_ON | + PPP_OP_BLEND_BG_ALPHA | + PPP_OP_BLEND_EQ_REVERSE; + + if (smart_blit_bg_alpha < 0xFF) + bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL | + PPP_BLEND_BG_DSTPIXEL_ALPHA; + else + bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL | + PPP_BLEND_BG_DSTPIXEL_ALPHA | + PPP_BLEND_BG_CONSTANT_ALPHA; + + bg_alpha |= smart_blit_bg_alpha << 24; + PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL); + } else { + PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL); + } + } + + if (*pppop_reg_ptr & PPP_OP_BLEND_ON) { + if (is_yuv_smart_blit) + config_ppp_background(&blit_op->bg, 1); + else + config_ppp_background(&blit_op->bg, 0); + + if (blit_op->dst.color_fmt == MDP_YCRYCB_H2V1) { + *pppop_reg_ptr |= PPP_OP_BG_CHROMA_H2V1; + if (blit_op->mdp_op & MDPOP_TRANSP) { + trans_color = conv_rgb2yuv(trans_color, + &csc->fwd_matrix[0], + &csc->bv[0], + &csc->lv[0]); + } + } + } + if (is_yuv_smart_blit) { + PPP_WRITEL(0, MDP3_PPP_BLEND_PARAM); + } else { + val = (alpha << MDP_BLEND_CONST_ALPHA); + val |= (trans_color & MDP_BLEND_TRASP_COL_MASK); + PPP_WRITEL(val, MDP3_PPP_BLEND_PARAM); + } + return 0; +} + +int config_ppp_rotation(uint32_t mdp_op, uint32_t *pppop_reg_ptr) +{ + *pppop_reg_ptr |= PPP_OP_ROT_ON; + + if (mdp_op & MDPOP_ROT90) + *pppop_reg_ptr |= PPP_OP_ROT_90; + if (mdp_op & MDPOP_LR) + *pppop_reg_ptr |= PPP_OP_FLIP_LR; + if (mdp_op & MDPOP_UD) + *pppop_reg_ptr |= PPP_OP_FLIP_UD; + + return 0; +} + +int config_ppp_op_mode(struct ppp_blit_op *blit_op) +{ + uint32_t yuv2rgb; + uint32_t ppp_operation_reg = 0; + int sv_slice, sh_slice; + int dv_slice, dh_slice; + static struct ppp_img_desc bg_img_param; + static int bg_alpha; + static int bg_mdp_ops; + bool is_yuv_smart_blit = false; + + /* + * Detect a YUV smart blit: + * if the cached BG image plane 0 address is not NULL and the + * source color format is YUV, then this is a YUV smart blit; + * mark is_yuv_smart_blit true. 
+ */ + if ((bg_img_param.p0) && + (!(check_if_rgb(blit_op->src.color_fmt)))) + is_yuv_smart_blit = true; + + sv_slice = sh_slice = dv_slice = dh_slice = 1; + + ppp_operation_reg |= ppp_dst_op_reg(blit_op->dst.color_fmt); + switch (blit_op->dst.color_fmt) { + case MDP_Y_CBCR_H2V2: + case MDP_Y_CRCB_H2V2: + y_h_even_num(&blit_op->dst); + y_h_even_num(&blit_op->src); + dv_slice = 2; + /* fall-through */ + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + case MDP_YCRYCB_H2V1: + x_w_even_num(&blit_op->dst); + x_w_even_num(&blit_op->src); + dh_slice = 2; + break; + default: + break; + } + + ppp_operation_reg |= ppp_src_op_reg(blit_op->src.color_fmt); + switch (blit_op->src.color_fmt) { + case MDP_Y_CBCR_H2V2: + case MDP_Y_CBCR_H2V2_ADRENO: + case MDP_Y_CBCR_H2V2_VENUS: + case MDP_Y_CRCB_H2V2: + sh_slice = sv_slice = 2; + break; + case MDP_YCRYCB_H2V1: + x_w_even_num(&blit_op->dst); + x_w_even_num(&blit_op->src); + /* fall-through */ + case MDP_Y_CBCR_H2V1: + case MDP_Y_CRCB_H2V1: + sh_slice = 2; + break; + default: + break; + } + + config_ppp_csc(blit_op->src.color_fmt, + blit_op->dst.color_fmt, &ppp_operation_reg); + yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB; + + if (blit_op->mdp_op & MDPOP_DITHER) + ppp_operation_reg |= PPP_OP_DITHER_EN; + + if (blit_op->mdp_op & MDPOP_ROTATION) + config_ppp_rotation(blit_op->mdp_op, &ppp_operation_reg); + + if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO) { + blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 32) * + ppp_bpp(blit_op->src.color_fmt); + blit_op->src.stride1 = 2 * ALIGN(blit_op->src.prop.width/2, 32); + } else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) { + blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 128) * + ppp_bpp(blit_op->src.color_fmt); + blit_op->src.stride1 = blit_op->src.stride0; + } else { + blit_op->src.stride0 = blit_op->src.prop.width * + ppp_bpp(blit_op->src.color_fmt); + blit_op->src.stride1 = blit_op->src.stride0; + } + + blit_op->dst.stride0 = blit_op->dst.prop.width * + ppp_bpp(blit_op->dst.color_fmt); + + if (ppp_multi_plane(blit_op->dst.color_fmt)) { + blit_op->dst.p1 = blit_op->dst.p0; + blit_op->dst.p1 += blit_op->dst.prop.width * + blit_op->dst.prop.height * + ppp_bpp(blit_op->dst.color_fmt); + } else { + blit_op->dst.p1 = NULL; + } + + if ((bg_img_param.p0) && (!(blit_op->mdp_op & MDPOP_SMART_BLIT))) { + /* + * Use cached smart blit BG layer info in + * smart Blit FG request + */ + blit_op->bg = bg_img_param; + if (check_if_rgb(blit_op->bg.color_fmt)) { + blit_op->bg.p1 = 0; + blit_op->bg.stride1 = 0; + } + memset(&bg_img_param, 0, sizeof(bg_img_param)); + } else { + blit_op->bg = blit_op->dst; + } + /* Cache smart blit BG layer info */ + if (blit_op->mdp_op & MDPOP_SMART_BLIT) + bg_img_param = blit_op->src; + + /* Jumping from Y-Plane to Chroma Plane */ + /* first pixel addr calculation */ + mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice, + sh_slice, LAYER_FG); + mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice, + dh_slice, LAYER_BG); + mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice, + dh_slice, LAYER_FB); + + config_ppp_scale(blit_op, &ppp_operation_reg); + + config_ppp_blend(blit_op, &ppp_operation_reg, is_yuv_smart_blit, + bg_alpha); + + config_ppp_src(&blit_op->src, yuv2rgb); + config_ppp_out(&blit_op->dst, yuv2rgb); + + /* Cache Smart blit BG alpha adn MDP OP values */ + if (blit_op->mdp_op & MDPOP_SMART_BLIT) { + bg_alpha = blit_op->blend.const_alpha; + bg_mdp_ops = blit_op->mdp_op; + } else { + bg_alpha = 0; + bg_mdp_ops = 0; + } + pr_debug("BLIT FG Param 
Fmt %d (x %d,y %d,w %d,h %d), ", + blit_op->src.color_fmt, blit_op->src.prop.x, + blit_op->src.prop.y, blit_op->src.prop.width, + blit_op->src.prop.height); + pr_debug("ROI(x %d,y %d,w %d, h %d) ", + blit_op->src.roi.x, blit_op->src.roi.y, + blit_op->src.roi.width, blit_op->src.roi.height); + pr_debug("Addr_P0 %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n", + blit_op->src.p0, blit_op->src.stride0, + blit_op->src.p1, blit_op->src.stride1); + + if (blit_op->bg.p0 != blit_op->dst.p0) { + pr_debug("BLIT BG Param Fmt %d (x %d,y %d,w %d,h %d), ", + blit_op->bg.color_fmt, blit_op->bg.prop.x, + blit_op->bg.prop.y, blit_op->bg.prop.width, + blit_op->bg.prop.height); + pr_debug("ROI(x %d,y %d, w %d, h %d) ", + blit_op->bg.roi.x, blit_op->bg.roi.y, + blit_op->bg.roi.width, blit_op->bg.roi.height); + pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n", + blit_op->bg.p0, blit_op->bg.stride0, + blit_op->bg.p1, blit_op->bg.stride1); + } + pr_debug("BLIT FB Param Fmt %d (x %d,y %d,w %d,h %d), ", + blit_op->dst.color_fmt, blit_op->dst.prop.x, + blit_op->dst.prop.y, blit_op->dst.prop.width, + blit_op->dst.prop.height); + pr_debug("ROI(x %d,y %d, w %d, h %d) ", + blit_op->dst.roi.x, blit_op->dst.roi.y, + blit_op->dst.roi.width, blit_op->dst.roi.height); + pr_debug("Addr %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n", + blit_op->dst.p0, blit_op->dst.stride0, + blit_op->dst.p1, blit_op->dst.stride1); + + PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE); + mb(); /* make sure everything is written before enable */ + MDSS_XLOG(ppp_operation_reg, blit_op->src.roi.x, blit_op->src.roi.y, + blit_op->src.roi.width, blit_op->src.roi.height); + MDSS_XLOG(blit_op->dst.roi.x, blit_op->dst.roi.y, + blit_op->dst.roi.width, blit_op->dst.roi.height); + return 0; +} + +void ppp_enable(void) +{ + PPP_WRITEL(0x1000, 0x30); + mb(); /* make sure everything is written before enable */ +} + +int mdp3_ppp_init(void) +{ + load_ppp_lut(LUT_PRE_TABLE, ppp_default_pre_lut()); + load_ppp_lut(LUT_POST_TABLE, ppp_default_post_lut()); + load_csc_matrix(CSC_PRIMARY_MATRIX, ppp_csc_rgb2yuv()); + load_csc_matrix(CSC_SECONDARY_MATRIX, ppp_csc_table2()); + return 0; +} diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h new file mode 100644 index 0000000000000000000000000000000000000000..b7b8de427a96e121fe41faa10d6ff88d677a0743 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss.h @@ -0,0 +1,617 @@ +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef MDSS_H +#define MDSS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mdss_panel.h" + +#define MAX_DRV_SUP_MMB_BLKS 44 +#define MAX_DRV_SUP_PIPES 10 +#define MAX_CLIENT_NAME_LEN 20 + +#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default" +#define MDSS_PINCTRL_STATE_SLEEP "mdss_sleep" + +enum mdss_mdp_clk_type { + MDSS_CLK_AHB, + MDSS_CLK_AXI, + MDSS_CLK_MDP_CORE, + MDSS_CLK_MDP_LUT, + MDSS_CLK_MDP_VSYNC, + MDSS_CLK_MNOC_AHB, + MDSS_CLK_THROTTLE_AXI, + MDSS_MAX_CLK +}; + +enum mdss_iommu_domain_type { + MDSS_IOMMU_DOMAIN_UNSECURE, + MDSS_IOMMU_DOMAIN_ROT_UNSECURE, + MDSS_IOMMU_DOMAIN_SECURE, + MDSS_IOMMU_DOMAIN_ROT_SECURE, + MDSS_IOMMU_MAX_DOMAIN +}; + +enum mdss_bus_vote_type { + VOTE_INDEX_DISABLE, + VOTE_INDEX_LOW, + VOTE_INDEX_MID, + VOTE_INDEX_HIGH, + VOTE_INDEX_MAX, +}; + +struct mdss_hw_settings { + char __iomem *reg; + u32 val; +}; + +struct mdss_max_bw_settings { + u32 mdss_max_bw_mode; + u32 mdss_max_bw_val; +}; + +struct mdss_debug_inf { + void *debug_data; + void (*debug_enable_clock)(int on); +}; + +struct mdss_perf_tune { + unsigned long min_mdp_clk; + u64 min_bus_vote; +}; + +#define MDSS_IRQ_SUSPEND -1 +#define MDSS_IRQ_RESUME 1 +#define MDSS_IRQ_REQ 0 + +struct mdss_intr { + /* requested intr */ + u32 req; + /* currently enabled intr */ + u32 curr; + int state; + spinlock_t lock; +}; + +struct simplified_prefill_factors { + u32 fmt_mt_nv12_factor; + u32 fmt_mt_factor; + u32 fmt_linear_factor; + u32 scale_factor; + u32 xtra_ff_factor; +}; + +struct mdss_prefill_data { + u32 ot_bytes; + u32 y_buf_bytes; + u32 y_scaler_lines_bilinear; + u32 y_scaler_lines_caf; + u32 post_scaler_pixels; + u32 pp_pixels; + u32 fbc_lines; + u32 ts_threshold; + u32 ts_end; + u32 ts_overhead; + struct mult_factor ts_rate; + struct simplified_prefill_factors prefill_factors; +}; + +struct mdss_mdp_dsc { + u32 num; + char __iomem *base; +}; + +enum mdss_hw_index { + MDSS_HW_MDP, + MDSS_HW_DSI0 = 1, + MDSS_HW_DSI1, + MDSS_HW_HDMI, + MDSS_HW_EDP, + MDSS_HW_MISC, + MDSS_MAX_HW_BLK +}; + +enum mdss_bus_clients { + MDSS_MDP_RT, + MDSS_DSI_RT, + MDSS_HW_RT, + MDSS_MDP_NRT, + MDSS_MAX_BUS_CLIENTS +}; + +struct mdss_pp_block_off { + u32 sspp_igc_lut_off; + u32 vig_pcc_off; + u32 rgb_pcc_off; + u32 dma_pcc_off; + u32 lm_pgc_off; + u32 dspp_gamut_off; + u32 dspp_pcc_off; + u32 dspp_pgc_off; +}; + +enum mdss_hw_quirk { + MDSS_QUIRK_BWCPANIC, + MDSS_QUIRK_ROTCDP, + MDSS_QUIRK_DOWNSCALE_HANG, + MDSS_QUIRK_DSC_RIGHT_ONLY_PU, + MDSS_QUIRK_DSC_2SLICE_PU_THRPUT, + MDSS_QUIRK_DMA_BI_DIR, + MDSS_QUIRK_FMT_PACK_PATTERN, + MDSS_QUIRK_NEED_SECURE_MAP, + MDSS_QUIRK_SRC_SPLIT_ALWAYS, + MDSS_QUIRK_HDR_SUPPORT_ENABLED, + MDSS_QUIRK_MDP_CLK_SET_RATE, + MDSS_QUIRK_MAX, +}; + +enum mdss_hw_capabilities { + MDSS_CAPS_YUV_CONFIG, + MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, + MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED, + MDSS_CAPS_MIXER_1_FOR_WB, + MDSS_CAPS_QSEED3, + MDSS_CAPS_DEST_SCALER, + MDSS_CAPS_10_BIT_SUPPORTED, + MDSS_CAPS_MAX, +}; + +enum mdss_qos_settings { + MDSS_QOS_PER_PIPE_IB, + MDSS_QOS_OVERHEAD_FACTOR, + MDSS_QOS_CDP, + MDSS_QOS_OTLIM, + MDSS_QOS_PER_PIPE_LUT, + MDSS_QOS_SIMPLIFIED_PREFILL, + MDSS_QOS_VBLANK_PANIC_CTRL, + MDSS_QOS_TS_PREFILL, + MDSS_QOS_REMAPPER, + MDSS_QOS_IB_NOCR, + MDSS_QOS_MAX, +}; + +enum mdss_mdp_pipe_type { + MDSS_MDP_PIPE_TYPE_INVALID = -1, + MDSS_MDP_PIPE_TYPE_VIG = 0, + MDSS_MDP_PIPE_TYPE_RGB, + MDSS_MDP_PIPE_TYPE_DMA, + MDSS_MDP_PIPE_TYPE_CURSOR, + MDSS_MDP_PIPE_TYPE_MAX, +}; + +struct 
reg_bus_client { + char name[MAX_CLIENT_NAME_LEN]; + short usecase_ndx; + u32 id; + struct list_head list; +}; + +struct mdss_smmu_client { + struct device *dev; + struct dma_iommu_mapping *mmu_mapping; + struct mdss_module_power mp; + struct reg_bus_client *reg_bus_clt; + bool domain_attached; + bool handoff_pending; + char __iomem *mmu_base; +}; + +struct mdss_mdp_qseed3_lut_tbl { + bool valid; + u32 *dir_lut; + u32 *cir_lut; + u32 *sep_lut; +}; + +struct mdss_scaler_block { + u32 vig_scaler_off; + u32 vig_scaler_lut_off; + u32 has_dest_scaler; + char __iomem *dest_base; + u32 ndest_scalers; + u32 *dest_scaler_off; + u32 *dest_scaler_lut_off; + struct mdss_mdp_qseed3_lut_tbl lut_tbl; + + /* + * Lock is mainly to serialize access to LUT. + * LUT values come asynchronously from userspace + * via ioctl. + */ + struct mutex scaler_lock; +}; + +struct mdss_data_type; + +struct mdss_smmu_ops { + int (*smmu_attach)(struct mdss_data_type *mdata); + int (*smmu_detach)(struct mdss_data_type *mdata); + int (*smmu_get_domain_id)(u32 type); + struct dma_buf_attachment * (*smmu_dma_buf_attach)( + struct dma_buf *dma_buf, struct device *devce, + int domain); + int (*smmu_map_dma_buf)(struct dma_buf *dma_buf, + struct sg_table *table, int domain, + dma_addr_t *iova, unsigned long *size, int dir); + void (*smmu_unmap_dma_buf)(struct sg_table *table, int domain, + int dir, struct dma_buf *dma_buf); + int (*smmu_dma_alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr, + gfp_t gfp, int domain); + void (*smmu_dma_free_coherent)(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t phys, dma_addr_t iova, + int domain); + int (*smmu_map)(int domain, phys_addr_t iova, phys_addr_t phys, int + gfp_order, int prot); + void (*smmu_unmap)(int domain, unsigned long iova, int gfp_order); + char * (*smmu_dsi_alloc_buf)(struct device *dev, int size, + dma_addr_t *dmap, gfp_t gfp); + int (*smmu_dsi_map_buffer)(phys_addr_t phys, unsigned int domain, + unsigned long size, dma_addr_t *dma_addr, + void *cpu_addr, int dir); + void (*smmu_dsi_unmap_buffer)(dma_addr_t dma_addr, int domain, + unsigned long size, int dir); + void (*smmu_deinit)(struct mdss_data_type *mdata); + struct sg_table * (*smmu_sg_table_clone)(struct sg_table *orig_table, + gfp_t gfp_mask, bool padding); +}; + +struct mdss_data_type { + u32 mdp_rev; + struct clk *mdp_clk[MDSS_MAX_CLK]; + struct regulator *fs; + struct regulator *venus; + struct regulator *vdd_cx; + bool batfet_required; + struct regulator *batfet; + bool en_svs_high; + u32 max_mdp_clk_rate; + struct mdss_util_intf *mdss_util; + struct mdss_panel_data *pdata; + unsigned long mdp_clk_rate; + + struct platform_device *pdev; + struct mdss_io_data mdss_io; + struct mdss_io_data vbif_io; + struct mdss_io_data vbif_nrt_io; + char __iomem *mdp_base; + + struct mdss_smmu_client mdss_smmu[MDSS_IOMMU_MAX_DOMAIN]; + struct mdss_smmu_ops smmu_ops; + struct mutex reg_lock; + + /* bitmap to track pipes that have BWC enabled */ + DECLARE_BITMAP(bwc_enable_map, MAX_DRV_SUP_PIPES); + /* bitmap to track hw workarounds */ + DECLARE_BITMAP(mdss_quirk_map, MDSS_QUIRK_MAX); + /* bitmap to track total mmbs in use */ + DECLARE_BITMAP(mmb_alloc_map, MAX_DRV_SUP_MMB_BLKS); + /* bitmap to track qos applicable settings */ + DECLARE_BITMAP(mdss_qos_map, MDSS_QOS_MAX); + /* bitmap to track hw capabilities/features */ + DECLARE_BITMAP(mdss_caps_map, MDSS_CAPS_MAX); + + u32 has_bwc; + /* values used when HW has a common panic/robust LUT */ + u32 
default_panic_lut0; + u32 default_panic_lut1; + u32 default_robust_lut; + + /* values used when HW has panic/robust LUTs per pipe */ + u32 default_panic_lut_per_pipe_linear; + u32 default_panic_lut_per_pipe_tile; + u32 default_robust_lut_per_pipe_linear; + u32 default_robust_lut_per_pipe_tile; + + u32 has_decimation; + bool has_fixed_qos_arbiter_enabled; + bool has_panic_ctrl; + u32 wfd_mode; + u32 has_no_lut_read; + atomic_t sd_client_count; + u8 has_wb_ad; + u8 has_non_scalar_rgb; + bool has_src_split; + bool idle_pc_enabled; + bool has_pingpong_split; + bool has_pixel_ram; + bool needs_hist_vote; + bool has_ubwc; + bool has_wb_ubwc; + bool has_separate_rotator; + + u32 default_ot_rd_limit; + u32 default_ot_wr_limit; + + struct irq_domain *irq_domain; + u32 *mdp_irq_mask; + u32 mdp_hist_irq_mask; + u32 mdp_intf_irq_mask; + + int suspend_fs_ena; + u8 clk_ena; + u8 fs_ena; + u8 vsync_ena; + + struct notifier_block gdsc_cb; + + u32 res_init; + + u32 highest_bank_bit; + u32 smp_mb_cnt; + u32 smp_mb_size; + u32 smp_mb_per_pipe; + u32 pixel_ram_size; + + u32 rot_block_size; + + /* HW RT bus (AXI) */ + u32 hw_rt_bus_hdl; + u32 hw_rt_bus_ref_cnt; + + /* data bus (AXI) */ + u32 bus_hdl; + u32 bus_ref_cnt; + struct mutex bus_lock; + + /* register bus (AHB) */ + u32 reg_bus_hdl; + u32 reg_bus_usecase_ndx; + struct list_head reg_bus_clist; + struct mutex reg_bus_lock; + struct reg_bus_client *reg_bus_clt; + struct reg_bus_client *pp_reg_bus_clt; + + u32 axi_port_cnt; + u32 nrt_axi_port_cnt; + u32 bus_channels; + u32 curr_bw_uc_idx; + u32 ao_bw_uc_idx; /* active only idx */ + struct msm_bus_scale_pdata *bus_scale_table; + struct msm_bus_scale_pdata *reg_bus_scale_table; + struct msm_bus_scale_pdata *hw_rt_bus_scale_table; + u32 max_bw_low; + u32 max_bw_high; + u32 max_bw_per_pipe; + u32 *vbif_rt_qos; + u32 *vbif_nrt_qos; + u32 npriority_lvl; + u32 rot_dwnscale_min; + u32 rot_dwnscale_max; + + struct mult_factor ab_factor; + struct mult_factor ib_factor; + struct mult_factor ib_factor_overlap; + struct mult_factor clk_factor; + struct mult_factor per_pipe_ib_factor; + bool apply_post_scale_bytes; + bool hflip_buffer_reused; + + u32 disable_prefill; + u32 *clock_levels; + u32 nclk_lvl; + + u32 enable_gate; + u32 enable_bw_release; + u32 enable_rotator_bw_release; + u32 enable_cdp; + u32 serialize_wait4pp; + u32 wait4autorefresh; + u32 lines_before_active; + + struct mdss_hw_settings *hw_settings; + + int rects_per_sspp[MDSS_MDP_PIPE_TYPE_MAX]; + struct mdss_mdp_pipe *vig_pipes; + struct mdss_mdp_pipe *rgb_pipes; + struct mdss_mdp_pipe *dma_pipes; + struct mdss_mdp_pipe *cursor_pipes; + u32 nvig_pipes; + u32 nrgb_pipes; + u32 ndma_pipes; + u32 max_target_zorder; + u8 ncursor_pipes; + u32 max_cursor_size; + + u32 nppb_ctl; + u32 *ppb_ctl; + u32 nppb_cfg; + u32 *ppb_cfg; + char __iomem *slave_pingpong_base; + + struct mdss_mdp_mixer *mixer_intf; + struct mdss_mdp_mixer *mixer_wb; + u32 nmixers_intf; + u32 nmixers_wb; + u32 max_mixer_width; + u32 max_pipe_width; + + struct mdss_mdp_writeback *wb; + u32 nwb; + u32 *wb_offsets; + u32 nwb_offsets; + struct mutex wb_lock; + + struct mdss_mdp_ctl *ctl_off; + u32 nctl; + u32 ndspp; + + struct mdss_mdp_dp_intf *dp_off; + u32 ndp; + void *video_intf; + u32 nintf; + + struct mdss_mdp_ad *ad_off; + struct mdss_ad_info *ad_cfgs; + u32 nad_cfgs; + u32 nmax_concurrent_ad_hw; + struct workqueue_struct *ad_calc_wq; + u32 ad_debugen; + bool mem_retain; + + struct mdss_intr hist_intr; + + struct ion_client *iclient; + int iommu_attached; + + struct debug_bus *dbg_bus; + 
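/* MDP and VBIF debug-bus descriptor tables, each paired with its entry count */ +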
u32 dbg_bus_size; + struct vbif_debug_bus *vbif_dbg_bus; + u32 vbif_dbg_bus_size; + struct vbif_debug_bus *nrt_vbif_dbg_bus; + u32 nrt_vbif_dbg_bus_size; + struct mdss_debug_inf debug_inf; + bool mixer_switched; + struct mdss_panel_cfg pan_cfg; + struct mdss_prefill_data prefill_data; + u32 min_prefill_lines; /* this changes within different chipsets */ + u32 props; + + int handoff_pending; + bool idle_pc; + struct mdss_perf_tune perf_tune; + bool traffic_shaper_en; + int iommu_ref_cnt; + u32 latency_buff_per; + atomic_t active_intf_cnt; + bool has_rot_dwnscale; + bool regulator_notif_register; + + u64 ab[MDSS_MAX_BUS_CLIENTS]; + u64 ib[MDSS_MAX_BUS_CLIENTS]; + struct mdss_pp_block_off pp_block_off; + + struct mdss_mdp_cdm *cdm_off; + u32 ncdm; + struct mutex cdm_lock; + + struct mdss_mdp_dsc *dsc_off; + u32 ndsc; + + struct mdss_max_bw_settings *max_bw_settings; + u32 bw_mode_bitmap; + u32 max_bw_settings_cnt; + bool bw_limit_pending; + + struct mdss_max_bw_settings *max_per_pipe_bw_settings; + u32 mdss_per_pipe_bw_cnt; + u32 min_bw_per_pipe; + + u32 bcolor0; + u32 bcolor1; + u32 bcolor2; + struct mdss_scaler_block *scaler_off; + + u32 splash_intf_sel; + u32 splash_split_disp; + struct mult_factor bus_throughput_factor; +}; + +extern struct mdss_data_type *mdss_res; + +struct irq_info { + u32 irq; + u32 irq_mask; + u32 irq_wake_mask; + u32 irq_ena; + u32 irq_wake_ena; + u32 irq_buzy; +}; + +struct mdss_hw { + u32 hw_ndx; + void *ptr; + struct irq_info *irq_info; + irqreturn_t (*irq_handler)(int irq, void *ptr); +}; + +struct irq_info *mdss_intr_line(void); +void mdss_bus_bandwidth_ctrl(int enable); +int mdss_iommu_ctrl(int enable); +int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota); +int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, + u32 usecase_ndx); +struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name); +void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *bus_client); + +struct mdss_util_intf { + bool mdp_probe_done; + int (*register_irq)(struct mdss_hw *hw); + void (*enable_irq)(struct mdss_hw *hw); + void (*disable_irq)(struct mdss_hw *hw); + void (*enable_wake_irq)(struct mdss_hw *hw); + void (*disable_wake_irq)(struct mdss_hw *hw); + void (*disable_irq_nosync)(struct mdss_hw *hw); + int (*irq_dispatch)(u32 hw_ndx, int irq, void *ptr); + int (*get_iommu_domain)(u32 type); + int (*iommu_attached)(void); + int (*iommu_ctrl)(int enable); + void (*iommu_lock)(void); + void (*iommu_unlock)(void); + void (*bus_bandwidth_ctrl)(int enable); + int (*bus_scale_set_quota)(int client, u64 ab_quota, u64 ib_quota); + int (*panel_intf_status)(u32 disp_num, u32 intf_type); + struct mdss_panel_cfg* (*panel_intf_type)(int intf_val); + int (*dyn_clk_gating_ctrl)(int enable); + bool (*param_check)(char *param_string); + bool display_disabled; +}; + +struct mdss_util_intf *mdss_get_util_intf(void); +bool mdss_get_irq_enable_state(struct mdss_hw *hw); + +static inline int mdss_get_sd_client_cnt(void) +{ + if (!mdss_res) + return 0; + else + return atomic_read(&mdss_res->sd_client_count); +} + +static inline void mdss_set_quirk(struct mdss_data_type *mdata, + enum mdss_hw_quirk bit) +{ + set_bit(bit, mdata->mdss_quirk_map); +} + +static inline bool mdss_has_quirk(struct mdss_data_type *mdata, + enum mdss_hw_quirk bit) +{ + return test_bit(bit, mdata->mdss_quirk_map); +} + +#define MDSS_VBIF_WRITE(mdata, offset, value, nrt_vbif) \ + (nrt_vbif ? 
mdss_reg_w(&mdata->vbif_nrt_io, offset, value, 0) :\ + mdss_reg_w(&mdata->vbif_io, offset, value, 0)) +#define MDSS_VBIF_READ(mdata, offset, nrt_vbif) \ + (nrt_vbif ? mdss_reg_r(&mdata->vbif_nrt_io, offset, 0) :\ + mdss_reg_r(&mdata->vbif_io, offset, 0)) +#define MDSS_REG_WRITE(mdata, offset, value) \ + mdss_reg_w(&mdata->mdss_io, offset, value, 0) +#define MDSS_REG_READ(mdata, offset) \ + mdss_reg_r(&mdata->mdss_io, offset, 0) + +#endif /* MDSS_H */ diff --git a/drivers/video/fbdev/msm/mdss_cec_core.c b/drivers/video/fbdev/msm/mdss_cec_core.c new file mode 100644 index 0000000000000000000000000000000000000000..23a3ce55c2d62f68302b48d6678bb7c40095a825 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_cec_core.c @@ -0,0 +1,799 @@ +/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include + +#include "mdss_fb.h" +#include "mdss_cec_core.h" + +#define CEC_ENABLE_MASK BIT(0) +#define CEC_WAKEUP_ENABLE_MASK BIT(1) + +struct cec_msg_node { + struct cec_msg msg; + struct list_head list; +}; + +struct cec_ctl { + bool enabled; + bool compliance_enabled; + bool cec_wakeup_en; + + u8 logical_addr; + + spinlock_t lock; + struct list_head msg_head; + struct cec_abstract_init_data init_data; + +}; + +static struct cec_ctl *cec_get_ctl(struct device *dev) +{ + struct fb_info *fbi; + struct msm_fb_data_type *mfd; + struct mdss_panel_info *pinfo; + + if (!dev) { + pr_err("invalid input\n"); + goto error; + } + + fbi = dev_get_drvdata(dev); + if (!fbi) { + pr_err("invalid fbi\n"); + goto error; + } + + mfd = fbi->par; + if (!mfd) { + pr_err("invalid mfd\n"); + goto error; + } + + pinfo = mfd->panel_info; + if (!pinfo) { + pr_err("invalid pinfo\n"); + goto error; + } + + return pinfo->cec_data; + +error: + return NULL; +} + +static int cec_msg_send(struct cec_ctl *ctl, struct cec_msg *msg) +{ + int ret = -EINVAL; + struct cec_ops *ops; + + if (!ctl || !msg) { + pr_err("invalid input\n"); + goto end; + } + + ops = ctl->init_data.ops; + + if (ops && ops->send_msg) + ret = ops->send_msg(ops->data, msg); +end: + return ret; +} + +static void cec_dump_msg(struct cec_ctl *ctl, struct cec_msg *msg) +{ + int i; + unsigned long flags; + + if (!ctl || !msg) { + pr_err("invalid input\n"); + return; + } + + spin_lock_irqsave(&ctl->lock, flags); + pr_debug("==%pS dump start ==\n", + __builtin_return_address(0)); + + pr_debug("cec: sender_id: %d\n", msg->sender_id); + pr_debug("cec: recvr_id: %d\n", msg->recvr_id); + + if (msg->frame_size < 2) { + pr_debug("cec: polling message\n"); + spin_unlock_irqrestore(&ctl->lock, flags); + return; + } + + pr_debug("cec: opcode: %02x\n", msg->opcode); + for (i = 0; i < msg->frame_size - 2; i++) + pr_debug("cec: operand(%2d) : %02x\n", i + 1, msg->operand[i]); + + pr_debug("==%pS dump end ==\n", + __builtin_return_address(0)); + spin_unlock_irqrestore(&ctl->lock, flags); +} + +static int cec_disable(struct cec_ctl *ctl) +{ + unsigned long flags; + int ret = -EINVAL; + struct cec_msg_node *msg_node, 
*tmp; + struct cec_ops *ops; + + if (!ctl) { + pr_err("Invalid input\n"); + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) { + list_del(&msg_node->list); + kfree(msg_node); + } + spin_unlock_irqrestore(&ctl->lock, flags); + + ops = ctl->init_data.ops; + + if (ops && ops->enable) + ret = ops->enable(ops->data, false); + + if (!ret) + ctl->enabled = false; + +end: + return ret; +} + +static int cec_enable(struct cec_ctl *ctl) +{ + int ret = -EINVAL; + struct cec_ops *ops; + + if (!ctl) { + pr_err("Invalid input\n"); + goto end; + } + + INIT_LIST_HEAD(&ctl->msg_head); + + ops = ctl->init_data.ops; + + if (ops && ops->enable) + ret = ops->enable(ops->data, true); + + if (!ret) + ctl->enabled = true; + +end: + return ret; +} + +static int cec_send_abort_opcode(struct cec_ctl *ctl, + struct cec_msg *in_msg, u8 reason_operand) +{ + int i = 0; + struct cec_msg out_msg; + + if (!ctl || !in_msg) { + pr_err("Invalid input\n"); + return -EINVAL; + } + + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x0; /* opcode for feature abort */ + out_msg.operand[i++] = in_msg->opcode; + out_msg.operand[i++] = reason_operand; + out_msg.frame_size = i + 2; + + return cec_msg_send(ctl, &out_msg); +} + +static int cec_msg_parser(struct cec_ctl *ctl, struct cec_msg *in_msg) +{ + int rc = 0, i = 0; + struct cec_msg out_msg; + + if (!ctl || !in_msg) { + pr_err("Invalid input\n"); + rc = -EINVAL; + goto end; + } + + pr_debug("in_msg->opcode = 0x%x\n", in_msg->opcode); + switch (in_msg->opcode) { + case CEC_MSG_SET_OSD_STRING: + /* Set OSD String */ + pr_debug("Recvd OSD Str=[0x%x]\n", + in_msg->operand[3]); + break; + case CEC_MSG_GIVE_PHYS_ADDR: + /* Give Phy Addr */ + pr_debug("Recvd a Give Phy Addr cmd\n"); + + out_msg.sender_id = 0x4; + /* Broadcast */ + out_msg.recvr_id = 0xF; + out_msg.opcode = 0x84; + out_msg.operand[i++] = 0x10; + out_msg.operand[i++] = 0x0; + out_msg.operand[i++] = 0x04; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_ABORT: + /* Abort */ + pr_debug("Recvd an abort cmd.\n"); + + /* reason = "Refused" */ + rc = cec_send_abort_opcode(ctl, in_msg, 0x04); + break; + case CEC_MSG_GIVE_OSD_NAME: + /* Give OSD name */ + pr_debug("Recvd 'Give OSD name' cmd.\n"); + + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x47; /* OSD Name */ + /* Display control byte */ + out_msg.operand[i++] = 0x0; + out_msg.operand[i++] = 'H'; + out_msg.operand[i++] = 'e'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = ' '; + out_msg.operand[i++] = 'W'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = 'r'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'd'; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_GIVE_POWER_STATUS: + /* Give Device Power status */ + pr_debug("Recvd a Power status message\n"); + + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x90; /* OSD String */ + out_msg.operand[i++] = 'H'; + out_msg.operand[i++] = 'e'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = ' '; + out_msg.operand[i++] = 'W'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = 'r'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'd'; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + 
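/* + * Note for the cases in this switch: a CEC frame is the 1-byte header + * block plus the 1-byte opcode block plus the operands, which is why + * each reply sets frame_size = i + 2 (cf. MAX_CEC_FRAME_SIZE in + * mdss_cec_core.h). + */ +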
case CEC_MSG_ROUTE_CHANGE_CMD: + /* Routing Change cmd */ + case CEC_MSG_SET_STREAM_PATH: + /* Set Stream Path */ + pr_debug("Recvd Set Stream or Routing Change cmd\n"); + + out_msg.sender_id = 0x4; + out_msg.recvr_id = 0xF; /* broadcast this message */ + out_msg.opcode = 0x82; /* Active Source */ + out_msg.operand[i++] = 0x10; + out_msg.operand[i++] = 0x0; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + if (rc) + goto end; + + /* sending message */ + memset(&out_msg, 0x0, sizeof(struct cec_msg)); + i = 0; + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x04; /* opcode for Image View On */ + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_USER_CTRL_PRESS: + /* User Control Pressed */ + pr_debug("User Control Pressed\n"); + break; + case CEC_MSG_USER_CTRL_RELEASE: + /* User Control Released */ + pr_debug("User Control Released\n"); + break; + default: + pr_debug("Recvd an unknown cmd = [%u]\n", + in_msg->opcode); + + /* reason = "Unrecognized opcode" */ + rc = cec_send_abort_opcode(ctl, in_msg, 0x0); + break; + } +end: + return rc; +} + +static int cec_msg_recv(void *data, struct cec_msg *msg) +{ + unsigned long flags; + struct cec_ctl *ctl = data; + struct cec_msg_node *msg_node; + int ret = 0; + + if (!ctl) { + pr_err("invalid input\n"); + ret = -EINVAL; + goto end; + } + + if (!ctl->enabled) { + pr_err("cec not enabled\n"); + ret = -ENODEV; + goto end; + } + + msg_node = kzalloc(sizeof(*msg_node), GFP_KERNEL); + if (!msg_node) { + ret = -ENOMEM; + goto end; + } + + msg_node->msg = *msg; + + pr_debug("CEC read frame done\n"); + cec_dump_msg(ctl, &msg_node->msg); + + spin_lock_irqsave(&ctl->lock, flags); + if (ctl->compliance_enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + + ret = cec_msg_parser(ctl, &msg_node->msg); + if (ret) + pr_err("msg parsing failed\n"); + + kfree(msg_node); + } else { + list_add_tail(&msg_node->list, &ctl->msg_head); + spin_unlock_irqrestore(&ctl->lock, flags); + + /* wake-up sysfs read_msg context */ + sysfs_notify(ctl->init_data.kobj, "cec", "rd_msg"); + } +end: + return ret; +} + +static ssize_t cec_rda_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + unsigned long flags; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("Invalid input\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + if (ctl->enabled) { + pr_debug("cec is enabled\n"); + ret = snprintf(buf, PAGE_SIZE, "%d\n", 1); + } else { + pr_err("cec is disabled\n"); + ret = snprintf(buf, PAGE_SIZE, "%d\n", 0); + } + spin_unlock_irqrestore(&ctl->lock, flags); +end: + return ret; +} + +static ssize_t cec_wta_enable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + bool cec_en; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + struct cec_ops *ops; + + if (!ctl) { + pr_err("Invalid input\n"); + ret = -EINVAL; + goto end; + } + + ops = ctl->init_data.ops; + + ret = kstrtoint(buf, 10, &val); + if (ret) { + pr_err("kstrtoint failed.\n"); + goto end; + } + + cec_en = (val & CEC_ENABLE_MASK) ? true : false; + + /* bit 1 is used for wakeup feature */ + if ((val & CEC_ENABLE_MASK) && (val & CEC_WAKEUP_ENABLE_MASK)) + ctl->cec_wakeup_en = true; + else + ctl->cec_wakeup_en = false; + + if (ops && ops->wakeup_en) + ops->wakeup_en(ops->data, ctl->cec_wakeup_en); + + if (ctl->enabled == cec_en) { + pr_debug("cec is already %s\n", + cec_en ? 
"enabled" : "disabled"); + goto bail; + } + + if (cec_en) + ret = cec_enable(ctl); + else + ret = cec_disable(ctl); + + if (ret) + goto end; + +bail: + ret = strnlen(buf, PAGE_SIZE); +end: + return ret; +} + +static ssize_t cec_rda_enable_compliance(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("Invalid ctl\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->lock, flags); + ret = snprintf(buf, PAGE_SIZE, "%d\n", + ctl->compliance_enabled); + + spin_unlock_irqrestore(&ctl->lock, flags); + + return ret; +} + +static ssize_t cec_wta_enable_compliance(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + struct cec_ops *ops; + + if (!ctl) { + pr_err("Invalid ctl\n"); + ret = -EINVAL; + goto end; + } + + ops = ctl->init_data.ops; + + ret = kstrtoint(buf, 10, &val); + if (ret) { + pr_err("kstrtoint failed.\n"); + goto end; + } + + ctl->compliance_enabled = (val == 1) ? true : false; + + if (ctl->compliance_enabled) { + ret = cec_enable(ctl); + if (ret) + goto end; + + ctl->logical_addr = 0x4; + + if (ops && ops->wt_logical_addr) + ops->wt_logical_addr(ops->data, ctl->logical_addr); + + } else { + ctl->logical_addr = 0; + + ret = cec_disable(ctl); + if (ret) + goto end; + } + + ret = strnlen(buf, PAGE_SIZE); +end: + return ret; +} + +static ssize_t cec_rda_logical_addr(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("Invalid ctl\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->lock, flags); + ret = snprintf(buf, PAGE_SIZE, "%d\n", ctl->logical_addr); + spin_unlock_irqrestore(&ctl->lock, flags); + + return ret; +} + +static ssize_t cec_wta_logical_addr(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int logical_addr; + unsigned long flags; + ssize_t ret = strnlen(buf, PAGE_SIZE); + struct cec_ctl *ctl = cec_get_ctl(dev); + struct cec_ops *ops; + + if (!ctl) { + pr_err("Invalid ctl\n"); + ret = -EINVAL; + goto end; + } + + ops = ctl->init_data.ops; + + ret = kstrtoint(buf, 10, &logical_addr); + if (ret) { + pr_err("kstrtoint failed\n"); + goto end; + } + + if (logical_addr < 0 || logical_addr > 15) { + pr_err("Invalid logical address\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + ctl->logical_addr = (u8)logical_addr; + if (ctl->enabled) { + if (ops && ops->wt_logical_addr) + ops->wt_logical_addr(ops->data, ctl->logical_addr); + } + spin_unlock_irqrestore(&ctl->lock, flags); +end: + return ret; +} + +static ssize_t cec_rda_msg(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i = 0; + unsigned long flags; + struct cec_msg_node *msg_node, *tmp; + struct cec_ctl *ctl = cec_get_ctl(dev); + ssize_t ret; + + if (!ctl) { + pr_err("Invalid ctl\n"); + ret = -EINVAL; + goto end; + } + + if (!ctl->enabled) { + pr_err("cec not enabled\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + + if (ctl->compliance_enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("Read no allowed in compliance mode\n"); + ret = -EPERM; + goto end; + } + + if (list_empty_careful(&ctl->msg_head)) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("CEC message queue is empty\n"); + ret = -EINVAL; + goto end; + } + + 
/* + * Drain the queued messages into the sysfs read buffer: each struct + * cec_msg is copied back-to-back and then freed, and the loop stops + * before the copies would exceed PAGE_SIZE, so userspace should parse + * the buffer in sizeof(struct cec_msg) chunks. + */ + list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) { + if ((i + 1) * sizeof(struct cec_msg) > PAGE_SIZE) { + pr_debug("Overflowing PAGE_SIZE.\n"); + break; + } + + memcpy(buf + (i * sizeof(struct cec_msg)), &msg_node->msg, + sizeof(struct cec_msg)); + list_del(&msg_node->list); + kfree(msg_node); + i++; + } + + spin_unlock_irqrestore(&ctl->lock, flags); + + ret = i * sizeof(struct cec_msg); +end: + return ret; +} + +static ssize_t cec_wta_msg(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + ssize_t ret; + unsigned long flags; + struct cec_msg *msg = (struct cec_msg *)buf; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("Invalid ctl\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + if (ctl->compliance_enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("Write not allowed in compliance mode\n"); + ret = -EPERM; + goto end; + } + + if (!ctl->enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("CEC is not configured\n"); + ret = -EPERM; + goto end; + } + spin_unlock_irqrestore(&ctl->lock, flags); + + if (msg->frame_size > MAX_OPERAND_SIZE) { + pr_err("msg frame too big!\n"); + ret = -EINVAL; + goto end; + } + ret = cec_msg_send(ctl, msg); + if (ret) { + pr_err("cec_msg_send failed\n"); + goto end; + } + + ret = sizeof(struct cec_msg); +end: + return ret; +} + +static DEVICE_ATTR(enable, 0644, cec_rda_enable, + cec_wta_enable); +static DEVICE_ATTR(enable_compliance, 0644, + cec_rda_enable_compliance, cec_wta_enable_compliance); +static DEVICE_ATTR(logical_addr, 0600, + cec_rda_logical_addr, cec_wta_logical_addr); +static DEVICE_ATTR(rd_msg, 0444, cec_rda_msg, NULL); +static DEVICE_ATTR(wr_msg, 0600, NULL, cec_wta_msg); + +static struct attribute *cec_fs_attrs[] = { + &dev_attr_enable.attr, + &dev_attr_enable_compliance.attr, + &dev_attr_logical_addr.attr, + &dev_attr_rd_msg.attr, + &dev_attr_wr_msg.attr, + NULL, +}; + +static struct attribute_group cec_fs_attr_group = { + .name = "cec", + .attrs = cec_fs_attrs, +}; + +/** + * cec_abstract_deinit() - Release CEC abstract module + * @input: CEC abstract data + * + * This API releases all the resources allocated for this + * module. + * + * Return: 0 on success, otherwise an error code. + */ +int cec_abstract_deinit(void *input) +{ + struct cec_ctl *ctl = (struct cec_ctl *)input; + + if (!ctl) + return -EINVAL; + + sysfs_remove_group(ctl->init_data.kobj, &cec_fs_attr_group); + + kfree(ctl); + + return 0; +} + +/** + * cec_abstract_init() - Initialize CEC abstract module + * @init_data: data needed to initialize the CEC abstraction module + * + * This API initializes the CEC abstract module, which connects the + * CEC client with the CEC hardware. It creates sysfs nodes for the + * client to read and write CEC messages and talks to the hardware + * through the provided operation function pointers. It also hands out + * callback function pointers so the hardware can report incoming + * CEC messages. + * + * Return: pointer to the CEC abstract data which needs to be passed + * as a parameter with the callback functions.
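+ *
+ * A minimal usage sketch (hypothetical caller: the hdmi_cec_* names,
+ * hdmi_ctrl and fb_kobj are illustrative only; error handling elided;
+ * ops and cbs must outlive the abstraction, hence static here):
+ *
+ *	static struct cec_ops ops = {
+ *		.enable = hdmi_cec_enable,
+ *		.send_msg = hdmi_cec_send,
+ *		.data = &hdmi_ctrl,
+ *	};
+ *	static struct cec_cbs cbs;
+ *	struct cec_abstract_init_data init_data = {
+ *		.ops = &ops, .cbs = &cbs, .kobj = fb_kobj,
+ *	};
+ *	void *cec_data = cec_abstract_init(&init_data);
+ *
+ *	if (IS_ERR(cec_data))
+ *		return PTR_ERR(cec_data);
+ *
+ * cec_abstract_init() fills in cbs.msg_recv_notify and cbs.data, so on
+ * an incoming frame the hardware driver simply calls
+ * cbs.msg_recv_notify(cbs.data, &msg) to queue the message for the
+ * sysfs reader (or for the compliance parser when it is enabled).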
+ */ +void *cec_abstract_init(struct cec_abstract_init_data *init_data) +{ + struct cec_ctl *ctl = NULL; + int ret = 0; + + if (!init_data) { + pr_err("invalid input\n"); + ret = -EINVAL; + goto end; + } + + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); + if (!ctl) { + ret = -ENOMEM; + goto end; + } + + /* keep a copy of init data */ + ctl->init_data = *init_data; + + ret = sysfs_create_group(ctl->init_data.kobj, &cec_fs_attr_group); + if (ret) { + pr_err("cec sysfs group creation failed\n"); + goto end; + } + + spin_lock_init(&ctl->lock); + + /* provide callback function pointers */ + if (init_data->cbs) { + init_data->cbs->msg_recv_notify = cec_msg_recv; + init_data->cbs->data = ctl; + } + + return ctl; +end: + kfree(ctl); + return ERR_PTR(ret); +} + diff --git a/drivers/video/fbdev/msm/mdss_cec_core.h b/drivers/video/fbdev/msm/mdss_cec_core.h new file mode 100644 index 0000000000000000000000000000000000000000..f8196a0aa38408a5dcd4c04aa6beec440ce0426d --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_cec_core.h @@ -0,0 +1,105 @@ +/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MDSS_CEC_CORE_H__ +#define __MDSS_CEC_CORE_H__ + +#define MAX_OPERAND_SIZE 14 + +/* total size: HEADER block (1) + opcode block (1) + operands (14) */ +#define MAX_CEC_FRAME_SIZE (MAX_OPERAND_SIZE + 2) + +/* CEC message set */ +#define CEC_MSG_SET_OSD_STRING 0x64 +#define CEC_MSG_GIVE_PHYS_ADDR 0x83 +#define CEC_MSG_ABORT 0xFF +#define CEC_MSG_GIVE_OSD_NAME 0x46 +#define CEC_MSG_GIVE_POWER_STATUS 0x8F +#define CEC_MSG_ROUTE_CHANGE_CMD 0x80 +#define CEC_MSG_SET_STREAM_PATH 0x86 +#define CEC_MSG_USER_CTRL_PRESS 0x44 +#define CEC_MSG_USER_CTRL_RELEASE 0x45 + +/** + * struct cec_msg - CEC message related data + * @sender_id: CEC message initiator's id + * @recvr_id: CEC message destination's id + * @opcode: CEC message opcode + * @operand: CEC message operands corresponding to opcode + * @frame_size: total CEC frame size + * @retransmit: number of re-tries to transmit message + * + * Basic CEC message structure used by both client and driver. + */ +struct cec_msg { + u8 sender_id; + u8 recvr_id; + u8 opcode; + u8 operand[MAX_OPERAND_SIZE]; + u8 frame_size; + u8 retransmit; +}; + +/** + * struct cec_ops - CEC operations function pointers + * @enable: function pointer to enable CEC + * @send_msg: function pointer to send CEC message + * @wt_logical_addr: function pointer to write logical address + * @wakeup_en: function pointer to enable wakeup feature + * @is_wakeup_en: function pointer to query wakeup feature state + * @device_suspend: function pointer to update device suspend state + * @data: pointer to the data needed to send with operation functions + * + * Defines all the operations that abstract module can call + * to programe the CEC driver. 
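+ *
+ * Each hook receives @data back as its first argument, so the hardware
+ * driver can recover its private context without global state; the
+ * abstraction always invokes e.g. ops->send_msg(ops->data, msg) and
+ * ops->enable(ops->data, on) (see cec_msg_send() and cec_enable() in
+ * mdss_cec_core.c).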
+ */ +struct cec_ops { + int (*enable)(void *data, bool enable); + int (*send_msg)(void *data, + struct cec_msg *msg); + void (*wt_logical_addr)(void *data, u8 addr); + void (*wakeup_en)(void *data, bool en); + bool (*is_wakeup_en)(void *data); + void (*device_suspend)(void *data, bool suspend); + void *data; +}; + +/** + * struct cec_cbs - CEC callback function pointers + * @msg_recv_notify: function pointer called by the CEC driver to notify an incoming msg + * @data: pointer to data that needs to be sent with the callback function + * + * Defines callback functions which the CEC driver can call back to notify any + * change in the hardware. + */ +struct cec_cbs { + int (*msg_recv_notify)(void *data, struct cec_msg *msg); + void *data; +}; + +/** + * struct cec_abstract_init_data - initialization data for abstract module + * @ops: pointer to struct containing all operation function pointers + * @cbs: pointer to struct containing all callback function pointers + * @kobj: pointer to kobject instance associated with CEC driver. + * + * Defines initialization data needed by init API to initialize the module. + */ +struct cec_abstract_init_data { + struct cec_ops *ops; + struct cec_cbs *cbs; + struct kobject *kobj; +}; + +void *cec_abstract_init(struct cec_abstract_init_data *init_data); +int cec_abstract_deinit(void *input); +#endif /* __MDSS_CEC_CORE_H__ */ diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..a81f149f59ce9f97d95f4a0dc2ef135590eac9a4 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_compat_utils.c @@ -0,0 +1,4318 @@ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 1994 Martin Schaller + * + * 2001 - Documented with DocBook + * - Brad Douglas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#include +#include + +#include + +#include "mdss_fb.h" +#include "mdss_compat_utils.h" +#include "mdss_mdp_hwio.h" +#include "mdss_mdp.h" + +#define MSMFB_CURSOR32 _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor32) +#define MSMFB_SET_LUT32 _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap32) +#define MSMFB_HISTOGRAM32 _IOWR(MSMFB_IOCTL_MAGIC, 132,\ + struct mdp_histogram_data32) +#define MSMFB_GET_CCS_MATRIX32 _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs32) +#define MSMFB_SET_CCS_MATRIX32 _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs32) +#define MSMFB_OVERLAY_SET32 _IOWR(MSMFB_IOCTL_MAGIC, 135,\ + struct mdp_overlay32) + +#define MSMFB_OVERLAY_GET32 _IOR(MSMFB_IOCTL_MAGIC, 140,\ + struct mdp_overlay32) +#define MSMFB_OVERLAY_BLT32 _IOWR(MSMFB_IOCTL_MAGIC, 142,\ + struct msmfb_overlay_blt32) +#define MSMFB_HISTOGRAM_START32 _IOR(MSMFB_IOCTL_MAGIC, 144,\ + struct mdp_histogram_start_req32) + +#define MSMFB_OVERLAY_3D32 _IOWR(MSMFB_IOCTL_MAGIC, 147,\ + struct msmfb_overlay_3d32) + +#define MSMFB_MIXER_INFO32 _IOWR(MSMFB_IOCTL_MAGIC, 148,\ + struct msmfb_mixer_info_req32) +#define MSMFB_MDP_PP32 _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp32) +#define MSMFB_BUFFER_SYNC32 _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync32) +#define MSMFB_OVERLAY_PREPARE32 _IOWR(MSMFB_IOCTL_MAGIC, 169, \ + struct mdp_overlay_list32) +#define MSMFB_ATOMIC_COMMIT32 _IOWR(MDP_IOCTL_MAGIC, 128, compat_caddr_t) + +#define MSMFB_ASYNC_POSITION_UPDATE_32 _IOWR(MDP_IOCTL_MAGIC, 129, \ + struct mdp_position_update32) + +static int __copy_layer_pp_info_params(struct mdp_input_layer *layer, + struct mdp_input_layer32 *layer32); + +static unsigned int __do_compat_ioctl_nr(unsigned int cmd32) +{ + unsigned int cmd; + + switch (cmd32) { + case MSMFB_CURSOR32: + cmd = MSMFB_CURSOR; + break; + case MSMFB_SET_LUT32: + cmd = MSMFB_SET_LUT; + break; + case MSMFB_HISTOGRAM32: + cmd = MSMFB_HISTOGRAM; + break; + case MSMFB_GET_CCS_MATRIX32: + cmd = MSMFB_GET_CCS_MATRIX; + break; + case MSMFB_SET_CCS_MATRIX32: + cmd = MSMFB_SET_CCS_MATRIX; + break; + case MSMFB_OVERLAY_SET32: + cmd = MSMFB_OVERLAY_SET; + break; + case MSMFB_OVERLAY_GET32: + cmd = MSMFB_OVERLAY_GET; + break; + case MSMFB_OVERLAY_BLT32: + cmd = MSMFB_OVERLAY_BLT; + break; + case MSMFB_OVERLAY_3D32: + cmd = MSMFB_OVERLAY_3D; + break; + case MSMFB_MIXER_INFO32: + cmd = MSMFB_MIXER_INFO; + break; + case MSMFB_MDP_PP32: + cmd = MSMFB_MDP_PP; + break; + case MSMFB_BUFFER_SYNC32: + cmd = MSMFB_BUFFER_SYNC; + break; + case MSMFB_OVERLAY_PREPARE32: + cmd = MSMFB_OVERLAY_PREPARE; + break; + case MSMFB_ATOMIC_COMMIT32: + cmd = MSMFB_ATOMIC_COMMIT; + break; + case MSMFB_ASYNC_POSITION_UPDATE_32: + cmd = MSMFB_ASYNC_POSITION_UPDATE; + break; + default: + cmd = cmd32; + break; + } + + return cmd; +} + +static void __copy_atomic_commit_struct(struct mdp_layer_commit *commit, + struct mdp_layer_commit32 *commit32) +{ + unsigned int destsize = sizeof(commit->commit_v1.reserved); + unsigned int srcsize = sizeof(commit32->commit_v1.reserved); + unsigned int count = (destsize <= srcsize ? 
destsize : srcsize); + + commit->version = commit32->version; + commit->commit_v1.flags = commit32->commit_v1.flags; + commit->commit_v1.input_layer_cnt = + commit32->commit_v1.input_layer_cnt; + commit->commit_v1.left_roi = commit32->commit_v1.left_roi; + commit->commit_v1.right_roi = commit32->commit_v1.right_roi; + commit->commit_v1.bl_level = commit32->commit_v1.bl_level; + memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved, + count); +} + +static struct mdp_input_layer32 *__create_layer_list32( + struct mdp_layer_commit32 *commit32, + u32 layer_count) +{ + u32 buffer_size32; + struct mdp_input_layer32 *layer_list32; + int ret; + + buffer_size32 = sizeof(struct mdp_input_layer32) * layer_count; + + layer_list32 = kmalloc(buffer_size32, GFP_KERNEL); + if (!layer_list32) { + layer_list32 = ERR_PTR(-ENOMEM); + goto end; + } + + ret = copy_from_user(layer_list32, + compat_ptr(commit32->commit_v1.input_layers), + sizeof(struct mdp_input_layer32) * layer_count); + if (ret) { + pr_err("layer list32 copy from user failed, ptr %pK\n", + compat_ptr(commit32->commit_v1.input_layers)); + kfree(layer_list32); + ret = -EFAULT; + layer_list32 = ERR_PTR(ret); + } + +end: + return layer_list32; +} + +static int __copy_scale_params(struct mdp_input_layer *layer, + struct mdp_input_layer32 *layer32) +{ + struct mdp_scale_data *scale; + int ret; + + if (!(layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT)) + return 0; + + scale = kmalloc(sizeof(struct mdp_scale_data), GFP_KERNEL); + if (!scale) { + ret = -ENOMEM; + goto end; + } + + /* scale structure size is same for compat and 64bit version */ + ret = copy_from_user(scale, compat_ptr(layer32->scale), + sizeof(struct mdp_scale_data)); + if (ret) { + kfree(scale); + pr_err("scale param copy from user failed, ptr %pK\n", + compat_ptr(layer32->scale)); + ret = -EFAULT; + } else { + layer->scale = scale; + } +end: + return ret; +} + +static struct mdp_input_layer *__create_layer_list( + struct mdp_layer_commit *commit, + struct mdp_input_layer32 *layer_list32, + u32 layer_count) +{ + int i, ret = 0; + u32 buffer_size; + struct mdp_input_layer *layer, *layer_list; + struct mdp_input_layer32 *layer32; + + buffer_size = sizeof(struct mdp_input_layer) * layer_count; + + layer_list = kmalloc(buffer_size, GFP_KERNEL); + if (!layer_list) { + layer_list = ERR_PTR(-ENOMEM); + goto end; + } + + commit->commit_v1.input_layers = layer_list; + + for (i = 0; i < layer_count; i++) { + layer = &layer_list[i]; + layer32 = &layer_list32[i]; + + layer->flags = layer32->flags; + layer->pipe_ndx = layer32->pipe_ndx; + layer->horz_deci = layer32->horz_deci; + layer->vert_deci = layer32->vert_deci; + layer->z_order = layer32->z_order; + layer->transp_mask = layer32->transp_mask; + layer->bg_color = layer32->bg_color; + layer->blend_op = layer32->blend_op; + layer->alpha = layer32->alpha; + layer->color_space = layer32->color_space; + layer->src_rect = layer32->src_rect; + layer->dst_rect = layer32->dst_rect; + layer->buffer = layer32->buffer; + memcpy(&layer->reserved, &layer32->reserved, + sizeof(layer->reserved)); + + layer->scale = NULL; + ret = __copy_scale_params(layer, layer32); + if (ret) + break; + + layer->pp_info = NULL; + ret = __copy_layer_pp_info_params(layer, layer32); + if (ret) + break; + } + + if (ret) { + for (i--; i >= 0; i--) { + kfree(layer_list[i].scale); + mdss_mdp_free_layer_pp_info(&layer_list[i]); + } + kfree(layer_list); + layer_list = ERR_PTR(ret); + } + +end: + return layer_list; +} + +static int __copy_to_user_atomic_commit(struct 
mdp_layer_commit *commit, + struct mdp_layer_commit32 *commit32, + struct mdp_input_layer32 *layer_list32, + unsigned long argp, u32 layer_count) +{ + int i, ret; + struct mdp_input_layer *layer_list; + + layer_list = commit->commit_v1.input_layers; + + for (i = 0; i < layer_count; i++) + layer_list32[i].error_code = layer_list[i].error_code; + + ret = copy_to_user(compat_ptr(commit32->commit_v1.input_layers), + layer_list32, + sizeof(struct mdp_input_layer32) * layer_count); + if (ret) + goto end; + + ret = copy_to_user(compat_ptr(commit32->commit_v1.output_layer), + commit->commit_v1.output_layer, + sizeof(struct mdp_output_layer)); + if (ret) + goto end; + + commit32->commit_v1.release_fence = + commit->commit_v1.release_fence; + commit32->commit_v1.retire_fence = + commit->commit_v1.retire_fence; + + ret = copy_to_user((void __user *)argp, commit32, + sizeof(struct mdp_layer_commit32)); + +end: + return ret; +} + +static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd, + unsigned long argp, struct file *file) +{ + int ret, i; + struct mdp_layer_commit commit; + struct mdp_layer_commit32 commit32; + u32 layer_count; + struct mdp_input_layer *layer_list = NULL; + struct mdp_input_layer32 *layer_list32 = NULL; + struct mdp_output_layer *output_layer = NULL; + struct mdp_frc_info *frc_info = NULL; + + /* copy top level memory from 32 bit structure to kernel memory */ + ret = copy_from_user(&commit32, (void __user *)argp, + sizeof(struct mdp_layer_commit32)); + if (ret) { + pr_err("%s:copy_from_user failed, ptr %pK\n", __func__, + (void __user *)argp); + ret = -EFAULT; + return ret; + } + + memset(&commit, 0, sizeof(struct mdp_layer_commit)); + __copy_atomic_commit_struct(&commit, &commit32); + + if (commit32.commit_v1.output_layer) { + int buffer_size = sizeof(struct mdp_output_layer); + + output_layer = kzalloc(buffer_size, GFP_KERNEL); + if (!output_layer) + return -ENOMEM; + + ret = copy_from_user(output_layer, + compat_ptr(commit32.commit_v1.output_layer), + buffer_size); + if (ret) { + pr_err("fail to copy output layer from user, ptr %pK\n", + compat_ptr(commit32.commit_v1.output_layer)); + ret = -EFAULT; + goto layer_list_err; + } + + commit.commit_v1.output_layer = output_layer; + } + + layer_count = commit32.commit_v1.input_layer_cnt; + if (layer_count > MAX_LAYER_COUNT) { + ret = -EINVAL; + goto layer_list_err; + } else if (layer_count) { + /* + * allocate memory for layer list in 32bit domain and copy it + * from user + */ + layer_list32 = __create_layer_list32(&commit32, layer_count); + if (IS_ERR_OR_NULL(layer_list32)) { + ret = PTR_ERR(layer_list32); + goto layer_list_err; + } + + /* + * allocate memory for layer list in kernel memory domain and + * copy layer info from 32bit structures to kernel memory + */ + layer_list = __create_layer_list(&commit, layer_list32, + layer_count); + if (IS_ERR_OR_NULL(layer_list)) { + ret = PTR_ERR(layer_list); + goto layer_list_err; + } + } + + if (commit32.commit_v1.frc_info) { + int buffer_size = sizeof(struct mdp_frc_info); + + frc_info = kzalloc(buffer_size, GFP_KERNEL); + if (!frc_info) { + ret = -ENOMEM; + goto frc_err; + } + + ret = copy_from_user(frc_info, + compat_ptr(commit32.commit_v1.frc_info), + buffer_size); + if (ret) { + pr_err("fail to copy frc info from user, ptr %p\n", + compat_ptr(commit32.commit_v1.frc_info)); + kfree(frc_info); + ret = -EFAULT; + goto frc_err; + } + + commit.commit_v1.frc_info = frc_info; + } + + ret = mdss_fb_atomic_commit(info, &commit, file); + if (ret) + pr_err("atomic commit failed 
ret:%d\n", ret); + + if (layer_count) + __copy_to_user_atomic_commit(&commit, &commit32, layer_list32, + argp, layer_count); + + for (i = 0; i < layer_count; i++) { + kfree(layer_list[i].scale); + mdss_mdp_free_layer_pp_info(&layer_list[i]); + } + + kfree(frc_info); +frc_err: + kfree(layer_list); +layer_list_err: + kfree(layer_list32); + kfree(output_layer); + return ret; +} + +static int __copy_to_user_async_position_update( + struct mdp_position_update *update_pos, + struct mdp_position_update32 *update_pos32, + unsigned long argp, u32 layer_cnt) +{ + int ret; + + ret = copy_to_user(update_pos32->input_layers, + update_pos->input_layers, + sizeof(struct mdp_async_layer) * layer_cnt); + if (ret) + goto end; + + ret = copy_to_user((void __user *) argp, update_pos32, + sizeof(struct mdp_position_update32)); + +end: + return ret; +} + +static struct mdp_async_layer *__create_async_layer_list( + struct mdp_position_update32 *update_pos32, u32 layer_cnt) +{ + u32 buffer_size; + struct mdp_async_layer *layer_list; + int ret; + + buffer_size = sizeof(struct mdp_async_layer) * layer_cnt; + + layer_list = kmalloc(buffer_size, GFP_KERNEL); + if (!layer_list) { + layer_list = ERR_PTR(-ENOMEM); + goto end; + } + + ret = copy_from_user(layer_list, + update_pos32->input_layers, buffer_size); + if (ret) { + pr_err("layer list32 copy from user failed\n"); + kfree(layer_list); + layer_list = ERR_PTR(ret); + } + +end: + return layer_list; +} + +static int __compat_async_position_update(struct fb_info *info, + unsigned int cmd, unsigned long argp) +{ + struct mdp_position_update update_pos; + struct mdp_position_update32 update_pos32; + struct mdp_async_layer *layer_list = NULL; + u32 layer_cnt, ret; + + /* copy top level memory from 32 bit structure to kernel memory */ + ret = copy_from_user(&update_pos32, (void __user *)argp, + sizeof(struct mdp_position_update32)); + if (ret) { + pr_err("%s:copy_from_user failed\n", __func__); + return ret; + } + + update_pos.input_layer_cnt = update_pos32.input_layer_cnt; + layer_cnt = update_pos32.input_layer_cnt; + if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) { + pr_err("invalid async layers :%d to update\n", layer_cnt); + return -EINVAL; + } + + layer_list = __create_async_layer_list(&update_pos32, + layer_cnt); + if (IS_ERR_OR_NULL(layer_list)) + return PTR_ERR(layer_list); + + update_pos.input_layers = layer_list; + + ret = mdss_fb_async_position_update(info, &update_pos); + if (ret) + pr_err("async position update failed ret:%d\n", ret); + + ret = __copy_to_user_async_position_update(&update_pos, &update_pos32, + argp, layer_cnt); + if (ret) + pr_err("copy to user of async update position failed\n"); + + kfree(layer_list); + return ret; +} + +static int mdss_fb_compat_buf_sync(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct mdp_buf_sync32 __user *buf_sync32; + struct mdp_buf_sync __user *buf_sync; + u32 data; + int ret; + + buf_sync = compat_alloc_user_space(sizeof(*buf_sync)); + if (!buf_sync) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*buf_sync)); + return -EINVAL; + } + buf_sync32 = compat_ptr(arg); + + if (copy_in_user(&buf_sync->flags, &buf_sync32->flags, + 3 * sizeof(u32))) + return -EFAULT; + + if (get_user(data, &buf_sync32->acq_fen_fd) || + put_user(compat_ptr(data), &buf_sync->acq_fen_fd) || + get_user(data, &buf_sync32->rel_fen_fd) || + put_user(compat_ptr(data), &buf_sync->rel_fen_fd) || + get_user(data, &buf_sync32->retire_fen_fd) || + put_user(compat_ptr(data), 
&buf_sync->retire_fen_fd)) + return -EFAULT; + + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) buf_sync, file); + if (ret) { + pr_err("%s: failed %d\n", __func__, ret); + return ret; + } + + if (copy_in_user(compat_ptr(buf_sync32->rel_fen_fd), + buf_sync->rel_fen_fd, + sizeof(int))) + return -EFAULT; + if (copy_in_user(compat_ptr(buf_sync32->retire_fen_fd), + buf_sync->retire_fen_fd, + sizeof(int))) { + if (buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE) + return -EFAULT; + pr_debug("%s: no retire fence fd for wb\n", + __func__); + } + + return ret; +} + +static int __from_user_fb_cmap(struct fb_cmap __user *cmap, + struct fb_cmap32 __user *cmap32) +{ + __u32 data; + + if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32))) + return -EFAULT; + + if (get_user(data, &cmap32->red) || + put_user(compat_ptr(data), &cmap->red) || + get_user(data, &cmap32->green) || + put_user(compat_ptr(data), &cmap->green) || + get_user(data, &cmap32->blue) || + put_user(compat_ptr(data), &cmap->blue) || + get_user(data, &cmap32->transp) || + put_user(compat_ptr(data), &cmap->transp)) + return -EFAULT; + + return 0; +} + +static int __to_user_fb_cmap(struct fb_cmap __user *cmap, + struct fb_cmap32 __user *cmap32) +{ + unsigned long data; + + if (copy_in_user(&cmap32->start, &cmap->start, 2 * sizeof(__u32))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &cmap->red) || + put_user((compat_caddr_t) data, &cmap32->red) || + get_user(data, (unsigned long *) &cmap->green) || + put_user((compat_caddr_t) data, &cmap32->green) || + get_user(data, (unsigned long *) &cmap->blue) || + put_user((compat_caddr_t) data, &cmap32->blue) || + get_user(data, (unsigned long *) &cmap->transp) || + put_user((compat_caddr_t) data, &cmap32->transp)) + return -EFAULT; + + return 0; +} + +static int __from_user_fb_image(struct fb_image __user *image, + struct fb_image32 __user *image32) +{ + __u32 data; + + if (copy_in_user(&image->dx, &image32->dx, 6 * sizeof(u32)) || + copy_in_user(&image->depth, &image32->depth, sizeof(u8))) + return -EFAULT; + + if (get_user(data, &image32->data) || + put_user(compat_ptr(data), &image->data)) + return -EFAULT; + + if (__from_user_fb_cmap(&image->cmap, &image32->cmap)) + return -EFAULT; + + return 0; +} + +static int mdss_fb_compat_cursor(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct fb_cursor32 __user *cursor32; + struct fb_cursor __user *cursor; + __u32 data; + int ret; + + cursor = compat_alloc_user_space(sizeof(*cursor)); + if (!cursor) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*cursor)); + return -EINVAL; + } + cursor32 = compat_ptr(arg); + + if (copy_in_user(&cursor->set, &cursor32->set, 3 * sizeof(u16))) + return -EFAULT; + + if (get_user(data, &cursor32->mask) || + put_user(compat_ptr(data), &cursor->mask)) + return -EFAULT; + + if (copy_in_user(&cursor->hot, &cursor32->hot, sizeof(struct fbcurpos))) + return -EFAULT; + + if (__from_user_fb_image(&cursor->image, &cursor32->image)) + return -EFAULT; + + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) cursor, file); + return ret; +} + +static int mdss_fb_compat_set_lut(struct fb_info *info, unsigned long arg, + struct file *file) +{ + struct fb_cmap_user __user *cmap; + struct fb_cmap32 __user *cmap32; + __u32 data; + int ret; + + cmap = compat_alloc_user_space(sizeof(*cmap)); + cmap32 = compat_ptr(arg); + + if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32))) + return -EFAULT; + + if (get_user(data, &cmap32->red) 
|| + put_user(compat_ptr(data), &cmap->red) || + get_user(data, &cmap32->green) || + put_user(compat_ptr(data), &cmap->green) || + get_user(data, &cmap32->blue) || + put_user(compat_ptr(data), &cmap->blue) || + get_user(data, &cmap32->transp) || + put_user(compat_ptr(data), &cmap->transp)) + return -EFAULT; + + ret = mdss_fb_do_ioctl(info, MSMFB_SET_LUT, (unsigned long) cmap, file); + if (!ret) + pr_debug("%s: compat ioctl successful\n", __func__); + + return ret; +} + +static int __from_user_sharp_cfg( + struct mdp_sharp_cfg32 __user *sharp_cfg32, + struct mdp_sharp_cfg __user *sharp_cfg) +{ + if (copy_in_user(&sharp_cfg->flags, + &sharp_cfg32->flags, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg->strength, + &sharp_cfg32->strength, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg->edge_thr, + &sharp_cfg32->edge_thr, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg->smooth_thr, + &sharp_cfg32->smooth_thr, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg->noise_thr, + &sharp_cfg32->noise_thr, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_sharp_cfg( + struct mdp_sharp_cfg32 __user *sharp_cfg32, + struct mdp_sharp_cfg __user *sharp_cfg) +{ + if (copy_in_user(&sharp_cfg32->flags, + &sharp_cfg->flags, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg32->strength, + &sharp_cfg->strength, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg32->edge_thr, + &sharp_cfg->edge_thr, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg32->smooth_thr, + &sharp_cfg->smooth_thr, + sizeof(uint32_t)) || + copy_in_user(&sharp_cfg32->noise_thr, + &sharp_cfg->noise_thr, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_histogram_cfg( + struct mdp_histogram_cfg32 __user *hist_cfg32, + struct mdp_histogram_cfg __user *hist_cfg) +{ + if (copy_in_user(&hist_cfg->ops, + &hist_cfg32->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_cfg->block, + &hist_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_cfg->frame_cnt, + &hist_cfg32->frame_cnt, + sizeof(uint8_t)) || + copy_in_user(&hist_cfg->bit_mask, + &hist_cfg32->bit_mask, + sizeof(uint8_t)) || + copy_in_user(&hist_cfg->num_bins, + &hist_cfg32->num_bins, + sizeof(uint16_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_histogram_cfg( + struct mdp_histogram_cfg32 __user *hist_cfg32, + struct mdp_histogram_cfg __user *hist_cfg) +{ + if (copy_in_user(&hist_cfg32->ops, + &hist_cfg->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_cfg32->block, + &hist_cfg->block, + sizeof(uint32_t)) || + copy_in_user(&hist_cfg32->frame_cnt, + &hist_cfg->frame_cnt, + sizeof(uint8_t)) || + copy_in_user(&hist_cfg32->bit_mask, + &hist_cfg->bit_mask, + sizeof(uint8_t)) || + copy_in_user(&hist_cfg32->num_bins, + &hist_cfg->num_bins, + sizeof(uint16_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pcc_coeff( + struct mdp_pcc_coeff32 __user *pcc_coeff32, + struct mdp_pcc_coeff __user *pcc_coeff) +{ + if (copy_in_user(&pcc_coeff->c, + &pcc_coeff32->c, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->r, + &pcc_coeff32->r, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->g, + &pcc_coeff32->g, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->b, + &pcc_coeff32->b, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rr, + &pcc_coeff32->rr, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->gg, + &pcc_coeff32->gg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->bb, + &pcc_coeff32->bb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rg, + &pcc_coeff32->rg, + sizeof(uint32_t)) || + 
copy_in_user(&pcc_coeff->gb, + &pcc_coeff32->gb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rb, + &pcc_coeff32->rb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rgb_0, + &pcc_coeff32->rgb_0, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rgb_1, + &pcc_coeff32->rgb_1, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_pcc_coeff( + struct mdp_pcc_coeff32 __user *pcc_coeff32, + struct mdp_pcc_coeff __user *pcc_coeff) +{ + if (copy_in_user(&pcc_coeff32->c, + &pcc_coeff->c, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->r, + &pcc_coeff->r, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->g, + &pcc_coeff->g, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->b, + &pcc_coeff->b, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rr, + &pcc_coeff->rr, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->gg, + &pcc_coeff->gg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->bb, + &pcc_coeff->bb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rg, + &pcc_coeff->rg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->gb, + &pcc_coeff->gb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rb, + &pcc_coeff->rb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rgb_0, + &pcc_coeff->rgb_0, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rgb_1, + &pcc_coeff->rgb_1, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pcc_coeff_v17( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32; + struct mdp_pcc_data_v1_7 pcc_cfg_payload; + + if (copy_from_user(&pcc_cfg_payload32, + compat_ptr(pcc_cfg32->cfg_payload), + sizeof(struct mdp_pcc_data_v1_7_32))) { + pr_err("failed to copy payload for pcc from user\n"); + return -EFAULT; + } + + memset(&pcc_cfg_payload, 0, sizeof(pcc_cfg_payload)); + pcc_cfg_payload.r.b = pcc_cfg_payload32.r.b; + pcc_cfg_payload.r.g = pcc_cfg_payload32.r.g; + pcc_cfg_payload.r.c = pcc_cfg_payload32.r.c; + pcc_cfg_payload.r.r = pcc_cfg_payload32.r.r; + pcc_cfg_payload.r.gb = pcc_cfg_payload32.r.gb; + pcc_cfg_payload.r.rb = pcc_cfg_payload32.r.rb; + pcc_cfg_payload.r.rg = pcc_cfg_payload32.r.rg; + pcc_cfg_payload.r.rgb = pcc_cfg_payload32.r.rgb; + + pcc_cfg_payload.g.b = pcc_cfg_payload32.g.b; + pcc_cfg_payload.g.g = pcc_cfg_payload32.g.g; + pcc_cfg_payload.g.c = pcc_cfg_payload32.g.c; + pcc_cfg_payload.g.r = pcc_cfg_payload32.g.r; + pcc_cfg_payload.g.gb = pcc_cfg_payload32.g.gb; + pcc_cfg_payload.g.rb = pcc_cfg_payload32.g.rb; + pcc_cfg_payload.g.rg = pcc_cfg_payload32.g.rg; + pcc_cfg_payload.g.rgb = pcc_cfg_payload32.g.rgb; + + pcc_cfg_payload.b.b = pcc_cfg_payload32.b.b; + pcc_cfg_payload.b.g = pcc_cfg_payload32.b.g; + pcc_cfg_payload.b.c = pcc_cfg_payload32.b.c; + pcc_cfg_payload.b.r = pcc_cfg_payload32.b.r; + pcc_cfg_payload.b.gb = pcc_cfg_payload32.b.gb; + pcc_cfg_payload.b.rb = pcc_cfg_payload32.b.rb; + pcc_cfg_payload.b.rg = pcc_cfg_payload32.b.rg; + pcc_cfg_payload.b.rgb = pcc_cfg_payload32.b.rgb; + + if (copy_to_user(pcc_cfg->cfg_payload, &pcc_cfg_payload, + sizeof(pcc_cfg_payload))) { + pr_err("failed to copy payload for pcc to user\n"); + return -EFAULT; + } + return 0; +} + +static int __from_user_pcc_cfg_data( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + u32 version; + + if (copy_in_user(&pcc_cfg->block, + &pcc_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&pcc_cfg->ops, + &pcc_cfg32->ops, + sizeof(uint32_t)) || + 
copy_in_user(&pcc_cfg->version, + &pcc_cfg32->version, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_from_user(&version, &pcc_cfg32->version, sizeof(u32))) { + pr_err("failed to copy version for pcc\n"); + return -EFAULT; + } + + switch (version) { + case mdp_pcc_v1_7: + if (__from_user_pcc_coeff_v17(pcc_cfg32, pcc_cfg)) { + pr_err("failed to copy pcc v17 data\n"); + return -EFAULT; + } + break; + default: + pr_debug("pcc version %d not supported use legacy\n", version); + if (__from_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->r), + &pcc_cfg->r) || + __from_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->g), + &pcc_cfg->g) || + __from_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->b), + &pcc_cfg->b)) + return -EFAULT; + break; + } + return 0; +} + +static int __to_user_pcc_coeff_v1_7( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32; + struct mdp_pcc_data_v1_7 pcc_cfg_payload; + + memset(&pcc_cfg_payload32, 0, sizeof(pcc_cfg_payload32)); + if (copy_from_user(&pcc_cfg_payload, + pcc_cfg->cfg_payload, + sizeof(struct mdp_pcc_data_v1_7))) { + pr_err("failed to copy payload for pcc from user\n"); + return -EFAULT; + } + + pcc_cfg_payload32.r.b = pcc_cfg_payload.r.b; + pcc_cfg_payload32.r.g = pcc_cfg_payload.r.g; + pcc_cfg_payload32.r.c = pcc_cfg_payload.r.c; + pcc_cfg_payload32.r.r = pcc_cfg_payload.r.r; + pcc_cfg_payload32.r.gb = pcc_cfg_payload.r.gb; + pcc_cfg_payload32.r.rb = pcc_cfg_payload.r.rb; + pcc_cfg_payload32.r.rg = pcc_cfg_payload.r.rg; + pcc_cfg_payload32.r.rgb = pcc_cfg_payload.r.rgb; + + pcc_cfg_payload32.g.b = pcc_cfg_payload.g.b; + pcc_cfg_payload32.g.g = pcc_cfg_payload.g.g; + pcc_cfg_payload32.g.c = pcc_cfg_payload.g.c; + pcc_cfg_payload32.g.r = pcc_cfg_payload.g.r; + pcc_cfg_payload32.g.gb = pcc_cfg_payload.g.gb; + pcc_cfg_payload32.g.rb = pcc_cfg_payload.g.rb; + pcc_cfg_payload32.g.rg = pcc_cfg_payload.g.rg; + pcc_cfg_payload32.g.rgb = pcc_cfg_payload.g.rgb; + + pcc_cfg_payload32.b.b = pcc_cfg_payload.b.b; + pcc_cfg_payload32.b.g = pcc_cfg_payload.b.g; + pcc_cfg_payload32.b.c = pcc_cfg_payload.b.c; + pcc_cfg_payload32.b.r = pcc_cfg_payload.b.r; + pcc_cfg_payload32.b.gb = pcc_cfg_payload.b.gb; + pcc_cfg_payload32.b.rb = pcc_cfg_payload.b.rb; + pcc_cfg_payload32.b.rg = pcc_cfg_payload.b.rg; + pcc_cfg_payload32.b.rgb = pcc_cfg_payload.b.rgb; + + if (copy_to_user(compat_ptr(pcc_cfg32->cfg_payload), + &pcc_cfg_payload32, + sizeof(pcc_cfg_payload32))) { + pr_err("failed to copy payload for pcc to user\n"); + return -EFAULT; + } + + return 0; +} + + +static int __to_user_pcc_cfg_data( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + u32 version; + u32 ops; + + if (copy_from_user(&ops, &pcc_cfg->ops, sizeof(u32))) { + pr_err("failed to copy op for pcc\n"); + return -EFAULT; + } + + if (!(ops & MDP_PP_OPS_READ)) { + pr_debug("Read op is not set. 
Skipping compat copyback\n"); + return 0; + } + + if (copy_from_user(&version, &pcc_cfg->version, sizeof(u32))) { + pr_err("failed to copy version for pcc\n"); + return -EFAULT; + } + + switch (version) { + case mdp_pcc_v1_7: + if (__to_user_pcc_coeff_v1_7(pcc_cfg32, pcc_cfg)) { + pr_err("failed to copy pcc v1_7 data\n"); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + + if (__to_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->r), + &pcc_cfg->r) || + __to_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->g), + &pcc_cfg->g) || + __to_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->b), + &pcc_cfg->b)) + return -EFAULT; + break; + } + + return 0; +} + +static int __from_user_csc_cfg( + struct mdp_csc_cfg32 __user *csc_data32, + struct mdp_csc_cfg __user *csc_data) +{ + if (copy_in_user(&csc_data->flags, + &csc_data32->flags, + sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_mv[0], + &csc_data32->csc_mv[0], + 9 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_pre_bv[0], + &csc_data32->csc_pre_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_post_bv[0], + &csc_data32->csc_post_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_pre_lv[0], + &csc_data32->csc_pre_lv[0], + 6 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_post_lv[0], + &csc_data32->csc_post_lv[0], + 6 * sizeof(uint32_t))) + return -EFAULT; + + return 0; +} +static int __to_user_csc_cfg( + struct mdp_csc_cfg32 __user *csc_data32, + struct mdp_csc_cfg __user *csc_data) +{ + if (copy_in_user(&csc_data32->flags, + &csc_data->flags, + sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_mv[0], + &csc_data->csc_mv[0], + 9 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_pre_bv[0], + &csc_data->csc_pre_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_post_bv[0], + &csc_data->csc_post_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_pre_lv[0], + &csc_data->csc_pre_lv[0], + 6 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_post_lv[0], + &csc_data->csc_post_lv[0], + 6 * sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_csc_cfg_data( + struct mdp_csc_cfg_data32 __user *csc_cfg32, + struct mdp_csc_cfg_data __user *csc_cfg) +{ + if (copy_in_user(&csc_cfg->block, + &csc_cfg32->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__from_user_csc_cfg( + compat_ptr((uintptr_t)&csc_cfg32->csc_data), + &csc_cfg->csc_data)) + return -EFAULT; + + return 0; +} + +static int __to_user_csc_cfg_data( + struct mdp_csc_cfg_data32 __user *csc_cfg32, + struct mdp_csc_cfg_data __user *csc_cfg) +{ + if (copy_in_user(&csc_cfg32->block, + &csc_cfg->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__to_user_csc_cfg( + compat_ptr((uintptr_t)&csc_cfg32->csc_data), + &csc_cfg->csc_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_igc_lut_data_v17( + struct mdp_igc_lut_data32 __user *igc_lut32, + struct mdp_igc_lut_data __user *igc_lut) +{ + struct mdp_igc_lut_data_v1_7_32 igc_cfg_payload_32; + struct mdp_igc_lut_data_v1_7 igc_cfg_payload; + + if (copy_from_user(&igc_cfg_payload_32, + compat_ptr(igc_lut32->cfg_payload), + sizeof(igc_cfg_payload_32))) { + pr_err("failed to copy payload from user for igc\n"); + return -EFAULT; + } + + memset(&igc_cfg_payload, 0, sizeof(igc_cfg_payload)); + igc_cfg_payload.c0_c1_data = compat_ptr(igc_cfg_payload_32.c0_c1_data); + igc_cfg_payload.c2_data = compat_ptr(igc_cfg_payload_32.c2_data); + igc_cfg_payload.len = igc_cfg_payload_32.len; 
+ igc_cfg_payload.table_fmt = igc_cfg_payload_32.table_fmt; + if (copy_to_user(igc_lut->cfg_payload, &igc_cfg_payload, + sizeof(igc_cfg_payload))) { + pr_err("failed to copy payload to user for igc\n"); + return -EFAULT; + } + return 0; +} + +static int __from_user_igc_lut_data( + struct mdp_igc_lut_data32 __user *igc_lut32, + struct mdp_igc_lut_data __user *igc_lut) +{ + uint32_t data; + uint32_t version = mdp_igc_vmax; + int ret = 0; + + if (copy_in_user(&igc_lut->block, + &igc_lut32->block, + sizeof(uint32_t)) || + copy_in_user(&igc_lut->len, + &igc_lut32->len, + sizeof(uint32_t)) || + copy_in_user(&igc_lut->ops, + &igc_lut32->ops, + sizeof(uint32_t)) || + copy_in_user(&igc_lut->version, + &igc_lut32->version, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(version, &igc_lut32->version)) { + pr_err("failed to copy the version for IGC\n"); + return -EFAULT; + } + + switch (version) { + case mdp_igc_v1_7: + ret = __from_user_igc_lut_data_v17(igc_lut32, igc_lut); + if (ret) + pr_err("failed to copy payload for igc version %d ret %d\n", + version, ret); + break; + default: + pr_debug("version not supported fallback to legacy %d\n", + version); + if (get_user(data, &igc_lut32->c0_c1_data) || + put_user(compat_ptr(data), &igc_lut->c0_c1_data) || + get_user(data, &igc_lut32->c2_data) || + put_user(compat_ptr(data), &igc_lut->c2_data)) + return -EFAULT; + break; + } + return ret; +} + +static int __to_user_igc_lut_data( + struct mdp_igc_lut_data32 __user *igc_lut32, + struct mdp_igc_lut_data __user *igc_lut) +{ + unsigned long data; + + if (copy_in_user(&igc_lut32->block, + &igc_lut->block, + sizeof(uint32_t)) || + copy_in_user(&igc_lut32->len, + &igc_lut->len, + sizeof(uint32_t)) || + copy_in_user(&igc_lut32->ops, + &igc_lut->ops, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &igc_lut->c0_c1_data) || + put_user((compat_caddr_t) data, &igc_lut32->c0_c1_data) || + get_user(data, (unsigned long *) &igc_lut->c2_data) || + put_user((compat_caddr_t) data, &igc_lut32->c2_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_ar_gc_lut_data( + struct mdp_ar_gc_lut_data32 __user *ar_gc_data32, + struct mdp_ar_gc_lut_data __user *ar_gc_data) +{ + if (copy_in_user(&ar_gc_data->x_start, + &ar_gc_data32->x_start, + sizeof(uint32_t)) || + copy_in_user(&ar_gc_data->slope, + &ar_gc_data32->slope, + sizeof(uint32_t)) || + copy_in_user(&ar_gc_data->offset, + &ar_gc_data32->offset, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_ar_gc_lut_data( + struct mdp_ar_gc_lut_data32 __user *ar_gc_data32, + struct mdp_ar_gc_lut_data __user *ar_gc_data) +{ + if (copy_in_user(&ar_gc_data32->x_start, + &ar_gc_data->x_start, + sizeof(uint32_t)) || + copy_in_user(&ar_gc_data32->slope, + &ar_gc_data->slope, + sizeof(uint32_t)) || + copy_in_user(&ar_gc_data32->offset, + &ar_gc_data->offset, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + + +static int __from_user_pgc_lut_data_v1_7( + struct mdp_pgc_lut_data32 __user *pgc_lut32, + struct mdp_pgc_lut_data __user *pgc_lut) +{ + struct mdp_pgc_lut_data_v1_7_32 pgc_cfg_payload_32; + struct mdp_pgc_lut_data_v1_7 pgc_cfg_payload; + + if (copy_from_user(&pgc_cfg_payload_32, + compat_ptr(pgc_lut32->cfg_payload), + sizeof(pgc_cfg_payload_32))) { + pr_err("failed to copy from user the pgc32 payload\n"); + return -EFAULT; + } + memset(&pgc_cfg_payload, 0, sizeof(pgc_cfg_payload)); + pgc_cfg_payload.c0_data = compat_ptr(pgc_cfg_payload_32.c0_data); + pgc_cfg_payload.c1_data = 
compat_ptr(pgc_cfg_payload_32.c1_data);
+	pgc_cfg_payload.c2_data = compat_ptr(pgc_cfg_payload_32.c2_data);
+	pgc_cfg_payload.len = pgc_cfg_payload_32.len;
+	if (copy_to_user(pgc_lut->cfg_payload, &pgc_cfg_payload,
+			sizeof(pgc_cfg_payload))) {
+		pr_err("failed to copy to user pgc payload\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int __from_user_pgc_lut_data_legacy(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_ar_gc_lut_data32 __user *r_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *g_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *b_data_temp32;
+	struct mdp_ar_gc_lut_data __user *r_data_temp;
+	struct mdp_ar_gc_lut_data __user *g_data_temp;
+	struct mdp_ar_gc_lut_data __user *b_data_temp;
+	uint8_t num_r_stages, num_g_stages, num_b_stages;
+	int i;
+
+	if (copy_from_user(&num_r_stages,
+			&pgc_lut32->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_g_stages,
+			&pgc_lut32->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_b_stages,
+			&pgc_lut32->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	if (num_r_stages > GC_LUT_SEGMENTS || num_b_stages > GC_LUT_SEGMENTS
+	    || num_g_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages
+	    || !num_g_stages) {
+		pr_err("invalid number of stages r_stages %d b_stages %d g_stages %d\n",
+			num_r_stages, num_b_stages, num_g_stages);
+		return -EFAULT;
+	}
+
+	r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data);
+	r_data_temp = pgc_lut->r_data;
+
+	for (i = 0; i < num_r_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&r_data_temp32[i],
+				&r_data_temp[i]))
+			return -EFAULT;
+	}
+
+	g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data);
+	g_data_temp = pgc_lut->g_data;
+
+	for (i = 0; i < num_g_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&g_data_temp32[i],
+				&g_data_temp[i]))
+			return -EFAULT;
+	}
+
+	b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data);
+	b_data_temp = pgc_lut->b_data;
+
+	for (i = 0; i < num_b_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&b_data_temp32[i],
+				&b_data_temp[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int __from_user_pgc_lut_data(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	u32 version = mdp_pgc_vmax;
+	int ret = 0;
+
+	if (copy_in_user(&pgc_lut->block,
+			&pgc_lut32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut->flags,
+			&pgc_lut32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut->num_r_stages,
+			&pgc_lut32->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut->num_g_stages,
+			&pgc_lut32->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut->num_b_stages,
+			&pgc_lut32->num_b_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut->version,
+			&pgc_lut32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_from_user(&version, &pgc_lut32->version, sizeof(u32))) {
+		pr_err("version copying failed\n");
+		return -EFAULT;
+	}
+	switch (version) {
+	case mdp_pgc_v1_7:
+		ret = __from_user_pgc_lut_data_v1_7(pgc_lut32, pgc_lut);
+		if (ret)
+			pr_err("failed to copy pgc v17\n");
+		break;
+	default:
+		pr_debug("version %d not supported fallback to legacy\n",
+			version);
+		ret = __from_user_pgc_lut_data_legacy(pgc_lut32, pgc_lut);
+		if (ret)
+			pr_err("copy from user pgc lut legacy failed ret %d\n",
+				ret);
+		break;
+	}
+	return ret;
+}
+
+static int __to_user_pgc_lut_data(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_ar_gc_lut_data32 __user *r_data_temp32; +
struct mdp_ar_gc_lut_data32 __user *g_data_temp32; + struct mdp_ar_gc_lut_data32 __user *b_data_temp32; + struct mdp_ar_gc_lut_data __user *r_data_temp; + struct mdp_ar_gc_lut_data __user *g_data_temp; + struct mdp_ar_gc_lut_data __user *b_data_temp; + uint8_t num_r_stages, num_g_stages, num_b_stages; + int i; + + if (copy_in_user(&pgc_lut32->block, + &pgc_lut->block, + sizeof(uint32_t)) || + copy_in_user(&pgc_lut32->flags, + &pgc_lut->flags, + sizeof(uint32_t)) || + copy_in_user(&pgc_lut32->num_r_stages, + &pgc_lut->num_r_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut32->num_g_stages, + &pgc_lut->num_g_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut32->num_b_stages, + &pgc_lut->num_b_stages, + sizeof(uint8_t))) + return -EFAULT; + + if (copy_from_user(&num_r_stages, + &pgc_lut->num_r_stages, + sizeof(uint8_t)) || + copy_from_user(&num_g_stages, + &pgc_lut->num_g_stages, + sizeof(uint8_t)) || + copy_from_user(&num_b_stages, + &pgc_lut->num_b_stages, + sizeof(uint8_t))) + return -EFAULT; + + r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data); + r_data_temp = pgc_lut->r_data; + for (i = 0; i < num_r_stages; i++) { + if (__to_user_ar_gc_lut_data( + &r_data_temp32[i], + &r_data_temp[i])) + return -EFAULT; + } + + g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data); + g_data_temp = pgc_lut->g_data; + for (i = 0; i < num_g_stages; i++) { + if (__to_user_ar_gc_lut_data( + &g_data_temp32[i], + &g_data_temp[i])) + return -EFAULT; + } + + b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data); + b_data_temp = pgc_lut->b_data; + for (i = 0; i < num_b_stages; i++) { + if (__to_user_ar_gc_lut_data( + &b_data_temp32[i], + &b_data_temp[i])) + return -EFAULT; + } + + return 0; +} + +static int __from_user_hist_lut_data_v1_7( + struct mdp_hist_lut_data32 __user *hist_lut32, + struct mdp_hist_lut_data __user *hist_lut) +{ + struct mdp_hist_lut_data_v1_7_32 hist_lut_cfg_payload32; + struct mdp_hist_lut_data_v1_7 hist_lut_cfg_payload; + + if (copy_from_user(&hist_lut_cfg_payload32, + compat_ptr(hist_lut32->cfg_payload), + sizeof(hist_lut_cfg_payload32))) { + pr_err("failed to copy the Hist Lut payload from userspace\n"); + return -EFAULT; + } + + memset(&hist_lut_cfg_payload, 0, sizeof(hist_lut_cfg_payload)); + hist_lut_cfg_payload.len = hist_lut_cfg_payload32.len; + hist_lut_cfg_payload.data = compat_ptr(hist_lut_cfg_payload32.data); + + if (copy_to_user(hist_lut->cfg_payload, + &hist_lut_cfg_payload, + sizeof(hist_lut_cfg_payload))) { + pr_err("Failed to copy to user hist lut cfg payload\n"); + return -EFAULT; + } + + return 0; +} + +static int __from_user_hist_lut_data( + struct mdp_hist_lut_data32 __user *hist_lut32, + struct mdp_hist_lut_data __user *hist_lut) +{ + uint32_t version = 0; + uint32_t data; + + if (copy_in_user(&hist_lut->block, + &hist_lut32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->version, + &hist_lut32->version, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->hist_lut_first, + &hist_lut32->hist_lut_first, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->ops, + &hist_lut32->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->len, + &hist_lut32->len, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_from_user(&version, + &hist_lut32->version, + sizeof(uint32_t))) { + pr_err("failed to copy the version info\n"); + return -EFAULT; + } + + switch (version) { + case mdp_hist_lut_v1_7: + if (__from_user_hist_lut_data_v1_7(hist_lut32, hist_lut)) { + pr_err("failed to get hist lut data for version %d\n", + version); + return -EFAULT; + } + break; + 
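+	/*
+	 * Any other version falls back to the legacy layout, where the
+	 * LUT table is passed as a direct user pointer in hist_lut32->data
+	 * and only that pointer needs to be converted.
+	 */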
default: + pr_debug("version invalid, fallback to legacy\n"); + if (get_user(data, &hist_lut32->data) || + put_user(compat_ptr(data), &hist_lut->data)) + return -EFAULT; + break; + } + + return 0; +} + +static int __to_user_hist_lut_data( + struct mdp_hist_lut_data32 __user *hist_lut32, + struct mdp_hist_lut_data __user *hist_lut) +{ + unsigned long data; + + if (copy_in_user(&hist_lut32->block, + &hist_lut->block, + sizeof(uint32_t)) || + copy_in_user(&hist_lut32->ops, + &hist_lut->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_lut32->len, + &hist_lut->len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &hist_lut->data) || + put_user((compat_caddr_t) data, &hist_lut32->data)) + return -EFAULT; + + return 0; +} + +static int __from_user_rgb_lut_data( + struct mdp_rgb_lut_data32 __user *rgb_lut32, + struct mdp_rgb_lut_data __user *rgb_lut) +{ + if (copy_in_user(&rgb_lut->flags, &rgb_lut32->flags, + sizeof(uint32_t)) || + copy_in_user(&rgb_lut->lut_type, &rgb_lut32->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + return __from_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap); +} + +static int __to_user_rgb_lut_data( + struct mdp_rgb_lut_data32 __user *rgb_lut32, + struct mdp_rgb_lut_data __user *rgb_lut) +{ + if (copy_in_user(&rgb_lut32->flags, &rgb_lut->flags, + sizeof(uint32_t)) || + copy_in_user(&rgb_lut32->lut_type, &rgb_lut->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + return __to_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap); +} + +static int __from_user_lut_cfg_data( + struct mdp_lut_cfg_data32 __user *lut_cfg32, + struct mdp_lut_cfg_data __user *lut_cfg) +{ + uint32_t lut_type; + int ret = 0; + + if (copy_from_user(&lut_type, &lut_cfg32->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&lut_cfg->lut_type, + &lut_cfg32->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + switch (lut_type) { + case mdp_lut_igc: + ret = __from_user_igc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data), + &lut_cfg->data.igc_lut_data); + break; + case mdp_lut_pgc: + ret = __from_user_pgc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data), + &lut_cfg->data.pgc_lut_data); + break; + case mdp_lut_hist: + ret = __from_user_hist_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data), + &lut_cfg->data.hist_lut_data); + break; + case mdp_lut_rgb: + ret = __from_user_rgb_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data), + &lut_cfg->data.rgb_lut_data); + break; + default: + break; + } + + return ret; +} + +static int __to_user_lut_cfg_data( + struct mdp_lut_cfg_data32 __user *lut_cfg32, + struct mdp_lut_cfg_data __user *lut_cfg) +{ + uint32_t lut_type; + int ret = 0; + + if (copy_from_user(&lut_type, &lut_cfg->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&lut_cfg32->lut_type, + &lut_cfg->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + switch (lut_type) { + case mdp_lut_igc: + ret = __to_user_igc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data), + &lut_cfg->data.igc_lut_data); + break; + case mdp_lut_pgc: + ret = __to_user_pgc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data), + &lut_cfg->data.pgc_lut_data); + break; + case mdp_lut_hist: + ret = __to_user_hist_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data), + &lut_cfg->data.hist_lut_data); + break; + case mdp_lut_rgb: + ret = __to_user_rgb_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data), + &lut_cfg->data.rgb_lut_data); + break; + default: + break; 
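+		/* No copy-back is performed for unrecognized lut_type values. */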
+ } + + return ret; +} + +static int __from_user_qseed_cfg( + struct mdp_qseed_cfg32 __user *qseed_data32, + struct mdp_qseed_cfg __user *qseed_data) +{ + uint32_t data; + + if (copy_in_user(&qseed_data->table_num, + &qseed_data32->table_num, + sizeof(uint32_t)) || + copy_in_user(&qseed_data->ops, + &qseed_data32->ops, + sizeof(uint32_t)) || + copy_in_user(&qseed_data->len, + &qseed_data32->len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &qseed_data32->data) || + put_user(compat_ptr(data), &qseed_data->data)) + return -EFAULT; + + return 0; +} + +static int __to_user_qseed_cfg( + struct mdp_qseed_cfg32 __user *qseed_data32, + struct mdp_qseed_cfg __user *qseed_data) +{ + unsigned long data; + + if (copy_in_user(&qseed_data32->table_num, + &qseed_data->table_num, + sizeof(uint32_t)) || + copy_in_user(&qseed_data32->ops, + &qseed_data->ops, + sizeof(uint32_t)) || + copy_in_user(&qseed_data32->len, + &qseed_data->len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &qseed_data->data) || + put_user((compat_caddr_t) data, &qseed_data32->data)) + return -EFAULT; + + return 0; +} + +static int __from_user_qseed_cfg_data( + struct mdp_qseed_cfg_data32 __user *qseed_cfg32, + struct mdp_qseed_cfg_data __user *qseed_cfg) +{ + if (copy_in_user(&qseed_cfg->block, + &qseed_cfg32->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__from_user_qseed_cfg( + compat_ptr((uintptr_t)&qseed_cfg32->qseed_data), + &qseed_cfg->qseed_data)) + return -EFAULT; + + return 0; +} + +static int __to_user_qseed_cfg_data( + struct mdp_qseed_cfg_data32 __user *qseed_cfg32, + struct mdp_qseed_cfg_data __user *qseed_cfg) +{ + if (copy_in_user(&qseed_cfg32->block, + &qseed_cfg->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__to_user_qseed_cfg( + compat_ptr((uintptr_t)&qseed_cfg32->qseed_data), + &qseed_cfg->qseed_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_bl_scale_data( + struct mdp_bl_scale_data32 __user *bl_scale32, + struct mdp_bl_scale_data __user *bl_scale) +{ + if (copy_in_user(&bl_scale->min_lvl, + &bl_scale32->min_lvl, + sizeof(uint32_t)) || + copy_in_user(&bl_scale->scale, + &bl_scale32->scale, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pa_cfg( + struct mdp_pa_cfg32 __user *pa_data32, + struct mdp_pa_cfg __user *pa_data) +{ + if (copy_in_user(&pa_data->flags, + &pa_data32->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_data->hue_adj, + &pa_data32->hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data->sat_adj, + &pa_data32->sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data->val_adj, + &pa_data32->val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data->cont_adj, + &pa_data32->cont_adj, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_pa_cfg( + struct mdp_pa_cfg32 __user *pa_data32, + struct mdp_pa_cfg __user *pa_data) +{ + if (copy_in_user(&pa_data32->flags, + &pa_data->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->hue_adj, + &pa_data->hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->sat_adj, + &pa_data->sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->val_adj, + &pa_data->val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->cont_adj, + &pa_data->cont_adj, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pa_cfg_data( + struct mdp_pa_cfg_data32 __user *pa_cfg32, + struct mdp_pa_cfg_data __user *pa_cfg) +{ + if (copy_in_user(&pa_cfg->block, + &pa_cfg32->block, + sizeof(uint32_t))) + 
return -EFAULT; + if (__from_user_pa_cfg( + compat_ptr((uintptr_t)&pa_cfg32->pa_data), + &pa_cfg->pa_data)) + return -EFAULT; + + return 0; +} + +static int __to_user_pa_cfg_data( + struct mdp_pa_cfg_data32 __user *pa_cfg32, + struct mdp_pa_cfg_data __user *pa_cfg) +{ + if (copy_in_user(&pa_cfg32->block, + &pa_cfg->block, + sizeof(uint32_t))) + return -EFAULT; + if (__to_user_pa_cfg( + compat_ptr((uintptr_t)&pa_cfg32->pa_data), + &pa_cfg->pa_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_mem_col_cfg( + struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32, + struct mdp_pa_mem_col_cfg __user *mem_col_cfg) +{ + if (copy_in_user(&mem_col_cfg->color_adjust_p0, + &mem_col_cfg32->color_adjust_p0, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->color_adjust_p1, + &mem_col_cfg32->color_adjust_p1, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->hue_region, + &mem_col_cfg32->hue_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->sat_region, + &mem_col_cfg32->sat_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->val_region, + &mem_col_cfg32->val_region, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_mem_col_cfg( + struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32, + struct mdp_pa_mem_col_cfg __user *mem_col_cfg) +{ + if (copy_in_user(&mem_col_cfg32->color_adjust_p0, + &mem_col_cfg->color_adjust_p0, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->color_adjust_p1, + &mem_col_cfg->color_adjust_p1, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->hue_region, + &mem_col_cfg->hue_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->sat_region, + &mem_col_cfg->sat_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->val_region, + &mem_col_cfg->val_region, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pa_v2_data( + struct mdp_pa_v2_data32 __user *pa_v2_data32, + struct mdp_pa_v2_data __user *pa_v2_data) +{ + uint32_t data; + + if (copy_in_user(&pa_v2_data->flags, + &pa_v2_data32->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_hue_adj, + &pa_v2_data32->global_hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_sat_adj, + &pa_v2_data32->global_sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_val_adj, + &pa_v2_data32->global_val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_cont_adj, + &pa_v2_data32->global_cont_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->six_zone_thresh, + &pa_v2_data32->six_zone_thresh, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->six_zone_len, + &pa_v2_data32->six_zone_len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &pa_v2_data32->six_zone_curve_p0) || + put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p0) || + get_user(data, &pa_v2_data32->six_zone_curve_p1) || + put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p1)) + return -EFAULT; + + if (__from_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg), + &pa_v2_data->skin_cfg) || + __from_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg), + &pa_v2_data->sky_cfg) || + __from_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg), + &pa_v2_data->fol_cfg)) + return -EFAULT; + + return 0; +} + +static int __to_user_pa_v2_data( + struct mdp_pa_v2_data32 __user *pa_v2_data32, + struct mdp_pa_v2_data __user *pa_v2_data) +{ + unsigned long data; + + if (copy_in_user(&pa_v2_data32->flags, + &pa_v2_data->flags, + sizeof(uint32_t)) || + 
copy_in_user(&pa_v2_data32->global_hue_adj, + &pa_v2_data->global_hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_sat_adj, + &pa_v2_data->global_sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_val_adj, + &pa_v2_data->global_val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_cont_adj, + &pa_v2_data->global_cont_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->six_zone_thresh, + &pa_v2_data->six_zone_thresh, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->six_zone_len, + &pa_v2_data->six_zone_len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p0) || + put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p0) || + get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p1) || + put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p1)) + return -EFAULT; + + if (__to_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg), + &pa_v2_data->skin_cfg) || + __to_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg), + &pa_v2_data->sky_cfg) || + __to_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg), + &pa_v2_data->fol_cfg)) + return -EFAULT; + + return 0; +} + +static inline void __from_user_pa_mem_col_data_v1_7( + struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32, + struct mdp_pa_mem_col_data_v1_7 *mem_col_data) +{ + mem_col_data->color_adjust_p0 = mem_col_data32->color_adjust_p0; + mem_col_data->color_adjust_p1 = mem_col_data32->color_adjust_p1; + mem_col_data->color_adjust_p2 = mem_col_data32->color_adjust_p2; + mem_col_data->blend_gain = mem_col_data32->blend_gain; + mem_col_data->sat_hold = mem_col_data32->sat_hold; + mem_col_data->val_hold = mem_col_data32->val_hold; + mem_col_data->hue_region = mem_col_data32->hue_region; + mem_col_data->sat_region = mem_col_data32->sat_region; + mem_col_data->val_region = mem_col_data32->val_region; +} + + +static int __from_user_pa_data_v1_7( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + struct mdp_pa_data_v1_7_32 pa_cfg_payload32; + struct mdp_pa_data_v1_7 pa_cfg_payload; + + if (copy_from_user(&pa_cfg_payload32, + compat_ptr(pa_v2_cfg32->cfg_payload), + sizeof(pa_cfg_payload32))) { + pr_err("failed to copy the PA payload from userspace\n"); + return -EFAULT; + } + + memset(&pa_cfg_payload, 0, sizeof(pa_cfg_payload)); + pa_cfg_payload.mode = pa_cfg_payload32.mode; + pa_cfg_payload.global_hue_adj = pa_cfg_payload32.global_hue_adj; + pa_cfg_payload.global_sat_adj = pa_cfg_payload32.global_sat_adj; + pa_cfg_payload.global_val_adj = pa_cfg_payload32.global_val_adj; + pa_cfg_payload.global_cont_adj = pa_cfg_payload32.global_cont_adj; + + __from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg, + &pa_cfg_payload.skin_cfg); + __from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg, + &pa_cfg_payload.sky_cfg); + __from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg, + &pa_cfg_payload.fol_cfg); + + pa_cfg_payload.six_zone_thresh = pa_cfg_payload32.six_zone_thresh; + pa_cfg_payload.six_zone_adj_p0 = pa_cfg_payload32.six_zone_adj_p0; + pa_cfg_payload.six_zone_adj_p1 = pa_cfg_payload32.six_zone_adj_p1; + pa_cfg_payload.six_zone_sat_hold = pa_cfg_payload32.six_zone_sat_hold; + pa_cfg_payload.six_zone_val_hold = pa_cfg_payload32.six_zone_val_hold; + pa_cfg_payload.six_zone_len = pa_cfg_payload32.six_zone_len; + + pa_cfg_payload.six_zone_curve_p0 = + compat_ptr(pa_cfg_payload32.six_zone_curve_p0); + 
pa_cfg_payload.six_zone_curve_p1 = + compat_ptr(pa_cfg_payload32.six_zone_curve_p1); + + if (copy_to_user(pa_v2_cfg->cfg_payload, &pa_cfg_payload, + sizeof(pa_cfg_payload))) { + pr_err("Failed to copy to user pa cfg payload\n"); + return -EFAULT; + } + + return 0; +} + +static int __from_user_pa_v2_cfg_data( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + uint32_t version; + + if (copy_in_user(&pa_v2_cfg->block, + &pa_v2_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_cfg->version, + &pa_v2_cfg32->version, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_cfg->flags, + &pa_v2_cfg32->flags, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_from_user(&version, + &pa_v2_cfg32->version, + sizeof(uint32_t))) { + pr_err("failed to copy the version info\n"); + return -EFAULT; + } + + switch (version) { + case mdp_pa_v1_7: + if (__from_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) { + pr_err("failed to get pa data for version %d\n", + version); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + if (__from_user_pa_v2_data( + compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data), + &pa_v2_cfg->pa_v2_data)) + return -EFAULT; + break; + } + + return 0; +} + +static inline void __to_user_pa_mem_col_data_v1_7( + struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32, + struct mdp_pa_mem_col_data_v1_7 *mem_col_data) +{ + mem_col_data32->color_adjust_p0 = mem_col_data->color_adjust_p0; + mem_col_data32->color_adjust_p1 = mem_col_data->color_adjust_p1; + mem_col_data32->color_adjust_p2 = mem_col_data->color_adjust_p2; + mem_col_data32->blend_gain = mem_col_data->blend_gain; + mem_col_data32->sat_hold = mem_col_data->sat_hold; + mem_col_data32->val_hold = mem_col_data->val_hold; + mem_col_data32->hue_region = mem_col_data->hue_region; + mem_col_data32->sat_region = mem_col_data->sat_region; + mem_col_data32->val_region = mem_col_data->val_region; +} + +static int __to_user_pa_data_v1_7( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + struct mdp_pa_data_v1_7_32 pa_cfg_payload32; + struct mdp_pa_data_v1_7 pa_cfg_payload; + + memset(&pa_cfg_payload32, 0, sizeof(pa_cfg_payload32)); + if (copy_from_user(&pa_cfg_payload, + pa_v2_cfg->cfg_payload, + sizeof(pa_cfg_payload))) { + pr_err("failed to copy the PA payload from userspace\n"); + return -EFAULT; + } + + pa_cfg_payload32.mode = pa_cfg_payload.mode; + pa_cfg_payload32.global_hue_adj = pa_cfg_payload.global_hue_adj; + pa_cfg_payload32.global_sat_adj = pa_cfg_payload.global_sat_adj; + pa_cfg_payload32.global_val_adj = pa_cfg_payload.global_val_adj; + pa_cfg_payload32.global_cont_adj = pa_cfg_payload.global_cont_adj; + + __to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg, + &pa_cfg_payload.skin_cfg); + __to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg, + &pa_cfg_payload.sky_cfg); + __to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg, + &pa_cfg_payload.fol_cfg); + + pa_cfg_payload32.six_zone_thresh = pa_cfg_payload.six_zone_thresh; + pa_cfg_payload32.six_zone_adj_p0 = pa_cfg_payload.six_zone_adj_p0; + pa_cfg_payload32.six_zone_adj_p1 = pa_cfg_payload.six_zone_adj_p1; + pa_cfg_payload32.six_zone_sat_hold = pa_cfg_payload.six_zone_sat_hold; + pa_cfg_payload32.six_zone_val_hold = pa_cfg_payload.six_zone_val_hold; + pa_cfg_payload32.six_zone_len = pa_cfg_payload.six_zone_len; + + if (copy_to_user(compat_ptr(pa_v2_cfg32->cfg_payload), + &pa_cfg_payload32, + sizeof(pa_cfg_payload32))) { + 
pr_err("Failed to copy to user pa cfg payload\n"); + return -EFAULT; + } + + return 0; +} + +static int __to_user_pa_v2_cfg_data( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + uint32_t version = 0; + uint32_t flags = 0; + + if (copy_from_user(&version, + &pa_v2_cfg32->version, + sizeof(uint32_t))) + return -EFAULT; + + switch (version) { + case mdp_pa_v1_7: + if (copy_from_user(&flags, + &pa_v2_cfg32->flags, + sizeof(uint32_t))) { + pr_err("failed to get PA v1_7 flags\n"); + return -EFAULT; + } + + if (!(flags & MDP_PP_OPS_READ)) { + pr_debug("Read op not set. Skipping compat copyback\n"); + return 0; + } + + if (__to_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) { + pr_err("failed to set pa data for version %d\n", + version); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + + if (copy_from_user(&flags, + &pa_v2_cfg32->pa_v2_data.flags, + sizeof(uint32_t))) { + pr_err("failed to get PAv2 flags\n"); + return -EFAULT; + } + + if (!(flags & MDP_PP_OPS_READ)) { + pr_debug("Read op not set. Skipping compat copyback\n"); + return 0; + } + + if (__to_user_pa_v2_data( + compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data), + &pa_v2_cfg->pa_v2_data)) + return -EFAULT; + break; + } + + return 0; +} + +static int __from_user_dither_cfg_data( + struct mdp_dither_cfg_data32 __user *dither_cfg32, + struct mdp_dither_cfg_data __user *dither_cfg) +{ + if (copy_in_user(&dither_cfg->block, + &dither_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->flags, + &dither_cfg32->flags, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->g_y_depth, + &dither_cfg32->g_y_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->r_cr_depth, + &dither_cfg32->r_cr_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->b_cb_depth, + &dither_cfg32->b_cb_depth, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_dither_cfg_data( + struct mdp_dither_cfg_data32 __user *dither_cfg32, + struct mdp_dither_cfg_data __user *dither_cfg) +{ + if (copy_in_user(&dither_cfg32->block, + &dither_cfg->block, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->flags, + &dither_cfg->flags, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->g_y_depth, + &dither_cfg->g_y_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->r_cr_depth, + &dither_cfg->r_cr_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->b_cb_depth, + &dither_cfg->b_cb_depth, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_gamut_cfg_data_v17( + struct mdp_gamut_cfg_data32 __user *gamut_cfg32, + struct mdp_gamut_cfg_data __user *gamut_cfg) +{ + struct mdp_gamut_data_v1_7 gamut_cfg_payload; + struct mdp_gamut_data_v1_7_32 gamut_cfg_payload32; + u32 i = 0; + + if (copy_from_user(&gamut_cfg_payload32, + compat_ptr(gamut_cfg32->cfg_payload), + sizeof(gamut_cfg_payload32))) { + pr_err("failed to copy the gamut payload from userspace\n"); + return -EFAULT; + } + + memset(&gamut_cfg_payload, 0, sizeof(gamut_cfg_payload)); + gamut_cfg_payload.mode = gamut_cfg_payload32.mode; + for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) { + gamut_cfg_payload.tbl_size[i] = + gamut_cfg_payload32.tbl_size[i]; + gamut_cfg_payload.c0_data[i] = + compat_ptr(gamut_cfg_payload32.c0_data[i]); + gamut_cfg_payload.c1_c2_data[i] = + compat_ptr(gamut_cfg_payload32.c1_c2_data[i]); + } + for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) { + gamut_cfg_payload.tbl_scale_off_sz[i] = + 
		gamut_cfg_payload32.tbl_scale_off_sz[i];
+		gamut_cfg_payload.scale_off_data[i] =
+			compat_ptr(gamut_cfg_payload32.scale_off_data[i]);
+	}
+	if (copy_to_user(gamut_cfg->cfg_payload, &gamut_cfg_payload,
+			sizeof(gamut_cfg_payload))) {
+		pr_err("failed to copy the gamut payload to userspace\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int __from_user_gamut_cfg_data(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	uint32_t data, version;
+	int i;
+
+	if (copy_in_user(&gamut_cfg->block,
+			&gamut_cfg32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->flags,
+			&gamut_cfg32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->gamut_first,
+			&gamut_cfg32->gamut_first,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->tbl_size[0],
+			&gamut_cfg32->tbl_size[0],
+			MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->version,
+			&gamut_cfg32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_from_user(&version, &gamut_cfg32->version, sizeof(u32))) {
+		pr_err("failed to copy the version info\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_gamut_v1_7:
+		if (__from_user_gamut_cfg_data_v17(gamut_cfg32, gamut_cfg)) {
+			pr_err("failed to get the gamut data for version %d\n",
+				version);
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid fallback to legacy\n");
+		/* The Gamut LUT data contains 3 static arrays for R, G, and B
+		 * gamut data. Each of these arrays contains pointers to
+		 * dynamic arrays which hold the gamut LUTs for R, G, and B,
+		 * so the array of pointers must be copied from 32-bit to
+		 * 64-bit addresses.
+		 */
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->r_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->r_tbl[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->g_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->g_tbl[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->b_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->b_tbl[i]))
+				return -EFAULT;
+		}
+		break;
+	}
+	return 0;
+}
+
+static int __to_user_gamut_cfg_data(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	unsigned long data;
+	int i;
+
+	if (copy_in_user(&gamut_cfg32->block,
+			&gamut_cfg->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg32->flags,
+			&gamut_cfg->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg32->gamut_first,
+			&gamut_cfg->gamut_first,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg32->tbl_size[0],
+			&gamut_cfg->tbl_size[0],
+			MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)))
+		return -EFAULT;
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->r_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->r_tbl[i]))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->g_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->g_tbl[i]))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->b_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->b_tbl[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int __from_user_calib_config_data(
+			struct mdp_calib_config_data32 __user *calib_cfg32,
+			struct mdp_calib_config_data __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg->ops,
+			&calib_cfg32->ops,
+			sizeof(uint32_t))
|| + copy_in_user(&calib_cfg->addr, + &calib_cfg32->addr, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg->data, + &calib_cfg32->data, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_calib_config_data( + struct mdp_calib_config_data32 __user *calib_cfg32, + struct mdp_calib_config_data __user *calib_cfg) +{ + if (copy_in_user(&calib_cfg32->ops, + &calib_cfg->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg32->addr, + &calib_cfg->addr, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg32->data, + &calib_cfg->data, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_ad_init( + struct mdss_ad_init32 __user *ad_init32, + struct mdss_ad_init __user *ad_init) +{ + uint32_t data; + + if (copy_in_user(&ad_init->asym_lut[0], + &ad_init32->asym_lut[0], + 33 * sizeof(uint32_t)) || + copy_in_user(&ad_init->color_corr_lut[0], + &ad_init32->color_corr_lut[0], + 33 * sizeof(uint32_t)) || + copy_in_user(&ad_init->i_control[0], + &ad_init32->i_control[0], + 2 * sizeof(uint8_t)) || + copy_in_user(&ad_init->black_lvl, + &ad_init32->black_lvl, + sizeof(uint16_t)) || + copy_in_user(&ad_init->white_lvl, + &ad_init32->white_lvl, + sizeof(uint16_t)) || + copy_in_user(&ad_init->var, + &ad_init32->var, + sizeof(uint8_t)) || + copy_in_user(&ad_init->limit_ampl, + &ad_init32->limit_ampl, + sizeof(uint8_t)) || + copy_in_user(&ad_init->i_dither, + &ad_init32->i_dither, + sizeof(uint8_t)) || + copy_in_user(&ad_init->slope_max, + &ad_init32->slope_max, + sizeof(uint8_t)) || + copy_in_user(&ad_init->slope_min, + &ad_init32->slope_min, + sizeof(uint8_t)) || + copy_in_user(&ad_init->dither_ctl, + &ad_init32->dither_ctl, + sizeof(uint8_t)) || + copy_in_user(&ad_init->format, + &ad_init32->format, + sizeof(uint8_t)) || + copy_in_user(&ad_init->auto_size, + &ad_init32->auto_size, + sizeof(uint8_t)) || + copy_in_user(&ad_init->frame_w, + &ad_init32->frame_w, + sizeof(uint16_t)) || + copy_in_user(&ad_init->frame_h, + &ad_init32->frame_h, + sizeof(uint16_t)) || + copy_in_user(&ad_init->logo_v, + &ad_init32->logo_v, + sizeof(uint8_t)) || + copy_in_user(&ad_init->logo_h, + &ad_init32->logo_h, + sizeof(uint8_t)) || + copy_in_user(&ad_init->alpha, + &ad_init32->alpha, + sizeof(uint32_t)) || + copy_in_user(&ad_init->alpha_base, + &ad_init32->alpha_base, + sizeof(uint32_t)) || + copy_in_user(&ad_init->bl_lin_len, + &ad_init32->bl_lin_len, + sizeof(uint32_t)) || + copy_in_user(&ad_init->bl_att_len, + &ad_init32->bl_att_len, + sizeof(uint32_t))) + return -EFAULT; + + + if (get_user(data, &ad_init32->bl_lin) || + put_user(compat_ptr(data), &ad_init->bl_lin) || + get_user(data, &ad_init32->bl_lin_inv) || + put_user(compat_ptr(data), &ad_init->bl_lin_inv) || + get_user(data, &ad_init32->bl_att_lut) || + put_user(compat_ptr(data), &ad_init->bl_att_lut)) + return -EFAULT; + + return 0; +} + +static int __from_user_ad_cfg( + struct mdss_ad_cfg32 __user *ad_cfg32, + struct mdss_ad_cfg __user *ad_cfg) +{ + if (copy_in_user(&ad_cfg->mode, + &ad_cfg32->mode, + sizeof(uint32_t)) || + copy_in_user(&ad_cfg->al_calib_lut[0], + &ad_cfg32->al_calib_lut[0], + 33 * sizeof(uint32_t)) || + copy_in_user(&ad_cfg->backlight_min, + &ad_cfg32->backlight_min, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->backlight_max, + &ad_cfg32->backlight_max, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->backlight_scale, + &ad_cfg32->backlight_scale, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->amb_light_min, + &ad_cfg32->amb_light_min, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->filter[0], + 
&ad_cfg32->filter[0], + 2 * sizeof(uint16_t)) || + copy_in_user(&ad_cfg->calib[0], + &ad_cfg32->calib[0], + 4 * sizeof(uint16_t)) || + copy_in_user(&ad_cfg->strength_limit, + &ad_cfg32->strength_limit, + sizeof(uint8_t)) || + copy_in_user(&ad_cfg->t_filter_recursion, + &ad_cfg32->t_filter_recursion, + sizeof(uint8_t)) || + copy_in_user(&ad_cfg->stab_itr, + &ad_cfg32->stab_itr, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->bl_ctrl_mode, + &ad_cfg32->bl_ctrl_mode, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_ad_init_cfg( + struct mdss_ad_init_cfg32 __user *ad_info32, + struct mdss_ad_init_cfg __user *ad_info) +{ + uint32_t op; + + if (copy_from_user(&op, &ad_info32->ops, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&ad_info->ops, + &ad_info32->ops, + sizeof(uint32_t))) + return -EFAULT; + + if (op & MDP_PP_AD_INIT) { + if (__from_user_ad_init( + compat_ptr((uintptr_t)&ad_info32->params.init), + &ad_info->params.init)) + return -EFAULT; + } else if (op & MDP_PP_AD_CFG) { + if (__from_user_ad_cfg( + compat_ptr((uintptr_t)&ad_info32->params.cfg), + &ad_info->params.cfg)) + return -EFAULT; + } else { + pr_err("Invalid AD init/config operation\n"); + return -EINVAL; + } + + return 0; +} + +static int __from_user_ad_input( + struct mdss_ad_input32 __user *ad_input32, + struct mdss_ad_input __user *ad_input) +{ + int mode; + + if (copy_from_user(&mode, + &ad_input32->mode, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&ad_input->mode, + &ad_input32->mode, + sizeof(uint32_t)) || + copy_in_user(&ad_input->output, + &ad_input32->output, + sizeof(uint32_t))) + return -EFAULT; + + switch (mode) { + case MDSS_AD_MODE_AUTO_BL: + case MDSS_AD_MODE_AUTO_STR: + if (copy_in_user(&ad_input->in.amb_light, + &ad_input32->in.amb_light, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_TARG_STR: + case MDSS_AD_MODE_MAN_STR: + if (copy_in_user(&ad_input->in.strength, + &ad_input32->in.strength, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_CALIB: + if (copy_in_user(&ad_input->in.calib_bl, + &ad_input32->in.calib_bl, + sizeof(uint32_t))) + return -EFAULT; + break; + } + + return 0; +} + +static int __to_user_ad_input( + struct mdss_ad_input32 __user *ad_input32, + struct mdss_ad_input __user *ad_input) +{ + int mode; + + if (copy_from_user(&mode, + &ad_input->mode, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&ad_input32->mode, + &ad_input->mode, + sizeof(uint32_t)) || + copy_in_user(&ad_input32->output, + &ad_input->output, + sizeof(uint32_t))) + return -EFAULT; + + switch (mode) { + case MDSS_AD_MODE_AUTO_BL: + case MDSS_AD_MODE_AUTO_STR: + if (copy_in_user(&ad_input32->in.amb_light, + &ad_input->in.amb_light, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_TARG_STR: + case MDSS_AD_MODE_MAN_STR: + if (copy_in_user(&ad_input32->in.strength, + &ad_input->in.strength, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_CALIB: + if (copy_in_user(&ad_input32->in.calib_bl, + &ad_input->in.calib_bl, + sizeof(uint32_t))) + return -EFAULT; + break; + } + + return 0; +} + +static int __from_user_calib_cfg( + struct mdss_calib_cfg32 __user *calib_cfg32, + struct mdss_calib_cfg __user *calib_cfg) +{ + if (copy_in_user(&calib_cfg->ops, + &calib_cfg32->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg->calib_mask, + &calib_cfg32->calib_mask, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_calib_config_buffer( + struct 
mdp_calib_config_buffer32 __user *calib_buffer32, + struct mdp_calib_config_buffer __user *calib_buffer) +{ + uint32_t data; + + if (copy_in_user(&calib_buffer->ops, + &calib_buffer32->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_buffer->size, + &calib_buffer32->size, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &calib_buffer32->buffer) || + put_user(compat_ptr(data), &calib_buffer->buffer)) + return -EFAULT; + + return 0; +} + +static int __to_user_calib_config_buffer( + struct mdp_calib_config_buffer32 __user *calib_buffer32, + struct mdp_calib_config_buffer __user *calib_buffer) +{ + unsigned long data; + + if (copy_in_user(&calib_buffer32->ops, + &calib_buffer->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_buffer32->size, + &calib_buffer->size, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &calib_buffer->buffer) || + put_user((compat_caddr_t) data, &calib_buffer32->buffer)) + return -EFAULT; + + return 0; +} + +static int __from_user_calib_dcm_state( + struct mdp_calib_dcm_state32 __user *calib_dcm32, + struct mdp_calib_dcm_state __user *calib_dcm) +{ + if (copy_in_user(&calib_dcm->ops, + &calib_dcm32->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_dcm->dcm_state, + &calib_dcm32->dcm_state, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static u32 __pp_compat_size_igc(void) +{ + u32 alloc_size = 0; + /* When we have multiple versions pick largest struct size */ + alloc_size = sizeof(struct mdp_igc_lut_data_v1_7); + return alloc_size; +} + +static u32 __pp_compat_size_hist_lut(void) +{ + u32 alloc_size = 0; + /* When we have multiple versions pick largest struct size */ + alloc_size = sizeof(struct mdp_hist_lut_data_v1_7); + return alloc_size; +} + +static u32 __pp_compat_size_pgc(void) +{ + u32 tbl_sz_max = 0; + + tbl_sz_max = 3 * GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data); + tbl_sz_max += sizeof(struct mdp_pgc_lut_data_v1_7); + return tbl_sz_max; +} + +static u32 __pp_compat_size_pcc(void) +{ + /* if new version of PCC is added return max struct size */ + return sizeof(struct mdp_pcc_data_v1_7); +} + +static u32 __pp_compat_size_pa(void) +{ + /* if new version of PA is added return max struct size */ + return sizeof(struct mdp_pa_data_v1_7); +} + +static u32 __pp_compat_size_gamut(void) +{ + return sizeof(struct mdp_gamut_data_v1_7); +} + +static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, + struct msmfb_mdp_pp __user **pp, + uint32_t op) +{ + uint32_t alloc_size = 0, lut_type, pgc_size = 0; + + alloc_size = sizeof(struct msmfb_mdp_pp); + switch (op) { + case mdp_op_lut_cfg: + if (copy_from_user(&lut_type, + &pp32->data.lut_cfg_data.lut_type, + sizeof(uint32_t))) + return -EFAULT; + + switch (lut_type) { + case mdp_lut_pgc: + + pgc_size = GC_LUT_SEGMENTS * + sizeof(struct mdp_ar_gc_lut_data); + alloc_size += __pp_compat_size_pgc(); + + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) + return -ENOMEM; + memset(*pp, 0, alloc_size); + + (*pp)->data.lut_cfg_data.data.pgc_lut_data.r_data = + (struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp)); + (*pp)->data.lut_cfg_data.data.pgc_lut_data.g_data = + (struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp) + + pgc_size); + (*pp)->data.lut_cfg_data.data.pgc_lut_data.b_data = + (struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp) + + (2 * pgc_size)); + (*pp)->data.lut_cfg_data.data.pgc_lut_data.cfg_payload + = (void *)((unsigned long) *pp + 
+ sizeof(struct msmfb_mdp_pp) + + (3 * pgc_size)); + break; + case mdp_lut_igc: + alloc_size += __pp_compat_size_igc(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("failed to alloc from user size %d for igc\n", + alloc_size); + return -ENOMEM; + } + memset(*pp, 0, alloc_size); + (*pp)->data.lut_cfg_data.data.igc_lut_data.cfg_payload + = (void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)); + break; + case mdp_lut_hist: + alloc_size += __pp_compat_size_hist_lut(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("failed to alloc from user size %d for hist lut\n", + alloc_size); + return -ENOMEM; + } + memset(*pp, 0, alloc_size); + (*pp)->data.lut_cfg_data.data.hist_lut_data.cfg_payload + = (void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)); + break; + default: + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("failed to alloc from user size %d for lut_type %d\n", + alloc_size, lut_type); + return -ENOMEM; + } + memset(*pp, 0, alloc_size); + break; + } + break; + case mdp_op_pcc_cfg: + alloc_size += __pp_compat_size_pcc(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("alloc from user size %d for pcc fail\n", + alloc_size); + return -ENOMEM; + } + memset(*pp, 0, alloc_size); + (*pp)->data.pcc_cfg_data.cfg_payload = + (void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)); + break; + case mdp_op_gamut_cfg: + alloc_size += __pp_compat_size_gamut(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("alloc from user size %d for pcc fail\n", + alloc_size); + return -ENOMEM; + } + memset(*pp, 0, alloc_size); + (*pp)->data.gamut_cfg_data.cfg_payload = + (void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)); + break; + case mdp_op_pa_v2_cfg: + alloc_size += __pp_compat_size_pa(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("alloc from user size %d for pcc fail\n", + alloc_size); + return -ENOMEM; + } + memset(*pp, 0, alloc_size); + (*pp)->data.pa_v2_cfg_data.cfg_payload = + (void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)); + break; + default: + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) + return -ENOMEM; + memset(*pp, 0, alloc_size); + break; + } + return 0; +} + +static int mdss_compat_pp_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + uint32_t op; + int ret = 0; + struct msmfb_mdp_pp32 __user *pp32; + struct msmfb_mdp_pp __user *pp; + + pp32 = compat_ptr(arg); + if (copy_from_user(&op, &pp32->op, sizeof(uint32_t))) + return -EFAULT; + + ret = __pp_compat_alloc(pp32, &pp, op); + if (ret) + return ret; + + if (copy_in_user(&pp->op, &pp32->op, sizeof(uint32_t))) + return -EFAULT; + + switch (op) { + case mdp_op_pcc_cfg: + ret = __from_user_pcc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data), + &pp->data.pcc_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_pcc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data), + &pp->data.pcc_cfg_data); + break; + case mdp_op_csc_cfg: + ret = __from_user_csc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.csc_cfg_data), + &pp->data.csc_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_csc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.csc_cfg_data), + 
&pp->data.csc_cfg_data); + break; + case mdp_op_lut_cfg: + ret = __from_user_lut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.lut_cfg_data), + &pp->data.lut_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_lut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.lut_cfg_data), + &pp->data.lut_cfg_data); + break; + case mdp_op_qseed_cfg: + ret = __from_user_qseed_cfg_data( + compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data), + &pp->data.qseed_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_qseed_cfg_data( + compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data), + &pp->data.qseed_cfg_data); + break; + case mdp_bl_scale_cfg: + ret = __from_user_bl_scale_data( + compat_ptr((uintptr_t)&pp32->data.bl_scale_data), + &pp->data.bl_scale_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + case mdp_op_pa_cfg: + ret = __from_user_pa_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_cfg_data), + &pp->data.pa_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_pa_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_cfg_data), + &pp->data.pa_cfg_data); + break; + case mdp_op_pa_v2_cfg: + ret = __from_user_pa_v2_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data), + &pp->data.pa_v2_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_pa_v2_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data), + &pp->data.pa_v2_cfg_data); + break; + case mdp_op_dither_cfg: + ret = __from_user_dither_cfg_data( + compat_ptr((uintptr_t)&pp32->data.dither_cfg_data), + &pp->data.dither_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_dither_cfg_data( + compat_ptr((uintptr_t)&pp32->data.dither_cfg_data), + &pp->data.dither_cfg_data); + break; + case mdp_op_gamut_cfg: + ret = __from_user_gamut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data), + &pp->data.gamut_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_gamut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data), + &pp->data.gamut_cfg_data); + break; + case mdp_op_calib_cfg: + ret = __from_user_calib_config_data( + compat_ptr((uintptr_t)&pp32->data.calib_cfg), + &pp->data.calib_cfg); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_calib_config_data( + compat_ptr((uintptr_t)&pp32->data.calib_cfg), + &pp->data.calib_cfg); + break; + case mdp_op_ad_cfg: + ret = __from_user_ad_init_cfg( + compat_ptr((uintptr_t)&pp32->data.ad_init_cfg), + &pp->data.ad_init_cfg); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + case mdp_op_ad_input: + ret = __from_user_ad_input( + compat_ptr((uintptr_t)&pp32->data.ad_input), + &pp->data.ad_input); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_ad_input( + 
compat_ptr((uintptr_t)&pp32->data.ad_input), + &pp->data.ad_input); + break; + case mdp_op_calib_mode: + ret = __from_user_calib_cfg( + compat_ptr((uintptr_t)&pp32->data.mdss_calib_cfg), + &pp->data.mdss_calib_cfg); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + case mdp_op_calib_buffer: + ret = __from_user_calib_config_buffer( + compat_ptr((uintptr_t)&pp32->data.calib_buffer), + &pp->data.calib_buffer); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_calib_config_buffer( + compat_ptr((uintptr_t)&pp32->data.calib_buffer), + &pp->data.calib_buffer); + break; + case mdp_op_calib_dcm_state: + ret = __from_user_calib_dcm_state( + compat_ptr((uintptr_t)&pp32->data.calib_dcm), + &pp->data.calib_dcm); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + default: + break; + } + +pp_compat_exit: + return ret; +} + +static int __from_user_pp_params(struct mdp_overlay_pp_params32 *ppp32, + struct mdp_overlay_pp_params *ppp) +{ + int ret = 0; + + if (copy_in_user(&ppp->config_ops, + &ppp32->config_ops, + sizeof(uint32_t))) + return -EFAULT; + + ret = __from_user_csc_cfg( + compat_ptr((uintptr_t)&ppp32->csc_cfg), + &ppp->csc_cfg); + if (ret) + return ret; + ret = __from_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]), + &ppp->qseed_cfg[0]); + if (ret) + return ret; + ret = __from_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]), + &ppp->qseed_cfg[1]); + if (ret) + return ret; + ret = __from_user_pa_cfg( + compat_ptr((uintptr_t)&ppp32->pa_cfg), + &ppp->pa_cfg); + if (ret) + return ret; + ret = __from_user_igc_lut_data( + compat_ptr((uintptr_t)&ppp32->igc_cfg), + &ppp->igc_cfg); + if (ret) + return ret; + ret = __from_user_sharp_cfg( + compat_ptr((uintptr_t)&ppp32->sharp_cfg), + &ppp->sharp_cfg); + if (ret) + return ret; + ret = __from_user_histogram_cfg( + compat_ptr((uintptr_t)&ppp32->hist_cfg), + &ppp->hist_cfg); + if (ret) + return ret; + ret = __from_user_hist_lut_data( + compat_ptr((uintptr_t)&ppp32->hist_lut_cfg), + &ppp->hist_lut_cfg); + if (ret) + return ret; + ret = __from_user_pa_v2_data( + compat_ptr((uintptr_t)&ppp32->pa_v2_cfg), + &ppp->pa_v2_cfg); + + return ret; +} + +static int __to_user_pp_params(struct mdp_overlay_pp_params *ppp, + struct mdp_overlay_pp_params32 *ppp32) +{ + int ret = 0; + + if (copy_in_user(&ppp32->config_ops, + &ppp->config_ops, + sizeof(uint32_t))) + return -EFAULT; + + ret = __to_user_csc_cfg( + compat_ptr((uintptr_t)&ppp32->csc_cfg), + &ppp->csc_cfg); + if (ret) + return ret; + ret = __to_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]), + &ppp->qseed_cfg[0]); + if (ret) + return ret; + ret = __to_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]), + &ppp->qseed_cfg[1]); + if (ret) + return ret; + ret = __to_user_pa_cfg( + compat_ptr((uintptr_t)&ppp32->pa_cfg), + &ppp->pa_cfg); + if (ret) + return ret; + ret = __to_user_igc_lut_data( + compat_ptr((uintptr_t)&ppp32->igc_cfg), + &ppp->igc_cfg); + if (ret) + return ret; + ret = __to_user_sharp_cfg( + compat_ptr((uintptr_t)&ppp32->sharp_cfg), + &ppp->sharp_cfg); + if (ret) + return ret; + ret = __to_user_histogram_cfg( + compat_ptr((uintptr_t)&ppp32->hist_cfg), + &ppp->hist_cfg); + if (ret) + return ret; + ret = __to_user_hist_lut_data( + compat_ptr((uintptr_t)&ppp32->hist_lut_cfg), + &ppp->hist_lut_cfg); + if (ret) + return ret; + ret = 
__to_user_pa_v2_data( + compat_ptr((uintptr_t)&ppp32->pa_v2_cfg), + &ppp->pa_v2_cfg); + + return ret; +} + +static int __from_user_hist_start_req( + struct mdp_histogram_start_req32 __user *hist_req32, + struct mdp_histogram_start_req __user *hist_req) +{ + if (copy_in_user(&hist_req->block, + &hist_req32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_req->frame_cnt, + &hist_req32->frame_cnt, + sizeof(uint8_t)) || + copy_in_user(&hist_req->bit_mask, + &hist_req32->bit_mask, + sizeof(uint8_t)) || + copy_in_user(&hist_req->num_bins, + &hist_req32->num_bins, + sizeof(uint16_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_hist_data( + struct mdp_histogram_data32 __user *hist_data32, + struct mdp_histogram_data __user *hist_data) +{ + uint32_t data; + + if (copy_in_user(&hist_data->block, + &hist_data32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_data->bin_cnt, + &hist_data32->bin_cnt, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &hist_data32->c0) || + put_user(compat_ptr(data), &hist_data->c0) || + get_user(data, &hist_data32->c1) || + put_user(compat_ptr(data), &hist_data->c1) || + get_user(data, &hist_data32->c2) || + put_user(compat_ptr(data), &hist_data->c2) || + get_user(data, &hist_data32->extra_info) || + put_user(compat_ptr(data), &hist_data->extra_info)) + return -EFAULT; + + return 0; +} + +static int __to_user_hist_data( + struct mdp_histogram_data32 __user *hist_data32, + struct mdp_histogram_data __user *hist_data) +{ + unsigned long data; + + if (copy_in_user(&hist_data32->block, + &hist_data->block, + sizeof(uint32_t)) || + copy_in_user(&hist_data32->bin_cnt, + &hist_data->bin_cnt, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &hist_data->c0) || + put_user((compat_caddr_t) data, &hist_data32->c0) || + get_user(data, (unsigned long *) &hist_data->c1) || + put_user((compat_caddr_t) data, &hist_data32->c1) || + get_user(data, (unsigned long *) &hist_data->c2) || + put_user((compat_caddr_t) data, &hist_data32->c2) || + get_user(data, (unsigned long *) &hist_data->extra_info) || + put_user((compat_caddr_t) data, &hist_data32->extra_info)) + return -EFAULT; + + return 0; +} + +static int mdss_histo_compat_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct mdp_histogram_data __user *hist; + struct mdp_histogram_data32 __user *hist32; + struct mdp_histogram_start_req __user *hist_req; + struct mdp_histogram_start_req32 __user *hist_req32; + int ret = 0; + + switch (cmd) { + case MSMFB_HISTOGRAM_START: + hist_req32 = compat_ptr(arg); + hist_req = compat_alloc_user_space( + sizeof(struct mdp_histogram_start_req)); + if (!hist_req) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, + sizeof(struct mdp_histogram_start_req)); + return -EINVAL; + } + memset(hist_req, 0, sizeof(struct mdp_histogram_start_req)); + ret = __from_user_hist_start_req(hist_req32, hist_req); + if (ret) + goto histo_compat_err; + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) hist_req, file); + break; + case MSMFB_HISTOGRAM_STOP: + ret = mdss_fb_do_ioctl(info, cmd, arg, file); + break; + case MSMFB_HISTOGRAM: + hist32 = compat_ptr(arg); + hist = compat_alloc_user_space( + sizeof(struct mdp_histogram_data)); + if (!hist) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, + sizeof(struct mdp_histogram_data)); + return -EINVAL; + } + memset(hist, 0, sizeof(struct mdp_histogram_data)); + ret = __from_user_hist_data(hist32, hist); + if (ret) 
+ goto histo_compat_err; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) hist, file); + if (ret) + goto histo_compat_err; + ret = __to_user_hist_data(hist32, hist); + break; + default: + break; + } + +histo_compat_err: + return ret; +} + +static int __copy_layer_pp_info_qseed_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + pp_info->qseed_cfg[0].table_num = pp_info32->qseed_cfg[0].table_num; + pp_info->qseed_cfg[0].ops = pp_info32->qseed_cfg[0].ops; + pp_info->qseed_cfg[0].len = pp_info32->qseed_cfg[0].len; + pp_info->qseed_cfg[0].data = compat_ptr(pp_info32->qseed_cfg[0].data); + + pp_info->qseed_cfg[1].table_num = pp_info32->qseed_cfg[1].table_num; + pp_info->qseed_cfg[1].ops = pp_info32->qseed_cfg[1].ops; + pp_info->qseed_cfg[1].len = pp_info32->qseed_cfg[1].len; + pp_info->qseed_cfg[1].data = compat_ptr(pp_info32->qseed_cfg[1].data); + + return 0; +} + +static int __copy_layer_igc_lut_data_v1_7( + struct mdp_igc_lut_data_v1_7 *cfg_payload, + struct mdp_igc_lut_data_v1_7_32 __user *cfg_payload32) +{ + struct mdp_igc_lut_data_v1_7_32 local_cfg_payload32; + int ret = 0; + + ret = copy_from_user(&local_cfg_payload32, + cfg_payload32, + sizeof(struct mdp_igc_lut_data_v1_7_32)); + if (ret) { + pr_err("copy from user failed, IGC cfg payload = %pK\n", + cfg_payload32); + ret = -EFAULT; + goto exit; + } + + cfg_payload->table_fmt = local_cfg_payload32.table_fmt; + cfg_payload->len = local_cfg_payload32.len; + cfg_payload->c0_c1_data = compat_ptr(local_cfg_payload32.c0_c1_data); + cfg_payload->c2_data = compat_ptr(local_cfg_payload32.c2_data); + +exit: + return ret; +} + +static int __copy_layer_pp_info_igc_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->igc_cfg.block = pp_info32->igc_cfg.block; + pp_info->igc_cfg.version = pp_info32->igc_cfg.version; + pp_info->igc_cfg.ops = pp_info32->igc_cfg.ops; + + if (pp_info->igc_cfg.version != 0) { + payload_size = __pp_compat_size_igc(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->igc_cfg.version) { + case mdp_igc_v1_7: + ret = __copy_layer_igc_lut_data_v1_7(cfg_payload, + compat_ptr(pp_info32->igc_cfg.cfg_payload)); + if (ret) { + pr_err("compat copy of IGC cfg payload failed, ret %d\n", + ret); + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("No version set, fallback to legacy IGC version\n"); + pp_info->igc_cfg.len = pp_info32->igc_cfg.len; + pp_info->igc_cfg.c0_c1_data = + compat_ptr(pp_info32->igc_cfg.c0_c1_data); + pp_info->igc_cfg.c2_data = + compat_ptr(pp_info32->igc_cfg.c2_data); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->igc_cfg.cfg_payload = cfg_payload; + return ret; +} + +static int __copy_layer_hist_lut_data_v1_7( + struct mdp_hist_lut_data_v1_7 *cfg_payload, + struct mdp_hist_lut_data_v1_7_32 __user *cfg_payload32) +{ + struct mdp_hist_lut_data_v1_7_32 local_cfg_payload32; + int ret = 0; + + ret = copy_from_user(&local_cfg_payload32, + cfg_payload32, + sizeof(struct mdp_hist_lut_data_v1_7_32)); + if (ret) { + pr_err("copy from user failed, hist lut cfg_payload = %pK\n", + cfg_payload32); + ret = -EFAULT; + goto exit; + } + + cfg_payload->len = local_cfg_payload32.len; + cfg_payload->data = compat_ptr(local_cfg_payload32.data); +exit: + return ret; +} + +static int 
__copy_layer_pp_info_hist_lut_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->hist_lut_cfg.block = pp_info32->hist_lut_cfg.block; + pp_info->hist_lut_cfg.version = pp_info32->hist_lut_cfg.version; + pp_info->hist_lut_cfg.ops = pp_info32->hist_lut_cfg.ops; + pp_info->hist_lut_cfg.hist_lut_first = + pp_info32->hist_lut_cfg.hist_lut_first; + + if (pp_info->hist_lut_cfg.version != 0) { + payload_size = __pp_compat_size_hist_lut(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->hist_lut_cfg.version) { + case mdp_hist_lut_v1_7: + ret = __copy_layer_hist_lut_data_v1_7(cfg_payload, + compat_ptr(pp_info32->hist_lut_cfg.cfg_payload)); + if (ret) { + pr_err("compat copy of Hist LUT cfg payload failed, ret %d\n", + ret); + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + pp_info->hist_lut_cfg.len = pp_info32->hist_lut_cfg.len; + pp_info->hist_lut_cfg.data = + compat_ptr(pp_info32->hist_lut_cfg.data); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->hist_lut_cfg.cfg_payload = cfg_payload; + return ret; +} + +static int __copy_layer_pa_data_v1_7( + struct mdp_pa_data_v1_7 *cfg_payload, + struct mdp_pa_data_v1_7_32 __user *cfg_payload32) +{ + struct mdp_pa_data_v1_7_32 local_cfg_payload32; + int ret = 0; + + ret = copy_from_user(&local_cfg_payload32, + cfg_payload32, + sizeof(struct mdp_pa_data_v1_7_32)); + if (ret) { + pr_err("copy from user failed, pa cfg_payload = %pK\n", + cfg_payload32); + ret = -EFAULT; + goto exit; + } + + cfg_payload->mode = local_cfg_payload32.mode; + cfg_payload->global_hue_adj = local_cfg_payload32.global_hue_adj; + cfg_payload->global_sat_adj = local_cfg_payload32.global_sat_adj; + cfg_payload->global_val_adj = local_cfg_payload32.global_val_adj; + cfg_payload->global_cont_adj = local_cfg_payload32.global_cont_adj; + + memcpy(&cfg_payload->skin_cfg, &local_cfg_payload32.skin_cfg, + sizeof(struct mdp_pa_mem_col_data_v1_7)); + memcpy(&cfg_payload->sky_cfg, &local_cfg_payload32.sky_cfg, + sizeof(struct mdp_pa_mem_col_data_v1_7)); + memcpy(&cfg_payload->fol_cfg, &local_cfg_payload32.fol_cfg, + sizeof(struct mdp_pa_mem_col_data_v1_7)); + + cfg_payload->six_zone_thresh = local_cfg_payload32.six_zone_thresh; + cfg_payload->six_zone_adj_p0 = local_cfg_payload32.six_zone_adj_p0; + cfg_payload->six_zone_adj_p1 = local_cfg_payload32.six_zone_adj_p1; + cfg_payload->six_zone_sat_hold = local_cfg_payload32.six_zone_sat_hold; + cfg_payload->six_zone_val_hold = local_cfg_payload32.six_zone_val_hold; + cfg_payload->six_zone_len = local_cfg_payload32.six_zone_len; + + cfg_payload->six_zone_curve_p0 = + compat_ptr(local_cfg_payload32.six_zone_curve_p0); + cfg_payload->six_zone_curve_p1 = + compat_ptr(local_cfg_payload32.six_zone_curve_p1); +exit: + return ret; +} + +static int __copy_layer_pp_info_pa_v2_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->pa_v2_cfg_data.block = pp_info32->pa_v2_cfg_data.block; + pp_info->pa_v2_cfg_data.version = pp_info32->pa_v2_cfg_data.version; + pp_info->pa_v2_cfg_data.flags = pp_info32->pa_v2_cfg_data.flags; + + if (pp_info->pa_v2_cfg_data.version != 0) { + payload_size = __pp_compat_size_pa(); + + 
cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->pa_v2_cfg_data.version) { + case mdp_pa_v1_7: + ret = __copy_layer_pa_data_v1_7(cfg_payload, + compat_ptr(pp_info32->pa_v2_cfg_data.cfg_payload)); + if (ret) { + pr_err("compat copy of PA cfg payload failed, ret %d\n", + ret); + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("version invalid\n"); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->pa_v2_cfg_data.cfg_payload = cfg_payload; + return ret; +} + +static int __copy_layer_pp_info_legacy_pa_v2_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + pp_info->pa_v2_cfg.global_hue_adj = + pp_info32->pa_v2_cfg.global_hue_adj; + pp_info->pa_v2_cfg.global_sat_adj = + pp_info32->pa_v2_cfg.global_sat_adj; + pp_info->pa_v2_cfg.global_val_adj = + pp_info32->pa_v2_cfg.global_val_adj; + pp_info->pa_v2_cfg.global_cont_adj = + pp_info32->pa_v2_cfg.global_cont_adj; + + memcpy(&pp_info->pa_v2_cfg.skin_cfg, + &pp_info32->pa_v2_cfg.skin_cfg, + sizeof(struct mdp_pa_mem_col_cfg)); + memcpy(&pp_info->pa_v2_cfg.sky_cfg, + &pp_info32->pa_v2_cfg.sky_cfg, + sizeof(struct mdp_pa_mem_col_cfg)); + memcpy(&pp_info->pa_v2_cfg.fol_cfg, + &pp_info32->pa_v2_cfg.fol_cfg, + sizeof(struct mdp_pa_mem_col_cfg)); + + pp_info->pa_v2_cfg.six_zone_thresh = + pp_info32->pa_v2_cfg.six_zone_thresh; + pp_info->pa_v2_cfg.six_zone_len = + pp_info32->pa_v2_cfg.six_zone_len; + + pp_info->pa_v2_cfg.six_zone_curve_p0 = + compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p0); + pp_info->pa_v2_cfg.six_zone_curve_p1 = + compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p1); + + return 0; +} + +static int __copy_layer_pp_info_pcc_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->pcc_cfg_data.block = pp_info32->pcc_cfg_data.block; + pp_info->pcc_cfg_data.version = pp_info32->pcc_cfg_data.version; + pp_info->pcc_cfg_data.ops = pp_info32->pcc_cfg_data.ops; + + if (pp_info->pcc_cfg_data.version != 0) { + payload_size = __pp_compat_size_pcc(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->pcc_cfg_data.version) { + case mdp_pcc_v1_7: + ret = copy_from_user(cfg_payload, + compat_ptr(pp_info32->pcc_cfg_data.cfg_payload), + sizeof(struct mdp_pcc_data_v1_7)); + if (ret) { + pr_err("compat copy of PCC cfg payload failed, ptr %pK\n", + compat_ptr( + pp_info32->pcc_cfg_data.cfg_payload)); + ret = -EFAULT; + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->pcc_cfg_data.cfg_payload = cfg_payload; + return ret; +} + + +static int __copy_layer_pp_info_params(struct mdp_input_layer *layer, + struct mdp_input_layer32 *layer32) +{ + struct mdp_overlay_pp_params *pp_info; + struct mdp_overlay_pp_params32 pp_info32; + int ret = 0; + + if (!(layer->flags & MDP_LAYER_PP)) + return 0; + + ret = copy_from_user(&pp_info32, + compat_ptr(layer32->pp_info), + sizeof(struct mdp_overlay_pp_params32)); + if (ret) { + pr_err("pp info copy from user failed, pp_info %pK\n", + compat_ptr(layer32->pp_info)); + ret = -EFAULT; + goto exit; + } + + pp_info = kmalloc(sizeof(struct mdp_overlay_pp_params), GFP_KERNEL); + if 
(!pp_info) { + ret = -ENOMEM; + goto exit; + } + memset(pp_info, 0, sizeof(struct mdp_overlay_pp_params)); + + pp_info->config_ops = pp_info32.config_ops; + + memcpy(&pp_info->csc_cfg, &pp_info32.csc_cfg, + sizeof(struct mdp_csc_cfg)); + memcpy(&pp_info->sharp_cfg, &pp_info32.sharp_cfg, + sizeof(struct mdp_sharp_cfg)); + memcpy(&pp_info->hist_cfg, &pp_info32.hist_cfg, + sizeof(struct mdp_histogram_cfg)); + memcpy(&pp_info->pa_cfg, &pp_info32.pa_cfg, + sizeof(struct mdp_pa_cfg)); + + ret = __copy_layer_pp_info_qseed_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info QSEED params failed, ret %d\n", + ret); + goto exit_pp_info; + } + ret = __copy_layer_pp_info_legacy_pa_v2_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info Legacy PAv2 params failed, ret %d\n", + ret); + goto exit_pp_info; + } + ret = __copy_layer_pp_info_igc_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info IGC params failed, ret %d\n", + ret); + goto exit_pp_info; + } + ret = __copy_layer_pp_info_hist_lut_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info Hist LUT params failed, ret %d\n", + ret); + goto exit_igc; + } + ret = __copy_layer_pp_info_pa_v2_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info PAv2 params failed, ret %d\n", + ret); + goto exit_hist_lut; + } + ret = __copy_layer_pp_info_pcc_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info PCC params failed, ret %d\n", + ret); + goto exit_pa; + } + + layer->pp_info = pp_info; + + return ret; + +exit_pa: + kfree(pp_info->pa_v2_cfg_data.cfg_payload); +exit_hist_lut: + kfree(pp_info->hist_lut_cfg.cfg_payload); +exit_igc: + kfree(pp_info->igc_cfg.cfg_payload); +exit_pp_info: + kfree(pp_info); +exit: + return ret; +} + + +static int __to_user_mdp_overlay(struct mdp_overlay32 __user *ov32, + struct mdp_overlay __user *ov) +{ + int ret = 0; + + ret = copy_in_user(&ov32->src, &ov->src, sizeof(ov32->src)) || + copy_in_user(&ov32->src_rect, + &ov->src_rect, sizeof(ov32->src_rect)) || + copy_in_user(&ov32->dst_rect, + &ov->dst_rect, sizeof(ov32->dst_rect)); + if (ret) + return -EFAULT; + + ret |= put_user(ov->z_order, &ov32->z_order); + ret |= put_user(ov->is_fg, &ov32->is_fg); + ret |= put_user(ov->alpha, &ov32->alpha); + ret |= put_user(ov->blend_op, &ov32->blend_op); + ret |= put_user(ov->transp_mask, &ov32->transp_mask); + ret |= put_user(ov->flags, &ov32->flags); + ret |= put_user(ov->id, &ov32->id); + ret |= put_user(ov->priority, &ov32->priority); + if (ret) + return -EFAULT; + + ret = copy_in_user(&ov32->user_data, &ov->user_data, + sizeof(ov32->user_data)); + if (ret) + return -EFAULT; + + ret |= put_user(ov->horz_deci, &ov32->horz_deci); + ret |= put_user(ov->vert_deci, &ov32->vert_deci); + if (ret) + return -EFAULT; + + ret = __to_user_pp_params( + &ov->overlay_pp_cfg, + compat_ptr((uintptr_t) &ov32->overlay_pp_cfg)); + if (ret) + return -EFAULT; + + ret = copy_in_user(&ov32->scale, &ov->scale, + sizeof(struct mdp_scale_data)); + if (ret) + return -EFAULT; + + ret = put_user(ov->frame_rate, &ov32->frame_rate); + if (ret) + return -EFAULT; + + return 0; +} + + +static int __from_user_mdp_overlay(struct mdp_overlay *ov, + struct mdp_overlay32 __user *ov32) +{ + __u32 data; + + if (copy_in_user(&ov->src, &ov32->src, + sizeof(ov32->src)) || + copy_in_user(&ov->src_rect, &ov32->src_rect, + sizeof(ov32->src_rect)) || + copy_in_user(&ov->dst_rect, &ov32->dst_rect, + sizeof(ov32->dst_rect))) + return -EFAULT; + + if (get_user(data, &ov32->z_order) 
|| + put_user(data, &ov->z_order) || + get_user(data, &ov32->is_fg) || + put_user(data, &ov->is_fg) || + get_user(data, &ov32->alpha) || + put_user(data, &ov->alpha) || + get_user(data, &ov32->blend_op) || + put_user(data, &ov->blend_op) || + get_user(data, &ov32->transp_mask) || + put_user(data, &ov->transp_mask) || + get_user(data, &ov32->flags) || + put_user(data, &ov->flags) || + get_user(data, &ov32->pipe_type) || + put_user(data, &ov->pipe_type) || + get_user(data, &ov32->id) || + put_user(data, &ov->id) || + get_user(data, &ov32->priority) || + put_user(data, &ov->priority)) + return -EFAULT; + + if (copy_in_user(&ov->user_data, &ov32->user_data, + sizeof(ov32->user_data))) + return -EFAULT; + + if (get_user(data, &ov32->horz_deci) || + put_user(data, &ov->horz_deci) || + get_user(data, &ov32->vert_deci) || + put_user(data, &ov->vert_deci)) + return -EFAULT; + + if (__from_user_pp_params( + compat_ptr((uintptr_t) &ov32->overlay_pp_cfg), + &ov->overlay_pp_cfg)) + return -EFAULT; + + if (copy_in_user(&ov->scale, &ov32->scale, + sizeof(struct mdp_scale_data))) + return -EFAULT; + + if (get_user(data, &ov32->frame_rate) || + put_user(data, &ov->frame_rate)) + return -EFAULT; + + return 0; +} + +static int __from_user_mdp_overlaylist(struct mdp_overlay_list *ovlist, + struct mdp_overlay_list32 *ovlist32, + struct mdp_overlay **to_list_head) +{ + __u32 i, ret; + unsigned long data, from_list_head; + struct mdp_overlay32 *iter; + + if (!to_list_head || !ovlist32 || !ovlist) { + pr_err("%s:%u: null error\n", __func__, __LINE__); + return -EINVAL; + } + + if (copy_in_user(&ovlist->num_overlays, &ovlist32->num_overlays, + sizeof(ovlist32->num_overlays))) + return -EFAULT; + + if (copy_in_user(&ovlist->flags, &ovlist32->flags, + sizeof(ovlist32->flags))) + return -EFAULT; + + if (copy_in_user(&ovlist->processed_overlays, + &ovlist32->processed_overlays, + sizeof(ovlist32->processed_overlays))) + return -EFAULT; + + if (get_user(data, &ovlist32->overlay_list)) { + ret = -EFAULT; + goto validate_exit; + } + for (i = 0; i < ovlist32->num_overlays; i++) { + if (get_user(from_list_head, (__u32 *)data + i)) { + ret = -EFAULT; + goto validate_exit; + } + + iter = compat_ptr(from_list_head); + if (__from_user_mdp_overlay(to_list_head[i], + (struct mdp_overlay32 *)(iter))) { + ret = -EFAULT; + goto validate_exit; + } + } + ovlist->overlay_list = to_list_head; + + return 0; + +validate_exit: + pr_err("%s: %u: copy error\n", __func__, __LINE__); + return -EFAULT; +} + +static int __to_user_mdp_overlaylist(struct mdp_overlay_list32 *ovlist32, + struct mdp_overlay_list *ovlist, + struct mdp_overlay **l_ptr) +{ + __u32 i, ret; + unsigned long data, data1; + struct mdp_overlay32 *temp; + struct mdp_overlay *l = l_ptr[0]; + + if (copy_in_user(&ovlist32->num_overlays, &ovlist->num_overlays, + sizeof(ovlist32->num_overlays))) + return -EFAULT; + + if (get_user(data, &ovlist32->overlay_list)) { + ret = -EFAULT; + pr_err("%s:%u: err\n", __func__, __LINE__); + goto validate_exit; + } + + for (i = 0; i < ovlist32->num_overlays; i++) { + if (get_user(data1, (__u32 *)data + i)) { + ret = -EFAULT; + goto validate_exit; + } + temp = compat_ptr(data1); + if (__to_user_mdp_overlay( + (struct mdp_overlay32 *) temp, + l + i)) { + ret = -EFAULT; + goto validate_exit; + } + } + + if (copy_in_user(&ovlist32->flags, &ovlist->flags, + sizeof(ovlist32->flags))) + return -EFAULT; + + if (copy_in_user(&ovlist32->processed_overlays, + &ovlist->processed_overlays, + sizeof(ovlist32->processed_overlays))) + return -EFAULT; + + return 
0; + +validate_exit: + pr_err("%s: %u: copy error\n", __func__, __LINE__); + return -EFAULT; + +} + +void mdss_compat_align_list(void __user *total_mem_chunk, + struct mdp_overlay __user **list_ptr, u32 num_ov) +{ + int i = 0; + struct mdp_overlay __user *contig_overlays; + + contig_overlays = total_mem_chunk + sizeof(struct mdp_overlay_list) + + (num_ov * sizeof(struct mdp_overlay *)); + + for (i = 0; i < num_ov; i++) + list_ptr[i] = contig_overlays + i; +} + +static u32 __pp_sspp_size(void) +{ + u32 size = 0; + /* pick the largest of the revision when multiple revs are supported */ + size = sizeof(struct mdp_igc_lut_data_v1_7); + size += sizeof(struct mdp_pa_data_v1_7); + size += sizeof(struct mdp_pcc_data_v1_7); + size += sizeof(struct mdp_hist_lut_data_v1_7); + return size; +} + +static int __pp_sspp_set_offsets(struct mdp_overlay *ov) +{ + if (!ov) { + pr_err("invalid overlay pointer\n"); + return -EFAULT; + } + ov->overlay_pp_cfg.igc_cfg.cfg_payload = (void *)((unsigned long)ov + + sizeof(struct mdp_overlay)); + ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload = + ov->overlay_pp_cfg.igc_cfg.cfg_payload + + sizeof(struct mdp_igc_lut_data_v1_7); + ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload = + ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload + + sizeof(struct mdp_pa_data_v1_7); + ov->overlay_pp_cfg.hist_lut_cfg.cfg_payload = + ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload + + sizeof(struct mdp_pcc_data_v1_7); + return 0; +} + +int mdss_compat_overlay_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct mdp_overlay *ov, **layers_head; + struct mdp_overlay32 *ov32; + struct mdp_overlay_list __user *ovlist; + struct mdp_overlay_list32 __user *ovlist32; + size_t layers_refs_sz, layers_sz, prepare_sz; + void __user *total_mem_chunk; + uint32_t num_overlays; + uint32_t alloc_size = 0; + int ret; + + if (!info || !info->par) + return -EINVAL; + + + switch (cmd) { + case MSMFB_MDP_PP: + ret = mdss_compat_pp_ioctl(info, cmd, arg, file); + break; + case MSMFB_HISTOGRAM_START: + case MSMFB_HISTOGRAM_STOP: + case MSMFB_HISTOGRAM: + ret = mdss_histo_compat_ioctl(info, cmd, arg, file); + break; + case MSMFB_OVERLAY_GET: + alloc_size += sizeof(*ov) + __pp_sspp_size(); + ov = compat_alloc_user_space(alloc_size); + if (!ov) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*ov)); + return -EINVAL; + } + ov32 = compat_ptr(arg); + ret = __pp_sspp_set_offsets(ov); + if (ret) { + pr_err("setting the pp offsets failed ret %d\n", ret); + return ret; + } + ret = __from_user_mdp_overlay(ov, ov32); + if (ret) + pr_err("%s: compat mdp overlay failed\n", __func__); + else + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) ov, file); + ret = __to_user_mdp_overlay(ov32, ov); + break; + case MSMFB_OVERLAY_SET: + alloc_size += sizeof(*ov) + __pp_sspp_size(); + ov = compat_alloc_user_space(alloc_size); + if (!ov) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*ov)); + return -EINVAL; + } + ret = __pp_sspp_set_offsets(ov); + if (ret) { + pr_err("setting the pp offsets failed ret %d\n", ret); + return ret; + } + ov32 = compat_ptr(arg); + ret = __from_user_mdp_overlay(ov, ov32); + if (ret) { + pr_err("%s: compat mdp overlay failed\n", __func__); + } else { + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) ov, file); + ret = __to_user_mdp_overlay(ov32, ov); + } + break; + case MSMFB_OVERLAY_PREPARE: + ovlist32 = compat_ptr(arg); + if (get_user(num_overlays, &ovlist32->num_overlays)) { + pr_err("compat mdp 
prepare failed: invalid arg\n"); + return -EFAULT; + } + + if (num_overlays >= OVERLAY_MAX) { + pr_err("%s: No. of overlays exceeds max\n", __func__); + return -EINVAL; + } + + layers_sz = num_overlays * sizeof(struct mdp_overlay); + prepare_sz = sizeof(struct mdp_overlay_list); + layers_refs_sz = num_overlays * sizeof(struct mdp_overlay *); + + total_mem_chunk = compat_alloc_user_space( + prepare_sz + layers_refs_sz + layers_sz); + if (!total_mem_chunk) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, + layers_refs_sz + layers_sz + prepare_sz); + return -EINVAL; + } + + layers_head = total_mem_chunk + prepare_sz; + mdss_compat_align_list(total_mem_chunk, layers_head, + num_overlays); + ovlist = (struct mdp_overlay_list *)total_mem_chunk; + + ret = __from_user_mdp_overlaylist(ovlist, ovlist32, + layers_head); + if (ret) { + pr_err("compat mdp overlaylist failed\n"); + } else { + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) ovlist, file); + if (!ret) + ret = __to_user_mdp_overlaylist(ovlist32, + ovlist, layers_head); + } + break; + case MSMFB_OVERLAY_UNSET: + case MSMFB_OVERLAY_PLAY: + case MSMFB_OVERLAY_VSYNC_CTRL: + case MSMFB_METADATA_SET: + case MSMFB_METADATA_GET: + default: + pr_debug("%s: overlay ioctl cmd=[%u]\n", __func__, cmd); + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) arg, file); + break; + } + return ret; +} + +/* + * mdss_fb_compat_ioctl() - MDSS Framebuffer compat ioctl function + * @info: pointer to framebuffer info + * @cmd: ioctl command + * @arg: argument to ioctl + * + * This function adds the compat translation layer for framebuffer + * ioctls to allow 32-bit userspace to call ioctls on the mdss + * framebuffer device driven by a 64-bit kernel. + */ +int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + int ret; + + if (!info || !info->par) + return -EINVAL; + + cmd = __do_compat_ioctl_nr(cmd); + switch (cmd) { + case MSMFB_CURSOR: + ret = mdss_fb_compat_cursor(info, cmd, arg, file); + break; + case MSMFB_SET_LUT: + ret = mdss_fb_compat_set_lut(info, arg, file); + break; + case MSMFB_BUFFER_SYNC: + ret = mdss_fb_compat_buf_sync(info, cmd, arg, file); + break; + case MSMFB_ATOMIC_COMMIT: + ret = __compat_atomic_commit(info, cmd, arg, file); + break; + case MSMFB_ASYNC_POSITION_UPDATE: + ret = __compat_async_position_update(info, cmd, arg); + break; + case MSMFB_MDP_PP: + case MSMFB_HISTOGRAM_START: + case MSMFB_HISTOGRAM_STOP: + case MSMFB_HISTOGRAM: + case MSMFB_OVERLAY_GET: + case MSMFB_OVERLAY_SET: + case MSMFB_OVERLAY_UNSET: + case MSMFB_OVERLAY_PLAY: + case MSMFB_OVERLAY_VSYNC_CTRL: + case MSMFB_METADATA_SET: + case MSMFB_METADATA_GET: + case MSMFB_OVERLAY_PREPARE: + ret = mdss_compat_overlay_ioctl(info, cmd, arg, file); + break; + case MSMFB_NOTIFY_UPDATE: + case MSMFB_DISPLAY_COMMIT: + default: + ret = mdss_fb_do_ioctl(info, cmd, arg, file); + break; + } + + if (ret == -ENOTSUPP) + pr_err("%s: unsupported ioctl\n", __func__); + else if (ret) + pr_debug("%s: ioctl err cmd=%u ret=%d\n", __func__, cmd, ret); + + return ret; +} +EXPORT_SYMBOL(mdss_fb_compat_ioctl); diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..819106b6bf3d4edcf6e986e318d71d9b7d3e1250 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_compat_utils.h @@ -0,0 +1,558 @@ +/* + * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef MDSS_COMPAT_UTILS_H +#define MDSS_COMPAT_UTILS_H + +/* + * To allow proper structure padding for 64bit/32bit target + */ +#ifndef MDP_LAYER_COMMIT_V1_PAD +#ifdef __LP64 +#define MDP_LAYER_COMMIT_V1_PAD 2 +#else +#define MDP_LAYER_COMMIT_V1_PAD 3 +#endif +#endif + +struct mdp_buf_sync32 { + u32 flags; + u32 acq_fen_fd_cnt; + u32 session_id; + compat_caddr_t acq_fen_fd; + compat_caddr_t rel_fen_fd; + compat_caddr_t retire_fen_fd; +}; + +struct fb_cmap32 { + u32 start; + u32 len; + compat_caddr_t red; + compat_caddr_t green; + compat_caddr_t blue; + compat_caddr_t transp; +}; + +struct fb_image32 { + u32 dx; + u32 dy; + u32 width; + u32 height; + u32 fg_color; + u32 bg_color; + u8 depth; + compat_caddr_t data; + struct fb_cmap32 cmap; +}; + +struct fb_cursor32 { + u16 set; + u16 enable; + u16 rop; + compat_caddr_t mask; + struct fbcurpos hot; + struct fb_image32 image; +}; + +struct mdp_ccs32 { +}; + +struct msmfb_overlay_blt32 { +}; + +struct msmfb_overlay_3d32 { +}; + +struct msmfb_mixer_info_req32 { +}; + +struct msmfb_metadata32 { + uint32_t op; + uint32_t flags; + union { + struct mdp_misr misr_request; + struct mdp_blend_cfg blend_cfg; + struct mdp_mixer_cfg mixer_cfg; + uint32_t panel_frame_rate; + uint32_t video_info_code; + struct mdss_hw_caps caps; + uint8_t secure_en; + } data; +}; + +struct mdp_histogram_start_req32 { + uint32_t block; + uint8_t frame_cnt; + uint8_t bit_mask; + uint16_t num_bins; +}; + +struct mdp_histogram_data32 { + uint32_t block; + uint32_t bin_cnt; + compat_caddr_t c0; + compat_caddr_t c1; + compat_caddr_t c2; + compat_caddr_t extra_info; +}; + +struct mdp_pcc_coeff32 { + uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1; +}; + +struct mdp_pcc_coeff_v1_7_32 { + uint32_t c, r, g, b, rg, gb, rb, rgb; +}; + +struct mdp_pcc_data_v1_7_32 { + struct mdp_pcc_coeff_v1_7_32 r, g, b; +}; +struct mdp_pcc_cfg_data32 { + uint32_t version; + uint32_t block; + uint32_t ops; + struct mdp_pcc_coeff32 r, g, b; + compat_caddr_t cfg_payload; +}; + +struct mdp_csc_cfg32 { + /* flags for enable CSC, toggling RGB,YUV input/output */ + uint32_t flags; + uint32_t csc_mv[9]; + uint32_t csc_pre_bv[3]; + uint32_t csc_post_bv[3]; + uint32_t csc_pre_lv[6]; + uint32_t csc_post_lv[6]; +}; + +struct mdp_csc_cfg_data32 { + uint32_t block; + struct mdp_csc_cfg32 csc_data; +}; + +struct mdp_bl_scale_data32 { + uint32_t min_lvl; + uint32_t scale; +}; + +struct mdp_pa_mem_col_cfg32 { + uint32_t color_adjust_p0; + uint32_t color_adjust_p1; + uint32_t hue_region; + uint32_t sat_region; + uint32_t val_region; +}; + +struct mdp_pa_v2_data32 { + /* Mask bits for PA features */ + uint32_t flags; + uint32_t global_hue_adj; + uint32_t global_sat_adj; + uint32_t global_val_adj; + uint32_t global_cont_adj; + struct mdp_pa_mem_col_cfg32 skin_cfg; + struct mdp_pa_mem_col_cfg32 sky_cfg; + struct mdp_pa_mem_col_cfg32 fol_cfg; + uint32_t six_zone_len; + uint32_t six_zone_thresh; + compat_caddr_t six_zone_curve_p0; + compat_caddr_t six_zone_curve_p1; +}; + +struct mdp_pa_mem_col_data_v1_7_32 { + uint32_t color_adjust_p0; + 
uint32_t color_adjust_p1; + uint32_t color_adjust_p2; + uint32_t blend_gain; + uint8_t sat_hold; + uint8_t val_hold; + uint32_t hue_region; + uint32_t sat_region; + uint32_t val_region; +}; + +struct mdp_pa_data_v1_7_32 { + uint32_t mode; + uint32_t global_hue_adj; + uint32_t global_sat_adj; + uint32_t global_val_adj; + uint32_t global_cont_adj; + struct mdp_pa_mem_col_data_v1_7_32 skin_cfg; + struct mdp_pa_mem_col_data_v1_7_32 sky_cfg; + struct mdp_pa_mem_col_data_v1_7_32 fol_cfg; + uint32_t six_zone_thresh; + uint32_t six_zone_adj_p0; + uint32_t six_zone_adj_p1; + uint8_t six_zone_sat_hold; + uint8_t six_zone_val_hold; + uint32_t six_zone_len; + compat_caddr_t six_zone_curve_p0; + compat_caddr_t six_zone_curve_p1; +}; + +struct mdp_pa_v2_cfg_data32 { + uint32_t version; + uint32_t block; + uint32_t flags; + struct mdp_pa_v2_data32 pa_v2_data; + compat_caddr_t cfg_payload; +}; + +struct mdp_pa_cfg32 { + uint32_t flags; + uint32_t hue_adj; + uint32_t sat_adj; + uint32_t val_adj; + uint32_t cont_adj; +}; + +struct mdp_pa_cfg_data32 { + uint32_t block; + struct mdp_pa_cfg32 pa_data; +}; + +struct mdp_igc_lut_data_v1_7_32 { + uint32_t table_fmt; + uint32_t len; + compat_caddr_t c0_c1_data; + compat_caddr_t c2_data; +}; + +struct mdp_rgb_lut_data32 { + uint32_t flags; + uint32_t lut_type; + struct fb_cmap32 cmap; +}; + +struct mdp_igc_lut_data32 { + uint32_t block; + uint32_t version; + uint32_t len, ops; + compat_caddr_t c0_c1_data; + compat_caddr_t c2_data; + compat_caddr_t cfg_payload; +}; + +struct mdp_hist_lut_data_v1_7_32 { + uint32_t len; + compat_caddr_t data; +}; + +struct mdp_hist_lut_data32 { + uint32_t block; + uint32_t version; + uint32_t hist_lut_first; + uint32_t ops; + uint32_t len; + compat_caddr_t data; + compat_caddr_t cfg_payload; +}; + +struct mdp_ar_gc_lut_data32 { + uint32_t x_start; + uint32_t slope; + uint32_t offset; +}; + +struct mdp_pgc_lut_data_v1_7_32 { + uint32_t len; + compat_caddr_t c0_data; + compat_caddr_t c1_data; + compat_caddr_t c2_data; +}; + +struct mdp_pgc_lut_data32 { + uint32_t version; + uint32_t block; + uint32_t flags; + uint8_t num_r_stages; + uint8_t num_g_stages; + uint8_t num_b_stages; + compat_caddr_t r_data; + compat_caddr_t g_data; + compat_caddr_t b_data; + compat_caddr_t cfg_payload; +}; + +struct mdp_lut_cfg_data32 { + uint32_t lut_type; + union { + struct mdp_igc_lut_data32 igc_lut_data; + struct mdp_pgc_lut_data32 pgc_lut_data; + struct mdp_hist_lut_data32 hist_lut_data; + struct mdp_rgb_lut_data32 rgb_lut_data; + } data; +}; + +struct mdp_qseed_cfg32 { + uint32_t table_num; + uint32_t ops; + uint32_t len; + compat_caddr_t data; +}; + +struct mdp_qseed_cfg_data32 { + uint32_t block; + struct mdp_qseed_cfg32 qseed_data; +}; + +struct mdp_dither_cfg_data32 { + uint32_t block; + uint32_t flags; + uint32_t g_y_depth; + uint32_t r_cr_depth; + uint32_t b_cb_depth; +}; + +struct mdp_gamut_data_v1_7_32 { + uint32_t mode; + uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7]; + compat_caddr_t c0_data[MDP_GAMUT_TABLE_NUM_V1_7]; + compat_caddr_t c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7]; + uint32_t tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM]; + compat_caddr_t scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM]; +}; + +struct mdp_gamut_cfg_data32 { + uint32_t block; + uint32_t flags; + uint32_t version; + uint32_t gamut_first; + uint32_t tbl_size[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t r_tbl[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t g_tbl[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t b_tbl[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t cfg_payload; +}; + +struct 
mdp_calib_config_data32 { + uint32_t ops; + uint32_t addr; + uint32_t data; +}; + +struct mdp_calib_config_buffer32 { + uint32_t ops; + uint32_t size; + compat_caddr_t buffer; +}; + +struct mdp_calib_dcm_state32 { + uint32_t ops; + uint32_t dcm_state; +}; + +struct mdss_ad_init32 { + uint32_t asym_lut[33]; + uint32_t color_corr_lut[33]; + uint8_t i_control[2]; + uint16_t black_lvl; + uint16_t white_lvl; + uint8_t var; + uint8_t limit_ampl; + uint8_t i_dither; + uint8_t slope_max; + uint8_t slope_min; + uint8_t dither_ctl; + uint8_t format; + uint8_t auto_size; + uint16_t frame_w; + uint16_t frame_h; + uint8_t logo_v; + uint8_t logo_h; + uint32_t alpha; + uint32_t alpha_base; + uint32_t bl_lin_len; + uint32_t bl_att_len; + compat_caddr_t bl_lin; + compat_caddr_t bl_lin_inv; + compat_caddr_t bl_att_lut; +}; + +struct mdss_ad_cfg32 { + uint32_t mode; + uint32_t al_calib_lut[33]; + uint16_t backlight_min; + uint16_t backlight_max; + uint16_t backlight_scale; + uint16_t amb_light_min; + uint16_t filter[2]; + uint16_t calib[4]; + uint8_t strength_limit; + uint8_t t_filter_recursion; + uint16_t stab_itr; + uint32_t bl_ctrl_mode; +}; + +/* ops uses standard MDP_PP_* flags */ +struct mdss_ad_init_cfg32 { + uint32_t ops; + union { + struct mdss_ad_init32 init; + struct mdss_ad_cfg32 cfg; + } params; +}; + +struct mdss_ad_input32 { + uint32_t mode; + union { + uint32_t amb_light; + uint32_t strength; + uint32_t calib_bl; + } in; + uint32_t output; +}; + +struct mdss_calib_cfg32 { + uint32_t ops; + uint32_t calib_mask; +}; + +struct mdp_histogram_cfg32 { + uint32_t ops; + uint32_t block; + uint8_t frame_cnt; + uint8_t bit_mask; + uint16_t num_bins; +}; + +struct mdp_sharp_cfg32 { + uint32_t flags; + uint32_t strength; + uint32_t edge_thr; + uint32_t smooth_thr; + uint32_t noise_thr; +}; + +struct mdp_overlay_pp_params32 { + uint32_t config_ops; + struct mdp_csc_cfg32 csc_cfg; + struct mdp_qseed_cfg32 qseed_cfg[2]; + struct mdp_pa_cfg32 pa_cfg; + struct mdp_pa_v2_data32 pa_v2_cfg; + struct mdp_igc_lut_data32 igc_cfg; + struct mdp_sharp_cfg32 sharp_cfg; + struct mdp_histogram_cfg32 hist_cfg; + struct mdp_hist_lut_data32 hist_lut_cfg; + struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data; + struct mdp_pcc_cfg_data32 pcc_cfg_data; +}; + +struct msmfb_mdp_pp32 { + uint32_t op; + union { + struct mdp_pcc_cfg_data32 pcc_cfg_data; + struct mdp_csc_cfg_data32 csc_cfg_data; + struct mdp_lut_cfg_data32 lut_cfg_data; + struct mdp_qseed_cfg_data32 qseed_cfg_data; + struct mdp_bl_scale_data32 bl_scale_data; + struct mdp_pa_cfg_data32 pa_cfg_data; + struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data; + struct mdp_dither_cfg_data32 dither_cfg_data; + struct mdp_gamut_cfg_data32 gamut_cfg_data; + struct mdp_calib_config_data32 calib_cfg; + struct mdss_ad_init_cfg32 ad_init_cfg; + struct mdss_calib_cfg32 mdss_calib_cfg; + struct mdss_ad_input32 ad_input; + struct mdp_calib_config_buffer32 calib_buffer; + struct mdp_calib_dcm_state32 calib_dcm; + } data; +}; + +struct mdp_overlay32 { + struct msmfb_img src; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + uint32_t z_order; /* stage number */ + uint32_t is_fg; /* control alpha & transp */ + uint32_t alpha; + uint32_t blend_op; + uint32_t transp_mask; + uint32_t flags; + uint32_t pipe_type; + uint32_t id; + uint8_t priority; + uint32_t user_data[6]; + uint32_t bg_color; + uint8_t horz_deci; + uint8_t vert_deci; + struct mdp_overlay_pp_params32 overlay_pp_cfg; + struct mdp_scale_data scale; + uint8_t color_space; + uint32_t frame_rate; +}; + +struct mdp_overlay_list32 { + uint32_t 
num_overlays; + compat_caddr_t overlay_list; + uint32_t flags; + uint32_t processed_overlays; +}; + +struct mdp_input_layer32 { + uint32_t flags; + uint32_t pipe_ndx; + uint8_t horz_deci; + uint8_t vert_deci; + uint8_t alpha; + uint16_t z_order; + uint32_t transp_mask; + uint32_t bg_color; + enum mdss_mdp_blend_op blend_op; + enum mdp_color_space color_space; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + compat_caddr_t scale; + struct mdp_layer_buffer buffer; + compat_caddr_t pp_info; + int error_code; + uint32_t reserved[6]; +}; + +struct mdp_output_layer32 { + uint32_t flags; + uint32_t writeback_ndx; + struct mdp_layer_buffer buffer; + enum mdp_color_space color_space; + uint32_t reserved[5]; +}; +struct mdp_layer_commit_v1_32 { + uint32_t flags; + int release_fence; + struct mdp_rect left_roi; + struct mdp_rect right_roi; + compat_caddr_t input_layers; + uint32_t input_layer_cnt; + compat_caddr_t output_layer; + int retire_fence; + compat_caddr_t dest_scaler; + uint32_t dest_scaler_cnt; + compat_caddr_t frc_info; + uint32_t bl_level; /* BL level to be updated in commit */ + uint32_t reserved[MDP_LAYER_COMMIT_V1_PAD]; +}; + +struct mdp_layer_commit32 { + uint32_t version; + union { + struct mdp_layer_commit_v1_32 commit_v1; + }; +}; + +struct mdp_position_update32 { + compat_caddr_t __user *input_layers; + uint32_t input_layer_cnt; +}; + +#endif diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.c b/drivers/video/fbdev/msm/mdss_dba_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..2758a5a7d2343557ec1254bf7ad8405a10321672 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_dba_utils.c @@ -0,0 +1,912 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include