diff --git a/Documentation/devicetree/bindings/arm/msm/hidqvr-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/hidqvr-smp2p.txt new file mode 100644 index 0000000000000000000000000000000000000000..84af8d212ee9528a1f0ddf115c74e5b38e9104ca --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/hidqvr-smp2p.txt @@ -0,0 +1,15 @@ +Qualcomm Technologies, Inc. HID QVR (hid-qvr) driver + +Required properties: +-compatible : + To communicate with cdsp + qcom,smp2p_interrupt_qvrexternal_5_out (outbound) + +Example: + qcom,smp2p_interrupt_qvrexternal_5_out { + compatible = "qcom,smp2p-interrupt-qvrexternal-5-out"; + qcom,smem-states = <&smp2p_qvrexternal5_out 0>; + qcom,smem-state-names = "qvrexternal-smp2p-out"; + }; + + diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt index 1a357b10418de6dd6e7fee49cbf8d9414e8afff0..a547067df2a371df12fbbb1ddf4f929ec161ad73 100644 --- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt +++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt @@ -50,14 +50,10 @@ Optional properties: - qcom,min-child-idx: The minimum level that a child CPU should be in before this level can be chosen. This property is required for all non-default level. 
- - qcom,latency-us: The latency in handling the interrupt if this level - was chosen, in uSec - - qcom,ss-power: The steady state power expelled when the processor is - in this level in mWatts - - qcom,energy-overhead: The energy used up in entering and exiting - this level in mWatts.uSec - - qcom,time-overhead: The time spent in entering and exiting this - level in uS + - qcom,entry-latency-us: The latency to enter LPM level, in uSec + - qcom,exit-latency-us: The latency to exit LPM level, in uSec + - qcom,min-residency-us: The minimum residency value from which entering + to low power mode is beneficial, in uSec Optional properties: - qcom,notify-rpm: When set, the driver configures the sleep and wake @@ -107,14 +103,10 @@ Optional properties: - reg: The numeric cpu level id - label: Name to identify the low power mode in stats - qcom,psci-cpu-mode: ID to be passed into PSCI firmware. - - qcom,latency-us: The latency in handling the interrupt if this level - was chosen, in uSec - - qcom,ss-power: The steady state power expelled when the processor is - in this level in mWatts - - qcom,energy-overhead: The energy used up in entering and exiting - this level in mWatts.uSec - - qcom,time-overhead: The time spent in entering and exiting this - level in uS + - qcom,entry-latency-us: The latency to enter LPM level, in uSec + - qcom,exit-latency-us: The latency to exit LPM level, in uSec + - qcom,min-residency-us: The minimum residency value from which entering + to low power mode is beneficial, in uSec Optional properties: - qcom,is-reset: This boolean property maps to "power state" bit in PSCI @@ -144,31 +136,27 @@ Optional properties: reg = <0>; label = "l3-wfi"; qcom,psci-mode = <0x1>; - qcom,latency-us = <51>; - qcom,ss-power = <452>; - qcom,energy-overhead = <69355>; - qcom,time-overhead = <99>; + qcom,entry-latency-us = <48>; + qcom,exit-latency-us = <51>; + qcom,min-residency-us = <99>; }; qcom,pm-cluster-level@1 { /* D2 */ reg = <1>; label = "l3-dyn-ret"; 
qcom,psci-mode = <0x2>; - qcom,latency-us = <659>; - qcom,ss-power = <434>; - qcom,energy-overhead = <465725>; - qcom,time-overhead = <976>; - qcom,min-child-idx = <1>; + qcom,entry-latency-us = <317>; + qcom,exit-latency-us = <659>; + qcom,min-residency-us = <4065>; }; qcom,pm-cluster-level@2 { /* D4, D3 is not supported */ reg = <2>; label = "l3-pc"; qcom,psci-mode = <0x4>; - qcom,latency-us = <4562>; - qcom,ss-power = <408>; - qcom,energy-overhead = <2421840>; - qcom,time-overhead = <5376>; + qcom,entry-latency-us = <814>; + qcom,exit-latency-us = <4562>; + qcom,min-residency-us = <7085>; qcom,min-child-idx = <2>; qcom,is-reset; }; @@ -177,10 +165,9 @@ Optional properties: reg = <3>; label = "cx-off"; qcom,psci-mode = <0x224>; - qcom,latency-us = <5562>; - qcom,ss-power = <308>; - qcom,energy-overhead = <2521840>; - qcom,time-overhead = <6376>; + qcom,entry-latency-us = <814>; + qcom,exit-latency-us = <5562>; + qcom,min-residency-us = <9987>; qcom,min-child-idx = <3>; qcom,is-reset; qcom,notify-rpm; @@ -190,10 +177,9 @@ Optional properties: reg = <4>; label = "llcc-off"; qcom,psci-mode = <0xC24>; - qcom,latency-us = <6562>; - qcom,ss-power = <108>; - qcom,energy-overhead = <2621840>; - qcom,time-overhead = <7376>; + qcom,entry-latency-us = <814>; + qcom,exit-latency-us = <6562>; + qcom,min-residency-us = <10100>; qcom,min-child-idx = <3>; qcom,is-reset; qcom,notify-rpm; @@ -210,30 +196,27 @@ Optional properties: reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <43>; - qcom,ss-power = <454>; - qcom,energy-overhead = <38639>; - qcom,time-overhead = <83>; + qcom,entry-latency-us = <40>; + qcom,exit-latency-us = <43>; + qcom,min-residency-us = <100>; }; qcom,pm-cpu-level@1 { /* C2D */ reg = <1>; label = "ret"; qcom,psci-cpu-mode = <0x2>; - qcom,latency-us = <86>; - qcom,ss-power = <449>; - qcom,energy-overhead = <78456>; - qcom,time-overhead = <167>; + qcom,entry-latency-us = <81>; + qcom,exit-latency-us = <86>; + qcom,min-residency-us = 
<965>; }; qcom,pm-cpu-level@2 { /* C3 */ reg = <2>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <612>; - qcom,ss-power = <436>; - qcom,energy-overhead = <418225>; - qcom,time-overhead = <885>; + qcom,entry-latency-us = <273>; + qcom,exit-latency-us = <612>; + qcom,min-residency-us = <1890>; qcom,is-reset; }; @@ -241,10 +224,9 @@ Optional properties: reg = <3>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <700>; - qcom,ss-power = <400>; - qcom,energy-overhead = <428225>; - qcom,time-overhead = <1000>; + qcom,entry-latency-us = <300>; + qcom,exit-latency-us = <700>; + qcom,min-residency-us = <3934>; qcom,is-reset; }; }; @@ -260,30 +242,27 @@ Optional properties: reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <43>; - qcom,ss-power = <454>; - qcom,energy-overhead = <38639>; - qcom,time-overhead = <83>; + qcom,entry-latency-us = <40>; + qcom,exit-latency-us = <43>; + qcom,min-residency-us = <83>; }; qcom,pm-cpu-level@1 { /* C2D */ reg = <1>; label = "ret"; qcom,psci-cpu-mode = <0x2>; - qcom,latency-us = <86>; - qcom,ss-power = <449>; - qcom,energy-overhead = <78456>; - qcom,time-overhead = <167>; + qcom,entry-latency-us = <81>; + qcom,exit-latency-us = <86>; + qcom,min-residency-us = <637>; }; qcom,pm-cpu-level@2 { /* C3 */ reg = <2>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <612>; - qcom,ss-power = <436>; - qcom,energy-overhead = <418225>; - qcom,time-overhead = <885>; + qcom,entry-latency-us = <273>; + qcom,exit-latency-us = <612>; + qcom,min-residency-us = <952>; qcom,is-reset; }; @@ -291,10 +270,9 @@ Optional properties: reg = <3>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <700>; - qcom,ss-power = <400>; - qcom,energy-overhead = <428225>; - qcom,time-overhead = <1000>; + qcom,entry-latency-us = <300>; + qcom,exit-latency-us = <700>; + qcom,min-residency-us = <4488>; qcom,is-reset; }; }; diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt 
b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt index 70c4e23759de4d68601786ca4171663c2e64f22c..f99295ebca5fd2170e922bd2af43e074baadef30 100644 --- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt +++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt @@ -10,7 +10,8 @@ Properties: Usage: required Value type: Definition: must be "qcom,clk-cpu-osm" or "qcom,clk-cpu-osm-sdmshrike" - or "qcom,clk-cpu-osm-sm6150". + or "qcom,clk-cpu-osm-sm6150" or + "qcom,clk-cpu-osm-sdmmagpie". - reg Usage: required diff --git a/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt index 4e25d06cc84b7eddf3ca482f59c82750e774dba0..d82d521b60f00d515ec1c0ef39ece3827707a1f1 100644 --- a/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt +++ b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt @@ -4,9 +4,16 @@ Required properties: -compatible : should be one of the following: - "qcom,smp2p-sleepstate" -qcom,smem-states : the relevant outgoing smp2p entry +- interrupt-parent: specifies the phandle to the parent interrupt controller + this one is cascaded from +- interrupts: specifies the interrupt number, the irq line to be used +- interrupt-names: Interrupt name string, must be "smp2p-sleepstate-in" Example: qcom,smp2p_sleepstate { compatible = "qcom,smp2p-sleepstate"; qcom,smem-states = <&sleepstate_smp2p_out 0>; + interrupt-parent = <&sleepstate_smp2p_in>; + interrupts = <0 0>; + interrupt-names = "smp2p-sleepstate-in"; }; diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt index 92fe09eda177c008022c7d8f48276252b2caa6df..a4df74597828fb85251bae9aaa497810b86dac77 100644 --- a/Documentation/devicetree/bindings/clock/qcom,camcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt @@ -2,8 +2,12 @@ Qualcomm Technologies, Inc. 
Camera Clock & Reset Controller Binding ------------------------------------------------------------------- Required properties : -- compatible : must contain "qcom,camcc-sm8150", "qcom,camcc-sm8150-v2" - or "qcom,camcc-sdmshrike" or "qcom,camcc-sm6150". +- compatible : must contain one of the following: + "qcom,camcc-sm8150", + "qcom,camcc-sm8150-v2", + "qcom,camcc-sdmshrike", + "qcom,camcc-sm6150", + "qcom,camcc-sdmmagpie". - reg : shall contain base register location and length. - reg-names: names of registers listed in the same order as in the reg property. diff --git a/Documentation/devicetree/bindings/clock/qcom,debugcc.txt b/Documentation/devicetree/bindings/clock/qcom,debugcc.txt index 339357eb52590755f760815d6c3fda8d570400f7..e8db516b35633cbafdaddd6fbc5f85cad55971cd 100644 --- a/Documentation/devicetree/bindings/clock/qcom,debugcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,debugcc.txt @@ -4,7 +4,8 @@ Qualcomm Technologies, Inc. Debug Clock Controller Binding Required properties : - compatible: Shall contain "qcom,debugcc-sm8150", "qcom,debugcc-qcs405", - "qcom,debugcc-sm6150". + "qcom,debugcc-sm6150", + "qcom,debugcc-sdmmagpie". - qcom,gcc: phandle to the GCC device node. - qcom,videocc: phandle to the Video CC device node. - qcom,camcc: phandle to the Camera CC device node. diff --git a/Documentation/devicetree/bindings/clock/qcom,scc.txt b/Documentation/devicetree/bindings/clock/qcom,scc.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff38f4b1b2abee6ee7eea8b3192c49d0d84c4d4e --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,scc.txt @@ -0,0 +1,15 @@ +Qualcomm Technologies, Inc. Sensor Clock Controller Bindings + +Required properties: +- compatible: shall contain "qcom,scc-sm8150" or "qcom,scc-sm8150-v2". +- reg: shall contain base register location and length. +- vdd_scc_cx-supply: the logic rail supply. +- #clock-cells: shall contain 1. 
+ +Example: + qcom,scc@2b10000 { + compatible = "qcom,scc-sm8150"; + reg = <0x2b10000 0x30000>; + vdd_scc_cx-supply = <&VDD_CX_LEVEL>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt index 13ff2e904617f26e9351db2821d500f8fcb1c500..93c5d80c3f6893f1833b1ca49d1c374e2d5a4c8e 100644 --- a/Documentation/devicetree/bindings/display/msm/sde.txt +++ b/Documentation/devicetree/bindings/display/msm/sde.txt @@ -424,6 +424,10 @@ Optional properties: for the mixer block. Possible values: "primary" - preferred for primary display "none" - no preference on display +- qcom,sde-mixer-cwb-pref: A string array indicating the preferred mixer block + for CWB. Possible values: + "cwb" - preferred for cwb + "none" - no preference on display - qcom,sde-ctl-display-pref: A string array indicating the preferred display type for the ctl block. Possible values: "primary" - preferred for primary display @@ -523,6 +527,8 @@ Example: 0x00047000 0x0004a000>; qcom,sde-mixer-display-pref = "primary", "none", "none", "none"; + qcom,sde-mixer-cwb-pref = "none", "none", + "cwb", "none"; qcom,sde-dspp-top-off = <0x1300>; qcom,sde-dspp-off = <0x00055000 0x00057000>; qcom,sde-dspp-ad-off = <0x24000 0x22800>; diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-client.txt b/Documentation/devicetree/bindings/fb/mdss-spi-client.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8945383bad79a0735cafc8c2e1be6aa5260a876 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-spi-client.txt @@ -0,0 +1,26 @@ +Qualcomm Technologies, Inc. mdss-spi-client + +mdss-spi-client is for the SPI display to send the FB data to the SPI master. + +Required properties: +- compatible : should be "qcom,mdss-spi-client" +- spi-max-frequency : Maximum SPI clocking speed of device in Hz + +Optional properties: +- label: A string used to describe the controller used.
+- spi-cpol : Empty property indicating device requires inverse + clock polarity (CPOL) mode +- spi-cpha : Empty property indicating device requires shifted + clock phase (CPHA) mode +- spi-cs-high : Empty property indicating device requires + chip select active high + +Example: +spi@78b9000 { /* BLSP1 QUP5 */ + qcom,mdss_spi_client{ + reg = <0>; + compatible = "qcom,mdss-spi-client"; + label = "MDSS SPI QUP5 CLIENT"; + spi-max-frequency = <50000000>; + }; +}; diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-display.txt b/Documentation/devicetree/bindings/fb/mdss-spi-display.txt new file mode 100644 index 0000000000000000000000000000000000000000..a48ba3f2f45b04017e8b9588b60b1c08a59123d3 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-spi-display.txt @@ -0,0 +1,24 @@ +Qualcomm Technologies, Inc. mdss-spi-display + +mdss-spi-display is a spi interface display which support send frame +data and command to panel, compatible with SPI interface specification. + +Required properties: +- compatible: Must be "qcom,mdss-spi-display" +- qcom,mdss-fb-map: pHandle that specifies the framebuffer to which the + interface is mapped. + +Optional properties: +- label: A string used to describe the controller used. + +Example: +mdss_spi_display: qcom,mdss_spi_display { + compatible = "qcom,mdss-spi-display"; + label = "mdss spi display"; + + mdss_fb0: qcom,mdss_fb_primary { + cell-index = <0>; + compatible = "qcom,mdss-fb"; + }; +}; + diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt new file mode 100644 index 0000000000000000000000000000000000000000..7b303d1d5043005e637224270f9266e050d169e8 --- /dev/null +++ b/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt @@ -0,0 +1,204 @@ +Qualcomm Technologies, Inc. mdss-spi-panel + +mdss-spi-panel is a spi panel device which supports panels that +are compatible with display serial interface specification. 
+ +Required properties: +- qcom,mdss-spi-panel-controller: Specifies the phandle for the SPI controller that + this panel will be mapped to. +- qcom,mdss-spi-panel-width: Specifies panel width in pixels. +- qcom,mdss-spi-panel-height: Specifies panel height in pixels. +- qcom,mdss-spi-bpp: Specifies the panel bits per pixels. + 3 = for rgb111 + 8 = for rgb332 + 12 = for rgb444 + 16 = for rgb565 + 18 = for rgb666 + 24 = for rgb888 +- qcom,mdss-spi-panel-destination: A string that specifies the destination display for the panel. + "display_1" = DISPLAY_1 + "display_2" = DISPLAY_2 +- qcom,mdss-spi-on-command: A byte stream formed by multiple packets + byte 0: wait number of specified ms after command + transmitted + byte 1: 8 bits length in network byte order + byte 3 and beyond: number byte of payload +- qcom,mdss-spi-off-command: A byte stream formed by multiple packets + byte 0: wait number of specified ms after command + transmitted + byte 1: 8 bits length in network byte order + byte 3 and beyond: number byte of payload +Optional properties: +- qcom,mdss-spi-panel-name: A string used as a descriptive name of the panel +- qcom,cont-splash-enabled: Boolean used to enable continuous splash mode. + If this property is specified, it is required to + to specify the memory reserved for the splash + screen using the qcom,memblock-reserve binding + for the framebuffer device attached to the panel. +- qcom,mdss-spi-h-back-porch: Horizontal back porch value in pixels. + 6 = default value. +- qcom,mdss-spi-h-front-porch: Horizontal front porch value in pixels. + 6 = default value. +- qcom,mdss-spi-h-pulse-width: Horizontal pulse width. + 2 = default value. +- qcom,mdss-spi-h-sync-skew: Horizontal sync skew value. + 0 = default value. +- qcom,mdss-spi-v-back-porch: Vertical back porch value in pixels. + 6 = default value. +- qcom,mdss-spi-v-front-porch: Vertical front porch value in pixels. + 6 = default value. +- qcom,mdss-spi-v-pulse-width: Vertical pulse width. 
+ 2 = default value. +- qcom,mdss-spi-bl-pmic-control-type: A string that specifies the implementation of backlight + control for this panel. + "bl_ctrl_pwm" = Backlight controlled by PWM gpio. + "bl_ctrl_wled" = Backlight controlled by WLED. + other: Unknown backlight control. (default) +- qcom,mdss-spi-bl-min-level: Specifies the min backlight level supported by the panel. + 0 = default value. +- qcom,mdss-spi-bl-max-level: Specifies the max backlight level supported by the panel. + 255 = default value. +- qcom,mdss-spi-panel-framerate: Specifies the frame rate for the panel. +- qcom,mdss-spi-panel-vsync-per-te: Specifies the number of how many TE will trigger a VSYNC. +- qcom,esd-check-enabled: Boolean used to enable ESD recovery feature. +- qcom,mdss-spi-panel-status-check-mode: Specifies the panel status check method for ESD recovery. + "send_init_command" = Regardless of panel status, direct send the panel + initial code to recover panel status + "reg_read" = Reads panel status register to check the panel status +- qcom,mdss-spi-panel-status-reg: Unsigned 8bits integer value that specifies the value of panel status register address. +- qcom,mdss-spi-panel-status-read-length: + Unsigned 8bits integer value that specifies the expected read-back length of the + panel register. +- qcom,mdss-spi-panel-status-value: An unsigned 8bits integer array that specifies the values of the panel status register + which is used to check the panel status. The size of this array + is specified by qcom,mdss-spi-panel-status-read-length.
+Example: +&mdss_spi_display { + spi_gc9305_qvga_cmd: qcom,mdss_spi_gc9305_qvga_cmd { + qcom,mdss-spi-panel-name = "gc9305 qvga command mode spi panel"; + qcom,mdss-spi-panel-destination = "display_1"; + qcom,mdss-spi-panel-controller = <&mdss_spi>; + qcom,mdss-spi-panel-framerate = <30>; + qcom,mdss-spi-panel-width = <240>; + qcom,mdss-spi-panel-height = <320>; + qcom,mdss-spi-h-front-porch = <79>; + qcom,mdss-spi-h-back-porch = <59>; + qcom,mdss-spi-h-pulse-width = <60>; + qcom,mdss-spi-v-back-porch = <10>; + qcom,mdss-spi-v-front-porch = <7>; + qcom,mdss-spi-v-pulse-width = <2>; + qcom,mdss-spi-h-left-border = <0>; + qcom,mdss-spi-h-right-border = <0>; + qcom,mdss-spi-v-top-border = <0>; + qcom,mdss-spi-v-bottom-border = <0>; + qcom,mdss-spi-bpp = <16>; + qcom,mdss-spi-on-command = [00 01 FE + 00 01 EF + 00 02 36 48 + 00 02 3A 05 + 00 02 35 00 + 00 03 A4 44 44 + 00 03 A5 42 42 + 00 03 AA 88 88 + 00 03 E8 12 40 + 00 03 E3 01 10 + 00 02 FF 61 + 00 02 AC 00 + 00 03 A6 2A 2A + 00 03 A7 2B 2B + 00 03 A8 18 18 + 00 03 A9 2A 2A + 00 02 AD 33 + 00 02 AF 55 + 00 02 AE 2B + 00 05 2A 00 00 00 EF + 00 05 2B 00 00 01 3F + 00 01 2C + 00 07 F0 02 02 00 08 0C 10 + 00 07 F1 01 00 00 14 1D 0E + 00 07 F2 10 09 37 04 04 48 + 00 07 F3 10 0B 3F 05 05 4E + 00 07 F4 0D 19 17 1D 1E 0F + 00 07 F5 06 12 13 1A 1B 0F + 78 01 11 + 00 01 29 + 00 01 2C]; + qcom,mdss-spi-off-command = [20 01 28 + 20 01 10]; + qcom,mdss-spi-bl-min-level = <1>; + qcom,mdss-spi-bl-max-level = <4095>; + qcom,esd-check-enabled; + qcom,mdss-spi-panel-status-check-mode = "reg_read"; + qcom,mdss-spi-panel-status-reg = /bits/ 8 <0x09>; + qcom,mdss-spi-panel-status-read-length = <4>; + qcom,mdss-spi-panel-status-value = /bits/ 8 <0x52 0x29 0x83 0x00>; + }; +}; + +mdss-spi-panel is a SPI interface panel which uses SPI protocol for data +receive and send. + +Required properties: +- compatible: Must be "qcom,mdss-spi-panel" +- vdd-supply: Phandle for vdd regulator device node. 
+- vddio-supply: Phandle for vdd-io regulator device node. +- qcom,panel-supply-entries: A node that lists the elements of the supply used to + power the DSI panel. There can be more than one instance + of this binding, in which case the entry would be appended + with the supply entry index. For a detailed description of + fields in the supply entry, refer to the qcom,ctrl-supply-entries + binding above. + +Optional properties: +- pwms: + Value type: + Definition: The PWM device (phandle) used for controlling backlight. +- qcom,platform-spi-dc-gpio: Pull down this gpio indicate current package is command, + Pull up this gpio indicate current package is parameter or pixels. +- qcom,platform-reset-gpio: Specifies the panel reset gpio. +- qcom,platform-te-gpio: Specifies the gpio used for TE. +- label: A string used to describe the controller used. + -- qcom,supply-name: name of the supply (vdd/vdda/vddio) + -- qcom,supply-min-voltage: minimum voltage level (uV) + -- qcom,supply-max-voltage: maximum voltage level (uV) + -- qcom,supply-enable-load: load drawn (uA) from enabled supply + -- qcom,supply-disable-load: load drawn (uA) from disabled supply + -- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on + -- qcom,supply-post-on-sleep: time to sleep (ms) after turning on + -- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off + -- qcom,supply-post-off-sleep: time to sleep (ms) after turning off +Example: + mdss_spi_panel: qcom,mdss_spi_panel { + compatible = "qcom,mdss-spi-panel"; + label = "mdss spi panel"; + + vdd-supply = <&pms405_l1>; + vddio-supply = <&pms405_l6>; + qcom,platform-te-gpio = <&tlmm 57 0>; + qcom,platform-reset-gpio = <&tlmm 42 0>; + qcom,platform-spi-dc-gpio = <&tlmm 39 0>; + pwms = <&pms405_l1 0 1000000>; + + qcom,panel-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,panel-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdd"; + qcom,supply-min-voltage = <2850000>; + qcom,supply-max-voltage = 
<2850000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + }; + + qcom,panel-supply-entry@1 { + reg = <1>; + qcom,supply-name = "vddio"; + qcom,supply-min-voltage = <1800000>; + qcom,supply-max-voltage = <1800000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt index 0441fc3cbbcce5a4ba51249eabdfde9921f9461c..cf84dba7fece7d55aafcd6a2909e4a3cc50f3886 100644 --- a/Documentation/devicetree/bindings/gpu/adreno.txt +++ b/Documentation/devicetree/bindings/gpu/adreno.txt @@ -113,6 +113,20 @@ Optional Properties: - qcom,l2pc-cpu-mask-latency: The CPU mask latency in microseconds to avoid L2PC on masked CPUs. + +- qcom,gpu-cx-ipeak: + CX Ipeak is a mitigation scheme which throttles GPU frequency + if all the clients are running at their respective threshold + frequencies to limit CX peak current. + + phandle - phandle of CX Ipeak device node + bit - Every bit corresponds to a client of CX Ipeak + driver in the relevant register. +- qcom,gpu-cx-ipeak-freq: + GPU frequency threshold for CX Ipeak voting. GPU votes + to CX Ipeak driver when GPU clock crosses this threshold. + CX Ipeak can limit peak current based on voting from other clients. + - qcom,force-32bit: Force the GPU to use 32 bit data sizes even if it is capable of doing 64 bit. diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt index 04faf3cf51eabd1240e44364e4774a10f8795f81..c8ad1b304dc5a3f866127c5a5cac2c848783834b 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt @@ -117,12 +117,26 @@ conditions. Some hardware may not have full support for atos debugging in tandem with other features like power collapse.
+-qcom,opt-out-tbu-halting: + Allow certain TBUs to opt-out from being halted for the + ATOS operation to proceed. Halting certain TBUs would cause + considerable impact to the system such as deadlocks on demand. + Such TBUs can be opted out to be halted from software. + - qcom,deferred-regulator-disable-delay : The time delay for deferred regulator disable in ms. In case of unmap call, regulator is enabled/disabled. This may introduce additional delay. For clients who do not detach, it's not possible to keep regulator vote while smmu is attached. Type is . +- qcom,min-iova-align: + Some hardware revision might have the deep prefetch bug where + invalid entries in the prefetch window would cause improper + permissions to be cached for the valid entries in this window. + Enable the workaround on such hardware by aligning the start + and end of all mapped buffers to prefetch size boundary, which + is defined by ARM_SMMU_MIN_IOVA_ALIGN. + - clocks : List of clocks to be used during SMMU register access. See Documentation/devicetree/bindings/clock/clock-bindings.txt for information about the format. For each clock specified diff --git a/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt b/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt index c4192683007ac345ba6de088909e34b18982d124..cace01944a10a745c017d404be11546e2adb066c 100644 --- a/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt +++ b/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt @@ -142,6 +142,13 @@ platforms. The PMIC is connected to the host processor via SPMI bus. Value type: Definition: If specified, can be used to get PMIC revision information. +- qcom,leds-per-string + Usage: optional + Value type: + Definition: If specified, can be used to calculate available current + during selfie flash operation. If not specified, available + current calculated is simply the configured threshold. 
+ Following properties are for child subnodes that are needed for WLED preflash (or torch), flash and switch. These child subnodes can be specified only for PMICs that has WLED5 (e.g. PM8150L). diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt index 169f848b534d77ba2afd7d271123e2404648e909..8728a1bdbc6a212a667894d6cc6ae772b97a37aa 100644 --- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt +++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt @@ -24,11 +24,11 @@ Optional properties: 128, 192. Unit is uS. - qcom,short-circuit-det : Boolean property which enables short circuit fault detection. - qcom,open-circuit-det : Boolean property which enables open circuit fault detection. -- qcom,vph-droop-det : Boolean property which enables VPH droop detection. -- qcom,vph-droop-hys-mv : Integer property to specify VPH droop hysteresis. It is only used if - qcom,vph-droop-det is specified. Valid values are 0, 25, 50 and 75. - Unit is mV. -- qcom,vph-droop-thresh-mv : Integer property to specify VPH droop threshold. It is only used if +- qcom,vph-droop-det : Boolean property which enables VPH droop detection. +- qcom,vph-droop-hysteresis-mv : Integer property to specify VPH droop hysteresis. It is only used if + qcom,vph-droop-det is specified. Valid values are 0, 25, 50 and 75. + Unit is mV. +- qcom,vph-droop-threshold-mv : Integer property to specify VPH droop threshold. It is only used if qcom,vph-droop-det is specified. Valid values are 2500 to 3200 with step size of 100. Unit is mV. - qcom,vph-droop-debounce-us : Integer property to specify VPH droop debounce time. 
It is only used diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt index 9c67ee4890d749af16ea27997e235f5f12594cb1..bbcb255c3150230978fba796b320a71c206ddbad 100644 --- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt +++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt @@ -2,7 +2,10 @@ Required properties: -- compatible: should be "qca,qca8337" +- compatible: should be one of: + "qca,qca8334" + "qca,qca8337" + - #size-cells: must be 0 - #address-cells: must be 1 @@ -14,6 +17,20 @@ port and PHY id, each subnode describing a port needs to have a valid phandle referencing the internal PHY connected to it. The CPU port of this switch is always port 0. +A CPU port node has the following optional node: + +- fixed-link : Fixed-link subnode describing a link to a non-MDIO + managed entity. See + Documentation/devicetree/bindings/net/fixed-link.txt + for details. + +For QCA8K the 'fixed-link' sub-node supports only the following properties: + +- 'speed' (integer, mandatory), to indicate the link speed. Accepted + values are 10, 100 and 1000 +- 'full-duplex' (boolean, optional), to indicate that full duplex is + used. When absent, half duplex is assumed. 
+ Example: @@ -53,6 +70,10 @@ Example: label = "cpu"; ethernet = <&gmac1>; phy-mode = "rgmii"; + fixed-link { + speed = 1000; + full-duplex; + }; }; port@1 { diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt index 354dd9896bb54bbe9cbcb7da56a659d5846f80b3..910187ebf1ce281e05c5e658b877ed6857bb0804 100644 --- a/Documentation/devicetree/bindings/net/meson-dwmac.txt +++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt @@ -10,6 +10,7 @@ Required properties on all platforms: - "amlogic,meson6-dwmac" - "amlogic,meson8b-dwmac" - "amlogic,meson-gxbb-dwmac" + - "amlogic,meson-axg-dwmac" Additionally "snps,dwmac" and any applicable more detailed version number described in net/stmmac.txt should be used. diff --git a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt new file mode 100644 index 0000000000000000000000000000000000000000..faf56c2cdb3d57f585f3b2d32ae2b5e9aa580a16 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt @@ -0,0 +1,141 @@ +MSM PCI express endpoint + +Required properties: + - compatible: should be "qcom,pcie-ep". + - reg: should contain PCIe register maps. + - reg-names: indicates various resources passed to driver by name. + Should be "msi", "dm_core", "elbi", "parf", "phy", "mmio". + These correspond to different modules within the PCIe domain. + - #address-cells: Should provide a value of 0. + - interrupt-parent: Should be the PCIe device node itself here. + - interrupts: Should be in the format <0 1 2> and it is an index to the + interrupt-map that contains PCIe related interrupts. + - #interrupt-cells: Should provide a value of 1. + - #interrupt-map-mask: should provide a value of 0xffffffff. + - interrupt-map: Must create mapping for the number of interrupts + that are defined in above interrupts property. 
+ For PCIe device node, it should define 6 mappings for + the corresponding PCIe interrupts supporting the + specification. + - interrupt-names: indicates interrupts passed to driver by name. + Should be "int_pm_turnoff", "int_dstate_change", + "int_l1sub_timeout", "int_link_up", + "int_link_down", "int_bridge_flush_n". + - perst-gpio: PERST GPIO specified by PCIe spec. + - wake-gpio: WAKE GPIO specified by PCIe spec. + - clkreq-gpio: CLKREQ GPIO specified by PCIe spec. + - -supply: phandle to the regulator device tree node. + Refer to the schematics for the corresponding voltage regulators. + vreg-1.8-supply: phandle to the analog supply for the PCIe controller. + vreg-0.9-supply: phandle to the analog supply for the PCIe controller. + +Optional Properties: + - qcom,-voltage-level: specifies voltage levels for supply. + Should be specified in pairs (max, min, optimal), units uV. + - clock-names: list of names of clock inputs. + Should be "pcie_0_pipe_clk", + "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk", + "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk", + "pcie_0_ldo"; + - max-clock-frequency-hz: list of the maximum operating frequencies stored + in the same order of clock names; + - resets: reset specifier pair consists of phandle for the reset controller + and reset lines used by this controller. + - reset-names: reset signal names sorted in the same order as the property + of resets. + - qcom,pcie-phy-ver: version of PCIe PHY. + - qcom,phy-init: The initialization sequence to bring up the PCIe PHY. + Should be specified in groups (offset, value, delay, direction). + - qcom,phy-status-reg: Register offset for PHY status. + - qcom,dbi-base-reg: Register offset for DBI base address. + - qcom,slv-space-reg: Register offset for slave address space size. + - qcom,pcie-link-speed: generation of PCIe link speed. The value could be + 1, 2 or 3. + - qcom,pcie-active-config: boolean type; active configuration of PCIe + addressing. 
+ - qcom,pcie-aggregated-irq: boolean type; interrupts are aggregated. + - qcom,pcie-mhi-a7-irq: boolean type; MHI a7 has separate irq. + - qcom,pcie-perst-enum: Link enumeration will be triggered by PERST + deassertion. + - mdm2apstatus-gpio: GPIO used by PCIe endpoint side to notify the host side. + - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for + below optional properties: + - qcom,msm-bus,name + - qcom,msm-bus,num-cases + - qcom,msm-bus,num-paths + - qcom,msm-bus,vectors-KBps + +Example: + + pcie_ep: qcom,pcie@bfffd000 { + compatible = "qcom,pcie-ep"; + + reg = <0xbfffd000 0x1000>, + <0xbfffe000 0x1000>, + <0xbffff000 0x1000>, + <0xfc520000 0x2000>, + <0xfc526000 0x1000>, + <0xfc527000 0x1000>; + reg-names = "msi", "dm_core", "elbi", "parf", "phy", "mmio"; + + #address-cells = <0>; + interrupt-parent = <&pcie_ep>; + interrupts = <0 1 2 3 4 5>; + #interrupt-cells = <1>; + interrupt-map-mask = <0xffffffff>; + interrupt-map = <0 &intc 0 44 0 + 1 &intc 0 46 0 + 2 &intc 0 47 0 + 3 &intc 0 50 0 + 4 &intc 0 51 0 + 5 &intc 0 52 0>; + interrupt-names = "int_pm_turnoff", "int_dstate_change", + "int_l1sub_timeout", "int_link_up", + "int_link_down", "int_bridge_flush_n"; + + perst-gpio = <&msmgpio 65 0>; + wake-gpio = <&msmgpio 61 0>; + clkreq-gpio = <&msmgpio 64 0>; + mdm2apstatus-gpio = <&tlmm_pinmux 16 0>; + + gdsc-vdd-supply = <&gdsc_pcie_0>; + vreg-1.8-supply = <&pmd9635_l8>; + vreg-0.9-supply = <&pmd9635_l4>; + + qcom,vreg-1.8-voltage-level = <1800000 1800000 1000>; + qcom,vreg-0.9-voltage-level = <950000 950000 24000>; + + clock-names = "pcie_0_pipe_clk", + "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk", + "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk", + "pcie_0_ldo"; + max-clock-frequency-hz = <62500000>, <1000000>, + <0>, <0>, <0>, <0>; + + resets = <&clock_gcc GCC_PCIE_BCR>, + <&clock_gcc GCC_PCIE_PHY_BCR>; + + reset-names = "pcie_0_core_reset", "pcie_0_phy_reset"; + + qcom,msm-bus,name = "pcie-ep"; + qcom,msm-bus,num-cases = <2>; + 
qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <45 512 0 0>, + <45 512 500 800>; + + qcom,pcie-link-speed = <1>; + qcom,pcie-active-config; + qcom,pcie-aggregated-irq; + qcom,pcie-mhi-a7-irq; + qcom,pcie-perst-enum; + qcom,phy-status-reg = <0x728>; + qcom,dbi-base-reg = <0x168>; + qcom,slv-space-reg = <0x16c>; + + qcom,phy-init = <0x604 0x03 0x0 0x1 + 0x048 0x08 0x0 0x1 + 0x64c 0x4d 0x0 0x1 + 0x600 0x00 0x0 0x1 + 0x608 0x03 0x0 0x1>; + }; diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt index 63eb0b70b3b609f339744836979bf455b84734c1..767dc1ee7da284703e43a948065fa20606ca5d72 100644 --- a/Documentation/devicetree/bindings/pci/msm_pcie.txt +++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt @@ -38,6 +38,8 @@ Required properties: - perst-gpio: PERST GPIO specified by PCIe spec. - wake-gpio: WAKE GPIO specified by PCIe spec. - phy-status-offset: Offset from PCIe PHY base to check if PCIe PHY is up. + - pcie_rc: PCI node is a sub-node of PCIe controller + node. This node holds root complex specific configurations and properties. - -supply: phandle to the regulator device tree node. Refer to the schematics for the corresponding voltage regulators. vreg-1.8-supply: phandle to the analog supply for the PCIe controller. @@ -95,7 +97,6 @@ Optional Properties: - iommus: the phandle and stream IDs for the SMMU used by this root complex. This should be used in separate nodes from the main root complex nodes, and is the only property needed in that case. - - qcom,smmu-exist: PCIe uses a SMMU. - qcom,smmu-sid-base: The base SMMU SID that PCIe bus driver will use to calculate and assign for each endpoint. - qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become @@ -128,6 +129,28 @@ Optional Properties: - reset-names: reset signal name strings sorted in the same order as the resets property.
+================= +Root Complex node +================= + +Root complex nodes are defined as subnodes of the PCIe controller node. + +Required properties: +- reg: Array (5-cell PCI resource). First cell is devfn, which is + determined by pci bus topology. Assign the other cells 0 since they are not + used. + +Optional properties: + - qcom,iommu-cfg: Determines whether PCIe bus driver is required to configure + SMMU that sits behind the PCIe controller. + Bit mask: + BIT(0) : Indicates if SMMU is present + BIT(1) : Set IOMMU attribute S1_BYPASS + BIT(2) : Set IOMMU attribute FAST + BIT(3) : Set IOMMU attribute ATOMIC + BIT(4) : Set IOMMU attribute FORCE COHERENT + - qcom,iommu-range: Pair of values describing iova base and size to allocate. + Example: pcie0: qcom,pcie@fc520000 { @@ -269,7 +292,6 @@ Example: qcom,msi-gicm-base = <0x160>; qcom,ext-ref-clk; qcom,tlp-rd-size = <0x5>; - qcom,smmu-exist; qcom,smmu-sid-base = <0x1480>; qcom,ep-latency = <100>; qcom,switch-latency = <100>; @@ -304,4 +326,10 @@ Example: qcom,msm-bus,vectors-KBps = <45 512 0 0>, <45 512 500 800>; + + pcie_rc0: pcie_rc0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + qcom,iommu-cfg = <0x3>; /* SMMU PRESENT.
SET S1 BYPASS */ + qcom,iommu-range = <0x0 0x10000000 0x0 0x40000000>; + }; }; diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt index 2392557ede2777a2463e76f77e78be4ddadbd607..df77d394edc024586ace6be0616cbfc9db054199 100644 --- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt @@ -3,8 +3,10 @@ Required properties for the root node: - compatible: one of "amlogic,meson8-cbus-pinctrl" "amlogic,meson8b-cbus-pinctrl" + "amlogic,meson8m2-cbus-pinctrl" "amlogic,meson8-aobus-pinctrl" "amlogic,meson8b-aobus-pinctrl" + "amlogic,meson8m2-aobus-pinctrl" "amlogic,meson-gxbb-periphs-pinctrl" "amlogic,meson-gxbb-aobus-pinctrl" "amlogic,meson-gxl-periphs-pinctrl" diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,slpi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,slpi-pinctrl.txt new file mode 100644 index 0000000000000000000000000000000000000000..632bab8e5ad60f134c7be849a57ce21a841dbd60 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,slpi-pinctrl.txt @@ -0,0 +1,123 @@ +Qualcomm Technologies, Inc. SLPI Pin controller driver + +This DT bindings describes the Pin controller driver +being added for supporting SLPI (Sensor Low Power Island) TLMM +from QTI chipsets. + +Following properties are for SLPI Pin controller device main node. +- compatible: + Usage: required + Value type: + Definition: must be "qcom,slpi-pinctrl" + +- reg: + Usage: required + Value type: + Definition: Base address and size of the SLPI TLMM register space. + +- qcom,num-pins: + Usage: required + Value type: + Definition: Number of PINs supported by the SLPI TLMM. + +Please refer to pinctrl-bindings.txt in this directory for details of the +common pinctrl bindings used by client devices, including the meaning of the +phrase "pin configuration node". 
+ +The pin configuration nodes act as a container for an arbitrary number of +subnodes. Each of these subnodes represents some desired configuration for a +pin or a list of pins. This configuration can include the +mux function to select on those pin(s), and various pin configuration +parameters, as listed below. + +SUBNODES: + +The name of each subnode is not important; all subnodes should be enumerated +and processed purely based on their content. + +Each subnode only affects those parameters that are explicitly listed. In +other words, a subnode that lists a mux function but no pin configuration +parameters implies no information about any pin configuration parameters. +Similarly, a pin subnode that describes a pullup parameter implies no +information about e.g. the mux function. + +The following generic properties as defined in pinctrl-bindings.txt are valid +to specify in a pin configuration subnode: + +- pins: + Usage: required + Value type: + Definition: List of gpio pins affected by the properties specified in + this subnode. Valid pins are: gpio0-gpio31 for LPI. + +- function: + Usage: required + Value type: + Definition: Specify the alternative function to be configured for the + specified pins. Valid values are: + "gpio", + "func1", + "func2", + "func3", + "func4", + "func5" + +- bias-disable: + Usage: optional + Value type: + Definition: The specified pins should be configured as no pull. + +- bias-pull-down: + Usage: optional + Value type: + Definition: The specified pins should be configured as pull down. + +- bias-bus-hold: + Usage: optional + Value type: + Definition: The specified pins should be configured as bus-keeper mode. + +- bias-pull-up: + Usage: optional + Value type: + Definition: The specified pins should be configured as pull up. + +- qcom,drive-strength: + Usage: optional + Value type: + Definition: Selects the drive strength for the specified pins. 
+ +Example: + + slpi_tlmm: slpi_pinctrl@02b40000 { + compatible = "qcom,slpi-pinctrl"; + qcom,num-pins = <14>; + reg = <0x02b40000 0x20000>; + + slpi_mclk0_active: slpi_mclk0_active { + mux { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + function = "func2"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + drive-strength = <8>; + bias-disable; + }; + }; + + slpi_mclk0_sleep: slpi_mclk0_sleep { + mux { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + function = "func2"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + }; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt index 91d9047f193d044857e2b8557848af90093e8c1b..b656f268c1a41fdaf8c246f25bcb5e1f8f50b4d2 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt @@ -323,6 +323,16 @@ First Level Node - FG Gen4 device charging. Value has no unit. Allowed range is 62 to 15564 in micro units. +- qcom,ki-coeff-full-dischg + Usage: optional + Value type: + Definition: Array of Ki coefficient full SOC values that needs to be + applied during discharging. If not specified, a value of + 0 will be set. + Allowed range is from 62 to 15564. 
+ Element 0 - Ki coefficient for full SOC in room temperature + Element 1 - Ki coefficient for full SOC in low temperature + - qcom,fg-rconn-uohms Usage: optional Value type: diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt index ed38a7c15dd2cef4f4875b0d8cb14e1876cce3d0..2515f05c50a5a1fc171c197a64cca5279e2da1a2 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt @@ -245,6 +245,12 @@ Charger specific properties: This is only applicable to certain PMICs like PMI632 which has SCHGM_FLASH peripheral. +- qcom,fcc-stepping-enable + Usage: optional + Value type: bool + Definition: Boolean flag which when present enables stepwise change in FCC. + The default stepping rate is 100mA/sec. + ============================================= Second Level Nodes - SMB5 Charger Peripherals ============================================= diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt index c661c71aee3b343a75d3e7c78b3cc1f5457d51a9..9c12fb871cfe5acbe8fef20b836b4f6c21bfc2bf 100644 --- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt @@ -76,6 +76,10 @@ Optional properties: [1] for details. - qcom,msm-bus,vectors-KBps: Required if qcom,msm-bus,name is specified. See [1] for an explanation of the data format. + - mboxes: Mailbox tuple containing QMP mailbox phandle and channel + identifier. If this is specified, then a QMP message + should be sent to enable the GDSC instead of setting + SW_COLLAPSE=0. 
[1]: Documentation/devicetree/bindings/arm/msm/msm_bus.txt diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,cx_ipeak.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,cx_ipeak.txt index d5fb03cc931870cd682ed2943bc8d77b5b502978..7c42f30fac22e963539068ae23c0d734cd50070e 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,cx_ipeak.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,cx_ipeak.txt @@ -8,8 +8,8 @@ an interrupt to cDSP block to throttle cDSP's fmax. Required properties: -- compatible : name of the component used for driver matching, should be - "qcom,cx-ipeak-sdm660" +- compatible : name of the component used for driver matching, should be one of + "qcom,cx-ipeak-sdm660", "qcom,cx-ipeak-sm6150" - reg : physical base address and length of the register set(s), SRAM and XPU of the component. diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt index 8d582798fddfe0346fb9043380a4459e35743b76..cd2d2a3c4d9f4fb6c275201dc7cb6a67e80106c1 100644 --- a/Documentation/devicetree/bindings/sound/wcd_codec.txt +++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt @@ -154,6 +154,7 @@ Optional properties: - cdc-vdd-buck-sido-supply: phandle of vdd buck sido supply's regulator device tree node. - qcom,cdc-vdd-buck-sido-voltage: vdd buck sido supply's voltage level min and max in mV. - qcom,cdc-vdd-buck-sido-current: lvdd buck sido supply's max current in mA. + - qcom,vreg-micb-supply: phandle to change micbias load depending on usecase.
Example: pahu_codec { @@ -221,6 +222,8 @@ pahu_codec { qcom,cdc-dmic-sample-rate = <4800000>; qcom,cdc-mad-dmic-rate = <600000>; + qcom,vreg-micb-supply = <&BOB>; + qcom,wdsp-cmpnt-dev-name = "pahu_codec"; wcd_spi_0: wcd_spi { diff --git a/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt b/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt index 41822f28acd68545067f6052340fe9bd53a61054..d775547997a70c4b24f63b65d6b407f4e4f15455 100644 --- a/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt +++ b/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt @@ -12,6 +12,11 @@ Required properties: corresponding ch-mask entries for possible port types of the master port. +Optional properties: +- qcom,swr-wakeup-required : should be set to 1 if wakeup is required +from LPASS as part of soundwire data-toggle interrupt sequence +for given target. + * wsa881x Required properties: @@ -34,6 +39,7 @@ swr0: swr_master { #size-cells = <0>; qcom,swr-num-ports = <8>; + qcom,swr-wakeup-required = <1>; qcom,swr-port-mapping = <1 SPKR_L 0x1>, <2 SPKR_L_COMP 0xF>, <3 SPKR_L_BOOST 0x3>, <4 SPKR_R 0x1>, <5 SPKR_R_COMP 0xF>, diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt index 866d004200caff7d4087daea120e2b298c50e425..8b6d5a25125f69dd22d0ee8d484d5ebdb337fc6a 100644 --- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt +++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt @@ -28,6 +28,7 @@ Required properties: Optional properties: - qcom,rt: Specifies if the framework worker thread for this controller device should have "real-time" priority. +- qcom,disable-autosuspend: Specifies to disable runtime PM auto suspend. 
SPI slave nodes must be children of the SPI master node and can contain properties described in Documentation/devicetree/bindings/spi/spi-bus.txt diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt new file mode 100644 index 0000000000000000000000000000000000000000..950f9edd9dcb3ded3e805e0b5d27b8f3de35bf71 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt @@ -0,0 +1,52 @@ +QMI thermal mitigation(TS) sensor. + +The QMI TS Sensor driver can list the sensors that are available in the +remote subsystem. This driver can read the temperature, set threshold and +get threshold notification. + +Each child node of the QMI TS devicetree node represents a remote +subsystem and it can have more than one remote sensor names. + +Properties: + +- compatible: + Usage: required + Value type: + Definition: should be "qcom,qmi-sensors" + +- #thermal-sensor-cells: + Usage: required + Value type: + Definition: Must be 1. See thermal.txt for description. + +Subsystem properties: +- qcom,instance-id: + Usage: required + Value type: + Definition: Remote subsystem QMI server instance id to be used for + communicating with QMI. + +- qcom,qmi-sensor-names: + Usage: required + Value type: + Definition: Remote sensor names. Below strings + are the only acceptable sensor names, + 1. pa + 2. pa_1 + 3. qfe_pa0 + 4.
qfe_wtr0 + +Example: + +qmi_sensor: qmi-ts-sensors { + compatible = "qcom,qmi-sensors"; + #thermal-sensor-cells = <1>; + + modem { + qcom,instance-id = <0x0>; + qcom,qmi-sensor-names = "pa", + "pa_1", + "qfe_pa0", + "qfe_wtr0"; + }; +}; diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt index 880dc1e15c31c5e34e2df5c020c9d3e23327e3ef..33beda5a55ba11945540b99d7031896fcda858b7 100644 --- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt +++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt @@ -69,6 +69,9 @@ Optional properties : events. - qcom,num-gsi-evt-buffs: If present, specifies number of GSI based hardware accelerated event buffers. 1 event buffer is needed per h/w accelerated endpoint. +- qcom,gsi-reg-offset: USB GSI wrapper registers offset. It must be provided + if the qcom,num-gsi-evt-buffs property is specified. Check the dwc3-msm driver for + the order and names of the register offsets that need to be provided. - qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs, which is used as a vote by driver to get max performance in perf mode. - qcom,smmu-s1-bypass: If present, configure SMMU to bypass stage 1 translation. diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index caa4fc38898edca3e2abe6c8c8adeb0edbe25b55..0753f394367ac50d1aea32ca7ba172bd9f2bc1aa 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -331,7 +331,7 @@ summit Summit microelectronics sunchip Shenzhen Sunchip Technology Co., Ltd SUNW Sun Microsystems, Inc swir Sierra Wireless -syna Synaptics Inc. +synaptics Synaptics Inc. synology Synology, Inc.
tbs TBS Technologies tcg Trusted Computing Group diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt index 1b395034653205b5a6c127488053daced91075d6..c3f69bcaf96e1f9caaae06fbcdc0cafde22bbf0b 100644 --- a/Documentation/vfio-mediated-device.txt +++ b/Documentation/vfio-mediated-device.txt @@ -145,6 +145,11 @@ The functions in the mdev_parent_ops structure are as follows: * create: allocate basic resources in a driver for a mediated device * remove: free resources in a driver when a mediated device is destroyed +(Note that mdev-core provides no implicit serialization of create/remove +callbacks per mdev parent device, per mdev type, or any other categorization. +Vendor drivers are expected to be fully asynchronous in this respect or +provide their own internal resource protection.) + The callbacks in the mdev_parent_ops structure are as follows: * open: open callback of mediated device diff --git a/Makefile b/Makefile index 55c8e72948c3301afeeda83534eb6d35f303e7bb..af8f68cb819850fba069d68a85c76716cf020296 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 56 +SUBLEVEL = 62 EXTRAVERSION = NAME = Petit Gorille @@ -637,7 +637,9 @@ all: vmlinux KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS += $(call cc-option,-fno-PIE) -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ + $(call cc-option,-fno-tree-loop-im) \ + $(call cc-disable-warning,maybe-uninitialized,) CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) export CFLAGS_GCOV CFLAGS_KCOV @@ -667,6 +669,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call 
cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) @@ -752,6 +755,7 @@ KBUILD_CFLAGS += -fno-builtin KBUILD_CFLAGS += $(call cc-option, -Wno-undefined-optimized) KBUILD_CFLAGS += $(call cc-option, -Wno-tautological-constant-out-of-range-compare) KBUILD_CFLAGS += $(call cc-option, -mllvm -disable-struct-const-merge) +KBUILD_CFLAGS += $(call cc-option, -Wno-sometimes-uninitialized) # Quiet clang warning: comparison of unsigned expression < 0 is always false diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 75a5c35a2067d05c10746138470249dfd6ba4e6a..a48976dc9bcd0ad8638aadfd927e0480f3816c3e 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1183,13 +1183,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { - unsigned int status = 0; struct rusage r; - long err = kernel_wait4(pid, &status, options, &r); + long err = kernel_wait4(pid, ustatus, options, &r); if (err <= 0) return err; - if (put_user(status, ustatus)) - return -EFAULT; if (!ur) return err; if (put_tv32(&ur->ru_utime, &r.ru_utime)) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 4383313b064a0439a74a8b8184d210d0d9160a12..5c8caf85c35054722402d86f58033820d3de7640 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -408,7 +408,7 @@ config ARC_HAS_DIV_REM config ARC_HAS_ACCL_REGS bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)" - default n + default y help Depending on the configuration, CPU can contain accumulator reg-pair (also referred to as r58:r59). 
These can also be used by gcc as GPR so diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index ec7c849a5c8e9887fbd60acf176f194cc0c4ba03..a8242362e55199e550f4da319f1f5f2bd33f23c2 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 63d3cf69e0b02efe45fca035649951836fc1e948..ef3c31cd77378c0fcfed9f4e97a05ba1460c5410 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index f613ecac14a750e6008dc817d3a06727a4631989..1757ac9cecbc10fb189673d23a76ad5137e9ed9f 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index db04ea4dd2d97237b843fd4b90706f2e0c8ef6d9..aa8240a92b60791b8f63fd18230813bb972a019a 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" 
CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 3507be2af6fe3684f4acca6de7009903da82b97e..bc5a24ea6cf7b704a5fb5a9688d5d6724ab1013f 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 7b8f8faf8a24315d3379d189cab69506539e04a8..762b1fcd93dc1c3a503de3776833de726b05bc8b 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 6dff83a238b8591ba08f889e7d7013734ff587db..b1a78222699c69f10342d96c2c112e37ef9a2640 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 31ee51b987e7c5b97c2f044794f9d1c5b4eea73e..217d7ea3c9569078bc539cd45927b41fcbe2b3a2 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y 
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index 8d3b1f67cae421c423e55c2622b5a78ea3d67794..e733e4f1a3208f4310a77ec44dde042a239a821a 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 6168ce2ac2efdd869705a8a75e78f7f9b9c55af4..14377b8234f79baa62a5145fe0ba9d40af1bca1f 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index a70bdeb2b3fd03bdefd10388e69cd8e5b120ac0f..7e61c923a3cdd1227fdf2d391b479b67f566c57b 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index ef96406c446e823d4706e5c84b6ac0626d4b5812..299fbe8003b28ed738c35bafa6863e13c7f745cb 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set 
CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set CONFIG_KPROBES=y diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 109baa06831cecc38cf1d9f11ba447a31c0c4b14..09ddddf71cc5049a570d11a5114ccec945df56ad 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -105,7 +105,7 @@ typedef pte_t * pgtable_t; #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* Default Permissions for stack/heaps pages (Non Executable) */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define WANT_PAGE_VIRTUAL 1 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 08fe33830d4b17337757d548399c897aa240de36..77676e18da698b22aad76d609db662e7661fd50b 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -379,7 +379,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, /* Decode a PTE containing swap "identifier "into constituents */ #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) -#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13) +#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) /* NOPs, to keep generic kernel happy */ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index 19ab3cf98f0f34904b8431a6d4cf36642066c513..fcc9a9e27e9cb6696c333d93a9cda99a0ebfc5a8 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -7,5 +7,7 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" + depends on ISA_ARCV2 + select ARC_HAS_ACCL_REGS select CLK_HSDK select RESET_HSDK diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi index 
42ea246e71cb4e207d2e451da2df4d7ce465856f..fec1241b858ff96f316cd5988f3cbb8ba778fb62 100644 --- a/arch/arm/boot/dts/emev2.dtsi +++ b/arch/arm/boot/dts/emev2.dtsi @@ -31,13 +31,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0>; clock-frequency = <533000000>; }; - cpu@1 { + cpu1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <1>; @@ -57,6 +57,7 @@ compatible = "arm,cortex-a9-pmu"; interrupts = , ; + interrupt-affinity = <&cpu0>, <&cpu1>; }; clocks@e0110000 { diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi index 4ea5c5a16c57eed5235fc0f7365ce1a9d5e005f5..5fc24d4c2d5d4cbdc0dfce67440aefd5b5a4d8de 100644 --- a/arch/arm/boot/dts/sh73a0.dtsi +++ b/arch/arm/boot/dts/sh73a0.dtsi @@ -22,7 +22,7 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0>; @@ -30,7 +30,7 @@ power-domains = <&pd_a2sl>; next-level-cache = <&L2>; }; - cpu@1 { + cpu1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <1>; @@ -89,6 +89,7 @@ compatible = "arm,cortex-a9-pmu"; interrupts = , ; + interrupt-affinity = <&cpu0>, <&cpu1>; }; cmt1: timer@e6138000 { diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi index bd1a82e8fffee706d8f8030bb01c209fe23b8521..fe501d32d05925aee2e2aab9b0f7f9c05e87714c 100644 --- a/arch/arm/boot/dts/stih407-pinctrl.dtsi +++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi @@ -52,7 +52,7 @@ st,syscfg = <&syscfg_sbc>; reg = <0x0961f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09610000 0x6000>; @@ -376,7 +376,7 @@ st,syscfg = <&syscfg_front>; reg = <0x0920f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09200000 0x10000>; @@ -936,7 +936,7 @@ st,syscfg = <&syscfg_front>; reg = <0x0921f080 0x4>; reg-names = "irqmux"; 
- interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09210000 0x10000>; @@ -969,7 +969,7 @@ st,syscfg = <&syscfg_rear>; reg = <0x0922f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09220000 0x6000>; @@ -1164,7 +1164,7 @@ st,syscfg = <&syscfg_flash>; reg = <0x0923f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09230000 0x3000>; diff --git a/arch/arm/configs/vendor/qcs405-perf_defconfig b/arch/arm/configs/vendor/qcs405-perf_defconfig index 66231a7ceddd81521a776082e4a6f413fb558eeb..cac4e74b3625b4602033e92836f7279a781b9cf2 100644 --- a/arch/arm/configs/vendor/qcs405-perf_defconfig +++ b/arch/arm/configs/vendor/qcs405-perf_defconfig @@ -26,6 +26,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -257,13 +258,17 @@ CONFIG_INPUT_UINPUT=y CONFIG_INPUT_GPIO=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM_HS=y CONFIG_HW_RANDOM=y CONFIG_DIAG_CHAR=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y CONFIG_SPI=y +CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y +CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_QCS405=y CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE=y @@ -370,6 +375,7 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_QPNP=y CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y CONFIG_UIO=y CONFIG_STAGING=y CONFIG_ASHMEM=y diff --git a/arch/arm/configs/vendor/qcs405_defconfig b/arch/arm/configs/vendor/qcs405_defconfig index 1448d2995939c59d1abd3580c3d314906bc339f8..4b6729037b8690742a071bfd1e654c11521e08fa 100644 --- a/arch/arm/configs/vendor/qcs405_defconfig +++ b/arch/arm/configs/vendor/qcs405_defconfig @@ -27,6 +27,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y 
+CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -230,7 +231,7 @@ CONFIG_DM_VERITY_FEC=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y -CONFIG_KS8851=y +CONFIG_AT803X_PHY=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -265,15 +266,19 @@ CONFIG_INPUT_GPIO=y # CONFIG_DEVMEM is not set CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SERIAL_MSM_HS=y CONFIG_HW_RANDOM=y CONFIG_DIAG_CHAR=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y CONFIG_SPI=y CONFIG_SPI_DEBUG=y +CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_QCS405=y CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE=y @@ -384,6 +389,7 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_QPNP=y CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y CONFIG_UIO=y CONFIG_STAGING=y CONFIG_ASHMEM=y diff --git a/arch/arm/configs/vendor/sdxprairie-perf_defconfig b/arch/arm/configs/vendor/sdxprairie-perf_defconfig index b4ef2bd487e4133bb6173e0f671c0a05750ca05a..381f2fc1e2f228abe5af6e1631884583797a48dd 100644 --- a/arch/arm/configs/vendor/sdxprairie-perf_defconfig +++ b/arch/arm/configs/vendor/sdxprairie-perf_defconfig @@ -1,4 +1,6 @@ CONFIG_LOCALVERSION="-perf" +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -54,6 +56,7 @@ CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y CONFIG_IPV6_PIMSM_V2=y CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y @@ -68,6 +71,7 @@ CONFIG_NF_CONNTRACK_SIP=y CONFIG_NF_CONNTRACK_TFTP=y CONFIG_NF_CT_NETLINK=y CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_LOG=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y @@ -75,6 +79,7 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y 
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y @@ -104,6 +109,7 @@ CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_TARGET_ECN=y CONFIG_IP_NF_TARGET_TTL=y CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y CONFIG_IP_NF_ARPTABLES=y CONFIG_IP_NF_ARPFILTER=y CONFIG_IP_NF_ARP_MANGLE=y @@ -265,6 +271,7 @@ CONFIG_RMNET_IPA3=y CONFIG_ECM_IPA=y CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y +CONFIG_MSM_CLK_RPMH=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_IOMMU_IO_PGTABLE_FAST=y @@ -274,6 +281,8 @@ CONFIG_IOMMU_DEBUG_TRACKING=y CONFIG_IOMMU_TESTS=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QTI_RPMH_API=y CONFIG_PWM=y CONFIG_ANDROID=y CONFIG_EXT3_FS=y @@ -293,3 +302,8 @@ CONFIG_PANIC_TIMEOUT=5 CONFIG_SCHEDSTATS=y CONFIG_IPC_LOGGING=y # CONFIG_FTRACE is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_SELINUX=y +# CONFIG_SECURITY_SELINUX_AVC_STATS is not set diff --git a/arch/arm/configs/vendor/sdxprairie_defconfig b/arch/arm/configs/vendor/sdxprairie_defconfig index 1b8fdfc6f1ad00e973b36ea59a739b250440b60a..9f419abd5388705c52fbdaa9b997b288073065e5 100644 --- a/arch/arm/configs/vendor/sdxprairie_defconfig +++ b/arch/arm/configs/vendor/sdxprairie_defconfig @@ -1,3 +1,5 @@ +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -54,6 +56,7 @@ CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y CONFIG_IPV6_PIMSM_V2=y CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y @@ -68,6 +71,7 @@ CONFIG_NF_CONNTRACK_SIP=y CONFIG_NF_CONNTRACK_TFTP=y CONFIG_NF_CT_NETLINK=y CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y 
CONFIG_NETFILTER_XT_TARGET_LOG=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y @@ -75,6 +79,7 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y CONFIG_NETFILTER_XT_TARGET_NOTRACK=y CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y @@ -104,6 +109,7 @@ CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_TARGET_ECN=y CONFIG_IP_NF_TARGET_TTL=y CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y CONFIG_IP_NF_ARPTABLES=y CONFIG_IP_NF_ARPFILTER=y CONFIG_IP_NF_ARP_MANGLE=y @@ -255,6 +261,7 @@ CONFIG_RMNET_IPA3=y CONFIG_ECM_IPA=y CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y +CONFIG_MSM_CLK_RPMH=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_IOMMU_IO_PGTABLE_FAST=y @@ -265,6 +272,8 @@ CONFIG_IOMMU_TESTS=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_MSM_BOOT_STATS=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QTI_RPMH_API=y CONFIG_PWM=y CONFIG_ANDROID=y CONFIG_STM=y @@ -299,6 +308,11 @@ CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_IPC_LOGGING=y # CONFIG_FTRACE is not set CONFIG_DEBUG_USER=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_SELINUX=y +# CONFIG_SECURITY_SELINUX_AVC_STATS is not set CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_CMAC=y CONFIG_CRYPTO_SHA256=y diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 8f973e3b7348e4aaac5bc15c9497d87e8d543790..65572e14306c877403a93860164b64204e58b064 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -302,4 +302,16 @@ static inline bool kvm_arm_harden_branch_predictor(void) return false; } +#define KVM_SSBD_UNKNOWN -1 +#define KVM_SSBD_FORCE_DISABLE 0 +#define KVM_SSBD_KERNEL 1 +#define KVM_SSBD_FORCE_ENABLE 2 +#define KVM_SSBD_MITIGATED 3 + +static inline int kvm_arm_have_ssbd(void) +{ + /* No way to detect it yet, pretend it is not there. 
*/ + return KVM_SSBD_UNKNOWN; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 08cd720eae0110e354d7055b8a8841ffc81a7075..8a098e65f5f857bb35b64b70677dd0261ba9beeb 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -28,6 +28,13 @@ */ #define kern_hyp_va(kva) (kva) +/* Contrary to arm64, there is no need to generate a PC-relative address */ +#define hyp_symbol_addr(s) \ + ({ \ + typeof(s) *addr = &(s); \ + addr; \ + }) + /* * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels. */ @@ -247,6 +254,11 @@ static inline int kvm_map_vectors(void) return 0; } +static inline int hyp_map_aux_data(void) +{ + return 0; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/mach-qcom/board-qcs405.c b/arch/arm/mach-qcom/board-qcs405.c index 4bee37c4393e1a2e4aa42d33c71fc9b6ae425a53..c8b9aa92778e04fdf1f16516952f2ac3988b6cbc 100644 --- a/arch/arm/mach-qcom/board-qcs405.c +++ b/arch/arm/mach-qcom/board-qcs405.c @@ -30,3 +30,19 @@ DT_MACHINE_START(QCS405_DT, .init_machine = qcs405_init, .dt_compat = qcs405_dt_match, MACHINE_END + +static const char *qcs403_dt_match[] __initconst = { + "qcom,qcs403", + NULL +}; + +static void __init qcs403_init(void) +{ + board_dt_populate(NULL); +} + +DT_MACHINE_START(QCS403_DT, + "Qualcomm Technologies, Inc. 
QCS403 (Flattened Device Tree)") + .init_machine = qcs403_init, + .dt_compat = qcs403_dt_match, +MACHINE_END diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 323a4df59a6c0ec6657b42b714b161a6d3729cca..ece2d1d43724b88c8bde71db75606677ca749731 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -718,7 +718,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, } /* dst = dst >> src */ -static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { const u8 *tmp = bpf2a32[TMP_REG_1]; const u8 *tmp2 = bpf2a32[TMP_REG_2]; @@ -734,7 +734,7 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } - /* Do LSH operation */ + /* Do RSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); @@ -784,7 +784,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, } /* dst = dst >> val */ -static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk, +static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, const u32 val, struct jit_ctx *ctx) { const u8 *tmp = bpf2a32[TMP_REG_1]; const u8 *tmp2 = bpf2a32[TMP_REG_2]; @@ -1340,7 +1340,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_RSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_lsr_i64(dst, dstk, imm, ctx); + emit_a32_rsh_i64(dst, dstk, imm, ctx); break; /* dst = dst << src */ case BPF_ALU64 | BPF_LSH | BPF_X: @@ -1348,7 +1348,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; /* dst = dst >> src */ case BPF_ALU64 | BPF_RSH | BPF_X: - emit_a32_lsr_r64(dst, src, dstk, sstk, ctx); + emit_a32_rsh_r64(dst, src, dstk, sstk, ctx); break; /* dst = dst >> 
src (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_X: diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ca28261a82a763f726db7990eb127270209dc370 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi @@ -0,0 +1,87 @@ +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + dsi_dual_nt36850_truly_cmd: qcom,mdss_dsi_nt36850_truly_wqhd_cmd { + qcom,mdss-dsi-panel-name = + "Dual nt36850 cmd mode dsi truly panel without DSC"; + qcom,mdss-dsi-panel-type = "dsi_cmd_mode"; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-color-order = "rgb_swap_rgb"; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + + qcom,mdss-dsi-traffic-mode = "non_burst_sync_event"; + qcom,mdss-dsi-lane-map = "lane_map_0123"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-tx-eot-append; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-lane-3-state; + qcom,mdss-dsi-wr-mem-start = <0x2c>; + qcom,mdss-dsi-wr-mem-continue = <0x3c>; + qcom,mdss-dsi-te-pin-select = <1>; + qcom,mdss-dsi-te-dcs-command = <1>; + qcom,mdss-dsi-te-check-enable; + qcom,mdss-dsi-te-using-te-pin; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + 
qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-lp11-init; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 50>; + qcom,mdss-dsi-display-timings { + timing@0 { + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-width = <720>; + qcom,mdss-dsi-panel-height = <2560>; + qcom,mdss-dsi-h-front-porch = <120>; + qcom,mdss-dsi-h-back-porch = <140>; + qcom,mdss-dsi-h-pulse-width = <20>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <20>; + qcom,mdss-dsi-v-front-porch = <8>; + qcom,mdss-dsi-v-pulse-width = <4>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-on-command = [ + 15 01 00 00 00 00 02 ff 10 + 15 01 00 00 00 00 02 fb 01 + 15 01 00 00 00 00 02 36 00 + 15 01 00 00 00 00 02 35 00 + 39 01 00 00 00 00 03 44 03 e8 + 15 01 00 00 00 00 02 51 ff + 15 01 00 00 00 00 02 53 2c + 15 01 00 00 00 00 02 55 01 + 05 01 00 00 0a 00 02 20 00 + 15 01 00 00 00 00 02 bb 10 + 05 01 00 00 78 00 02 11 00 + 05 01 00 00 78 00 02 29 00 + ]; + qcom,mdss-dsi-off-command = [ + 05 01 00 00 78 00 02 28 00 + 05 01 00 00 78 00 02 10 00 + ]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-h-sync-pulse = <0>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi index 22a8d112a783b6ebc59f2c1810adc4d04d6feef7..83efb69d45836e7bda2a791a0e52404d313ded44 100644 --- a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi +++ b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi @@ -11,8 +11,8 @@ * GNU General Public License for more details. 
*/ -qcom,alium_860_89032_0000_3600mah_averaged_masterslave_jun15th2018 { - /* #Alium_860_89032_0000_3600mAh_averaged_MasterSlave_Jun15th2018*/ +qcom,alium_860_89032_0000_3600mah_averaged_masterslave_aug6th2018 { + /* #Alium_860_89032_0000_3600mAh_averaged_MasterSlave_Aug6th2018*/ qcom,max-voltage-uv = <4350000>; qcom,fastchg-current-ma = <5400>; qcom,jeita-fcc-ranges = <0 100 2500000 @@ -28,29 +28,29 @@ qcom,alium_860_89032_0000_3600mah_averaged_masterslave_jun15th2018 { qcom,battery-beta = <4250>; qcom,therm-room-temp = <100000>; qcom,fg-cc-cv-threshold-mv = <4340>; - qcom,battery-type = "alium_860_89032_0000_3600mah_jun15th2018"; + qcom,battery-type = "alium_860_89032_0000_3600mah_aug6th2018"; qcom,therm-coefficients = <0x2318 0xd0c 0xdaf7 0xc556 0x848d>; qcom,therm-center-offset = <0x70>; qcom,therm-pull-up = <100>; qcom,rslow-normal-coeffs = <0xa4 0x01 0x24 0x13>; - qcom,rslow-low-coeffs = <0xa7 0xd5 0x0e 0x13>; - qcom,checksum = <0xCDFB>; - qcom,gui-version = "PM8150GUI - 1.0.0.7"; + qcom,rslow-low-coeffs = <0xa4 0x01 0x24 0x13>; + qcom,checksum = <0x99F7>; + qcom,gui-version = "PM855GUI - 1.0.0.10"; qcom,fg-profile-data = [ - 09 00 B5 EA - 3F CC 33 AA - E7 C2 00 00 - 13 BC 83 8A - 03 80 D1 92 - AB 9D 47 80 + 09 00 BD EA + 40 CC E8 BC + DD C3 00 00 + B0 C5 72 92 + F3 87 C8 A2 + E6 9C E2 87 18 00 A4 01 24 13 47 FD A9 F2 CE 07 - 32 00 A6 00 - EF F5 CB FD - 11 0D 4A 23 - 60 2A C1 23 - 6F 42 F8 43 + 32 00 0E E3 + 06 ED 2E EA + 83 FD 5B 14 + B8 1C 75 3A + 5C 42 CA 3A 40 00 3A 00 40 00 48 00 3B 00 34 00 diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi index b5b2e87c9be2434110dcbb0b30cc1bfe841008ed..581e1f9b97611791952eb66e0b1c37246ddd88d6 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi @@ -11,6 +11,7 @@ */ #include +#include &soc { kgsl_smmu: arm,smmu-kgsl@5040000 { @@ -144,6 +145,17 @@ , , ; + qcom,msm-bus,name = 
"apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; anoc_1_tbu: anoc_1_tbu@0x15185000 { compatible = "qcom,qsmmuv500-tbu"; @@ -153,6 +165,17 @@ qcom,stream-id-range = <0x0 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; anoc_2_tbu: anoc_2_tbu@0x15189000 { @@ -163,6 +186,17 @@ qcom,stream-id-range = <0x400 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x1518d000 { @@ -173,6 +207,17 @@ qcom,stream-id-range = <0x800 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>; + qcom,msm-bus,name = "mnoc_hf_0_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x15191000 { @@ -183,6 +228,17 @@ qcom,stream-id-range = <0xc00 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>; + qcom,msm-bus,name = "mnoc_hf_1_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x15195000 { @@ -193,6 +249,17 @@ qcom,stream-id-range = <0x1000 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>; + qcom,msm-bus,name = "mnoc_sf_0_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; 
+ qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; compute_dsp_0_tbu: compute_dsp_0_tbu@0x15199000 { @@ -202,6 +269,17 @@ reg-names = "base", "status-reg"; qcom,stream-id-range = <0x1400 0x400>; /* No GDSC */ + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; adsp_tbu: adsp_tbu@0x1519d000 { @@ -212,6 +290,17 @@ qcom,stream-id-range = <0x1800 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; anoc_1_pcie_tbu: anoc_1_pcie_tbu@0x151a1000 { @@ -224,6 +313,17 @@ vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>; clock-names = "gcc_aggre_noc_pcie_tbu_clk"; clocks = <&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi index 64e136120d82b6c533d8360c16bc029f0768eccb..c30e790780acb40a1347e76ca419cc97b3adbe47 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi @@ -23,7 +23,6 @@ qcom,dynamic; qcom,skip-init; qcom,use-3-lvl-tables; - qcom,no-asid-retention; #global-interrupts = <1>; qcom,regulator-names = "vdd"; vdd-supply = <&gpu_cx_gdsc>; @@ -71,7 +70,6 @@ #iommu-cells = <2>; qcom,skip-init; qcom,use-3-lvl-tables; - qcom,no-asid-retention; qcom,disable-atos; #global-interrupts = <1>; #size-cells = <1>; @@ -321,6 +319,7 @@ <0x15182238 0x8>; reg-names = "base", 
"status-reg"; qcom,stream-id-range = <0x1c00 0x400>; + qcom,opt-out-tbu-halting; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>; clock-names = "gcc_aggre_noc_pcie_tbu_clk"; @@ -383,12 +382,24 @@ }; }; -&kgsl_smmu { /* */ - qcom,actlr = <0x0 0x407 0x303>, - <0x1460 0x0 0x303>, - <0x61 0x3400 0x303>, - <0x62 0x3401 0x303>, - <0x64 0x3400 0x303>, - <0x65 0x3400 0x303>; +&kgsl_smmu { + qcom,actlr = + /* All CBs of GFX: +15 deep PF */ + <0x0 0x407 0x303>; +}; + +&apps_smmu { + qcom,actlr = + /* HF0 and HF1 TBUs: +3 deep PF */ + <0x800 0x7ff 0x103>, + + /* SF TBU: +3 deep PF */ + <0x2000 0x3ff 0x103>, + + /* NPU SIDs: +15 deep PF */ + <0x1480 0x3 0x303>, + <0x1484 0x1 0x303>, + <0x1080 0x3 0x303>, + <0x1084 0x1 0x303>; }; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi index f4c9cd30ed0afd6bde06b056c371fe9928c377c5..7e59f3c78f18ad4516e38594a2db00470cb69300 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi @@ -23,8 +23,8 @@ qcom,dynamic; qcom,skip-init; qcom,use-3-lvl-tables; - qcom,no-asid-retention; qcom,disable-atos; + qcom,min-iova-align; #global-interrupts = <1>; qcom,regulator-names = "vdd"; vdd-supply = <&gpu_cx_gdsc>; @@ -72,8 +72,8 @@ #iommu-cells = <2>; qcom,skip-init; qcom,use-3-lvl-tables; - qcom,no-asid-retention; qcom,disable-atos; + qcom,min-iova-align; #global-interrupts = <1>; #size-cells = <1>; #address-cells = <1>; @@ -323,6 +323,7 @@ <0x15182238 0x8>; reg-names = "base", "status-reg"; qcom,stream-id-range = <0x1c00 0x400>; + qcom,opt-out-tbu-halting; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>; clock-names = "gcc_aggre_noc_pcie_tbu_clk"; @@ -385,6 +386,35 @@ }; &kgsl_smmu { - qcom,actlr = <0x0 0x407 0x303>; + qcom,actlr = + /* All CBs of GFX: +15 deep PF */ + <0x0 0x407 0x303>; +}; + +&apps_smmu { + qcom,actlr = + /* SIDs 0x1460 - 0x1463 of 
NPU: +3 deep PF */ + <0x1460 0x3 0x103>, + + /* SIDs 0x1464 - 0x1465 of NPU: +3 deep PF */ + <0x1464 0x1 0x103>, + + /* SIDs 0x2060 - 0x2063 of NPU: +3 deep PF */ + <0x2060 0x3 0x103>, + + /* SIDs 0x2064 - 0x2065 of NPU: +3 deep PF */ + <0x2064 0x1 0x103>, + + /* Display SIDs: +3 deep PF */ + <0x0800 0x0420 0x103>, + <0x0801 0x0420 0x103>, + <0x1040 0x0001 0x103>, + + /* Video SIDs: +3 deep PF */ + <0x1300 0x0060 0x103>, + <0x1301 0x0004 0x103>, + <0x1303 0x0020 0x103>, + <0x1304 0x0060 0x103>, + <0x1342 0x0000 0x103>; }; diff --git a/arch/arm64/boot/dts/qcom/msm-qvr-external.dtsi b/arch/arm64/boot/dts/qcom/msm-qvr-external.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..3bc08d65cb654f2171d0f0be6c7867cb2e06d5e7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-qvr-external.dtsi @@ -0,0 +1,21 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + + qcom,smp2p_interrupt_qvrexternal_5_out { + compatible = "qcom,smp2p-interrupt-qvrexternal-5-out"; + qcom,smem-states = <&smp2p_qvrexternal5_out 0>; + qcom,smem-state-names = "qvrexternal-smp2p-out"; + }; + +}; diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi index bf4e90043c635c9418f4933a6e225c47d3255fb9..5da6364a2815e47a2885404910122f6ac1401afb 100644 --- a/arch/arm64/boot/dts/qcom/pm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi @@ -405,6 +405,12 @@ #address-cells = <1>; #size-cells = <1>; + qcom,vbatt-cutoff-mv = <3200>; + qcom,vbatt-low-mv = <3300>; + qcom,vbatt-low-cold-mv = <3700>; + qcom,vbatt-empty-mv = <3000>; + qcom,vbatt-empty-cold-mv = <3000>; + qcom,s3-entry-fifo-length = <2>; qcom,qg-iterm-ma = <100>; qcom,hold-soc-while-full; qcom,linearize-soc; diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi index cfb5052030e1ae7e03de44a28e58fab62c3d99ee..ae731febad45744155f4a56d87f060640ae969f5 100644 --- a/arch/arm64/boot/dts/qcom/pm6150l.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi @@ -166,6 +166,13 @@ regulator-min-microvolt = <4000000>; regulator-max-microvolt = <6000000>; }; + + lcdb_bst_vreg: bst { + label = "bst"; + regulator-name = "lcdb_bst"; + regulator-min-microvolt = <4700000>; + regulator-max-microvolt = <6275000>; + }; }; flash_led: qcom,leds@d300 { diff --git a/arch/arm64/boot/dts/qcom/pm6155.dtsi b/arch/arm64/boot/dts/qcom/pm6155.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..1f0f34445f35b1393733e72afda24dbf8adba7bd --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pm6155.dtsi @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +&spmi_bus { + qcom,pm6155@0 { + compatible = "qcom,spmi-pmic"; + reg = <0x0 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + + pm6155_1_tz: qcom,temp-alarm@2400 { + compatible = "qcom,spmi-temp-alarm"; + reg = <0x2400 0x100>; + interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>; + #thermal-sensor-cells = <0>; + qcom,temperature-threshold-set = <1>; + }; + + qcom,power-on@800 { + compatible = "qcom,qpnp-power-on"; + reg = <0x800 0x100>; + interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>, + <0x0 0x8 0x1 IRQ_TYPE_NONE>; + interrupt-names = "kpdpwr", "resin"; + qcom,pon-dbc-delay = <15625>; + qcom,kpdpwr-sw-debounce; + qcom,system-reset; + qcom,store-hard-reset-reason; + + qcom,pon_1 { + qcom,pon-type = ; + qcom,pull-up = <1>; + linux,code = ; + }; + + qcom,pon_2 { + qcom,pon-type = ; + qcom,pull-up = <1>; + linux,code = ; + }; + }; + + pm6155_1_clkdiv: clock-controller@5b00 { + compatible = "qcom,spmi-clkdiv"; + reg = <0x5b00 0x200>; + #clock-cells = <1>; + qcom,num-clkdivs = <2>; + clock-output-names = "pm6155_1_div_clk1", + "pm6155_1_div_clk2"; + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + }; + + pm6155_1_rtc: qcom,pm6155_1_rtc { + compatible = "qcom,qpnp-rtc"; + #address-cells = <1>; + #size-cells = <1>; + qcom,qpnp-rtc-write = <0>; + qcom,qpnp-rtc-alarm-pwrup = <0>; + + qcom,pm6155_1_rtc_rw@6000 { + reg = <0x6000 0x100>; + }; + qcom,pm6155_1_rtc_alarm@6100 { + reg = <0x6100 0x100>; + interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>; + }; + }; + + pm6155_1_gpios: pinctrl@c000 { + compatible = "qcom,spmi-gpio"; + reg = <0xc000 0xa00>; + interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>, + <0x0 0xc1 0 IRQ_TYPE_NONE>, + <0x0 0xc2 0 IRQ_TYPE_NONE>, + 
<0x0 0xc3 0 IRQ_TYPE_NONE>, + <0x0 0xc4 0 IRQ_TYPE_NONE>, + <0x0 0xc5 0 IRQ_TYPE_NONE>, + <0x0 0xc6 0 IRQ_TYPE_NONE>, + <0x0 0xc7 0 IRQ_TYPE_NONE>, + <0x0 0xc8 0 IRQ_TYPE_NONE>, + <0x0 0xc9 0 IRQ_TYPE_NONE>; + interrupt-names = "pm6155_1_gpio1", "pm6155_1_gpio2", + "pm6155_1_gpio3", "pm6155_1_gpio4", + "pm6155_1_gpio5", "pm6155_1_gpio6", + "pm6155_1_gpio7", "pm6155_1_gpio8", + "pm6155_1_gpio9", "pm6155_1_gpio10"; + gpio-controller; + #gpio-cells = <2>; + }; + + pm6155_1_sdam_2: sdam@b100 { + compatible = "qcom,spmi-sdam"; + reg = <0xb100 0x100>; + }; + + }; + + qcom,pm6155@1 { + compatible ="qcom,spmi-pmic"; + reg = <0x1 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + }; + + /* below definitions are for the second instance of pm6155 */ + qcom,pm6155@4 { + compatible = "qcom,spmi-pmic"; + reg = <0x4 SPMI_USID>; + #address-cells = <1>; + #size-cells = <1>; + + qcom,power-on@800 { + compatible = "qcom,qpnp-power-on"; + reg = <0x800 0x100>; + }; + + pm6155_2_clkdiv: clock-controller@5b00 { + compatible = "qcom,spmi-clkdiv"; + reg = <0x5b00 0x200>; + #clock-cells = <1>; + qcom,num-clkdivs = <2>; + clock-output-names = "pm6155_2_div_clk1", + "pm6155_2_div_clk2"; + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + }; + + pm6155_2_gpios: pinctrl@c000 { + compatible = "qcom,spmi-gpio"; + reg = <0xc000 0xa00>; + interrupts = <0x4 0xc0 0 IRQ_TYPE_NONE>, + <0x4 0xc1 0 IRQ_TYPE_NONE>, + <0x4 0xc2 0 IRQ_TYPE_NONE>, + <0x4 0xc3 0 IRQ_TYPE_NONE>, + <0x4 0xc4 0 IRQ_TYPE_NONE>, + <0x4 0xc5 0 IRQ_TYPE_NONE>, + <0x4 0xc6 0 IRQ_TYPE_NONE>, + <0x4 0xc7 0 IRQ_TYPE_NONE>, + <0x4 0xc8 0 IRQ_TYPE_NONE>, + <0x4 0xc9 0 IRQ_TYPE_NONE>; + interrupt-names = "pm6155_2_gpio1", "pm6155_2_gpio2", + "pm6155_2_gpio3", "pm6155_2_gpio4", + "pm6155_2_gpio5", "pm6155_2_gpio6", + "pm6155_2_gpio7", "pm6155_2_gpio8", + "pm6155_2_gpio9", "pm6155_2_gpio10"; + gpio-controller; + #gpio-cells = <2>; + }; + }; + + qcom,pm6155@5 { + compatible ="qcom,spmi-pmic"; + reg = <0x5 SPMI_USID>; + 
#address-cells = <1>; + #size-cells = <1>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi index 9b0ad6f380ed9a4354179819e2de78be9c076e69..a1e857558625109ac1e415e4640dd8b05a0d3693 100644 --- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi @@ -170,6 +170,9 @@ qcom,pmic-revid = <&pm8150b_revid>; + qcom,thermal-mitigation + = <3000000 1500000 1000000 500000>; + qcom,charger-temp-max = <800>; qcom,smb-temp-max = <800>; @@ -433,11 +436,14 @@ <0x2 0x43 0x1 IRQ_TYPE_EDGE_BOTH>, <0x2 0x43 0x2 IRQ_TYPE_EDGE_BOTH>, <0x2 0x43 0x3 - IRQ_TYPE_EDGE_RISING>; + IRQ_TYPE_EDGE_RISING>, + <0x2 0x43 0x4 + IRQ_TYPE_EDGE_FALLING>; interrupt-names = "ima-rdy", "ima-xcp", "dma-xcp", - "dma-grant"; + "dma-grant", + "mem-attn"; }; }; }; @@ -479,7 +485,7 @@ <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "hap-sc-irq", "hap-play-irq"; qcom,actuator-type = "lra"; - qcom,vmax-mv = <3600>; + qcom,vmax-mv = <3400>; qcom,play-rate-us = <6667>; qcom,lra-resonance-sig-shape = "sine"; qcom,lra-auto-resonance-mode = "qwd"; @@ -488,38 +494,54 @@ wf_0 { /* CLICK */ qcom,effect-id = <0>; + qcom,wf-vmax-mv = <3600>; qcom,wf-pattern = [3e 3e 3e]; qcom,wf-play-rate-us = <6667>; + qcom,wf-brake-pattern = [01 00 00 00]; + qcom,lra-auto-resonance-disable; }; wf_1 { /* DOUBLE CLICK */ qcom,effect-id = <1>; - qcom,wf-pattern = [7e 7e 02 02 02 02 7e 7e]; + qcom,wf-vmax-mv = <3600>; + qcom,wf-pattern = [7e 7e 02 02 02 02 02 02]; qcom,wf-play-rate-us = <7143>; + qcom,wf-repeat-count = <2>; + qcom,wf-s-repeat-count = <1>; + qcom,lra-auto-resonance-disable; }; wf_2 { /* TICK */ qcom,effect-id = <2>; + qcom,wf-vmax-mv = <3600>; qcom,wf-pattern = [7e 7e]; qcom,wf-play-rate-us = <4000>; + qcom,lra-auto-resonance-disable; }; wf_3 { /* THUD */ qcom,effect-id = <3>; + qcom,wf-vmax-mv = <3600>; qcom,wf-pattern = [7e 7e 7e]; - qcom,wf-play-rate-us = <5714>; + qcom,wf-play-rate-us = <6667>; + qcom,lra-auto-resonance-disable; }; wf_4 { 
/* POP */ qcom,effect-id = <4>; + qcom,wf-vmax-mv = <3600>; qcom,wf-pattern = [7e 7e]; qcom,wf-play-rate-us = <5000>; + qcom,lra-auto-resonance-disable; }; wf_5 { /* HEAVY CLICK */ qcom,effect-id = <5>; + qcom,wf-vmax-mv = <3600>; qcom,wf-pattern = [7e 7e 7e]; qcom,wf-play-rate-us = <6667>; + qcom,wf-brake-pattern = [03 00 00 00]; + qcom,lra-auto-resonance-disable; }; }; }; @@ -581,7 +603,7 @@ }; pm8150b-ibat-lvl1 { - polling-delay-passive = <0>; + polling-delay-passive = <100>; polling-delay = <0>; thermal-governor = "step_wise"; thermal-sensors = <&pm8150b_bcl 1>; @@ -612,7 +634,7 @@ }; pm8150b-vbat-lvl1 { - polling-delay-passive = <0>; + polling-delay-passive = <100>; polling-delay = <0>; thermal-governor = "low_limits_cap"; thermal-sensors = <&pm8150b_bcl 3>; @@ -628,7 +650,7 @@ }; pm8150b-vbat-lvl2 { - polling-delay-passive = <0>; + polling-delay-passive = <100>; polling-delay = <0>; thermal-governor = "low_limits_cap"; thermal-sensors = <&pm8150b_bcl 4>; diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi index fdee85928eec3e10aa16586dfe2208f267dac501..c05ed990e278df4343fed2fffb5d585b2d603d1f 100644 --- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi @@ -161,6 +161,13 @@ regulator-min-microvolt = <4000000>; regulator-max-microvolt = <6000000>; }; + + lcdb_bst_vreg: bst { + label = "bst"; + regulator-name = "lcdb_bst"; + regulator-min-microvolt = <4700000>; + regulator-max-microvolt = <6275000>; + }; }; flash_led: qcom,leds@d300 { @@ -422,7 +429,7 @@ }; pm8150l-vph-lvl1 { - polling-delay-passive = <0>; + polling-delay-passive = <100>; polling-delay = <0>; thermal-governor = "low_limits_cap"; thermal-sensors = <&pm8150l_bcl 3>; @@ -438,7 +445,7 @@ }; pm8150l-vph-lvl2 { - polling-delay-passive = <0>; + polling-delay-passive = <100>; polling-delay = <0>; thermal-governor = "low_limits_cap"; thermal-sensors = <&pm8150l_bcl 4>; diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts 
b/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts index f0b91c4cf7df25be98fea7c516759d8e23f56d8a..5b6d6d4a3fd864cc24bb6dc418535b50813ec462 100644 --- a/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts @@ -77,3 +77,7 @@ /delete-node/ cpuss-2-step; /delete-node/ cpuss-3-step; }; + +&qnand_1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts b/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts index fc6b265c4dada301b6dcb17b14b30122960211fc..38f8a9c8424075f42cd0f6ca5025a4a5503e3aeb 100644 --- a/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts @@ -14,7 +14,7 @@ /dts-v1/; #include "qcs403.dtsi" -#include "qcs405-nowcd-audio-overlay.dtsi" +#include "qcs405-audio-overlay.dtsi" / { model = "Qualcomm Technologies, Inc. QCS403 RCM IOT"; @@ -77,3 +77,19 @@ /delete-node/ cpuss-2-step; /delete-node/ cpuss-3-step; }; + +&usb3 { + status = "disabled"; +}; + +&usb_ss_phy { + status = "disabled"; +}; + +&usb2_phy1 { + status = "disabled"; +}; + +&qnand_1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi b/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi index bd0c17f9acb6dbfe56b1b9c1411686ee3e19d2f1..2a9453224eb0b4eecc5bf5408b94660b8b6f6f55 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi @@ -60,6 +60,7 @@ qcom,mi2s-audio-intf = <1>; qcom,auxpcm-audio-intf = <1>; qcom,spdif-audio-intf = <1>; + qcom,wcn-btfm = <1>; qcom,msm-mi2s-master = <1>, <0>, <1>, <1>, <1>; qcom,ep92-name = "ep92.3-0064"; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi index 3f6e6868f1605ced3f8dbce75f929cb68a771b2b..28be044c03a5d27edbc6e42d7f9f49c60252faf8 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi @@ -158,35 +158,35 @@ }; qcom,l1_i_cache100 { qcom,dump-node = <&L1_I_100>; - qcom,dump-id = <0x64>; + qcom,dump-id = 
<0x60>; }; qcom,l1_i_cache101 { qcom,dump-node = <&L1_I_101>; - qcom,dump-id = <0x65>; + qcom,dump-id = <0x61>; }; qcom,l1_i_cache102 { qcom,dump-node = <&L1_I_102>; - qcom,dump-id = <0x66>; + qcom,dump-id = <0x62>; }; qcom,l1_i_cache103 { qcom,dump-node = <&L1_I_103>; - qcom,dump-id = <0x67>; + qcom,dump-id = <0x63>; }; qcom,l1_d_cache100 { qcom,dump-node = <&L1_D_100>; - qcom,dump-id = <0x84>; + qcom,dump-id = <0x80>; }; qcom,l1_d_cache101 { qcom,dump-node = <&L1_D_101>; - qcom,dump-id = <0x85>; + qcom,dump-id = <0x81>; }; qcom,l1_d_cache102 { qcom,dump-node = <&L1_D_102>; - qcom,dump-id = <0x86>; + qcom,dump-id = <0x82>; }; qcom,l1_d_cache103 { qcom,dump-node = <&L1_D_103>; - qcom,dump-id = <0x87>; + qcom,dump-id = <0x83>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts index 2f034a8360f8cc9ea7e917459342c622e667b27c..8f22d4c3b334a594f8ebaa641387fa87bc2cb3c5 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts @@ -22,3 +22,150 @@ compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; qcom,board-id = <0x010020 0x1>; }; + +&gdsc_mdss { + status = "disabled"; +}; + +&clock_gcc_mdss { + status = "disabled"; +}; + +&mdss_mdp { + status = "disabled"; + /delete-node/ qcom,mdss_fb_primary; +}; + +&mdss_dsi { + status = "disabled"; +}; + +&mdss_dsi0 { + status = "disabled"; +}; + +&soc { + qcom,mdss_wb_panel { + status = "disabled"; + }; +}; + +&mdss_dsi0_pll { + status = "disabled"; +}; + +&mdss_dsi1_pll { + status = "disabled"; +}; + +&soc { + mdss_spi_display: qcom,mdss_spi_display { + compatible = "qcom,mdss-spi-display"; + label = "mdss spi display"; + + mdss_fb0: qcom,mdss_fb_primary { + cell-index = <0>; + compatible = "qcom,mdss-fb"; + }; + }; +}; + +#include "spi-panel-st7789v2-qvga-cmd.dtsi" + +&soc { + mdss_spi_panel: qcom,mdss_spi_panel { + compatible = "qcom,mdss-spi-panel"; + label = "mdss spi panel"; + qcom,spi-pref-prim-pan = 
<&spi_st7789v2_qvga_cmd>; + qcom,mdss-spi = <&mdss_spi_display>; + qcom,mdss-fb-map = <&mdss_fb0>; + }; + + spi@7af5000 { /* BLSP1 QUP2 */ + status = "ok"; + mdss_spi_client: qcom,mdss_spi_client { + reg = <0>; + compatible = "qcom,mdss-spi-client"; + label = "MDSS SPI QUP2 CLIENT"; + spi-max-frequency = <50000000>; + }; + }; +}; + +&mdss_te_active { + mux { + pins = "gpio57"; + function = "gpio"; + }; + config { + pins = "gpio57"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down*/ + }; +}; + +&mdss_te_suspend { + mux { + pins = "gpio57"; + function = "gpio"; + }; + config { + pins = "gpio57"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down*/ + }; +}; + +&pmx_mdss{ + mdss_spi_panel_active: mdss_spi_panel_active { + mux { + pins = "gpio39", "gpio42"; + drive-strength = <8>; /* 8 mA */ + bias-disable = <0>; /* no pull */ + output-high; + }; + }; + + mdss_spi_panel_suspend: mdss_spi_panel_suspend { + mux { + pins = "gpio39", "gpio42"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + input-enable; + }; + }; +}; + +&mdss_spi_panel { + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_te_active &mdss_spi_panel_active>; + pinctrl-1 = <&mdss_te_suspend &mdss_spi_panel_suspend>; + + qcom,platform-te-gpio = <&tlmm 57 0>; + qcom,platform-reset-gpio = <&tlmm 42 0>; + qcom,platform-spi-dc-gpio = <&tlmm 39 0>; + + vddio-supply = <&pms405_l6>; + + qcom,panel-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,panel-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdd"; + qcom,supply-min-voltage = <2800000>; + qcom,supply-max-voltage = <2800000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + }; + + qcom,panel-supply-entry@1 { + reg = <1>; + qcom,supply-name = "vddio"; + qcom,supply-min-voltage = <1800000>; + qcom,supply-max-voltage = <1800000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + }; + }; +}; diff --git 
a/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts index 198ccc831376a62f0d5338d3a0eb31a4e624902e..9ec18758df4dd289c25e4bad9b9310330a275716 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts @@ -14,7 +14,7 @@ /dts-v1/; #include "qcs405.dtsi" -#include "qcs405-nowcd-audio-overlay.dtsi" +#include "qcs405-audio-overlay.dtsi" #include "qcs405-geni-ir-overlay.dtsi" #include "qcs405-pinctrl.dtsi" diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts index 5b3bfd106a88d98a8cf8bbb135d6f26c9b72f010..980bca7ed64492c59c02190c3b7f90236a374563 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts @@ -14,7 +14,7 @@ /dts-v1/; #include "qcs405.dtsi" -#include "qcs405-nowcd-audio-overlay.dtsi" +#include "qcs405-audio-overlay.dtsi" #include "qcs405-pinctrl.dtsi" / { diff --git a/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi b/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi index 36b8d2cdcef316748938449c6e83e5f531d31817..183da184bd538200566eecdb183caca548b08bca 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi @@ -48,20 +48,18 @@ reg = <0>; label = "perf-l2-wfi"; qcom,psci-mode = <1>; - qcom,latency-us = <180>; - qcom,ss-power = <429>; - qcom,energy-overhead = <162991>; - qcom,time-overhead = <305>; + qcom,entry-latency-us = <125>; + qcom,exit-latency-us = <180>; + qcom,min-residency-us = <305>; }; qcom,pm-cluster-level@1{ reg = <1>; label = "perf-l2-gdhs"; qcom,psci-mode = <4>; - qcom,latency-us = <280>; - qcom,ss-power = <421>; - qcom,energy-overhead = <257510>; - qcom,time-overhead = <520>; + qcom,entry-latency-us = <240>; + qcom,exit-latency-us = <280>; + qcom,min-residency-us = <806>; qcom,min-child-idx = <1>; qcom,reset-level = ; }; @@ -70,10 +68,9 @@ reg = <2>; label = "perf-l2-retention"; qcom,psci-mode = <2>; - qcom,latency-us = <650>; - 
qcom,ss-power = <350>; - qcom,energy-overhead = <651061>; - qcom,time-overhead = <1350>; + qcom,entry-latency-us = <700>; + qcom,exit-latency-us = <650>; + qcom,min-residency-us = <1972>; qcom,min-child-idx = <1>; qcom,reset-level = ; }; @@ -82,10 +79,9 @@ reg = <3>; label = "perf-l2-pc"; qcom,psci-mode = <5>; - qcom,latency-us = <11200>; - qcom,ss-power = <320>; - qcom,energy-overhead = <917561>; - qcom,time-overhead = <1700>; + qcom,entry-latency-us = <700>; + qcom,exit-latency-us = <1000>; + qcom,min-residency-us = <6500>; qcom,min-child-idx = <1>; qcom,is-reset; qcom,notify-rpm; @@ -103,20 +99,18 @@ reg = <0>; qcom,psci-cpu-mode = <0>; label = "wfi"; - qcom,latency-us = <12>; - qcom,ss-power = <463>; - qcom,energy-overhead = <23520>; - qcom,time-overhead = <25>; + qcom,entry-latency-us = <13>; + qcom,exit-latency-us = <12>; + qcom,min-residency-us = <25>; }; qcom,pm-cpu-level@1 { reg = <1>; qcom,psci-cpu-mode = <3>; label = "pc"; - qcom,latency-us = <180>; - qcom,ss-power = <429>; - qcom,energy-overhead = <162991>; - qcom,time-overhead = <305>; + qcom,entry-latency-us = <125>; + qcom,exit-latency-us = <180>; + qcom,min-residency-us = <595>; qcom,use-broadcast-timer; qcom,is-reset; qcom,reset-level = ; @@ -138,7 +132,7 @@ qcom,rpm-master-stats@60150 { compatible = "qcom,rpm-master-stats"; reg = <0x60150 0x5000>; - qcom,masters = "APSS", "MPSS", "PRONTO", "TZ", "LPASS"; + qcom,masters = "APSS", "MPSS", "LPASS", "CDSP", "TZ"; qcom,master-stats-version = <2>; qcom,master-offset = <4096>; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 73dc28b6e96a48bdbe2524b1add691c62d4d44b6..14e5dc1c87beea85979e4bd7cfb689b63537d3f6 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -39,10 +39,10 @@ #size-cells = <2>; ranges; - removed_region0: removed_region@85600000 { + removed_region0: removed_region@85800000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x85600000 0x0 
0x900000>; + reg = <0x0 0x85800000 0x0 0x700000>; }; smem_region: smem@85f00000 { @@ -59,25 +59,25 @@ wlan_fw_mem: wlan_fw_mem@86400000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x86400000 0x0 0x1c00000>; + reg = <0x0 0x86400000 0x0 0x1100000>; }; - adsp_fw_mem: adsp_fw_mem@88000000 { + adsp_fw_mem: adsp_fw_mem@87500000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x88000000 0x0 0x1a00000>; + reg = <0x0 0x87500000 0x0 0x1a00000>; }; - cdsp_fw_mem: cdsp_fw_mem@89a00000 { + cdsp_fw_mem: cdsp_fw_mem@88f00000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x89a00000 0x0 0x600000>; + reg = <0x0 0x88f00000 0x0 0x600000>; }; - wlan_msa_mem: wlan_msa_region@8a000000 { + wlan_msa_mem: wlan_msa_region@89500000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8a000000 0x0 0x100000>; + reg = <0x0 0x89500000 0x0 0x100000>; }; secure_mem: secure_region { @@ -309,7 +309,7 @@ reg-names = "cc_base"; vdd_cx-supply = <&pms405_s1_level>; clocks = <&clock_rpmcc RPM_SMD_XO_CLK_SRC>; - qcom,gfx3d_clk_src-opp-handle = <&msm_gpu>; + qcom,gcc_oxili_gfx3d_clk-opp-handle = <&msm_gpu>; clock-names = "cxo"; #clock-cells = <1>; #reset-cells = <1>; @@ -628,7 +628,8 @@ "qcom,shutdown-ack"; /* GPIO output to wcnss */ - qcom,gpio-force-stop = <&modem_smp2p_out 0 0>; + qcom,smem-states = <&modem_smp2p_out 0>; + qcom,smem-state-names = "qcom,force-stop"; memory-region = <&wlan_fw_mem>; }; @@ -667,9 +668,9 @@ reg = <0x60000 0x6000>; }; - apcs: syscon@b011000 { + apcs: syscon@b011008 { compatible = "syscon"; - reg = <0xb011000 0x4>; + reg = <0xb011008 0x4>; }; apcs_glb: mailbox@b011000 { @@ -758,6 +759,8 @@ compatible = "qcom,msm-mdf-cb"; label = "adsp"; qcom,smmu-enabled; + iommus = <&apps_smmu 0x0800 0x0>; + qcom,smmu-sid-mask = /bits/ 64 <0xf>; }; qcom,msm_mdf_cb2 { @@ -810,7 +813,7 @@ interrupts = ; label = "wcnss"; - qcom,glink-label = "wcnss"; + qcom,glink-label = "mpss"; qcom,wcnss_qrtr { qcom,glink-channels = "IPCRTR"; @@ -1192,11 +1195,22 @@ reg-names 
= "nand_phys", "bam_phys"; qcom,reg-adjustment-offset = <0x4000>; - qcom,qpic-clk-rpmh; interrupts = <0 49 0>; interrupt-names = "bam_irq"; + qcom,msm-bus,name = "qpic_nand"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + + qcom,msm-bus,vectors-KBps = + <91 512 0 0>, + /* Voting for max b/w on PNOC bus for now */ + <91 512 400000 400000>; + + clock-names = "core_clk"; + clocks = <&clock_rpmcc RPM_SMD_QPIC_CLK>; + status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi b/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi index bed9204466fa617c4928651a818b7cc951083189..973cb497e701ee614882f1271d565e05bd0e02f8 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi @@ -21,3 +21,73 @@ }; }; }; + +&ufsphy_mem { + compatible = "qcom,ufs-phy-qmp-v3-660"; + + vdda-phy-supply = <&pm6155_1_l5>; /* 0.9v */ + vdda-pll-supply = <&pm6155_1_l12>; + vdda-phy-max-microamp = <30000>; + vdda-pll-max-microamp = <12000>; + + status = "ok"; +}; + +&ufshc_mem { + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm6155_1_l17>; + vcc-voltage-level = <2950000 2960000>; + vccq2-supply = <&pm6155_1_s4>; + vcc-max-microamp = <800000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm6155_1_l11>; + qcom,vddp-ref-clk-max-microamp = <100>; + qcom,vddp-ref-clk-min-uV = <1232000>; + qcom,vddp-ref-clk-max-uV = <1260000>; + + status = "ok"; +}; + +&sdhc_1 { + vdd-supply = <&pm6155_1_l17>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 570000>; + + vdd-io-supply = <&pm6155_1_s4>; + qcom,vdd-io-always-on; + qcom,vdd-io-lpm-sup; + qcom,vdd-io-voltage-level = <1800000 1800000>; + qcom,vdd-io-current-level = <0 325000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>; + pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>; + + status = "ok"; +}; + +&sdhc_2 
{ + vdd-supply = <&pm6155_1_l10>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 800000>; + + vdd-io-supply = <&pm6155_1_l2>; + qcom,vdd-io-voltage-level = <1800000 3100000>; + qcom,vdd-io-current-level = <0 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; + + cd-gpios = <&tlmm 99 1>; + + status = "ok"; +}; + +&usb0 { + dwc3@a600000 { + dr_mode = "peripheral"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi b/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..5351f31544d35292015fefc27e760a25ffc0ae29 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi @@ -0,0 +1,172 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* Remove regulator nodes specific to SM6150 */ +&soc { + /delete-node/ rpmh-regulator-mxlvl; + /delete-node/ rpmh-regulator-cxlvl; + /delete-node/ rpmh-regulator-smpc1; + /delete-node/ rpmh-regulator-smpc2; + /delete-node/ rpmh-regulator-modemlvl; + /delete-node/ rpmh-regulator-smpc8; + /delete-node/ rpmh-regulator-ldoa1; + /delete-node/ rpmh-regulator-ldoa2; + /delete-node/ rpmh-regulator-ldoa3; + /delete-node/ rpmh-regulator-ldoa4; + /delete-node/ rpmh-regulator-ldoa5; + /delete-node/ rpmh-regulator-ldoa6; + /delete-node/ rpmh-regulator-lmxlvl; + /delete-node/ rpmh-regulator-lcxlvl; + /delete-node/ rpmh-regulator-ldoa9; + /delete-node/ rpmh-regulator-ldoa10; + /delete-node/ rpmh-regulator-ldoa11; + /delete-node/ rpmh-regulator-ldoa12; + /delete-node/ rpmh-regulator-ldoa13; + /delete-node/ rpmh-regulator-ldoa14; + /delete-node/ rpmh-regulator-ldoa15; + /delete-node/ rpmh-regulator-ldoa16; + /delete-node/ rpmh-regulator-ldoa17; + /delete-node/ rpmh-regulator-ldoa18; + /delete-node/ rpmh-regulator-ldoa19; + /delete-node/ rpmh-regulator-ldoc1; + /delete-node/ rpmh-regulator-ldoc2; + /delete-node/ rpmh-regulator-ldoc3; + /delete-node/ rpmh-regulator-ldoc4; + /delete-node/ rpmh-regulator-ldoc5; + /delete-node/ rpmh-regulator-ldoc6; + /delete-node/ rpmh-regulator-ldoc7; + /delete-node/ rpmh-regulator-ldoc8; + /delete-node/ rpmh-regulator-ldoc9; + /delete-node/ rpmh-regulator-ldoc10; + /delete-node/ rpmh-regulator-ldoc11; + /delete-node/ rpmh-regulator-bobc1; +}; + +&qusb_phy0 { + /delete-property/ vdd-supply; + /delete-property/ vdda18-supply; + /delete-property/ vdda33-supply; +}; + +&qusb_phy1 { + /delete-property/ vdd-supply; + /delete-property/ vdda18-supply; + /delete-property/ vdda33-supply; +}; + +&usb0 { + /delete-property/ extcon; + /delete-property/ vbus_dwc3-supply; +}; + +&pm6150_pdphy { + /delete-property/ vdd-pdphy-supply; +}; + +&usb_qmp_phy { + /delete-property/ vdd-supply; + /delete-property/ core-supply; +}; + +&mdss_dsi0 { + /delete-property/ 
vdda-1p2-supply; +}; + +&sde_dp { + /delete-property/ vdda-1p2-supply; + /delete-property/ vdda-0p9-supply; +}; + +&mdss_dsi_phy0 { + /delete-property/ vdda-0p9-supply; +}; + +&cam_csiphy0 { + /delete-property/ mipi-csi-vdd-supply; +}; + +&cam_csiphy1 { + /delete-property/ mipi-csi-vdd-supply; +}; + +&cam_csiphy2 { + /delete-property/ mipi-csi-vdd-supply; +}; + +&bluetooth { + /delete-property/ qca,bt-vdd-core-supply; + /delete-property/ qca,bt-vdd-pa-supply; + /delete-property/ qca,bt-vdd-ldo-supply; +}; + +&icnss { + /delete-property/ vdd-0.8-cx-mx-supply; + /delete-property/ vdd-1.8-xo-supply ; + /delete-property/ vdd-1.3-rfa-supply; + /delete-property/ vdd-3.3-ch0-supply; +}; + +&soc { + qcom,lpass@62400000 { + vdd_cx-supply = <&VDD_CX_LEVEL>; + }; +}; + + +&spmi_bus { + /delete-node/ qcom,pm6150@0; + /delete-node/ qcom,pm6150@1; + /delete-node/ qcom,pm6150l@4; + /delete-node/ qcom,pm6150l@5; +}; + +&thermal_zones { + /delete-node/ pm6150l-tz; + /delete-node/ pm6150-tz; + /delete-node/ pm6150-ibat-lvl0; + /delete-node/ pm6150-ibat-lvl1; + /delete-node/ pm6150-vbat-lvl0; + /delete-node/ pm6150-vbat-lvl1; + /delete-node/ pm6150-vbat-lvl2; + /delete-node/ pm6150l-vph-lvl0; + /delete-node/ pm6150l-vph-lvl1; + /delete-node/ pm6150l-vph-lvl2; + /delete-node/ xo-therm; + /delete-node/ sdm-therm; + /delete-node/ conn-therm; + /delete-node/ emmc_ufs-therm; + /delete-node/ rf_pa0_therm-therm; + /delete-node/ camera_flash-therm; + /delete-node/ quiet-therm; + /delete-node/ aoss-lowf; + /delete-node/ cpuss-0-lowf; + /delete-node/ cpuss-1-lowf; + /delete-node/ cpuss-2-lowf; + /delete-node/ cpuss-3-lowf; + /delete-node/ cpu-1-0-lowf; + /delete-node/ cpu-1-1-lowf; + /delete-node/ cpu-1-2-lowf; + /delete-node/ cpu-1-3-lowf; + /delete-node/ gpu-lowf; + /delete-node/ q6-hvx-lowf; + /delete-node/ mdm-core-lowf; + /delete-node/ video-lowf; + /delete-node/ display-lowf; + /delete-node/ wlan-lowf; + /delete-node/ camera-lowf; + soc { + /delete-property/ thermal-sensors; + }; +}; + 
+#include "sa6155-regulator.dtsi" +#include "pm6155.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi b/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..1e6fe05effea60c36829e235fdee961763039399 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi @@ -0,0 +1,505 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +&soc { + rpmh-regulator-smpa2 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "cx.lvl"; + + VDD_CX_LEVEL: VDD_MX_LEVEL: + S2A_LEVEL: pm6155_1_s2_level: regulator-pm6155-1-s2-level { + regulator-name = "pm6155_1_s2_level"; + qcom,set = ; + regulator-min-microvolt + = ; + regulator-max-microvolt + = ; + qcom,init-voltage-level + = ; + qcom,min-dropout-voltage-level = <(-1)>; + }; + + VDD_CX_LEVEL_AO: VDD_MX_LEVEL_AO: S2A_LEVEL_AO: + pm6155_1_s2_level_ao: regulator-pm6155-1-s2-level-ao { + regulator-name = "pm6155_1_s2_level_ao"; + qcom,set = ; + regulator-min-microvolt + = ; + regulator-max-microvolt + = ; + qcom,init-voltage-level + = ; + qcom,min-dropout-voltage-level = <(-1)>; + }; + }; + + rpmh-regulator-smpa3 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpa3"; + S3A: pm6155_1_s3: regulator-pm6155-1-s3 { + regulator-name = "pm6155_1_s3"; + qcom,set = ; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <650000>; + qcom,init-voltage = <600000>; + }; + }; + + 
rpmh-regulator-smpa4 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpa4"; + S4A: pm6155_1_s4: regulator-pm6155-1-s4 { + regulator-name = "pm6155_1_s4"; + qcom,set = ; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1829000>; + qcom,init-voltage = <1800000>; + }; + }; + + rpmh-regulator-smpa6 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpa6"; + S6A: pm6155_1_s6: regulator-pm6155-1-s6 { + regulator-name = "pm6155_1_s6"; + qcom,set = ; + regulator-min-microvolt = <947000>; + regulator-max-microvolt = <1404000>; + qcom,init-voltage = <947000>; + }; + }; + + rpmh-regulator-ldoa1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa1"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L1A: pm6155_1_l1: regulator-pm6155-1-l1 { + regulator-name = "pm6155_1_l1"; + qcom,set = ; + regulator-min-microvolt = <488000>; + regulator-max-microvolt = <852000>; + qcom,init-voltage = <488000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa2 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa2"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L2A: pm6155_1_l2: regulator-pm6155-1-l2 { + regulator-name = "pm6155_1_l2"; + qcom,set = ; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3100000>; + qcom,init-voltage = <1650000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa5 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa5"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L5A: pm6155_1_l5: regulator-pm6155-1-l5 { + regulator-name = "pm6155_1_l5"; + qcom,set = ; + regulator-min-microvolt = <875000>; + regulator-max-microvolt 
= <975000>; + qcom,init-voltage = <875000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa7 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa7"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L7A: pm6155_1_l7: regulator-pm6155-1-l7 { + regulator-name = "pm6155_1_l7"; + qcom,set = ; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1900000>; + qcom,init-voltage = <1800000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa8 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa8"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L8A: pm6155_1_l8: regulator-pm6155-1-l8 { + regulator-name = "pm6155_1_l8"; + qcom,set = ; + regulator-min-microvolt = <1150000>; + regulator-max-microvolt = <1350000>; + qcom,init-voltage = <1150000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa9 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa9"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L9A: pm6155_1_l9: regulator-pm6155-1-l9 { + regulator-name = "pm6155_1_l9"; + qcom,set = ; + regulator-min-microvolt = <1232000>; + regulator-max-microvolt = <1232000>; + qcom,init-voltage = <1232000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa10 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa10"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L10A: pm6155_1_l10: regulator-pm6155-1-l10 { + regulator-name = "pm6155_1_l10"; + qcom,set = ; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <3312000>; + qcom,init-voltage = <2950000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa11 { + compatible = 
"qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa11"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L11A: pm6155_1_l11: regulator-pm6155-1-l11 { + regulator-name = "pm6155_1_l11"; + qcom,set = ; + regulator-min-microvolt = <1232000>; + regulator-max-microvolt = <1260000>; + qcom,init-voltage = <1232000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa12 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa12"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L12A: pm6155_1_l12: regulator-pm6155-1-l12 { + regulator-name = "pm6155_1_l12"; + qcom,set = ; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1890000>; + qcom,init-voltage = <1800000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa13 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa13"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L13A: pm6155_1_l13: regulator-pm6155-1-l13 { + regulator-name = "pm6155_1_l13"; + qcom,set = ; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3230000>; + qcom,init-voltage = <3000000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa15 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa15"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L15A: pm6155_1_l15: regulator-pm6155-1-l15 { + regulator-name = "pm6155_1_l15"; + qcom,set = ; + regulator-min-microvolt = <1904000>; + regulator-max-microvolt = <1904000>; + qcom,init-voltage = <1904000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa16 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa16"; + 
qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L16A: pm6155_1_l16: regulator-pm6155-1-l16 { + regulator-name = "pm6155_1_l16"; + qcom,set = ; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3312000>; + qcom,init-voltage = <3000000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa17 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa17"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L17A: pm6155_1_l17: regulator-pm6155-1-l17 { + regulator-name = "pm6155_1_l17"; + qcom,set = ; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <3312000>; + qcom,init-voltage = <3000000>; + qcom,init-mode = ; + }; + }; + + /* PM6155 S1 - VDD_MSS supply */ + rpmh-regulator-modemlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "mss.lvl"; + + VDD_MSS_LEVEL: + S1C_LEVEL: pm6155_2_s1_level: regulator-pm6155-2-s1-level { + regulator-name = "pm6155_2_s1_level"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + }; + }; + + rpmh-regulator-ldoc2 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc2"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L2C: pm6155_2_l2: regulator-pm6155-2-l2 { + regulator-name = "pm6155_2_l2"; + qcom,set = ; + regulator-min-microvolt = <2430000>; + regulator-max-microvolt = <2970000>; + qcom,init-voltage = <2430000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc3 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc3"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L3C: pm6155_2_l3: regulator-pm6155-2-l3 { + regulator-name = 
"pm6155_2_l3"; + qcom,set = ; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1252000>; + qcom,init-voltage = <1200000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc4 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc4"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L4C: pm6155_2_l4: regulator-pm6155-2-l4 { + regulator-name = "pm6155_2_l4"; + qcom,set = ; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1252000>; + qcom,init-voltage = <1200000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc13 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc13"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L13C: pm6155_2_l13: regulator-pm6155-2-l13 { + regulator-name = "pm6155_2_l13"; + qcom,set = ; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <2950000>; + qcom,init-voltage = <1650000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc14 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc14"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L14C: pm6155_2_l14: regulator-pm6155-2-l14 { + regulator-name = "pm6155_2_l14"; + qcom,set = ; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1850000>; + qcom,init-voltage = <1800000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc16 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc16"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L16C: pm6155_2_l16: regulator-pm6155-2-l16 { + regulator-name = "pm6155_2_l16"; + qcom,set = ; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <2950000>; + 
qcom,init-voltage = <1650000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc17 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc17"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L17C: pm6155_2_l17: regulator-pm6155-2-l17 { + regulator-name = "pm6155_2_l17"; + qcom,set = ; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <2950000>; + qcom,init-voltage = <1650000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc18 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc18"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L18C: pm6155_2_l18: regulator-pm6155-2-l18 { + regulator-name = "pm6155_2_l18"; + qcom,set = ; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1050000>; + qcom,init-voltage = <1000000>; + qcom,init-mode = ; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sa6155.dtsi b/arch/arm64/boot/dts/qcom/sa6155.dtsi index 0c7d23ff50473d986385d1ea8085177d97e74600..67e3d5e5f9965e1c24bc5054f60847eee673c402 100644 --- a/arch/arm64/boot/dts/qcom/sa6155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155.dtsi @@ -11,6 +11,7 @@ */ #include "sm6150.dtsi" +#include "sa6155-pmic.dtsi" / { model = "Qualcomm Technologies, Inc. 
SA6155"; @@ -18,3 +19,20 @@ qcom,msm-name = "SA6155"; qcom,msm-id = <384 0x10000>; }; + +&qusb_phy0 { + vdd-supply = <&L5A>; + vdda18-supply = <&L12A>; + vdda33-supply = <&L13A>; +}; + +&usb_qmp_phy { + vdd-supply = <&L5A>; + core-supply = <&L12A>; +}; + +&qusb_phy1 { + vdd-supply = <&L5A>; + vdda18-supply = <&L12A>; + vdda33-supply = <&L13A>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155p.dtsi b/arch/arm64/boot/dts/qcom/sa6155p.dtsi index 83c0783cd5caf0285bec9f3872b9bb8574241000..369144d9c23c17c900450d45f67c25639ee6bfc1 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155p.dtsi @@ -11,9 +11,49 @@ */ #include "sm6150.dtsi" +#include "sa6155-pmic.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155P"; qcom,msm-name = "SA6155P"; qcom,msm-id = <377 0>; }; + +/* Delete second instance of pm6155 definitions for APQ version */ +&spmi_bus { + /delete-node/ qcom,pm6155@4; + /delete-node/ qcom,pm6155@5; +}; + +&soc { + /delete-node/ rpmh-regulator-modemlvl; + /delete-node/ rpmh-regulator-ldoc2; + /delete-node/ rpmh-regulator-ldoc3; + /delete-node/ rpmh-regulator-ldoc4; + /delete-node/ rpmh-regulator-ldoc13; + /delete-node/ rpmh-regulator-ldoc14; + /delete-node/ rpmh-regulator-ldoc16; + /delete-node/ rpmh-regulator-ldoc17; + /delete-node/ rpmh-regulator-ldoc18; +}; + +&pil_modem { + /delete-property/ vdd_mss-supply; +}; + +&qusb_phy0 { + vdd-supply = <&L5A>; + vdda18-supply = <&L12A>; + vdda33-supply = <&L13A>; +}; + +&usb_qmp_phy { + vdd-supply = <&L5A>; + core-supply = <&L12A>; +}; + +&qusb_phy1 { + vdd-supply = <&L5A>; + vdda18-supply = <&L12A>; + vdda33-supply = <&L13A>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi b/arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi index 1547722722c51d6696b2280cd64c9ff39dc1943c..69b5b38fb564b233bb5d1e2e8536f8e672613831 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi @@ -24,7 +24,7 @@ interrupts = <38 0>; spi-max-frequency 
= <5000000>; qcom,clk-freq-mhz = <40000000>; - qcom,max-can-channels = <4>; + qcom,max-can-channels = <1>; qcom,bits-per-word = <8>; qcom,support-can-fd; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155.dtsi b/arch/arm64/boot/dts/qcom/sa8155.dtsi index e232fdf7892b7c30b068088e7576462fa9d40160..a09f501e2c4e18806e397c8772d435b74469f61e 100644 --- a/arch/arm64/boot/dts/qcom/sa8155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155.dtsi @@ -70,6 +70,10 @@ /* Add regulator nodes specific to SA8155 */ #include "sa8155-regulator.dtsi" +&slpi_tlmm { + status = "ok"; +}; + &cam_csiphy0 { mipi-csi-vdd-supply = <&pm8150_2_l8>; }; @@ -175,6 +179,11 @@ vdda-phy-supply = <&pm8150_2_l18>; }; +&clock_scc { + vdd_scc_cx-supply = <&VDD_CX_LEVEL>; + status = "ok"; +}; + &thermal_zones { aoss0-lowf { cooling-maps { @@ -602,8 +611,18 @@ qcom,panel-force-clock-lane-hs; }; +&mdss_dsi0_pll { + /delete-property/ qcom,dsi-pll-ssc-en; +}; + +&mdss_dsi1_pll { + /delete-property/ qcom,dsi-pll-ssc-en; +}; + &mdss_mdp { - connectors = <&dsi_dp1 &dsi_dp2 &sde_dp &sde_wb>; + connectors = <&sde_rscc &dsi_dp1 &dsi_dp2 &sde_dp &sde_wb>; + qcom,sde-mixer-display-pref = "primary", "none", "none", + "none", "none", "none"; }; #include diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-bus.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-bus.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..07ca398a4128d2debb0c7e45507b4d6b76424cb0 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-bus.dtsi @@ -0,0 +1,1937 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include + +&soc { + ad_hoc_bus: ad-hoc-bus { + compatible = "qcom,msm-bus-device"; + reg = <0x16E0000 0x11080>, + <0x1700000 0x1F080>, + <0x1500000 0x28000>, + <0x9160000 0x03200>, + <0x9680000 0x3E200>, + <0x1380000 0x40000>, + <0x1740000 0x1C100>, + <0x1620000 0x18080>, + <0x1620000 0x40000>, + <0x1620000 0x40000>, + <0x80A8000 0x01400>; + + reg-names = "aggre1_noc-base", "aggre2_noc-base", + "config_noc-base", "dc_noc-base", + "gem_noc-base", "mc_virt-base", + "mmss_noc-base", "system_noc-base", + "ipa_virt-base", "camnoc_virt-base", + "compute_noc-base"; + + mbox-names = "apps_rsc", "disp_rsc"; + mboxes = <&apps_rsc 0 &disp_rsc 0>; + + /*RSCs*/ + rsc_apps: rsc-apps { + cell-id = ; + label = "apps_rsc"; + qcom,rsc-dev; + qcom,req_state = <2>; + }; + + rsc_disp: rsc-disp { + cell-id = ; + label = "disp_rsc"; + qcom,rsc-dev; + qcom,req_state = <3>; + }; + + /*BCMs*/ + bcm_acv: bcm-acv { + cell-id = ; + label = "ACV"; + qcom,bcm-name = "ACV"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_alc: bcm-alc { + cell-id = ; + label = "ALC"; + qcom,bcm-name = "ALC"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mc0: bcm-mc0 { + cell-id = ; + label = "MC0"; + qcom,bcm-name = "MC0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh0: bcm-sh0 { + cell-id = ; + label = "SH0"; + qcom,bcm-name = "SH0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm0: bcm-mm0 { + cell-id = ; + label = "MM0"; + qcom,bcm-name = "MM0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm1: bcm-mm1 { + cell-id = ; + label = "MM1"; + qcom,bcm-name = "MM1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh2: bcm-sh2 { + cell-id = ; + label = "SH2"; + qcom,bcm-name = "SH2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm2: bcm-mm2 { + cell-id = ; + label = "MM2"; + qcom,bcm-name = "MM2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm3: bcm-mm3 { + 
cell-id = ; + label = "MM3"; + qcom,bcm-name = "MM3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh5: bcm-sh5 { + cell-id = ; + label = "SH5"; + qcom,bcm-name = "SH5"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn0: bcm-sn0 { + cell-id = ; + label = "SN0"; + qcom,bcm-name = "SN0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_ce0: bcm-ce0 { + cell-id = ; + label = "CE0"; + qcom,bcm-name = "CE0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_ip0: bcm-ip0 { + cell-id = ; + label = "IP0"; + qcom,bcm-name = "IP0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_cn0: bcm-cn0 { + cell-id = ; + label = "CN0"; + qcom,bcm-name = "CN0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_qup0: bcm-qup0 { + cell-id = ; + label = "QUP0"; + qcom,bcm-name = "QUP0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn1: bcm-sn1 { + cell-id = ; + label = "SN1"; + qcom,bcm-name = "SN1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn2: bcm-sn2 { + cell-id = ; + label = "SN2"; + qcom,bcm-name = "SN2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn4: bcm-sn4 { + cell-id = ; + label = "SN4"; + qcom,bcm-name = "SN4"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn9: bcm-sn9 { + cell-id = ; + label = "SN9"; + qcom,bcm-name = "SN9"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn11: bcm-sn11 { + cell-id = ; + label = "SN11"; + qcom,bcm-name = "SN11"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn12: bcm-sn12 { + cell-id = ; + label = "SN12"; + qcom,bcm-name = "SN12"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn14: bcm-sn14 { + cell-id = ; + label = "SN14"; + qcom,bcm-name = "SN14"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn15: bcm-sn15 { + cell-id = ; + label = "SN15"; + qcom,bcm-name = "SN15"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_acv_display: bcm-acv_display { + cell-id = ; + label = "ACV_DISPLAY"; + qcom,bcm-name = 
"ACV"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_alc_display: bcm-alc_display { + cell-id = ; + label = "ALC_DISPLAY"; + qcom,bcm-name = "ALC"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mc0_display: bcm-mc0_display { + cell-id = ; + label = "MC0_DISPLAY"; + qcom,bcm-name = "MC0"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_sh0_display: bcm-sh0_display { + cell-id = ; + label = "SH0_DISPLAY"; + qcom,bcm-name = "SH0"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm0_display: bcm-mm0_display { + cell-id = ; + label = "MM0_DISPLAY"; + qcom,bcm-name = "MM0"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm1_display: bcm-mm1_display { + cell-id = ; + label = "MM1_DISPLAY"; + qcom,bcm-name = "MM1"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm2_display: bcm-mm2_display { + cell-id = ; + label = "MM2_DISPLAY"; + qcom,bcm-name = "MM2"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm3_display: bcm-mm3_display { + cell-id = ; + label = "MM3_DISPLAY"; + qcom,bcm-name = "MM3"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + /*Buses*/ + fab_aggre1_noc: fab-aggre1_noc{ + cell-id = ; + label = "fab-aggre1_noc"; + qcom,fab-dev; + qcom,base-name = "aggre1_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <16384>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_aggre2_noc: fab-aggre2_noc{ + cell-id = ; + label = "fab-aggre2_noc"; + qcom,fab-dev; + qcom,base-name = "aggre2_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <20480>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_camnoc_virt: fab-camnoc_virt{ + cell-id = ; + label = "fab-camnoc_virt"; + qcom,fab-dev; + qcom,base-name = "camnoc_virt-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_compute_noc: fab-compute_noc{ + cell-id = ; + label = 
"fab-compute_noc"; + qcom,fab-dev; + qcom,base-name = "compute_noc-base"; + qcom,qos-off = <12288>; + qcom,base-offset = <49152>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_config_noc: fab-config_noc{ + cell-id = ; + label = "fab-config_noc"; + qcom,fab-dev; + qcom,base-name = "config_noc-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_dc_noc: fab-dc_noc{ + cell-id = ; + label = "fab-dc_noc"; + qcom,fab-dev; + qcom,base-name = "dc_noc-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_gem_noc: fab-gem_noc{ + cell-id = ; + label = "fab-gem_noc"; + qcom,fab-dev; + qcom,base-name = "gem_noc-base"; + qcom,qos-off = <128>; + qcom,base-offset = <176128>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_ipa_virt: fab-ipa_virt{ + cell-id = ; + label = "fab-ipa_virt"; + qcom,fab-dev; + qcom,base-name = "ipa_virt-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mc_virt: fab-mc_virt{ + cell-id = ; + label = "fab-mc_virt"; + qcom,fab-dev; + qcom,base-name = "mc_virt-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mmss_noc: fab-mmss_noc{ + cell-id = ; + label = "fab-mmss_noc"; + qcom,fab-dev; + qcom,base-name = "mmss_noc-base"; + qcom,qos-off = <128>; + qcom,base-offset = <36864>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_system_noc: fab-system_noc{ + cell-id = ; + label = "fab-system_noc"; + qcom,fab-dev; + qcom,base-name = "system_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <40960>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + 
qcom,bus-type = <1>; + clocks = <>; + }; + + fab_gem_noc_display: fab-gem_noc_display{ + cell-id = ; + label = "fab-gem_noc_display"; + qcom,fab-dev; + qcom,base-name = "gem_noc-base"; + qcom,qos-off = <128>; + qcom,base-offset = <176128>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_mc_virt_display: fab-mc_virt_display{ + cell-id = ; + label = "fab-mc_virt_display"; + qcom,fab-dev; + qcom,base-name = "mc_virt-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mmss_noc_display: fab-mmss_noc_display{ + cell-id = ; + label = "fab-mmss_noc_display"; + qcom,fab-dev; + qcom,base-name = "mmss_noc-base"; + qcom,qos-off = <128>; + qcom,base-offset = <36864>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + /*Masters*/ + mas_qhm_a1noc_cfg: mas-qhm-a1noc-cfg { + cell-id = ; + label = "mas-qhm-a1noc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_aggre1_noc>; + qcom,bus-dev = <&fab_aggre1_noc>; + }; + + mas_qhm_qup_center: mas-qhm-qup-center { + cell-id = ; + label = "mas-qhm-qup-center"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <5>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_qup0>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qhm_tsif: mas-qhm-tsif { + cell-id = ; + label = "mas-qhm-tsif"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + mas_xm_emmc: mas-xm-emmc { + cell-id = ; + label = "mas-xm-emmc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn0>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_xm_sdc2: mas-xm-sdc2 { + cell-id = ; + label = 
"mas-xm-sdc2"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn0>; + qcom,ap-owned; + qcom,prio = <1>; + }; + + mas_xm_sdc4: mas-xm-sdc4 { + cell-id = ; + label = "mas-xm-sdc4"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <2>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn0>; + qcom,ap-owned; + qcom,prio = <1>; + }; + + mas_xm_ufs_mem: mas-xm-ufs-mem { + cell-id = ; + label = "mas-xm-ufs-mem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <4>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg { + cell-id = ; + label = "mas-qhm-a2noc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_aggre2_noc>; + qcom,bus-dev = <&fab_aggre2_noc>; + }; + + mas_qhm_qdss_bam: mas-qhm-qdss-bam { + cell-id = ; + label = "mas-qhm-qdss-bam"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <8>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qhm_qup_north: mas-qhm-qup-north { + cell-id = ; + label = "mas-qhm-qup-north"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,bcms = <&bcm_qup0>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qnm_cnoc: mas-qnm-cnoc { + cell-id = ; + label = "mas-qnm-cnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <1>; + }; + + mas_qxm_crypto: mas-qxm-crypto { + cell-id = ; + label = "mas-qxm-crypto"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + 
qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,bcms = <&bcm_ce0>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qxm_ipa: mas-qxm-ipa { + cell-id = ; + label = "mas-qxm-ipa"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <2>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_xm_pcie3_0: mas-xm-pcie3-0 { + cell-id = ; + label = "mas-xm-pcie3-0"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <11>; + qcom,connections = <&slv_qns_pcie_gemnoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_xm_qdss_etr: mas-xm-qdss-etr { + cell-id = ; + label = "mas-xm-qdss-etr"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <9>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_xm_usb3_0: mas-xm-usb3-0 { + cell-id = ; + label = "mas-xm-usb3-0"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <12>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp { + cell-id = ; + label = "mas-qxm-camnoc-hf0-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_camnoc_uncomp>; + qcom,bus-dev = <&fab_camnoc_virt>; + qcom,bcms = <&bcm_mm1>; + }; + + mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp { + cell-id = ; + label = "mas-qxm-camnoc-hf1-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_camnoc_uncomp>; + qcom,bus-dev = <&fab_camnoc_virt>; + qcom,bcms = <&bcm_mm1>; + }; + + mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp { + cell-id = ; + label = "mas-qxm-camnoc-sf-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_camnoc_uncomp>; + qcom,bus-dev = <&fab_camnoc_virt>; + 
qcom,bcms = <&bcm_mm1>; + }; + + mas_qnm_npu: mas-qnm-npu { + cell-id = ; + label = "mas-qnm-npu"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + qcom,connections = <&slv_qns_cdsp_gemnoc>; + qcom,bus-dev = <&fab_compute_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qhm_spdm: mas-qhm-spdm { + cell-id = ; + label = "mas-qhm-spdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_cnoc_a2noc>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + mas_qnm_snoc: mas-qnm-snoc { + cell-id = ; + label = "mas-qnm-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_tlmm_south + &slv_qhs_camera_cfg &slv_qhs_sdc4 + &slv_qhs_sdc2 &slv_qhs_mnoc_cfg + &slv_qhs_ufs_mem_cfg &slv_qhs_qupv3_center + &slv_qhs_glm &slv_qhs_pdm + &slv_qhs_camera_nrt_throttle_cfg + &slv_qhs_a2_noc_cfg &slv_qhs_qdss_cfg + &slv_qhs_camera_rt_throttle_cfg + &slv_qhs_display_cfg &slv_qhs_pcie_cfg + &slv_qhs_display_throttle_cfg &slv_qhs_tcsr + &slv_qhs_venus_cvp_throttle_cfg + &slv_qhs_ddrss_cfg &slv_qhs_ahb2phy_north + &slv_qhs_snoc_cfg &slv_qhs_gpuss_cfg + &slv_qhs_venus_cfg &slv_qhs_tsif + &slv_qhs_compute_dsp_cfg &slv_qhs_clk_ctl + &slv_qhs_aop &slv_qhs_qupv3_north + &slv_qhs_ahb2phy_south &slv_srvc_cnoc + &slv_qhs_ahb2phy_west &slv_qhs_usb3_0 + &slv_qhs_venus_throttle_cfg &slv_qhs_ipa + &slv_qhs_cpr_cx &slv_qhs_tlmm_west + &slv_qhs_a1_noc_cfg &slv_qhs_aoss + &slv_qhs_prng &slv_qhs_vsense_ctrl_cfg + &slv_qhs_emmc_cfg &slv_qhs_spdm + &slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg + &slv_qhs_tlmm_north &slv_qhs_cpr_mx + &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + mas_xm_qdss_dap: mas-xm-qdss-dap { + cell-id = ; + label = "mas-xm-qdss-dap"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_tlmm_south + &slv_qhs_camera_cfg &slv_qhs_sdc4 + &slv_qhs_sdc2 &slv_qhs_mnoc_cfg + &slv_qhs_ufs_mem_cfg &slv_qhs_qupv3_center + &slv_qhs_glm 
&slv_qhs_pdm + &slv_qhs_camera_nrt_throttle_cfg + &slv_qhs_a2_noc_cfg &slv_qhs_qdss_cfg + &slv_qhs_camera_rt_throttle_cfg + &slv_qhs_display_cfg &slv_qhs_pcie_cfg + &slv_qhs_display_throttle_cfg &slv_qhs_tcsr + &slv_qhs_venus_cvp_throttle_cfg + &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc + &slv_qhs_ahb2phy_north &slv_qhs_snoc_cfg + &slv_qhs_gpuss_cfg &slv_qhs_venus_cfg + &slv_qhs_tsif &slv_qhs_compute_dsp_cfg + &slv_qhs_clk_ctl &slv_qhs_aop + &slv_qhs_qupv3_north &slv_qhs_ahb2phy_south + &slv_srvc_cnoc &slv_qhs_ahb2phy_west + &slv_qhs_usb3_0 &slv_qhs_venus_throttle_cfg + &slv_qhs_ipa &slv_qhs_cpr_cx + &slv_qhs_tlmm_west &slv_qhs_a1_noc_cfg + &slv_qhs_aoss &slv_qhs_prng + &slv_qhs_vsense_ctrl_cfg &slv_qhs_emmc_cfg + &slv_qhs_spdm &slv_qhs_crypto0_cfg + &slv_qhs_pimem_cfg &slv_qhs_tlmm_north + &slv_qhs_cpr_mx &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_config_noc>; + }; + + mas_qhm_cnoc_dc_noc: mas-qhm-cnoc-dc-noc { + cell-id = ; + label = "mas-qhm-cnoc-dc-noc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_llcc &slv_qhs_gemnoc>; + qcom,bus-dev = <&fab_dc_noc>; + }; + + mas_acm_apps: mas-acm-apps { + cell-id = ; + label = "mas-acm-apps"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,bcms = <&bcm_sh5>; + }; + + mas_qhm_gemnoc_cfg: mas-qhm-gemnoc-cfg { + cell-id = ; + label = "mas-qhm-gemnoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_gemnoc + &slv_qhs_mdsp_ms_mpu_cfg>; + qcom,bus-dev = <&fab_gem_noc>; + }; + + mas_qnm_cmpnoc: mas-qnm-cmpnoc { + cell-id = ; + label = "mas-qnm-cmpnoc"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <64>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qnm_mnoc_hf: mas-qnm-mnoc-hf { + cell-id = ; + label = "mas-qnm-mnoc-hf"; + qcom,buswidth = <32>; + 
qcom,agg-ports = <2>; + qcom,qport = <128 129>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qnm_mnoc_sf: mas-qnm-mnoc-sf { + cell-id = ; + label = "mas-qnm-mnoc-sf"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <320>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qnm_pcie: mas-qnm-pcie { + cell-id = ; + label = "mas-qnm-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <224>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qnm_snoc_gc: mas-qnm-snoc-gc { + cell-id = ; + label = "mas-qnm-snoc-gc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <192>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qnm_snoc_sf: mas-qnm-snoc-sf { + cell-id = ; + label = "mas-qnm-snoc-sf"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,qport = <160>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_gpu: mas-qxm-gpu { + cell-id = ; + label = "mas-qxm-gpu"; + qcom,buswidth = <32>; + qcom,agg-ports = <2>; + qcom,qport = <288 289>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_ipa_core_master: mas-ipa-core-master { + cell-id = ; + label = "mas-ipa-core-master"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_ipa_core_slave>; + qcom,bus-dev = <&fab_ipa_virt>; + }; + + mas_llcc_mc: mas-llcc-mc { + cell-id = ; + label = "mas-llcc-mc"; + qcom,buswidth = <4>; + qcom,agg-ports = <2>; + qcom,connections = <&slv_ebi>; + qcom,bus-dev = <&fab_mc_virt>; + }; + + mas_qhm_mnoc_cfg: mas-qhm-mnoc-cfg { + cell-id = ; 
+ label = "mas-qhm-mnoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_mnoc>; + qcom,bus-dev = <&fab_mmss_noc>; + }; + + mas_qxm_camnoc_hf: mas-qxm-camnoc-hf { + cell-id = ; + label = "mas-qxm-camnoc-hf"; + qcom,buswidth = <32>; + qcom,agg-ports = <2>; + qcom,qport = <32 64>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_camnoc_nrt: mas-qxm-camnoc-nrt { + cell-id = ; + label = "mas-qxm-camnoc-nrt"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <258>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm2>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_camnoc_rt: mas-qxm-camnoc-rt { + cell-id = ; + label = "mas-qxm-camnoc-rt"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <257>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_camnoc_sf: mas-qxm-camnoc-sf { + cell-id = ; + label = "mas-qxm-camnoc-sf"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_mdp0: mas-qxm-mdp0 { + cell-id = ; + label = "mas-qxm-mdp0"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <96>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_mdp1: mas-qxm-mdp1 { + cell-id = ; + label = "mas-qxm-mdp1"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <128>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_rot: mas-qxm-rot { + cell-id = ; + label = 
"mas-qxm-rot"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <160>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_venus0: mas-qxm-venus0 { + cell-id = ; + label = "mas-qxm-venus0"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <192>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_venus1: mas-qxm-venus1 { + cell-id = ; + label = "mas-qxm-venus1"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <224>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_venus_arm9: mas-qxm-venus-arm9 { + cell-id = ; + label = "mas-qxm-venus-arm9"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <256>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qhm_snoc_cfg: mas-qhm-snoc-cfg { + cell-id = ; + label = "mas-qhm-snoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_snoc>; + qcom,bus-dev = <&fab_system_noc>; + }; + + mas_qnm_aggre1_noc: mas-qnm-aggre1-noc { + cell-id = ; + label = "mas-qnm-aggre1-noc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_gemnoc_sf + &slv_qxs_pimem &slv_qxs_imem + &slv_qhs_apss &slv_qns_cnoc + &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn9>; + }; + + mas_qnm_aggre2_noc: mas-qnm-aggre2-noc { + cell-id = ; + label = "mas-qnm-aggre2-noc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_gemnoc_sf + &slv_qxs_pimem &slv_qxs_imem + &slv_qhs_apss &slv_qns_cnoc + &slv_xs_sys_tcu_cfg &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = 
<&bcm_sn11>; + }; + + mas_qnm_gemnoc: mas-qnm-gemnoc { + cell-id = ; + label = "mas-qnm-gemnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qxs_pimem &slv_qxs_imem + &slv_qhs_apss &slv_qns_cnoc + &slv_xs_sys_tcu_cfg &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn15>; + }; + + mas_qxm_pimem: mas-qxm-pimem { + cell-id = ; + label = "mas-qxm-pimem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qns_gemnoc_gc &slv_qxs_imem>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn12>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_xm_gic: mas-xm-gic { + cell-id = ; + label = "mas-xm-gic"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns_gemnoc_gc &slv_qxs_imem>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn12>; + qcom,ap-owned; + qcom,prio = <1>; + }; + + mas_alc: mas-alc { + cell-id = ; + label = "mas-alc"; + qcom,buswidth = <1>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mc_virt>; + qcom,bcms = <&bcm_alc>; + }; + + mas_qnm_mnoc_hf_display: mas-qnm-mnoc-hf_display { + cell-id = ; + label = "mas-qnm-mnoc-hf_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <2>; + qcom,qport = <128 129>; + qcom,connections = <&slv_qns_llcc_display>; + qcom,bus-dev = <&fab_gem_noc_display>; + }; + + mas_qnm_mnoc_sf_display: mas-qnm-mnoc-sf_display { + cell-id = ; + label = "mas-qnm-mnoc-sf_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <320>; + qcom,connections = <&slv_qns_llcc_display>; + qcom,bus-dev = <&fab_gem_noc_display>; + }; + + mas_llcc_mc_display: mas-llcc-mc_display { + cell-id = ; + label = "mas-llcc-mc_display"; + qcom,buswidth = <4>; + qcom,agg-ports = <2>; + qcom,connections = <&slv_ebi_display>; + qcom,bus-dev = <&fab_mc_virt_display>; + }; + + mas_qxm_mdp0_display: mas-qxm-mdp0_display { + cell-id = ; + label = "mas-qxm-mdp0_display"; + qcom,buswidth = <32>; + 
qcom,agg-ports = <1>; + qcom,qport = <96>; + qcom,connections = <&slv_qns_mem_noc_hf_display>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,bcms = <&bcm_mm1_display>; + }; + + mas_qxm_mdp1_display: mas-qxm-mdp1_display { + cell-id = ; + label = "mas-qxm-mdp1_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <128>; + qcom,connections = <&slv_qns_mem_noc_hf_display>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,bcms = <&bcm_mm1_display>; + }; + + mas_qxm_rot_display: mas-qxm-rot_display { + cell-id = ; + label = "mas-qxm-rot_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <160>; + qcom,connections = <&slv_qns2_mem_noc_display>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,bcms = <&bcm_mm3_display>; + }; + + /*Slaves*/ + slv_qns_a1noc_snoc:slv-qns-a1noc-snoc { + cell-id = ; + label = "slv-qns-a1noc-snoc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,connections = <&mas_qnm_aggre1_noc>; + qcom,bcms = <&bcm_sn9>; + }; + + slv_srvc_aggre1_noc:slv-srvc-aggre1-noc { + cell-id = ; + label = "slv-srvc-aggre1-noc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre1_noc>; + }; + + slv_qns_a2noc_snoc:slv-qns-a2noc-snoc { + cell-id = ; + label = "slv-qns-a2noc-snoc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,connections = <&mas_qnm_aggre2_noc>; + qcom,bcms = <&bcm_sn11>; + }; + + slv_qns_pcie_gemnoc:slv-qns-pcie-gemnoc { + cell-id = ; + label = "slv-qns-pcie-gemnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre2_noc>; + qcom,connections = <&mas_qnm_pcie>; + qcom,bcms = <&bcm_sn14>; + }; + + slv_srvc_aggre2_noc:slv-srvc-aggre2-noc { + cell-id = ; + label = "slv-srvc-aggre2-noc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre2_noc>; + }; + + slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp { + cell-id = ; + label = "slv-qns-camnoc-uncomp"; + 
qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_camnoc_virt>; + }; + + slv_qns_cdsp_gemnoc:slv-qns-cdsp-gemnoc { + cell-id = ; + label = "slv-qns-cdsp-gemnoc"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_compute_noc>; + qcom,connections = <&mas_qnm_cmpnoc>; + }; + + slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg { + cell-id = ; + label = "slv-qhs-a1-noc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_a1noc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_a2_noc_cfg:slv-qhs-a2-noc-cfg { + cell-id = ; + label = "slv-qhs-a2-noc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_a2noc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ahb2phy_north:slv-qhs-ahb2phy-north { + cell-id = ; + label = "slv-qhs-ahb2phy-north"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ahb2phy_south:slv-qhs-ahb2phy-south { + cell-id = ; + label = "slv-qhs-ahb2phy-south"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ahb2phy_west:slv-qhs-ahb2phy-west { + cell-id = ; + label = "slv-qhs-ahb2phy-west"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_aop:slv-qhs-aop { + cell-id = ; + label = "slv-qhs-aop"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_aoss:slv-qhs-aoss { + cell-id = ; + label = "slv-qhs-aoss"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_camera_cfg:slv-qhs-camera-cfg { + cell-id = ; + label = "slv-qhs-camera-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = 
<&bcm_cn0>; + qcom,disable-ports = <70 71>; + }; + + slv_qhs_camera_nrt_throttle_cfg:slv-qhs-camera-nrt-thrott-cfg { + cell-id = ; + label = "slv-qhs-camera-nrt-throttle-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_camera_rt_throttle_cfg:slv-qhs-camera-rt-throttle-cfg { + cell-id = ; + label = "slv-qhs-camera-rt-throttle-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_clk_ctl:slv-qhs-clk-ctl { + cell-id = ; + label = "slv-qhs-clk-ctl"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_compute_dsp_cfg:slv-qhs-compute-dsp-cfg { + cell-id = ; + label = "slv-qhs-compute-dsp-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_cpr_cx:slv-qhs-cpr-cx { + cell-id = ; + label = "slv-qhs-cpr-cx"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_cpr_mx:slv-qhs-cpr-mx { + cell-id = ; + label = "slv-qhs-cpr-mx"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_crypto0_cfg:slv-qhs-crypto0-cfg { + cell-id = ; + label = "slv-qhs-crypto0-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ddrss_cfg:slv-qhs-ddrss-cfg { + cell-id = ; + label = "slv-qhs-ddrss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_cnoc_dc_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_display_cfg:slv-qhs-display-cfg { + cell-id = ; + label = "slv-qhs-display-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + 
qcom,disable-ports = <72 73>; + }; + + slv_qhs_display_throttle_cfg:slv-qhs-display-throttle-cfg { + cell-id = ; + label = "slv-qhs-display-throttle-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_emmc_cfg:slv-qhs-emmc-cfg { + cell-id = ; + label = "slv-qhs-emmc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_glm:slv-qhs-glm { + cell-id = ; + label = "slv-qhs-glm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_gpuss_cfg:slv-qhs-gpuss-cfg { + cell-id = ; + label = "slv-qhs-gpuss-cfg"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_imem_cfg:slv-qhs-imem-cfg { + cell-id = ; + label = "slv-qhs-imem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ipa:slv-qhs-ipa { + cell-id = ; + label = "slv-qhs-ipa"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_mnoc_cfg:slv-qhs-mnoc-cfg { + cell-id = ; + label = "slv-qhs-mnoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_mnoc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_pcie_cfg:slv-qhs-pcie-cfg { + cell-id = ; + label = "slv-qhs-pcie-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_pdm:slv-qhs-pdm { + cell-id = ; + label = "slv-qhs-pdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_pimem_cfg:slv-qhs-pimem-cfg { + cell-id = ; + label = "slv-qhs-pimem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + 
qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_prng:slv-qhs-prng { + cell-id = ; + label = "slv-qhs-prng"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_qdss_cfg:slv-qhs-qdss-cfg { + cell-id = ; + label = "slv-qhs-qdss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_qupv3_center:slv-qhs-qupv3-center { + cell-id = ; + label = "slv-qhs-qupv3-center"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_qupv3_north:slv-qhs-qupv3-north { + cell-id = ; + label = "slv-qhs-qupv3-north"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_sdc2:slv-qhs-sdc2 { + cell-id = ; + label = "slv-qhs-sdc2"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_sdc4:slv-qhs-sdc4 { + cell-id = ; + label = "slv-qhs-sdc4"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_snoc_cfg:slv-qhs-snoc-cfg { + cell-id = ; + label = "slv-qhs-snoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_snoc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_spdm:slv-qhs-spdm { + cell-id = ; + label = "slv-qhs-spdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tcsr:slv-qhs-tcsr { + cell-id = ; + label = "slv-qhs-tcsr"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tlmm_north:slv-qhs-tlmm-north { + cell-id = ; + label = "slv-qhs-tlmm-north"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + 
qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tlmm_south:slv-qhs-tlmm-south { + cell-id = ; + label = "slv-qhs-tlmm-south"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tlmm_west:slv-qhs-tlmm-west { + cell-id = ; + label = "slv-qhs-tlmm-west"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tsif:slv-qhs-tsif { + cell-id = ; + label = "slv-qhs-tsif"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ufs_mem_cfg:slv-qhs-ufs-mem-cfg { + cell-id = ; + label = "slv-qhs-ufs-mem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_usb3_0:slv-qhs-usb3-0 { + cell-id = ; + label = "slv-qhs-usb3-0"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_venus_cfg:slv-qhs-venus-cfg { + cell-id = ; + label = "slv-qhs-venus-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + qcom,disable-ports = <75 76>; + }; + + slv_qhs_venus_cvp_throttle_cfg:slv-qhs-venus-cvp-throttle-cfg { + cell-id = ; + label = "slv-qhs-venus-cvp-throttle-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_venus_throttle_cfg:slv-qhs-venus-throttle-cfg { + cell-id = ; + label = "slv-qhs-venus-throttle-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_vsense_ctrl_cfg:slv-qhs-vsense-ctrl-cfg { + cell-id = ; + label = "slv-qhs-vsense-ctrl-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + 
slv_qns_cnoc_a2noc:slv-qns-cnoc-a2noc { + cell-id = ; + label = "slv-qns-cnoc-a2noc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qnm_cnoc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_srvc_cnoc:slv-srvc-cnoc { + cell-id = ; + label = "slv-srvc-cnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_gemnoc:slv-qhs-gemnoc { + cell-id = ; + label = "slv-qhs-gemnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_dc_noc>; + qcom,connections = <&mas_qhm_gemnoc_cfg>; + }; + + slv_qhs_llcc:slv-qhs-llcc { + cell-id = ; + label = "slv-qhs-llcc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_dc_noc>; + }; + + slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg { + cell-id = ; + label = "slv-qhs-mdsp-ms-mpu-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + }; + + slv_qns_gem_noc_snoc:slv-qns-gem-noc-snoc { + cell-id = ; + label = "slv-qns-gem-noc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,connections = <&mas_qnm_gemnoc>; + qcom,bcms = <&bcm_sh2>; + }; + + slv_qns_llcc:slv-qns-llcc { + cell-id = ; + label = "slv-qns-llcc"; + qcom,buswidth = <16>; + qcom,agg-ports = <2>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,connections = <&mas_llcc_mc>; + qcom,bcms = <&bcm_sh0>; + }; + + slv_srvc_gemnoc:slv-srvc-gemnoc { + cell-id = ; + label = "slv-srvc-gemnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + }; + + slv_ipa_core_slave:slv-ipa-core-slave { + cell-id = ; + label = "slv-ipa-core-slave"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_ipa_virt>; + qcom,bcms = <&bcm_ip0>; + }; + + slv_ebi:slv-ebi { + cell-id = ; + label = "slv-ebi"; + qcom,buswidth = <4>; + qcom,agg-ports = <2>; + qcom,bus-dev = <&fab_mc_virt>; + qcom,bcms = <&bcm_mc0>, <&bcm_acv>; + }; + + 
slv_qns2_mem_noc:slv-qns2-mem-noc { + cell-id = ; + label = "slv-qns2-mem-noc"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,connections = <&mas_qnm_mnoc_sf>; + qcom,bcms = <&bcm_mm2>; + }; + + slv_qns_mem_noc_hf:slv-qns-mem-noc-hf { + cell-id = ; + label = "slv-qns-mem-noc-hf"; + qcom,buswidth = <32>; + qcom,agg-ports = <2>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,connections = <&mas_qnm_mnoc_hf>; + qcom,bcms = <&bcm_mm0>; + }; + + slv_srvc_mnoc:slv-srvc-mnoc { + cell-id = ; + label = "slv-srvc-mnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc>; + }; + + slv_qhs_apss:slv-qhs-apss { + cell-id = ; + label = "slv-qhs-apss"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + }; + + slv_qns_cnoc:slv-qns-cnoc { + cell-id = ; + label = "slv-qns-cnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc>; + }; + + slv_qns_gemnoc_gc:slv-qns-gemnoc-gc { + cell-id = ; + label = "slv-qns-gemnoc-gc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc_gc>; + qcom,bcms = <&bcm_sn2>; + }; + + slv_qns_gemnoc_sf:slv-qns-gemnoc-sf { + cell-id = ; + label = "slv-qns-gemnoc-sf"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc_sf>; + qcom,bcms = <&bcm_sn0>; + }; + + slv_qxs_imem:slv-qxs-imem { + cell-id = ; + label = "slv-qxs-imem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn1>; + }; + + slv_qxs_pimem:slv-qxs-pimem { + cell-id = ; + label = "slv-qxs-pimem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn4>; + }; + + slv_srvc_snoc:slv-srvc-snoc { + cell-id = ; + label = "slv-srvc-snoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = 
<&fab_system_noc>; + }; + + slv_xs_qdss_stm:slv-xs-qdss-stm { + cell-id = ; + label = "slv-xs-qdss-stm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + }; + + slv_xs_sys_tcu_cfg:slv-xs-sys-tcu-cfg { + cell-id = ; + label = "slv-xs-sys-tcu-cfg"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + }; + + slv_qns_llcc_display:slv-qns-llcc_display { + cell-id = ; + label = "slv-qns-llcc_display"; + qcom,buswidth = <16>; + qcom,agg-ports = <2>; + qcom,bus-dev = <&fab_gem_noc_display>; + qcom,connections = <&mas_llcc_mc_display>; + qcom,bcms = <&bcm_sh0_display>; + }; + + slv_ebi_display:slv-ebi_display { + cell-id = ; + label = "slv-ebi_display"; + qcom,buswidth = <4>; + qcom,agg-ports = <2>; + qcom,bus-dev = <&fab_mc_virt_display>; + qcom,bcms = <&bcm_mc0_display>, <&bcm_acv_display>; + }; + + slv_qns2_mem_noc_display:slv-qns2-mem-noc_display { + cell-id = ; + label = "slv-qns2-mem-noc_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,connections = <&mas_qnm_mnoc_sf_display>; + qcom,bcms = <&bcm_mm2_display>; + }; + + slv_qns_mem_noc_hf_display:slv-qns-mem-noc-hf_display { + cell-id = ; + label = "slv-qns-mem-noc-hf_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <2>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,connections = <&mas_qnm_mnoc_hf_display>; + qcom,bcms = <&bcm_mm0_display>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi index 6c4228397337cad666f7b576085bf9aed21b7b3b..81b3e58ee8f1b6ede68657d63deca9e27a598e88 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi @@ -110,7 +110,7 @@ /* GDSCs in Camera CC */ bps_gdsc: qcom,gdsc@ad07004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "bps_gdsc"; reg = <0xad07004 0x4>; qcom,poll-cfg-gdscr; @@ -118,7 +118,7 @@ }; 
ife_0_gdsc: qcom,gdsc@ad0a004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "ife_0_gdsc"; reg = <0xad0a004 0x4>; qcom,poll-cfg-gdscr; @@ -126,7 +126,7 @@ }; ife_1_gdsc: qcom,gdsc@ad0b004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "ife_1_gdsc"; reg = <0xad0b004 0x4>; qcom,poll-cfg-gdscr; @@ -134,7 +134,7 @@ }; ipe_0_gdsc: qcom,gdsc@ad08004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "ipe_0_gdsc"; reg = <0xad08004 0x4>; qcom,poll-cfg-gdscr; @@ -142,7 +142,7 @@ }; ipe_1_gdsc: qcom,gdsc@ad09004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "ipe_1_gdsc"; reg = <0xad09004 0x4>; qcom,poll-cfg-gdscr; @@ -150,7 +150,7 @@ }; titan_top_gdsc: qcom,gdsc@ad0c1c4 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "titan_top_gdsc"; reg = <0xad0c1c4 0x4>; qcom,poll-cfg-gdscr; @@ -159,7 +159,7 @@ /* GDSCs in Display CC */ mdss_core_gdsc: qcom,gdsc@0f03000 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "mdss_core_gdsc"; reg = <0xaf03000 0x4>; qcom,poll-cfg-gdscr; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi index f02366838bfee13d77f06e17b0ea9f0f52b66eb3..3367bd3c8dad25d3c5c3e11ca466781b1a1f96aa 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi @@ -10,9 +10,73 @@ * GNU General Public License for more details. 
*/ +#include + &soc { }; &qupv3_se8_2uart { status = "ok"; }; + +&ufsphy_mem { + compatible = "qcom,ufs-phy-qmp-v3"; + + vdda-phy-supply = <&pm6150_l4>; /* 0.88v */ + vdda-pll-supply = <&pm6150l_l3>; /* 1.2v */ + vdda-phy-max-microamp = <62900>; + vdda-pll-max-microamp = <18300>; + + status = "ok"; +}; + +&ufshc_mem { + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm6150_l19>; + vcc-voltage-level = <2950000 2960000>; + vccq2-supply = <&pm6150_l12>; + vcc-max-microamp = <600000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm6150l_l3>; + qcom,vddp-ref-clk-max-microamp = <100>; + + status = "ok"; +}; + +&sdhc_1 { + vdd-supply = <&pm6150_l19>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 570000>; + + vdd-io-supply = <&pm6150_l12>; + qcom,vdd-io-always-on; + qcom,vdd-io-lpm-sup; + qcom,vdd-io-voltage-level = <1800000 1800000>; + qcom,vdd-io-current-level = <0 325000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>; + pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>; + + status = "ok"; +}; + +&sdhc_2 { + vdd-supply = <&pm6150l_l9>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 800000>; + + vdd-io-supply = <&pm6150l_l6>; + qcom,vdd-io-voltage-level = <1800000 2950000>; + qcom,vdd-io-current-level = <0 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; + + cd-gpios = <&tlmm 69 GPIO_ACTIVE_HIGH>; + + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi index dd5a6096e01a17d18d20996e4c43a668c003f5d5..9aaf3f5a673041f70f962cbff5e922f19ebf75a0 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi @@ -20,6 
+20,52 @@ interrupt-controller; #interrupt-cells = <2>; + ufs_dev_reset_assert: ufs_dev_reset_assert { + config { + pins = "ufs_reset"; + bias-pull-down; /* default: pull down */ + /* + * UFS_RESET driver strengths are having + * different values/steps compared to typical + * GPIO drive strengths. + * + * Following table clarifies: + * + * HDRV value | UFS_RESET | Typical GPIO + * (dec) | (mA) | (mA) + * 0 | 0.8 | 2 + * 1 | 1.55 | 4 + * 2 | 2.35 | 6 + * 3 | 3.1 | 8 + * 4 | 3.9 | 10 + * 5 | 4.65 | 12 + * 6 | 5.4 | 14 + * 7 | 6.15 | 16 + * + * POR value for UFS_RESET HDRV is 3 which means + * 3.1mA and we want to use that. Hence just + * specify 8mA to "drive-strength" binding and + * that should result into writing 3 to HDRV + * field. + */ + drive-strength = <8>; /* default: 3.1 mA */ + output-low; /* active low reset */ + }; + }; + + ufs_dev_reset_deassert: ufs_dev_reset_deassert { + config { + pins = "ufs_reset"; + bias-pull-down; /* default: pull down */ + /* + * default: 3.1 mA + * check comments under ufs_dev_reset_assert + */ + drive-strength = <8>; + output-high; /* active low reset */ + }; + }; + /* QUPv3 South SE mappings */ /* SE 0 pin mappings */ qupv3_se0_i2c_pins: qupv3_se0_i2c_pins { @@ -931,5 +977,31 @@ drive-strength = <2>; /* 2 MA */ }; }; + + sdc2_cd_on: cd_on { + mux { + pins = "gpio69"; + function = "gpio"; + }; + + config { + pins = "gpio69"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + sdc2_cd_off: cd_off { + mux { + pins = "gpio69"; + function = "gpio"; + }; + + config { + pins = "gpio69"; + drive-strength = <2>; + bias-disable; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi index dda1f4ec0f0e992fe7e77a19d6674e9eedac130d..ef2190a63c7ca86f96de34508e28d50b3436b598 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi @@ -28,20 +28,18 @@ reg = <0>; label = "l3-wfi"; qcom,psci-mode = <0x1>; - qcom,latency-us = <600>; - 
qcom,ss-power = <420>; - qcom,energy-overhead = <4254140>; - qcom,time-overhead = <1260>; + qcom,entry-latency-us = <660>; + qcom,exit-latency-us = <600>; + qcom,min-residency-us = <1260>; }; qcom,pm-cluster-level@1 { /* D4 */ reg = <1>; label = "l3-pc"; qcom,psci-mode = <0x4>; - qcom,latency-us = <3048>; - qcom,ss-power = <329>; - qcom,energy-overhead = <6189829>; - qcom,time-overhead = <5800>; + qcom,entry-latency-us = <2752>; + qcom,exit-latency-us = <3048>; + qcom,min-residency-us = <6118>; qcom,min-child-idx = <2>; qcom,is-reset; }; @@ -50,10 +48,9 @@ reg = <2>; label = "cx-off"; qcom,psci-mode = <0x224>; - qcom,latency-us = <4562>; - qcom,ss-power = <290>; - qcom,energy-overhead = <6989829>; - qcom,time-overhead = <8200>; + qcom,entry-latency-us = <3638>; + qcom,exit-latency-us = <4562>; + qcom,min-residency-us = <8467>; qcom,min-child-idx = <2>; qcom,is-reset; qcom,notify-rpm; @@ -63,10 +60,9 @@ reg = <3>; label = "llcc-off"; qcom,psci-mode = <0xC24>; - qcom,latency-us = <6562>; - qcom,ss-power = <165>; - qcom,energy-overhead = <7000029>; - qcom,time-overhead = <9825>; + qcom,entry-latency-us = <3263>; + qcom,exit-latency-us = <6562>; + qcom,min-residency-us = <9826>; qcom,min-child-idx = <2>; qcom,is-reset; qcom,notify-rpm; @@ -84,20 +80,18 @@ reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <60>; - qcom,ss-power = <383>; - qcom,energy-overhead = <64140>; - qcom,time-overhead = <121>; + qcom,entry-latency-us = <61>; + qcom,exit-latency-us = <60>; + qcom,min-residency-us = <121>; }; qcom,pm-cpu-level@1 { /* C3 */ reg = <1>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <901>; - qcom,ss-power = <364>; - qcom,energy-overhead = <579285>; - qcom,time-overhead = <1450>; + qcom,entry-latency-us = <549>; + qcom,exit-latency-us = <901>; + qcom,min-residency-us = <1774>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -106,10 +100,9 @@ reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <915>; - 
qcom,ss-power = <353>; - qcom,energy-overhead = <666292>; - qcom,time-overhead = <1617>; + qcom,entry-latency-us = <702>; + qcom,exit-latency-us = <915>; + qcom,min-residency-us = <4001>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -126,20 +119,18 @@ reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <66>; - qcom,ss-power = <427>; - qcom,energy-overhead = <68410>; - qcom,time-overhead = <121>; + qcom,entry-latency-us = <55>; + qcom,exit-latency-us = <66>; + qcom,min-residency-us = <121>; }; qcom,pm-cpu-level@1 { /* C3 */ reg = <1>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <1244>; - qcom,ss-power = <373>; - qcom,energy-overhead = <795006>; - qcom,time-overhead = <1767>; + qcom,entry-latency-us = <523>; + qcom,exit-latency-us = <1244>; + qcom,min-residency-us = <2207>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -148,10 +139,9 @@ reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <1854>; - qcom,ss-power = <359>; - qcom,energy-overhead = <1068095>; - qcom,time-overhead = <2380>; + qcom,entry-latency-us = <526>; + qcom,exit-latency-us = <1854>; + qcom,min-residency-us = <5555>; qcom,is-reset; qcom,use-broadcast-timer; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi index f02366838bfee13d77f06e17b0ea9f0f52b66eb3..3367bd3c8dad25d3c5c3e11ca466781b1a1f96aa 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi @@ -10,9 +10,73 @@ * GNU General Public License for more details. 
*/ +#include + &soc { }; &qupv3_se8_2uart { status = "ok"; }; + +&ufsphy_mem { + compatible = "qcom,ufs-phy-qmp-v3"; + + vdda-phy-supply = <&pm6150_l4>; /* 0.88v */ + vdda-pll-supply = <&pm6150l_l3>; /* 1.2v */ + vdda-phy-max-microamp = <62900>; + vdda-pll-max-microamp = <18300>; + + status = "ok"; +}; + +&ufshc_mem { + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm6150_l19>; + vcc-voltage-level = <2950000 2960000>; + vccq2-supply = <&pm6150_l12>; + vcc-max-microamp = <600000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm6150l_l3>; + qcom,vddp-ref-clk-max-microamp = <100>; + + status = "ok"; +}; + +&sdhc_1 { + vdd-supply = <&pm6150_l19>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 570000>; + + vdd-io-supply = <&pm6150_l12>; + qcom,vdd-io-always-on; + qcom,vdd-io-lpm-sup; + qcom,vdd-io-voltage-level = <1800000 1800000>; + qcom,vdd-io-current-level = <0 325000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>; + pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>; + + status = "ok"; +}; + +&sdhc_2 { + vdd-supply = <&pm6150l_l9>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 800000>; + + vdd-io-supply = <&pm6150l_l6>; + qcom,vdd-io-voltage-level = <1800000 2950000>; + qcom,vdd-io-current-level = <0 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; + + cd-gpios = <&tlmm 69 GPIO_ACTIVE_HIGH>; + + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi index 70e03825fc965172cb38d83ccbc9fc1697883681..b6987a2fa25611db851189277969373c33853d15 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi @@ -17,7 +17,7 @@ 
qupv3_0: qcom,qupv3_0_geni_se@0x8c0000 { compatible = "qcom,qupv3-geni-se"; reg = <0x8c0000 0x2000>; - qcom,bus-mas-id = ; + qcom,bus-mas-id = ; qcom,bus-slv-id = ; qcom,iommu-s1-bypass; @@ -263,7 +263,7 @@ qupv3_1: qcom,qupv3_1_geni_se@0xac0000 { compatible = "qcom,qupv3-geni-se"; reg = <0xac0000 0x2000>; - qcom,bus-mas-id = ; + qcom,bus-mas-id = ; qcom,bus-slv-id = ; qcom,iommu-s1-bypass; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi index b78176c4ad0b573e8cbad69d4ad8fe8af3cd214e..1df64a4350c4323dcfe8f10846d3b195a6279ff8 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi @@ -92,6 +92,41 @@ status = "disabled"; }; +&ufsphy_mem { + compatible = "qcom,ufs-phy-qrbtc-sdm845"; + + vdda-phy-supply = <&pm6150_l4>; /* 0.88v */ + vdda-pll-supply = <&pm6150l_l3>; /* 1.2v */ + vdda-phy-max-microamp = <62900>; + vdda-pll-max-microamp = <18300>; + + status = "ok"; +}; + +&ufshc_mem { + limit-tx-hs-gear = <1>; + limit-rx-hs-gear = <1>; + scsi-cmd-timeout = <300000>; + + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm6150_l19>; + vccq2-supply = <&pm6150_l12>; + vcc-max-microamp = <600000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm6150l_l3>; + qcom,vddp-ref-clk-max-microamp = <100>; + qcom,vddp-ref-clk-min-uV = <1200000>; + qcom,vddp-ref-clk-max-uV = <1200000>; + + qcom,disable-lpm; + rpm-level = <0>; + spm-level = <0>; + status = "ok"; +}; + + &qupv3_se8_2uart { status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi index f3720892fa8a20f48e1906367f883529c3aa5992..de85b18cbd72d02793e45fe588a75a3f52a6a541 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi @@ -349,4 +349,102 @@ }; }; }; + + xo-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + 
thermal-governor = "user_space"; + thermal-sensors = <&pm6150_adc_tm ADC_XO_THERM_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + rf-pa0-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&pm6150_adc_tm ADC_AMUX_THM2_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + rf-pa1-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&pm6150_adc_tm ADC_AMUX_THM3_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + quiet-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&pm6150_adc_tm ADC_AMUX_THM4_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + conn-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&pm6150l_adc_tm ADC_AMUX_THM1_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + camera-ftherm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&pm6150l_adc_tm ADC_AMUX_THM3_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + nvm-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&pm6150l_adc_tm ADC_GPIO4_PU2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi index 
2d7e7e3c3806a0211f7eea88d680208ef51df6f2..b9afb21889bb8247544adbbfd9c1eab2136b466a 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -39,6 +39,7 @@ hsuart0 = &qupv3_se3_4uart; sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ sdhc2 = &sdhc_2; /* SDC2 SD Card slot */ + ufshc1 = &ufshc_mem; /* Embedded UFS slot */ }; cpus { @@ -50,6 +51,8 @@ compatible = "arm,armv8"; reg = <0x0 0x0>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_0>; L2_0: l2-cache { @@ -85,6 +88,8 @@ compatible = "arm,armv8"; reg = <0x0 0x100>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_100>; L2_100: l2-cache { @@ -115,6 +120,8 @@ compatible = "arm,armv8"; reg = <0x0 0x200>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_200>; L2_200: l2-cache { @@ -144,6 +151,8 @@ compatible = "arm,armv8"; reg = <0x0 0x300>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_300>; L2_300: l2-cache { @@ -173,6 +182,8 @@ compatible = "arm,armv8"; reg = <0x0 0x400>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_400>; L2_400: l2-cache { @@ -202,6 +213,8 @@ compatible = "arm,armv8"; reg = <0x0 0x500>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_500>; L2_500: l2-cache { @@ -231,6 +244,8 @@ compatible = "arm,armv8"; reg = <0x0 0x600>; enable-method = "psci"; + capacity-dmips-mhz = <1740>; + sched-energy-costs = <&CPU_COST_1 
&CLUSTER_COST_1>; cache-size = <0x10000>; next-level-cache = <&L2_600>; L2_600: l2-cache { @@ -269,6 +284,8 @@ compatible = "arm,armv8"; reg = <0x0 0x700>; enable-method = "psci"; + capacity-dmips-mhz = <1740>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; cache-size = <0x10000>; next-level-cache = <&L2_700>; L2_700: l2-cache { @@ -331,17 +348,85 @@ }; cluster1 { - core6 { + core0 { cpu = <&CPU6>; }; - core7 { + core1 { cpu = <&CPU7>; }; }; }; }; + energy_costs: energy-costs { + compatible = "sched-energy"; + + CPU_COST_0: core-cost0 { + busy-cost-data = < + 300000 18 + 768000 23 + 1017600 36 + 1248000 52 + 1497600 76 + 1612800 92 + 1804800 119 + >; + idle-cost-data = < + 16 12 8 6 + >; + }; + + CPU_COST_1: core-cost1 { + busy-cost-data = < + 300000 166 + 806400 293 + 1094400 470 + 1324800 676 + 1708800 1060 + 1939200 1362 + 2169600 1801 + 2361600 2326 + 2438400 2568 + >; + idle-cost-data = < + 100 80 60 40 + >; + }; + + CLUSTER_COST_0: cluster-cost0 { + busy-cost-data = < + 300000 5 + 768000 5 + 1017600 7 + 1248000 8 + 1497600 10 + 1612800 12 + 1804800 14 + >; + idle-cost-data = < + 4 3 2 1 + >; + }; + + CLUSTER_COST_1: cluster-cost1 { + busy-cost-data = < + 300000 19 + 806400 21 + 1094400 26 + 1324800 33 + 1708800 43 + 1939200 50 + 2169600 60 + 2361600 62 + 2438400 63 + >; + idle-cost-data = < + 4 3 2 1 + >; + }; + }; + psci { compatible = "arm,psci-1.0"; method = "smc"; @@ -654,8 +739,11 @@ }; clock_camcc: qcom,camcc { - compatible = "qcom,dummycc"; - clock-output-names = "camcc_clocks"; + compatible = "qcom,camcc-sdmmagpie", "syscon"; + vdd_cx-supply = <&VDD_CX_LEVEL>; + vdd_mx-supply = <&VDD_MX_LEVEL>; + reg = <0xad00000 0x10000>; + reg-names = "cc_base"; #clock-cells = <1>; #reset-cells = <1>; }; @@ -680,9 +768,11 @@ #reset-cells = <1>; }; - clock_dispcc: qcom,dispcc { - compatible = "qcom,dummycc"; - clock-output-names = "dispcc_clocks"; + clock_dispcc: qcom,dispcc@af00000 { + compatible = "qcom,dispcc-sdmmagpie", "syscon"; + vdd_cx-supply = 
<&VDD_CX_LEVEL>; + reg = <0xaf00000 0x20000>; + reg-names = "cc_base"; #clock-cells = <1>; #reset-cells = <1>; }; @@ -697,6 +787,43 @@ #reset-cells = <1>; }; + clock_cpucc: qcom,cpucc@18321000 { + compatible = "qcom,clk-cpu-osm-sdmmagpie"; + reg = <0x18321000 0x1400>, + <0x18323000 0x1400>, + <0x18325800 0x1400>; + reg-names = "osm_l3_base", "osm_pwrcl_base", + "osm_perfcl_base"; + #clock-cells = <1>; + status = "disabled"; + }; + + cpucc_debug: syscon@182a0018 { + compatible = "syscon"; + reg = <0x182a0018 0x4>; + }; + + mccc_debug: syscon@90b0000 { + compatible = "syscon"; + reg = <0x90b0000 0x1000>; + }; + + clock_debug: qcom,cc-debug { + compatible = "qcom,debugcc-sdmmagpie"; + qcom,cc-count = <8>; + qcom,gcc = <&clock_gcc>; + qcom,videocc = <&clock_videocc>; + qcom,camcc = <&clock_camcc>; + qcom,dispcc = <&clock_dispcc>; + qcom,gpucc = <&clock_gpucc>; + qcom,npucc = <&clock_npucc>; + qcom,cpucc = <&cpucc_debug>; + qcom,mccc = <&mccc_debug>; + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo_clk_src"; + #clock-cells = <1>; + }; + qcom,sps { compatible = "qcom,msm-sps-4k"; qcom,pipe-attr-ee; @@ -708,6 +835,13 @@ interrupts = ; }; + dsu_pmu@0 { + compatible = "arm,dsu-pmu"; + interrupts = ; + cpus = <&CPU0>, <&CPU1>, <&CPU2>, <&CPU3>, + <&CPU4>, <&CPU5>, <&CPU6>, <&CPU7>; + }; + qcom,msm-imem@146aa000 { compatible = "qcom,msm-imem"; reg = <0x146aa000 0x1000>; @@ -843,6 +977,15 @@ qcom,wakeup-enable; }; + eud: qcom,msm-eud@88e0000 { + compatible = "qcom,msm-eud"; + interrupt-names = "eud_irq"; + interrupts = ; + reg = <0x88e0000 0x2000>; + reg-names = "eud_base"; + status = "ok"; + }; + qcom,chd_sliver { compatible = "qcom,core-hang-detect"; label = "silver"; @@ -1028,6 +1171,16 @@ qcom,dump-node = <&L2_TLB_700>; qcom,dump-id = <0x127>; }; + + qcom,llcc1_d_cache { + qcom,dump-node = <&LLCC_1>; + qcom,dump-id = <0x140>; + }; + + qcom,llcc2_d_cache { + qcom,dump-node = <&LLCC_2>; + qcom,dump-id = <0x141>; + }; }; thermal_zones: thermal-zones {}; @@ -1077,6 
+1230,10 @@ cap-based-alloc-and-pwr-collapse; }; + qcom,llcc-perfmon { + compatible = "qcom,llcc-perfmon"; + }; + qcom,llcc-erp { compatible = "qcom,llcc-erp"; interrupt-names = "ecc_irq"; @@ -1086,6 +1243,14 @@ qcom,llcc-amon { compatible = "qcom,llcc-amon"; }; + + LLCC_1: llcc_1_dcache { + qcom,dump-size = <0x6c000>; + }; + + LLCC_2: llcc_2_dcache { + qcom,dump-size = <0x6c000>; + }; }; apps_rsc: mailbox@18220000 { @@ -1601,6 +1766,111 @@ status = "disabled"; }; + ufsphy_mem: ufsphy_mem@1d87000 { + reg = <0x1d87000 0xddc>; /* PHY regs */ + reg-names = "phy_mem"; + #phy-cells = <0>; + + lanes-per-direction = <1>; + + clock-names = "ref_clk_src", + "ref_clk", + "ref_aux_clk"; + clocks = <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>, + <&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>; + + status = "disabled"; + }; + + ufshc_mem: ufshc@1d84000 { + compatible = "qcom,ufshc"; + reg = <0x1d84000 0x3000>; + interrupts = <0 265 0>; + phys = <&ufsphy_mem>; + phy-names = "ufsphy"; + + lanes-per-direction = <1>; + dev-ref-clk-freq = <0>; /* 19.2 MHz */ + spm-level = <5>; + + clock-names = + "core_clk", + "bus_aggr_clk", + "iface_clk", + "core_clk_unipro", + "core_clk_ice", + "ref_clk", + "tx_lane0_sync_clk", + "rx_lane0_sync_clk"; + clocks = + <&clock_gcc GCC_UFS_PHY_AXI_CLK>, + <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>, + <&clock_gcc GCC_UFS_PHY_AHB_CLK>, + <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>, + <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, + <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>; + freq-table-hz = + <50000000 240000000>, + <0 0>, + <0 0>, + <37500000 150000000>, + <75000000 300000000>, + <0 0>, + <0 0>, + <0 0>; + + qcom,msm-bus,name = "ufshc_mem"; + qcom,msm-bus,num-cases = <12>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* + * During HS G3 UFS runs at nominal voltage corner, vote + * higher bandwidth to push other buses in the data path + * to run at nominal to achieve 
max throughput. + * 4GBps pushes BIMC to run at nominal. + * 200MBps pushes CNOC to run at nominal. + * Vote for half of this bandwidth for HS G3 1-lane. + * For max bandwidth, vote high enough to push the buses + * to run in turbo voltage corner. + */ + <123 512 0 0>, <1 757 0 0>, /* No vote */ + <123 512 922 0>, <1 757 1000 0>, /* PWM G1 */ + <123 512 1844 0>, <1 757 1000 0>, /* PWM G2 */ + <123 512 3688 0>, <1 757 1000 0>, /* PWM G3 */ + <123 512 7376 0>, <1 757 1000 0>, /* PWM G4 */ + <123 512 127796 0>, <1 757 1000 0>, /* HS G1 RA */ + <123 512 255591 0>, <1 757 1000 0>, /* HS G2 RA */ + <123 512 2097152 0>, <1 757 102400 0>, /* HS G3 RA */ + <123 512 149422 0>, <1 757 1000 0>, /* HS G1 RB */ + <123 512 298189 0>, <1 757 1000 0>, /* HS G2 RB */ + <123 512 2097152 0>, <1 757 102400 0>, /* HS G3 RB */ + <123 512 7643136 0>, <1 757 307200 0>; /* Max. bandwidth */ + + qcom,bus-vector-names = "MIN", + "PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1", + "HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1", + "HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1", + "MAX"; + + /* PM QoS */ + qcom,pm-qos-cpu-groups = <0x3f 0xC0>; + qcom,pm-qos-cpu-group-latency-us = <67 67>; + qcom,pm-qos-default-cpu = <0>; + + pinctrl-names = "dev-reset-assert", "dev-reset-deassert"; + pinctrl-0 = <&ufs_dev_reset_assert>; + pinctrl-1 = <&ufs_dev_reset_deassert>; + + resets = <&clock_gcc GCC_UFS_PHY_BCR>; + reset-names = "core_reset"; + non-removable; + + status = "disabled"; + }; + qcom,lpass@62400000 { compatible = "qcom,pil-tz-generic"; reg = <0x62400000 0x00100>; @@ -1717,8 +1987,8 @@ #include "sdmmagpie-pinctrl.dtsi" #include "sdmmagpie-gdsc.dtsi" +#include "sdmmagpie-bus.dtsi" #include "sdmmagpie-qupv3.dtsi" -#include "sdmmagpie-thermal.dtsi" &pcie_0_gdsc { status = "ok"; @@ -1839,3 +2109,120 @@ #include "pm6150l.dtsi" #include "sdmmagpie-regulator.dtsi" #include "sdmmagpie-coresight.dtsi" +#include "sdmmagpie-thermal.dtsi" + +&pm6150_vadc { + rf_pa0_therm { + reg = ; + label = "rf_pa0_therm"; + 
qcom,ratiometric; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; + + rf_pa1_therm { + reg = ; + label = "rf_pa1_therm"; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; + + quiet_therm { + reg = ; + label = "quiet_therm"; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; +}; + +&pm6150_adc_tm { + io-channels = <&pm6150_vadc ADC_XO_THERM_PU2>, + <&pm6150_vadc ADC_AMUX_THM2_PU2>, + <&pm6150_vadc ADC_AMUX_THM3_PU2>, + <&pm6150_vadc ADC_AMUX_THM4_PU2>; + + /* Channel nodes */ + xo_therm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; + + rf_pa0_therm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; + + rf_pa1_therm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; + + quiet_therm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; +}; + +&pm6150l_vadc { + conn_therm { + reg = ; + label = "conn_therm"; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; + + smb_therm { + reg = ; + label = "smb_therm"; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; + + camera_ftherm { + reg = ; + label = "camera_ftherm"; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; + + nvm_therm { + reg = ; + label = "nvm_therm"; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + qcom,pre-scaling = <1 1>; + }; +}; + +&pm6150l_adc_tm { + io-channels = <&pm6150l_vadc ADC_AMUX_THM1_PU2>, + <&pm6150l_vadc ADC_AMUX_THM3_PU2>, + <&pm6150l_vadc ADC_GPIO4_PU2>; + + /* Channel nodes */ + conn_therm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; + + camera_ftherm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; + + nvm_therm { + reg = ; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-usb.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-usb.dtsi index 
5ad4710c9468857a931300d8e7702314fe9e9f91..9b5e2e1be88b08da95ad8289f716db51a96408a8 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-usb.dtsi @@ -47,6 +47,13 @@ qcom,core-clk-rate = <200000000>; qcom,core-clk-rate-hs = <66666667>; qcom,num-gsi-evt-buffs = <0x3>; + qcom,gsi-reg-offset = + <0x0fc /* GSI_GENERAL_CFG */ + 0x110 /* GSI_DBL_ADDR_L */ + 0x120 /* GSI_DBL_ADDR_H */ + 0x130 /* GSI_RING_BASE_ADDR_L */ + 0x144 /* GSI_RING_BASE_ADDR_H */ + 0x1a4>; /* GSI_IF_STS */ qcom,dwc-usb3-msm-tx-fifo-size = <27696>; qcom,msm-bus,name = "usb0"; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi index d8ed44602fefef74c3efae0668ad5bb264de5253..97d17fe9db290f90cc047bf43dea9c24dcdd9c99 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi @@ -13,6 +13,8 @@ #include "skeleton.dtsi" #include +#include +#include #include / { @@ -158,10 +160,23 @@ qcom,force-warm-reboot; }; + clock_rpmh: qcom,rpmh { + compatible = "qcom,dummycc"; + clock-output-names = "rpmh_clocks"; + #clock-cells = <1>; + }; + + clock_aop: qcom,aop { + compatible = "qcom,dummycc"; + clock-output-names = "aop_clocks"; + #clock-cells = <1>; + }; + clock_gcc: qcom,gcc { compatible = "qcom,dummycc"; clock-output-names = "gcc_clocks"; #clock-cells = <1>; + #reset-cells = <1>; }; serial_uart: serial@831000 { diff --git a/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi index 874ad3cee11fc991b426b1b6d0b7b72e5afa1601..f4883c37dbbdd733ecc6b05c31d7d4c4adb023ee 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi @@ -11,13 +11,6 @@ * GNU General Public License for more details. 
*/ -/{ - aliases { - swr1 = &swr_1; - swr2 = &swr_2; - }; -}; - #include "sm6150-lpi.dtsi" #include #include @@ -32,8 +25,8 @@ clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>; qcom,tx-swr-gpios = <&tx_swr_gpios>; - qcom,tx-dmic-sample-rate = <4800000>; - swr_2: tx_swr_master { + qcom,tx-dmic-sample-rate = <2400000>; + swr2: tx_swr_master { compatible = "qcom,swr-mstr"; #address-cells = <2>; #size-cells = <0>; @@ -41,6 +34,7 @@ swrm-io-base = <0x62ed0000 0x0>; interrupts = <0 137 0>, <0 528 0>; interrupt-names = "swr_master_irq", "swr_wake_irq"; + qcom,swr-wakeup-required = <1>; qcom,swr-num-ports = <5>; qcom,swr-port-mapping = <1 PCM_OUT1 0xF>, <2 ADC1 0x1>, <2 ADC2 0x2>, @@ -66,7 +60,7 @@ <&clock_audio_rx_2 0>; qcom,rx-swr-gpios = <&rx_swr_gpios>; qcom,rx_mclk_mode_muxsel = <0x62c25020>; - swr_1: rx_swr_master { + swr1: rx_swr_master { compatible = "qcom,swr-mstr"; #address-cells = <2>; #size-cells = <0>; @@ -96,6 +90,46 @@ clocks = <&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>; qcom,wsa-swr-gpios = <&wsa_swr_gpios>; + swr0: wsa_swr_master { + compatible = "qcom,swr-mstr"; + #address-cells = <2>; + #size-cells = <0>; + qcom,swr_master_id = <1>; + swrm-io-base = <0x62f10000 0x0>; + interrupts = <0 136 0>; + interrupt-names = "swr_master_irq"; + qcom,swr-num-ports = <8>; + qcom,swr-port-mapping = <1 SPKR_L 0x1>, + <2 SPKR_L_COMP 0xF>, <3 SPKR_L_BOOST 0x3>, + <4 SPKR_R 0x1>, <5 SPKR_R_COMP 0xF>, + <6 SPKR_R_BOOST 0x3>, <7 SPKR_L_VI 0x3>, + <8 SPKR_R_VI 0x3>; + qcom,swr-num-dev = <2>; + wsa881x_0211: wsa881x@20170211 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x20170211>; + qcom,spkr-sd-n-node = <&wsa_spkr_en1>; + }; + + wsa881x_0212: wsa881x@20170212 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x20170212>; + qcom,spkr-sd-n-node = <&wsa_spkr_en2>; + }; + + wsa881x_0213: wsa881x@21170213 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x21170213>; + qcom,spkr-sd-n-node = <&wsa_spkr_en1>; + }; + + wsa881x_0214: wsa881x@21170214 { + compatible = "qcom,wsa881x"; 
+ reg = <0x0 0x21170214>; + qcom,spkr-sd-n-node = <&wsa_spkr_en2>; + }; + }; + }; va_macro: va-macro@62f20000 { @@ -106,8 +140,6 @@ }; }; -#include "sm6150-wsa881x.dtsi" - &sm6150_snd { qcom,model = "sm6150-idp-snd-card"; qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>; @@ -136,6 +168,9 @@ "IN3_AUX", "AUX_OUT", "TX SWR_ADC0", "ADC1_OUTPUT", "TX SWR_ADC2", "ADC2_OUTPUT", + "WSA SRC0_INP", "SRC0", + "WSA_TX DEC0_INP", "TX DEC0 MUX", + "WSA_TX DEC1_INP", "TX DEC1 MUX", "RX_TX DEC0_INP", "TX DEC0 MUX", "RX_TX DEC1_INP", "TX DEC1 MUX", "RX_TX DEC2_INP", "TX DEC2 MUX", @@ -193,6 +228,10 @@ qcom,cdc-vdd-mic-bias-voltage = <3296000 3296000>; qcom,cdc-vdd-mic-bias-current = <25000>; + qcom,cdc-micbias1-mv = <1800>; + qcom,cdc-micbias2-mv = <1800>; + qcom,cdc-micbias3-mv = <1800>; + qcom,cdc-static-supplies = "cdc-vdd-ldo-rxtx", "cdc-vddpx-1", "cdc-vdd-buck", @@ -440,6 +479,7 @@ qcom,cdc-mad-dmic-rate = <600000>; qcom,wdsp-cmpnt-dev-name = "tavil_codec"; + qcom,vreg-micb-supply = <&BOB>; tavil_spi_0: wcd_spi { compatible = "qcom,wcd-spi-v2"; diff --git a/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi b/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi index 3b4b5795c5b5325ea65cbb2820e68cfa00487a4f..a997e860aafd3ce4b85e52479744b928dbde67ad 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi @@ -34,6 +34,19 @@ compatible = "qcom,q6core-audio"; bolero: bolero-cdc { compatible = "qcom,bolero-codec"; + tx_macro: tx-macro@62ec0000 { + swr2: tx_swr_master { + }; + }; + + rx_macro: rx-macro@62ee0000 { + swr1: rx_swr_master { + }; + }; + wsa_macro: wsa-macro@62f00000 { + swr0: wsa_swr_master { + }; + }; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi b/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi index 8858acb6e6d43dce6c3b5b7282b7501f9504cdf6..06262c3108166c001229a0ed293f5d788b4bc0ba 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi @@ -47,7 +47,7 @@ cell-id = ; label = "disp_rsc"; 
qcom,rsc-dev; - qcom,req_state = <3>; + qcom,req_state = <2>; }; /*BCMs*/ @@ -179,6 +179,14 @@ qcom,bcm-dev; }; + bcm_cn1: bcm-cn1 { + cell-id = ; + label = "CN1"; + qcom,bcm-name = "CN1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + bcm_sn2: bcm-sn2 { cell-id = ; label = "SN2"; @@ -308,6 +316,7 @@ qcom,qos-off = <4096>; qcom,base-offset = <16384>; qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; qcom,bus-type = <1>; clocks = <>; }; @@ -358,6 +367,7 @@ qcom,qos-off = <128>; qcom,base-offset = <176128>; qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; qcom,bus-type = <1>; clocks = <>; }; @@ -394,6 +404,7 @@ qcom,qos-off = <4096>; qcom,base-offset = <36864>; qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; qcom,bus-type = <1>; clocks = <>; }; @@ -407,6 +418,7 @@ qcom,base-offset = <45056>; qcom,sbm-offset = <0>; qcom,bus-type = <1>; + qcom,bypass-qos-prg; clocks = <>; }; @@ -479,6 +491,7 @@ qcom,qport = <19>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn1>; qcom,ap-owned; qcom,prio = <2>; }; @@ -595,6 +608,7 @@ qcom,qport = <10>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn1>; qcom,ap-owned; qcom,prio = <2>; }; @@ -607,6 +621,7 @@ qcom,qport = <18>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_cn1>; qcom,ap-owned; qcom,prio = <2>; }; @@ -906,7 +921,7 @@ cell-id = ; label = "mas-llcc-mc"; qcom,buswidth = <4>; - qcom,agg-ports = <4>; + qcom,agg-ports = <2>; qcom,connections = <&slv_ebi>; qcom,bus-dev = <&fab_mc_virt>; }; @@ -1157,7 +1172,7 @@ cell-id = ; label = "mas-llcc-mc_display"; qcom,buswidth = <4>; - qcom,agg-ports = <4>; + qcom,agg-ports = <2>; qcom,connections = <&slv_ebi_display>; qcom,bus-dev = <&fab_mc_virt_display>; }; @@ -1249,7 +1264,7 @@ qcom,buswidth = <4>; qcom,agg-ports = <1>; qcom,bus-dev = <&fab_config_noc>; - qcom,bcms = <&bcm_cn0>; + qcom,bcms = <&bcm_cn1>; }; 
slv_qhs_ahb2phy_west:slv-qhs-ahb2phy-west { @@ -1258,7 +1273,7 @@ qcom,buswidth = <4>; qcom,agg-ports = <1>; qcom,bus-dev = <&fab_config_noc>; - qcom,bcms = <&bcm_cn0>; + qcom,bcms = <&bcm_cn1>; }; slv_qhs_aop:slv-qhs-aop { @@ -1440,7 +1455,7 @@ qcom,buswidth = <4>; qcom,agg-ports = <1>; qcom,bus-dev = <&fab_config_noc>; - qcom,bcms = <&bcm_cn0>; + qcom,bcms = <&bcm_cn1>; }; slv_qhs_qup0:slv-qhs-qup0 { @@ -1467,7 +1482,7 @@ qcom,buswidth = <4>; qcom,agg-ports = <1>; qcom,bus-dev = <&fab_config_noc>; - qcom,bcms = <&bcm_cn0>; + qcom,bcms = <&bcm_cn1>; }; slv_qhs_sdc2:slv-qhs-sdc2 { @@ -1476,7 +1491,7 @@ qcom,buswidth = <4>; qcom,agg-ports = <1>; qcom,bus-dev = <&fab_config_noc>; - qcom,bcms = <&bcm_cn0>; + qcom,bcms = <&bcm_cn1>; }; slv_qhs_snoc_cfg:slv-qhs-snoc-cfg { @@ -1673,7 +1688,7 @@ cell-id = ; label = "slv-ebi"; qcom,buswidth = <4>; - qcom,agg-ports = <4>; + qcom,agg-ports = <2>; qcom,bus-dev = <&fab_mc_virt>; qcom,bcms = <&bcm_mc0>, <&bcm_acv>; }; @@ -1810,7 +1825,7 @@ cell-id = ; label = "slv-ebi_display"; qcom,buswidth = <4>; - qcom,agg-ports = <4>; + qcom,agg-ports = <2>; qcom,bus-dev = <&fab_mc_virt_display>; qcom,bcms = <&bcm_mc0_display>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi index eaf3c8a9ab61c8819bdc410ba2b887f46c848eb4..b3e5fee63fce6c92f7764ea2fc2cc8cb1c4db8cc 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi @@ -106,7 +106,7 @@ rgltr-cntrl-support; rgltr-min-voltage = <2800000>; rgltr-max-voltage = <2800000>; - rgltr-load-current = <0>; + rgltr-load-current = <100000>; }; actuator_front: qcom,actuator@1 { @@ -119,7 +119,7 @@ rgltr-cntrl-support; rgltr-min-voltage = <2800000>; rgltr-max-voltage = <2800000>; - rgltr-load-current = <0>; + rgltr-load-current = <100000>; }; ois_rear: qcom,ois@0 { @@ -132,7 +132,7 @@ rgltr-cntrl-support; rgltr-min-voltage = <2800000>; rgltr-max-voltage = 
<2800000>; - rgltr-load-current = <0>; + rgltr-load-current = <100000>; status = "disabled"; }; @@ -150,7 +150,7 @@ rgltr-cntrl-support; rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>; rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>; - rgltr-load-current = <0 80000 105000 0 0>; + rgltr-load-current = <0 80000 105000 0 100000>; gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; pinctrl-0 = <&cam_sensor_mclk0_active @@ -187,7 +187,7 @@ rgltr-cntrl-support; rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>; rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>; - rgltr-load-current = <105000 0 80000 0 0>; + rgltr-load-current = <105000 0 80000 0 100000>; gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; pinctrl-0 = <&cam_sensor_mclk1_active @@ -224,7 +224,7 @@ rgltr-cntrl-support; rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>; rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>; - rgltr-load-current = <0 80000 105000 0 0>; + rgltr-load-current = <0 80000 105000 0 100000>; gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; pinctrl-0 = <&cam_sensor_mclk2_active diff --git a/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-qrd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..214e7ea2bcf32770ac28a386c68247716b63e91e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-qrd.dtsi @@ -0,0 +1,363 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include + +&soc { + + led_flash_rear: qcom,camera-flash@0 { + cell-index = <0>; + reg = <0x00 0x00>; + compatible = "qcom,camera-flash"; + flash-source = <&pm6150l_flash0 &pm6150l_flash1>; + torch-source = <&pm6150l_torch0 &pm6150l_torch1>; + switch-source = <&pm6150l_switch2 &pm6150l_switch2>; + status = "ok"; + }; + + led_flash_rear_aux: qcom,camera-flash@1 { + cell-index = <1>; + reg = <0x01 0x00>; + compatible = "qcom,camera-flash"; + flash-source = <&pm6150l_flash0 &pm6150l_flash1>; + torch-source = <&pm6150l_torch0 &pm6150l_torch1>; + switch-source = <&pm6150l_switch2 &pm6150l_switch2>; + status = "ok"; + }; + + led_flash_front: qcom,camera-flash@2 { + cell-index = <2>; + reg = <0x02 0x00>; + compatible = "qcom,camera-flash"; + flash-source = <&pm6150l_flash2>; + torch-source = <&pm6150l_torch2>; + switch-source = <&pm6150l_switch2>; + status = "ok"; + enable-active-high; + gpio = <&tlmm 38 0>; + pinctrl-names = "default"; + pinctrl-0 = <&flash_led3_front_en>; + }; + + camera_ldo: gpio-regulator@0 { + compatible = "regulator-fixed"; + reg = <0x00 0x00>; + regulator-name = "camera_ldo"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-enable-ramp-delay = <135>; + enable-active-high; + gpio = <&pm6150l_gpios 3 0>; + pinctrl-names = "default"; + pinctrl-0 = <&cam_sensor_dvdd_en>; + vin-supply = <&pm6150l_s8>; + }; + + camera_vana0_ldo: gpio-regulator@1 { + compatible = "regulator-fixed"; + reg = <0x01 0x00>; + regulator-name = "camera_vana0_ldo"; + regulator-min-microvolt = <2850000>; + regulator-max-microvolt = <2850000>; + regulator-enable-ramp-delay = <233>; + enable-active-high; + gpio = <&pm6150l_gpios 9 0>; + pinctrl-names = "default"; + pinctrl-0 = <&cam_sensor_0_vana>; + vin-supply = <&pm6150l_bob>; + }; + + camera_vana1_2_ldo: gpio-regulator@2 { + compatible = "regulator-fixed"; + reg = <0x02 0x00>; + regulator-name = "camera_vana1_2_ldo"; + 
regulator-min-microvolt = <2850000>; + regulator-max-microvolt = <2850000>; + regulator-enable-ramp-delay = <233>; + enable-active-high; + gpio = <&pm6150l_gpios 4 0>; + pinctrl-names = "default"; + pinctrl-0 = <&cam_sensor_1_2_vana>; + vin-supply = <&pm6150l_bob>; + }; +}; + +&cam_cci { + qcom,cam-res-mgr { + compatible = "qcom,cam-res-mgr"; + status = "ok"; + }; + + actuator_rear: qcom,actuator@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,actuator"; + cci-master = <0>; + cam_vaf-supply = <&pm6150_l19>; + regulator-names = "cam_vaf"; + rgltr-cntrl-support; + rgltr-min-voltage = <2800000>; + rgltr-max-voltage = <2800000>; + rgltr-load-current = <100000>; + }; + + actuator_front: qcom,actuator@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,actuator"; + cci-master = <1>; + cam_vaf-supply = <&pm6150_l19>; + regulator-names = "cam_vaf"; + rgltr-cntrl-support; + rgltr-min-voltage = <2800000>; + rgltr-max-voltage = <2800000>; + rgltr-load-current = <100000>; + }; + + eeprom_rear: qcom,eeprom@0 { + cell-index = <0>; + reg = <0>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm6150_l13>; + cam_vana-supply = <&camera_vana0_ldo>; + cam_vdig-supply = <&camera_ldo>; + cam_clk-supply = <&titan_top_gdsc>; + cam_vaf-supply = <&pm6150_l19>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk", "cam_vaf"; + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>; + rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>; + rgltr-load-current = <0 80000 105000 0 100000>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend>; + gpios = <&tlmm 28 0>, + <&tlmm 47 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0"; + sensor-mode = <0>; + cci-master = <0>; + status = "ok"; + clocks = 
<&clock_camcc CAM_CC_MCLK0_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + eeprom_rear_aux: qcom,eeprom@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm6150_l13>; + cam_vana-supply = <&camera_vana1_2_ldo>; + cam_vdig-supply = <&camera_ldo>; + cam_clk-supply = <&titan_top_gdsc>; + cam_vaf-supply = <&pm6150_l19>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk", "cam_vaf"; + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>; + rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>; + rgltr-load-current = <105000 0 80000 0 100000>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 29 0>, + <&tlmm 45 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1"; + sensor-position = <0>; + sensor-mode = <0>; + cci-master = <0>; + status = "ok"; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + eeprom_front: qcom,eeprom@2 { + cell-index = <2>; + reg = <0x2>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm6150_l13>; + cam_vana-supply = <&camera_vana1_2_ldo>; + cam_vdig-supply = <&camera_ldo>; + cam_clk-supply = <&titan_top_gdsc>; + cam_vaf-supply = <&pm6150_l19>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk", "cam_vaf"; + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>; + rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>; + rgltr-load-current = <0 80000 105000 0 100000>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_front_suspend>; + 
gpios = <&tlmm 30 0>, + <&tlmm 37 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2"; + sensor-mode = <0>; + cci-master = <1>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK2_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + qcom,cam-sensor@0 { + cell-index = <0>; + compatible = "qcom,cam-sensor"; + reg = <0x0>; + csiphy-sd-index = <0>; + sensor-position-roll = <90>; + sensor-position-pitch = <0>; + sensor-position-yaw = <180>; + actuator-src = <&actuator_rear>; + led-flash-src = <&led_flash_rear>; + eeprom-src = <&eeprom_rear>; + cam_vio-supply = <&pm6150_l13>; + cam_vana-supply = <&camera_vana0_ldo>; + cam_vdig-supply = <&camera_ldo>; + cam_clk-supply = <&titan_top_gdsc>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk"; + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2850000 1200000 0>; + rgltr-max-voltage = <1800000 2850000 1200000 0>; + rgltr-load-current = <0 80000 105000 0>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend>; + gpios = <&tlmm 28 0>, + <&tlmm 47 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0"; + sensor-mode = <0>; + cci-master = <0>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK0_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + qcom,cam-sensor@1 { + cell-index = <1>; + compatible = "qcom,cam-sensor"; + reg = <0x1>; + csiphy-sd-index = <1>; + sensor-position-roll = <90>; + sensor-position-pitch = <0>; + sensor-position-yaw = <180>; + led-flash-src = <&led_flash_rear>; + cam_vio-supply = <&pm6150_l13>; + cam_vana-supply = <&camera_vana1_2_ldo>; + cam_vdig-supply = <&camera_ldo>; + 
cam_clk-supply = <&titan_top_gdsc>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk"; + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2850000 1200000 0>; + rgltr-max-voltage = <1800000 2850000 1200000 0>; + rgltr-load-current = <105000 0 80000 0>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 29 0>, + <&tlmm 45 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1"; + sensor-mode = <0>; + cci-master = <0>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK1_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + qcom,cam-sensor@2 { + cell-index = <2>; + compatible = "qcom,cam-sensor"; + reg = <0x02>; + csiphy-sd-index = <2>; + sensor-position-roll = <270>; + sensor-position-pitch = <0>; + sensor-position-yaw = <0>; + actuator-src = <&actuator_front>; + eeprom-src = <&eeprom_front>; + cam_vio-supply = <&pm6150_l13>; + cam_vana-supply = <&camera_vana1_2_ldo>; + cam_vdig-supply = <&camera_ldo>; + cam_clk-supply = <&titan_top_gdsc>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk"; + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2850000 1200000 0>; + rgltr-max-voltage = <1800000 2850000 1200000 0>; + rgltr-load-current = <0 80000 105000 0>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 30 0>, + <&tlmm 37 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2"; + sensor-mode = <0>; + cci-master = <1>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK2_CLK>; + 
clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi b/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi index 50506ac400384092c8de227168a2ea799eedef8e..626215292c82ba280e2c30d723af1c91649da7fd 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi @@ -275,7 +275,8 @@ msm_cam_smmu_jpeg { compatible = "qcom,msm-cam-smmu-cb"; - iommus = <&apps_smmu 0x1060 0x8>; + iommus = <&apps_smmu 0xd80 0x20>, + <&apps_smmu 0xda0 0x20>; label = "jpeg"; jpeg_iova_mem_map: iova-mem-map { /* IO region is approximately 3.4 GB */ diff --git a/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts index 12f9774b0fa29e975b6685c1560d1bad3b7f5233..efab136e8944b8a68da7bac41001d8bb52fac80c 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-idp.dtsi" #include "sm6150-audio-overlay.dtsi" @@ -25,6 +26,37 @@ qcom,board-id = <34 3>; }; -&dsi_hx83112a_truly_vid_display { +&qupv3_se1_i2c { + synaptics_dsx@20 { + compatible = "synaptics,dsx-i2c"; + reg = <0x20>; + interrupt-parent = <&tlmm>; + interrupts = <89 0x2008>; + vdd-supply = <&pm6150_l10>; + avdd-supply = <&pm6150l_l7>; + pinctrl-names = "pmx_ts_active","pmx_ts_suspend", + "pmx_ts_release"; + pinctrl-0 = <&ts_int_active &ts_reset_active>; + pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>; + pinctrl-2 = <&ts_release>; + synaptics,pwr-reg-name = "avdd"; + synaptics,bus-reg-name = "vdd"; + synaptics,ub-i2c-addr = <0x20>; + synaptics,max-y-for-2d = <2159>; + synaptics,irq-gpio = <&tlmm 89 0x2008>; + synaptics,reset-gpio = <&tlmm 88 0x0>; + synaptics,irq-on-state = <0>; + synaptics,power-delay-ms = <200>; + synaptics,reset-delay-ms = <200>; + synaptics,reset-on-state = 
<0>; + synaptics,reset-active-ms = <20>; + }; + + himax_ts@48 { + status = "disabled"; + }; +}; + +&dsi_td4328_truly_cmd_display { qcom,dsi-display-active; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi b/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi index 6f9c9fcf93bd413fb290b79e7c0182353de13abd..be03427c5ba02cb1217a5d42e152796299813fe1 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi @@ -109,7 +109,6 @@ <0x6064000 0x15000>; reg-names = "tmc-base", "bam-base"; - qcom,smmu-s1-bypass; iommus = <&apps_smmu 0x01e0 0>, <&apps_smmu 0x00a0 0>; arm,buffer-size = <0x400000>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts index de315d3ca9cec4c0dffca38f6a4d38cacb056a4d..345251a025aee913861cf7fea90333600a975b15 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-idp.dtsi" #include "sm6150-ext-codec-audio-overlay.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi b/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi index 7bbe80ab99a5ea5ca22b5a124a0181dad70a55bb..cf0c52ce43b3592b9e9bd5c97f422e98abe09a40 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi @@ -11,12 +11,6 @@ * GNU General Public License for more details. 
*/ -/{ - aliases { - swr0 = &swr_tavil; - }; -}; - #include #include "sm6150-wcd.dtsi" @@ -111,7 +105,7 @@ &slim_aud { status = "okay"; tavil_codec { - swr_tavil: swr_master { + swr3: swr_master { compatible = "qcom,swr-mstr"; #address-cells = <2>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi index 2c8fea8b75923abc0ae1e2c7a2c3f267c11bb46c..e120affdd1f137b576a04909a948b0cf9cf26990 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi @@ -33,7 +33,7 @@ label = "kgsl-3d0"; compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d"; status = "ok"; - reg = <0x5000000 0x40000>, + reg = <0x5000000 0x90000>, <0x780000 0x6fff>; reg-names = "kgsl_3d0_reg_memory", "qfprom_memory"; @@ -177,7 +177,7 @@ /* NOM L1 */ qcom,gpu-pwrlevel@1 { reg = <1>; - qcom,gpu-freq = <706000000>; + qcom,gpu-freq = <745000000>; qcom,bus-freq = <10>; qcom,bus-min = <9>; qcom,bus-max = <11>; @@ -186,7 +186,7 @@ /* NOM */ qcom,gpu-pwrlevel@2 { reg = <2>; - qcom,gpu-freq = <645000000>; + qcom,gpu-freq = <700000000>; qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; @@ -195,7 +195,7 @@ /* SVS L1 */ qcom,gpu-pwrlevel@3 { reg = <3>; - qcom,gpu-freq = <513000000>; + qcom,gpu-freq = <550000000>; qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; @@ -204,7 +204,7 @@ /* SVS */ qcom,gpu-pwrlevel@4 { reg = <4>; - qcom,gpu-freq = <400000000>; + qcom,gpu-freq = <435000000>; qcom,bus-freq = <7>; qcom,bus-min = <5>; qcom,bus-max = <8>; @@ -250,7 +250,7 @@ /* NOM L1 */ qcom,gpu-pwrlevel@1 { reg = <1>; - qcom,gpu-freq = <706000000>; + qcom,gpu-freq = <745000000>; qcom,bus-freq = <10>; qcom,bus-min = <9>; qcom,bus-max = <11>; @@ -259,7 +259,7 @@ /* NOM */ qcom,gpu-pwrlevel@2 { reg = <2>; - qcom,gpu-freq = <645000000>; + qcom,gpu-freq = <700000000>; qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; @@ -268,7 +268,7 @@ /* SVS L1 */ qcom,gpu-pwrlevel@3 { reg = <3>; - qcom,gpu-freq = 
<513000000>; + qcom,gpu-freq = <550000000>; qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; @@ -277,7 +277,7 @@ /* SVS */ qcom,gpu-pwrlevel@4 { reg = <4>; - qcom,gpu-freq = <400000000>; + qcom,gpu-freq = <435000000>; qcom,bus-freq = <7>; qcom,bus-min = <5>; qcom,bus-max = <8>; @@ -330,7 +330,7 @@ /* NOM L1 */ qcom,gpu-pwrlevel@2 { reg = <2>; - qcom,gpu-freq = <706000000>; + qcom,gpu-freq = <745000000>; qcom,bus-freq = <10>; qcom,bus-min = <9>; qcom,bus-max = <11>; @@ -339,7 +339,7 @@ /* NOM */ qcom,gpu-pwrlevel@3 { reg = <3>; - qcom,gpu-freq = <645000000>; + qcom,gpu-freq = <700000000>; qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; @@ -348,7 +348,7 @@ /* SVS L1 */ qcom,gpu-pwrlevel@4 { reg = <4>; - qcom,gpu-freq = <513000000>; + qcom,gpu-freq = <550000000>; qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; @@ -357,7 +357,7 @@ /* SVS */ qcom,gpu-pwrlevel@5 { reg = <5>; - qcom,gpu-freq = <400000000>; + qcom,gpu-freq = <435000000>; qcom,bus-freq = <7>; qcom,bus-min = <5>; qcom,bus-max = <8>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi index fb1f17c47338d376184dfd8f82ca0a302393fb08..193b92ec447852fb990d58a2706776245f28c06e 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi @@ -12,7 +12,9 @@ #include "sm6150-thermal-overlay.dtsi" #include "sm6150-camera-sensor-idp.dtsi" +#include #include +#include #include "sm6150-sde-display.dtsi" &qupv3_se3_i2c { @@ -21,7 +23,36 @@ #include "smb1355.dtsi" }; +&pm6150l_gpios { + key_vol_up { + key_vol_up_default: key_vol_up_default { + pins = "gpio2"; + function = "normal"; + input-enable; + bias-pull-up; + power-source = <0>; + }; + }; +}; + &soc { + gpio_keys { + compatible = "gpio-keys"; + label = "gpio-keys"; + + pinctrl-names = "default"; + pinctrl-0 = <&key_vol_up_default>; + + vol_up { + label = "volume_up"; + gpios = <&pm6150l_gpios 2 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + 
linux,code = ; + linux,can-disable; + debounce-interval = <15>; + gpio-key,wakeup; + }; + }; }; &qupv3_se0_2uart { @@ -91,6 +122,27 @@ }; }; +&qupv3_se5_i2c { + status = "ok"; + qcom,clk-freq-out = <1000000>; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 86 0x00>; + qcom,nq-ven = <&tlmm 84 0x00>; + qcom,nq-firm = <&tlmm 85 0x00>; + qcom,nq-clkreq = <&tlmm 50 0x00>; + interrupt-parent = <&tlmm>; + interrupts = <86 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active + &nfc_clk_req_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend + &nfc_clk_req_suspend>; + }; +}; + &sdhc_1 { vdd-supply = <&pm6150l_l11>; qcom,vdd-voltage-level = <2950000 2950000>; @@ -141,6 +193,7 @@ qcom,battery-data = <&mtp_batterydata>; qcom,step-charging-enable; qcom,sw-jeita-enable; + qcom,fcc-stepping-enable; qcom,sec-charger-config = <3>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi index 7bb36d5013a8abe02f36cd1f06ec50f009887211..397181ff4c2a8d86d6836afbcd8d63107bb072be 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi @@ -308,6 +308,98 @@ }; }; + nfc { + nfc_int_active: nfc_int_active { + /* active state */ + mux { + /* GPIO 86 NFC Read Interrupt */ + pins = "gpio86"; + function = "gpio"; + }; + + config { + pins = "gpio86"; + drive-strength = <2>; /* 2 MA */ + bias-pull-up; + }; + }; + + nfc_int_suspend: nfc_int_suspend { + /* sleep state */ + mux { + /* GPIO 86 NFC Read Interrupt */ + pins = "gpio86"; + function = "gpio"; + }; + + config { + pins = "gpio86"; + drive-strength = <2>; /* 2 MA */ + bias-pull-up; + }; + }; + + nfc_enable_active: nfc_enable_active { + /* active state */ + mux { + /* 84: Enable 85: Firmware */ + pins = "gpio84", "gpio85"; + function = "gpio"; + }; + + config { + pins = "gpio84", "gpio85"; + drive-strength = <2>; /* 2 MA */ 
+ bias-pull-up; + }; + }; + + nfc_enable_suspend: nfc_enable_suspend { + /* sleep state */ + mux { + /* 84: Enable 85: Firmware */ + pins = "gpio84", "gpio85"; + function = "gpio"; + }; + + config { + pins = "gpio84", "gpio85"; + drive-strength = <2>; /* 2 MA */ + bias-disable; + }; + }; + + nfc_clk_req_active: nfc_clk_req_active { + /* active state */ + mux { + /* GPIO 50: NFC CLOCK REQUEST */ + pins = "gpio50"; + function = "gpio"; + }; + + config { + pins = "gpio50"; + drive-strength = <2>; /* 2 MA */ + bias-pull-up; + }; + }; + + nfc_clk_req_suspend: nfc_clk_req_suspend { + /* sleep state */ + mux { + /* GPIO 50: NFC CLOCK REQUEST */ + pins = "gpio50"; + function = "gpio"; + }; + + config { + pins = "gpio50"; + drive-strength = <2>; /* 2 MA */ + bias-disable; + }; + }; + }; + /* SE 6 pin mappings */ qupv3_se6_i2c_pins: qupv3_se6_i2c_pins { qupv3_se6_i2c_active: qupv3_se6_i2c_active { diff --git a/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi b/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi index e15beb75a9712b09bbf3a873fdaffb2bb1ac5e88..ca97a23d825cdb9b1b61eb004320e3fee86d9b4d 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi @@ -28,20 +28,18 @@ reg = <0>; label = "l3-wfi"; qcom,psci-mode = <0x1>; - qcom,latency-us = <600>; - qcom,ss-power = <420>; - qcom,energy-overhead = <4254140>; - qcom,time-overhead = <1260>; + qcom,entry-latency-us = <660>; + qcom,exit-latency-us = <600>; + qcom,min-residency-us = <1260>; }; qcom,pm-cluster-level@1 { /* D4 */ reg = <1>; label = "l3-pc"; qcom,psci-mode = <0x4>; - qcom,latency-us = <3048>; - qcom,ss-power = <329>; - qcom,energy-overhead = <6189829>; - qcom,time-overhead = <5800>; + qcom,entry-latency-us = <2752>; + qcom,exit-latency-us = <3048>; + qcom,min-residency-us = <6118>; qcom,min-child-idx = <2>; qcom,is-reset; }; @@ -50,10 +48,9 @@ reg = <2>; label = "cx-ret"; qcom,psci-mode = <0x124>; - qcom,latency-us = <4562>; - qcom,ss-power = <290>; - qcom,energy-overhead = <6989829>; - 
qcom,time-overhead = <8200>; + qcom,entry-latency-us = <3638>; + qcom,exit-latency-us = <4562>; + qcom,min-residency-us = <8467>; qcom,min-child-idx = <2>; qcom,is-reset; qcom,notify-rpm; @@ -63,10 +60,9 @@ reg = <3>; label = "llcc-off"; qcom,psci-mode = <0xB24>; - qcom,latency-us = <6562>; - qcom,ss-power = <165>; - qcom,energy-overhead = <7000029>; - qcom,time-overhead = <9825>; + qcom,entry-latency-us = <3263>; + qcom,exit-latency-us = <6562>; + qcom,min-residency-us = <9826>; qcom,min-child-idx = <2>; qcom,is-reset; qcom,notify-rpm; @@ -77,7 +73,6 @@ #size-cells = <0>; qcom,psci-mode-shift = <0>; qcom,psci-mode-mask = <0xf>; - qcom,use-prediction; qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; @@ -85,20 +80,18 @@ reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <60>; - qcom,ss-power = <383>; - qcom,energy-overhead = <64140>; - qcom,time-overhead = <121>; + qcom,entry-latency-us = <61>; + qcom,exit-latency-us = <60>; + qcom,min-residency-us = <121>; }; qcom,pm-cpu-level@1 { /* C3 */ reg = <1>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <901>; - qcom,ss-power = <364>; - qcom,energy-overhead = <579285>; - qcom,time-overhead = <1450>; + qcom,entry-latency-us = <549>; + qcom,exit-latency-us = <901>; + qcom,min-residency-us = <1774>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -107,10 +100,9 @@ reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <915>; - qcom,ss-power = <353>; - qcom,energy-overhead = <666292>; - qcom,time-overhead = <1617>; + qcom,entry-latency-us = <702>; + qcom,exit-latency-us = <915>; + qcom,min-residency-us = <4001>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -121,27 +113,24 @@ #size-cells = <0>; qcom,psci-mode-shift = <0>; qcom,psci-mode-mask = <0xf>; - qcom,use-prediction; qcom,cpu = <&CPU6 &CPU7>; qcom,pm-cpu-level@0 { /* C1 */ reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <66>; - qcom,ss-power = <427>; - qcom,energy-overhead = <68410>; 
- qcom,time-overhead = <121>; + qcom,entry-latency-us = <55>; + qcom,exit-latency-us = <66>; + qcom,min-residency-us = <121>; }; qcom,pm-cpu-level@1 { /* C3 */ reg = <1>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <1244>; - qcom,ss-power = <373>; - qcom,energy-overhead = <795006>; - qcom,time-overhead = <1767>; + qcom,entry-latency-us = <523>; + qcom,exit-latency-us = <1244>; + qcom,min-residency-us = <2207>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -150,10 +139,9 @@ reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <1854>; - qcom,ss-power = <359>; - qcom,energy-overhead = <1068095>; - qcom,time-overhead = <2380>; + qcom,entry-latency-us = <526>; + qcom,exit-latency-us = <1854>; + qcom,min-residency-us = <5555>; qcom,is-reset; qcom,use-broadcast-timer; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts index 05e667d052e684c771201869794f3187e3b76d7e..798260de18464dbff3bcb3744546c6f50b00b82d 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-audio-overlay.dtsi" #include "sm6150-qrd.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi index 321708df7e12fd76024a3b52b6c789181c4abdcf..ea391e03ac8bce8fca85dd22123f8679b292123e 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi @@ -11,8 +11,11 @@ */ #include "sm6150-thermal-overlay.dtsi" +#include #include +#include #include "sm6150-sde-display.dtsi" +#include "sm6150-camera-sensor-qrd.dtsi" &qupv3_se3_i2c { status = "ok"; @@ -20,7 +23,36 @@ #include "smb1355.dtsi" }; +&pm6150l_gpios { + key_vol_up { + key_vol_up_default: key_vol_up_default { + pins = "gpio2"; + function = "normal"; + input-enable; + bias-pull-up; + power-source = <0>; + }; + }; +}; + &soc { + gpio_keys { + compatible = 
"gpio-keys"; + label = "gpio-keys"; + + pinctrl-names = "default"; + pinctrl-0 = <&key_vol_up_default>; + + vol_up { + label = "volume_up"; + gpios = <&pm6150l_gpios 2 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + linux,code = ; + linux,can-disable; + debounce-interval = <15>; + gpio-key,wakeup; + }; + }; }; &qupv3_se7_4uart { @@ -36,6 +68,10 @@ status = "ok"; }; +&qupv3_se0_2uart { + status = "ok"; +}; + &ufsphy_mem { compatible = "qcom,ufs-phy-qmp-v3-660"; @@ -78,6 +114,7 @@ qcom,battery-data = <&mtp_batterydata>; qcom,step-charging-enable; qcom,sw-jeita-enable; + qcom,fcc-stepping-enable; qcom,sec-charger-config = <1>; }; @@ -117,6 +154,27 @@ }; }; +&qupv3_se5_i2c { + status = "ok"; + qcom,clk-freq-out = <1000000>; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 86 0x00>; + qcom,nq-ven = <&tlmm 84 0x00>; + qcom,nq-firm = <&tlmm 85 0x00>; + qcom,nq-clkreq = <&tlmm 50 0x00>; + interrupt-parent = <&tlmm>; + interrupts = <86 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active + &nfc_clk_req_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend + &nfc_clk_req_suspend>; + }; +}; + &dsi_hx83112a_truly_video { qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; diff --git a/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi b/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi index 8b540cf966270f948134a08338d0796360f803c7..4450b713c7c246887c296e1e73b77f555977ae64 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi @@ -219,6 +219,7 @@ ; qcom,mode-threshold-currents = <0 1>; + proxy-supply = <&pm6150_l4>; L4A: pm6150_l4: regulator-pm6150-l4 { regulator-name = "pm6150_l4"; qcom,set = ; @@ -226,6 +227,8 @@ regulator-max-microvolt = <975000>; qcom,init-voltage = <875000>; qcom,init-mode = ; + qcom,proxy-consumer-enable; + qcom,proxy-consumer-current = 
<23800>; }; }; @@ -387,6 +390,7 @@ ; qcom,mode-threshold-currents = <0 1>; + proxy-supply = <&pm6150_l13>; L13A: pm6150_l13: regulator-pm6150-l13 { regulator-name = "pm6150_l13"; qcom,set = ; @@ -394,6 +398,8 @@ regulator-max-microvolt = <1900000>; qcom,init-voltage = <1800000>; qcom,init-mode = ; + qcom,proxy-consumer-enable; + qcom,proxy-consumer-current = <115000>; }; }; @@ -558,6 +564,7 @@ ; qcom,mode-threshold-currents = <0 1>; + proxy-supply = <&pm6150l_l3>; L3C: pm6150l_l3: regulator-pm6150l-l3 { regulator-name = "pm6150l_l3"; qcom,set = ; @@ -565,6 +572,8 @@ regulator-max-microvolt = <1260000>; qcom,init-voltage = <1232000>; qcom,init-mode = ; + qcom,proxy-consumer-enable; + qcom,proxy-consumer-current = <51800>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts index 47b69d4655115b69ebc0d10dd0b1dbb652754ccd..40685f9ae444e821504a6b96869f95ddaa0fa7e1 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-rumi.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi index 834d377bd9f762211e8d29f6d08e6193124105c4..6ef9101edc97b04d3b0b5cc032f81dea9e4a5272 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi @@ -165,7 +165,7 @@ }; &mdss_mdp { - connectors = <&sde_wb &sde_dsi>; + connectors = <&sde_rscc &sde_wb &sde_dsi>; }; &dsi_sim_vid { @@ -269,6 +269,13 @@ &dsi_td4328_truly_video { qcom,mdss-dsi-t-clk-post = <0x0e>; qcom,mdss-dsi-t-clk-pre = <0x32>; + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x1c>; + qcom,mdss-dsi-panel-on-check-value = <0x1c>; + 
qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = @@ -287,6 +294,13 @@ &dsi_td4328_truly_cmd { qcom,mdss-dsi-t-clk-post = <0x0e>; qcom,mdss-dsi-t-clk-pre = <0x32>; + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x1c>; + qcom,mdss-dsi-panel-on-check-value = <0x1c>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = @@ -298,6 +312,8 @@ qcom,display-topology = <1 0 1>; qcom,default-topology-index = <0>; + qcom,partial-update-enabled = "single_roi"; + qcom,panel-roi-alignment = <16 16 1 1 16 16>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sm6150-sde-pll.dtsi index ae37a8c22eb7f436d59bdcaedab13289395a0d83..afc898c577396390567f4c8eeca2c08fbca6a33b 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-sde-pll.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-sde-pll.dtsi @@ -23,6 +23,8 @@ clock-names = "iface_clk"; clock-rate = <0>; gdsc-supply = <&mdss_core_gdsc>; + qcom,dsi-pll-ssc-en; + qcom,dsi-pll-ssc-mode = "down-spread"; qcom,platform-supply-entries { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi b/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi index 7804c58b579b2eb0a8cd6d4bf2518401dfdf5440..525cd651798865f1aac28b9487ae0ff145615d77 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi @@ -31,7 +31,7 @@ clock-names = "gcc_iface", "gcc_bus", "iface_clk", "core_clk", "vsync_clk", "lut_clk", "rot_clk"; - clock-rate = <0 0 0 256000000 19200000 256000000>; + clock-rate = <0 0 0 256000000 19200000 192000000>; clock-max-rate = <0 0 0 307000000 19200000 307000000>; sde-vdd-supply = <&mdss_core_gdsc>; @@ -63,6 +63,9 
@@ qcom,sde-mixer-display-pref = "primary", "none", "none", "none", "none", "none"; + qcom,sde-mixer-cwb-pref = "none", "none", "cwb", + "none", "none", "none"; + qcom,sde-dspp-top-off = <0x1300>; qcom,sde-dspp-top-size = <0x80>; qcom,sde-dspp-off = <0x55000>; @@ -127,7 +130,7 @@ qcom,sde-mixer-linewidth = <2560>; qcom,sde-sspp-linewidth = <2160>; qcom,sde-wb-linewidth = <2160>; - qcom,sde-mixer-blendstages = <0xb>; + qcom,sde-mixer-blendstages = <0x9>; qcom,sde-highest-bank-bit = <0x1>; qcom,sde-ubwc-version = <0x200>; qcom,sde-panic-per-pipe; @@ -253,8 +256,7 @@ reg = <0xaf20000 0x1c44>, <0xaf30000 0x3fd4>; reg-names = "drv", "wrapper"; - qcom,sde-rsc-version = <1>; - status = "disabled"; + qcom,sde-rsc-version = <2>; vdd-supply = <&mdss_core_gdsc>; clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>, diff --git a/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi index 73150062fa224e899c2473efae286d3d08b1e7b2..035adb3fef537d0dc7d05f36615ecabf45c7ba82 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi @@ -47,6 +47,13 @@ qcom,core-clk-rate = <200000000>; qcom,core-clk-rate-hs = <66666667>; qcom,num-gsi-evt-buffs = <0x3>; + qcom,gsi-reg-offset = + <0x0fc /* GSI_GENERAL_CFG */ + 0x110 /* GSI_DBL_ADDR_L */ + 0x120 /* GSI_DBL_ADDR_H */ + 0x130 /* GSI_RING_BASE_ADDR_L */ + 0x144 /* GSI_RING_BASE_ADDR_H */ + 0x1a4>; /* GSI_IF_STS */ qcom,dwc-usb3-msm-tx-fifo-size = <21288>; qcom,msm-bus,name = "usb0"; @@ -163,7 +170,7 @@ /* Primary USB port related QMP USB PHY */ usb_qmp_phy: ssphy@88e6000 { - compatible = "qcom,usb-ssphy-qmp-v2"; + compatible = "qcom,usb-ssphy-qmp-usb3-or-dp"; reg = <0x88e6000 0x1000>; reg-names = "qmp_phy_base"; @@ -297,9 +304,9 @@ clock-names = "aux_clk", "pipe_clk", "ref_clk_src", "ref_clk", "com_aux_clk", "cfg_ahb_clk"; - resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_SP0_BCR>, + resets = <&clock_gcc GCC_USB3_PHY_PRIM_SP0_BCR>, <&clock_gcc GCC_USB3PHY_PHY_PRIM_SP0_BCR>; 
- reset-names = "phy_phy_reset", "phy_reset"; + reset-names = "phy_reset", "phy_phy_reset"; }; usb_audio_qmi_dev { diff --git a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts index e8325109410d3b443b990136a8f21f058c6a339f..634d127d31eeb406e33619ea658cf5c58057f67f 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-idp.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/sm6150-wsa881x.dtsi deleted file mode 100644 index 0772619d8dea60ad5ea96ba8043da33aff081330..0000000000000000000000000000000000000000 --- a/arch/arm64/boot/dts/qcom/sm6150-wsa881x.dtsi +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -/{ - aliases { - swr0 = &swr_0; - }; -}; - -#include - -&wsa_macro { - swr_0: wsa_swr_master { - compatible = "qcom,swr-mstr"; - #address-cells = <2>; - #size-cells = <0>; - qcom,swr_master_id = <1>; - swrm-io-base = <0x62f10000 0x0>; - interrupts = <0 136 0>; - interrupt-names = "swr_master_irq"; - qcom,swr-num-ports = <8>; - qcom,swr-port-mapping = <1 SPKR_L 0x1>, - <2 SPKR_L_COMP 0xF>, <3 SPKR_L_BOOST 0x3>, - <4 SPKR_R 0x1>, <5 SPKR_R_COMP 0xF>, - <6 SPKR_R_BOOST 0x3>, <7 SPKR_L_VI 0x3>, - <8 SPKR_R_VI 0x3>; - qcom,swr-num-dev = <2>; - wsa881x_0211: wsa881x@20170211 { - compatible = "qcom,wsa881x"; - reg = <0x0 0x20170211>; - qcom,spkr-sd-n-node = <&wsa_spkr_en1>; - }; - - wsa881x_0212: wsa881x@20170212 { - compatible = "qcom,wsa881x"; - reg = <0x0 0x20170212>; - qcom,spkr-sd-n-node = <&wsa_spkr_en2>; - }; - - wsa881x_0213: wsa881x@21170213 { - compatible = "qcom,wsa881x"; - reg = <0x0 0x21170213>; - qcom,spkr-sd-n-node = <&wsa_spkr_en1>; - }; - - wsa881x_0214: wsa881x@21170214 { - compatible = "qcom,wsa881x"; - reg = <0x0 0x21170214>; - qcom,spkr-sd-n-node = <&wsa_spkr_en2>; - }; - }; -}; diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index efb07c163de1c5e3e2fb88d221920b992fbe0cbb..bd949aefa80c377fc9f87c450ba2e546a35906b0 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -44,6 +44,9 @@ i2c1 = &qupv3_se1_i2c; i2c2 = &qupv3_se3_i2c; hsuart0 = &qupv3_se7_4uart; + swr0 = &swr0; + swr1 = &swr1; + swr2 = &swr2; }; cpus { @@ -801,6 +804,19 @@ vdd_mx-supply = <&VDD_MX_LEVEL>; #clock-cells = <1>; #reset-cells = <1>; + qcom,cam_cc_csi0phytimer_clk_src-opp-handle = <&cam_csiphy0>; + qcom,cam_cc_csi1phytimer_clk_src-opp-handle = <&cam_csiphy1>; + qcom,cam_cc_csi2phytimer_clk_src-opp-handle = <&cam_csiphy2>; + qcom,cam_cc_cci_clk_src-opp-handle = <&cam_cci>; + qcom,cam_cc_ife_0_csid_clk_src-opp-handle = <&cam_csid0>; + qcom,cam_cc_ife_0_clk_src-opp-handle = <&cam_vfe0>; + 
qcom,cam_cc_ife_1_csid_clk_src-opp-handle = <&cam_csid1>; + qcom,cam_cc_ife_1_clk_src-opp-handle = <&cam_vfe1>; + qcom,cam_cc_ife_lite_csid_clk_src-opp-handle = <&cam_csid_lite>; + qcom,cam_cc_ife_lite_clk_src-opp-handle = <&cam_vfe_lite>; + qcom,cam_cc_icp_clk_src-opp-handle = <&cam_a5>; + qcom,cam_cc_ipe_0_clk_src-opp-handle = <&cam_ipe0>; + qcom,cam_cc_bps_clk_src-opp-handle = <&cam_bps>; }; clock_dispcc: qcom,dispcc@af00000 { @@ -820,6 +836,7 @@ vdd_mx-supply = <&VDD_MX_LEVEL>; #clock-cells = <1>; #reset-cells = <1>; + qcom,gpu_cc_gx_gfx3d_clk_src-opp-handle = <&msm_gpu>; }; cpucc_debug: syscon@182a0018 { @@ -1184,52 +1201,52 @@ compatible = "qcom,mem-dump"; memory-region = <&dump_mem>; - rpmh_dump { + rpmh { qcom,dump-size = <0x2000000>; qcom,dump-id = <0xec>; }; - rpm_sw_dump { + rpm_sw { qcom,dump-size = <0x28000>; qcom,dump-id = <0xea>; }; - pmic_dump { + pmic { qcom,dump-size = <0x10000>; qcom,dump-id = <0xe4>; }; - fcm_dump { + fcm { qcom,dump-size = <0x8400>; qcom,dump-id = <0xee>; }; - tmc_etf_dump { + tmc_etf { qcom,dump-size = <0x8000>; qcom,dump-id = <0xf0>; }; - tmc_etf_swao_dump { + etf_swao { qcom,dump-size = <0x8000>; qcom,dump-id = <0xf1>; }; - tmc_etr_reg_dump { + etr_reg { qcom,dump-size = <0x1000>; qcom,dump-id = <0x100>; }; - tmc_etf_reg_dump { + etf_reg { qcom,dump-size = <0x1000>; qcom,dump-id = <0x101>; }; - tmc_etf_swao_reg_dump { + etfswao_reg { qcom,dump-size = <0x1000>; qcom,dump-id = <0x102>; }; - misc_data_dump { + misc_data { qcom,dump-size = <0x1000>; qcom,dump-id = <0xe8>; }; @@ -1296,6 +1313,8 @@ qcom,llcc-perfmon { compatible = "qcom,llcc-perfmon"; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "qdss_clk"; }; qcom,llcc-erp { @@ -1313,6 +1332,28 @@ }; }; + sdcc1_ice: sdcc1ice@7C8000{ + compatible = "qcom,ice"; + reg = <0x7C8000 0x8000>; + qcom,enable-ice-clk; + clock-names = "ice_core_clk_src", "ice_core_clk", + "bus_clk", "iface_clk"; + clocks = <&clock_gcc GCC_SDCC1_ICE_CORE_CLK_SRC>, + <&clock_gcc GCC_SDCC1_ICE_CORE_CLK>, 
+ <&clock_gcc GCC_SDCC1_AHB_CLK>, + <&clock_gcc GCC_SDCC1_APPS_CLK>; + qcom,op-freq-hz = <300000000>, <0>, <0>, <0>; + qcom,msm-bus,name = "sdcc_ice_noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 757 0 0>, /* No vote */ + <1 757 1000 0>; /* Max. bandwidth */ + qcom,bus-vector-names = "MIN", + "MAX"; + qcom,instance-type = "sdcc"; + }; + sdhc_1: sdhci@7c4000 { compatible = "qcom,sdhci-msm-v5"; reg = <0x7c4000 0x1000>, <0x7c5000 0x1000>; @@ -1320,6 +1361,7 @@ interrupts = <0 641 0>, <0 644 0>; interrupt-names = "hc_irq", "pwr_irq"; + sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -1330,6 +1372,46 @@ qcom,devfreq,freq-table = <50000000 200000000>; + qcom,msm-bus,name = "sdhc1"; + qcom,msm-bus,num-cases = <9>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <78 512 0 0>, <1 606 0 0>, + /* 400 KB/s*/ + <78 512 1046 3200>, + <1 606 1600 3200>, + /* 20 MB/s */ + <78 512 52286 160000>, + <1 606 80000 160000>, + /* 25 MB/s */ + <78 512 65360 200000>, + <1 606 100000 200000>, + /* 50 MB/s */ + <78 512 130718 400000>, + <1 606 133320 133320>, + /* 100 MB/s */ + <78 512 261438 400000>, + <1 606 150000 300000>, + /* 200 MB/s */ + <78 512 261438 400000>, + <1 606 300000 300000>, + /* 400 MB/s */ + <78 512 261438 1100000>, + <1 606 300000 300000>, + /* Max. 
bandwidth */ + <78 512 1338562 4096000>, + <1 606 1338562 4096000>; + qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 + 100750000 200000000 400000000 4294967295>; + + /* PM QoS */ + qcom,pm-qos-irq-type = "affine_irq"; + qcom,pm-qos-irq-latency = <67 67>; + qcom,pm-qos-cpu-groups = <0x3f 0xc0>; + qcom,pm-qos-cmdq-latency-us = <67 67>, <67 67>; + qcom,pm-qos-legacy-latency-us = <67 67>, <67 67>; + clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>, <&clock_gcc GCC_SDCC1_APPS_CLK>; clock-names = "iface_clk", "core_clk"; @@ -1356,6 +1438,42 @@ qcom,devfreq,freq-table = <50000000 202000000>; + qcom,msm-bus,name = "sdhc2"; + qcom,msm-bus,num-cases = <8>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <81 512 0 0>, <1 608 0 0>, + /* 400 KB/s*/ + <81 512 1046 1600>, + <1 608 1600 1600>, + /* 20 MB/s */ + <81 512 52286 80000>, + <1 608 80000 80000>, + /* 25 MB/s */ + <81 512 65360 100000>, + <1 608 100000 100000>, + /* 50 MB/s */ + <81 512 130718 200000>, + <1 608 133320 133320>, + /* 100 MB/s */ + <81 512 261438 200000>, + <1 608 150000 150000>, + /* 200 MB/s */ + <81 512 261438 400000>, + <1 608 300000 300000>, + /* Max. 
bandwidth */ + <81 512 1338562 4096000>, + <1 608 1338562 4096000>; + qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 + 100750000 200000000 4294967295>; + + /* PM QoS */ + qcom,pm-qos-irq-type = "affine_irq"; + qcom,pm-qos-irq-latency = <67 67>; + qcom,pm-qos-cpu-groups = <0x3f 0xc0>; + qcom,pm-qos-legacy-latency-us = <67 67>, <67 67>; + clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>, <&clock_gcc GCC_SDCC2_APPS_CLK>; clock-names = "iface_clk", "core_clk"; @@ -1897,6 +2015,9 @@ tx-descriptors = <0x12000 0x12004>; rx-descriptors = <0x1200c 0x12010>; + label = "wdsp"; + qcom,glink-label = "wdsp"; + qcom,wdsp_ctrl { qcom,glink-channels = "g_glink_ctrl"; qcom,intents = <0x400 1>; @@ -2256,6 +2377,7 @@ qcom,use-64-bit-dma-mask; qcom,arm-smmu; qcom,smmu-fast-map; + qcom,use-ipa-pm; qcom,bandwidth-vote-for-ipa; qcom,msm-bus,name = "ipa"; qcom,msm-bus,num-cases = <5>; @@ -2267,27 +2389,29 @@ , , /* SVS2 */ - , - , - , - , + , + , + , + , /* SVS */ - , - , - , - , + , + , + , + , /* NOMINAL */ - , - , - , - , + , + , + , + , /* TURBO */ - , - , - , - ; + , + , + , + ; qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL", "TURBO"; + qcom,throughput-threshold = <310 600 1000>; + qcom,scaling-exceptions = <>; /* smp2p information */ qcom,smp2p_map_ipa_1_out { @@ -2630,6 +2754,19 @@ < 1708800 MHZ_TO_MBPS(1017, 4) >, < 2208000 MHZ_TO_MBPS(1804, 4) >; }; + + bus_proxy_client: qcom,bus_proxy_client { + compatible = "qcom,bus-proxy-client"; + qcom,msm-bus,name = "bus-proxy-client"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + ; + qcom,msm-bus,active-only; + status = "ok"; + }; }; #include "pm6150.dtsi" @@ -2649,7 +2786,7 @@ }; &usb0 { - extcon = <&pm6150_pdphy>; + extcon = <&pm6150_pdphy>, <&pm6150_charger>, <&eud>; }; &pm6150_vadc { @@ -2862,6 +2999,7 @@ }; &vcodec0_gdsc { + qcom,support-hw-trigger; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts 
b/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts index aebdcf06bbb5d9afd135b5d43612ed80608e1a74..a5423f1ef523a83604105c8b9fce4010be6dcb93 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-idp.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts index e6286ee3ca58c888d2e9b12e1cdb0f3622eb9c72..b08051cdc0c1bfe95fa4ade86a029a4cee0d6c7a 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include "sm6150-qrd.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi b/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi index a4c32e4df8c1704fb1ef0ffec80fb0fd6c60f6de..f419222d6fd55ebb83d373f56ce11ec04eb87e0a 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi @@ -666,6 +666,15 @@ qcom,prio = <2>; }; + mas_qhm_sensorss_ahb: mas-qhm-sensorss-ahb { + cell-id = ; + label = "mas-qhm-sensorss-ahb"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_a2noc_snoc>; + qcom,bus-dev = <&fab_aggre2_noc>; + }; + mas_qhm_tsif: mas-qhm-tsif { cell-id = ; label = "mas-qhm-tsif"; diff --git a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi index bdce6a16fb3af14fb9167b4474ab3e0d51b26da6..42bf4d5e2c1b6db4df630bbcfef055328e003ca4 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi @@ -389,15 +389,18 @@ pinctrl-0 = <&cam_sensor_mclk3_active &cam_sensor_active_iris>; pinctrl-1 = <&cam_sensor_mclk3_suspend - &cam_sensor_active_iris>; + &cam_sensor_suspend_iris>; gpios = <&tlmm 16 0>, - <&tlmm 23 0>; + <&tlmm 23 0>, + <&tlmm 26 0>; gpio-reset = <1>; - gpio-req-tbl-num = <0 1>; - 
gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-num = <0 1 2>; + gpio-req-tbl-flags = <1 0 1>; gpio-req-tbl-label = "CAMIF_MCLK3", - "CAM_RESET3"; + "CAM_RESET3", + "IMG_START"; sensor-mode = <0>; + cci-device = <1>; cci-master = <1>; status = "ok"; clocks = <&clock_camcc CAM_CC_MCLK3_CLK>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi b/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi index 8acc37dddd575dfacd9cde189dfd0a43c681be6a..c8dafeeb7bd4548e888093c8de262f9a13b1ae11 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi @@ -405,10 +405,10 @@ }; iova-mem-region-io { - /* IO region is approximately 3.3 GB */ + /* IO region is approximately 3 GB */ iova-region-name = "io"; iova-region-start = <0xda00000>; - iova-region-len = <0xd2500000>; + iova-region-len = <0xace00000>; iova-region-id = <0x3>; status = "ok"; }; @@ -1031,11 +1031,12 @@ <&clock_camcc CAM_CC_IPE_0_CLK>; clock-rates = + <0 0 0 300000000 0>, <0 0 0 475000000 0>, <0 0 0 520000000 0>, <0 0 0 600000000 0>, <0 0 0 600000000 0>; - clock-cntl-level = "svs", "svs_l1", + clock-cntl-level = "lowsvs", "svs", "svs_l1", "nominal", "turbo"; status = "ok"; }; @@ -1064,11 +1065,12 @@ <&clock_camcc CAM_CC_IPE_1_CLK>; clock-rates = + <0 0 0 300000000 0>, <0 0 0 475000000 0>, <0 0 0 520000000 0>, <0 0 0 600000000 0>, <0 0 0 600000000 0>; - clock-cntl-level = "svs", "svs_l1", + clock-cntl-level = "lowsvs", "svs", "svs_l1", "nominal", "turbo"; status = "ok"; }; @@ -1097,11 +1099,12 @@ <&clock_camcc CAM_CC_BPS_CLK>; clock-rates = + <0 0 0 200000000 0>, <0 0 0 400000000 0>, <0 0 0 480000000 0>, <0 0 0 600000000 0>, <0 0 0 600000000 0>; - clock-cntl-level = "svs", "svs_l1", + clock-cntl-level = "lowsvs", "svs", "svs_l1", "nominal", "turbo"; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi index 161a36783e1ce023dd07b59bac39b8c9b9f76ab5..9a1a9fc87777cee5c149d8553f9ed25c5baeaea9 100644 --- 
a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi @@ -309,6 +309,7 @@ &pm8150l_wled { qcom,string-cfg= <7>; + qcom,leds-per-string = <6>; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi b/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi index 3a6441e486d3314935639b6f1b3aedec0f9a73c5..812a8e10ed346dfb75a075e3c8e73f24979bbc1c 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi @@ -199,10 +199,10 @@ }; port@1 { reg = <5>; - funnel_swao_in_funnel_ssc: endpoint { + funnel_swao_in_ssc_etm0: endpoint { slave-mode; remote-endpoint= - <&funnel_ssc_out_funnel_swao>; + <&ssc_etm0_out_funnel_swao>; }; }; port@2 { @@ -1890,7 +1890,7 @@ reg = <0x6a02000 0x1000>; reg-names = "cti-base"; - coresight-name = "coresight-cti-ddr_dl_0_cti0"; + coresight-name = "coresight-cti-ddr_dl_0_cti_0"; clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; @@ -1902,7 +1902,7 @@ reg = <0x6a03000 0x1000>; reg-names = "cti-base"; - coresight-name = "coresight-cti-ddr_dl_0_cti1"; + coresight-name = "coresight-cti-ddr_dl_0_cti_1"; clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; @@ -1914,7 +1914,7 @@ reg = <0x6a10000 0x1000>; reg-names = "cti-base"; - coresight-name = "coresight-cti-ddr_dl_1_cti0"; + coresight-name = "coresight-cti-ddr_dl_1_cti_0"; clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; @@ -1926,7 +1926,7 @@ reg = <0x6a11000 0x1000>; reg-names = "cti-base"; - coresight-name = "coresight-cti-ddr_dl_1_cti1"; + coresight-name = "coresight-cti-ddr_dl_1_cti_1"; clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; @@ -2388,9 +2388,9 @@ qcom,inst-id = <8>; port { - ssc_etm0_out_funnel_ssc: endpoint { + ssc_etm0_out_funnel_swao: endpoint { remote-endpoint = - <&funnel_ssc_in_ssc_etm0>; + <&funnel_swao_in_ssc_etm0>; }; }; }; @@ -2715,39 +2715,4 @@ }; }; }; - - funnel_ssc: funnel@6b14000 { - compatible = "arm,primecell"; - arm,primecell-periphid = 
<0x0003b908>; - - reg = <0x6b14000 0x1000>; - reg-names = "funnel-base"; - - coresight-name = "coresight-funnel-ssc"; - - clocks = <&clock_aop QDSS_CLK>; - clock-names = "apb_pclk"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - funnel_ssc_out_funnel_swao: endpoint { - remote-endpoint = - <&funnel_swao_in_funnel_ssc>; - }; - }; - - port@1 { - reg = <0>; - funnel_ssc_in_ssc_etm0: endpoint { - slave-mode; - remote-endpoint = - <&ssc_etm0_out_funnel_ssc>; - }; - }; - }; - }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-gdsc.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gdsc.dtsi index 6baac00ed014ce78cc9a17c36846a17cb2bf5660..af4a8712c26acfa0e5c3b4ed4cac5ff395353b39 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-gdsc.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-gdsc.dtsi @@ -211,6 +211,7 @@ qcom,skip-disable; qcom,gds-timeout = <500>; qcom,clk-dis-wait-val = <8>; + mboxes = <&qmp_aop 0>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-gpu-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gpu-v2.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..871ddc3f53ac90887f6a783e6c9540f95ab1c90e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-gpu-v2.dtsi @@ -0,0 +1,37 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + gpu_opp_table_v2: gpu_opp_table_v2 { + compatible = "operating-points-v2"; + + opp-585000000 { + opp-hz = /bits/ 64 <585000000>; + opp-microvolt = ; + }; + + opp-427000000 { + opp-hz = /bits/ 64 <427000000>; + opp-microvolt = ; + }; + + opp-345000000 { + opp-hz = /bits/ 64 <345000000>; + opp-microvolt = ; + }; + + opp-257000000 { + opp-hz = /bits/ 64 <257000000>; + opp-microvolt = ; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi index 284c821e77fdc3328430575983af8b581ea3509f..0b10cbcb1089bc20ca227f2dea7bd84aeb622bf6 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi @@ -78,6 +78,7 @@ qcom,initial-pwrlevel = <5>; qcom,gpu-quirk-secvid-set-once; + qcom,gpu-quirk-cx-gdsc; qcom,idle-timeout = <80>; //msecs qcom,no-nap; diff --git a/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi index 3479d30dbb703146f40d8678814171cc81e5a368..86221703c85526dcb92030d0e2b3131804032ff9 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi @@ -11,840 +11,836 @@ * GNU General Public License for more details. 
*/ -&pcie1 { - pci,bus@1 { - reg = <0 0 0 0 0>; - - mhi_0: qcom,mhi@0 { - reg = <0 0 0 0 0 >; - - /* controller specific configuration */ - qcom,smmu-cfg = <0x3>; - qcom,msm-bus,name = "mhi"; - qcom,msm-bus,num-cases = <2>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = - <100 512 0 0>, - <100 512 1200000000 650000000>; - - /* mhi bus specific settings */ - mhi,max-channels = <106>; - mhi,timeout = <2000>; - - #address-cells = <1>; - #size-cells = <0>; - - mhi_chan@0 { - reg = <0>; - label = "LOOPBACK"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@1 { - reg = <1>; - label = "LOOPBACK"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@2 { - reg = <2>; - label = "SAHARA"; - mhi,num-elements = <128>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x2>; - }; - - mhi_chan@3 { - reg = <3>; - label = "SAHARA"; - mhi,num-elements = <128>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x2>; - }; - - mhi_chan@4 { - reg = <4>; - label = "DIAG"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@5 { - reg = <5>; - label = "DIAG"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@8 { - reg = <8>; - label = "QDSS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@9 { - reg = <9>; - label = "QDSS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - 
mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@10 { - reg = <10>; - label = "EFS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@11 { - reg = <11>; - label = "EFS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@14 { - reg = <14>; - label = "QMI0"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@15 { - reg = <15>; - label = "QMI0"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@16 { - reg = <16>; - label = "QMI1"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@17 { - reg = <17>; - label = "QMI1"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@18 { - reg = <18>; - label = "IP_CTRL"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@19 { - reg = <19>; - label = "IP_CTRL"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - mhi,auto-queue; - }; - - mhi_chan@20 { - reg = <20>; - label = "IPCR"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <1>; - mhi,data-type = <1>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - mhi,auto-start; - }; - - mhi_chan@21 { - reg = <21>; - label = "IPCR"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - 
mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - mhi,auto-queue; - mhi,auto-start; - }; - - mhi_chan@22 { - reg = <22>; - label = "TF"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@23 { - reg = <23>; - label = "TF"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@25 { - reg = <25>; - label = "BL"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x2>; - mhi,auto-queue; - mhi,auto-start; - }; - - mhi_chan@26 { - reg = <26>; - label = "DCI"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@27 { - reg = <27>; - label = "DCI"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@32 { - reg = <32>; - label = "DUN"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@33 { - reg = <33>; - label = "DUN"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@100 { - reg = <100>; - label = "IP_HW0"; - mhi,num-elements = <512>; - mhi,event-ring = <4>; - mhi,chan-dir = <1>; - mhi,data-type = <1>; - mhi,doorbell-mode = <3>; - mhi,ee = <0x4>; - mhi,db-mode-switch; - }; - - mhi_chan@101 { - reg = <101>; - label = "IP_HW0"; - mhi,num-elements = <512>; - mhi,event-ring = <5>; - mhi,chan-dir = <2>; - mhi,data-type = <1>; - mhi,doorbell-mode = <3>; - mhi,ee = <0x4>; - }; - - mhi_chan@104 { - reg = <104>; - label = "IP_HW_OFFLOAD_0"; - mhi,event-ring = <4>; - 
mhi,chan-dir = <1>; - mhi,data-type = <3>; - mhi,ee = <0x4>; - mhi,offload-chan; - }; - - mhi_chan@105 { - reg = <105>; - label = "IP_HW_OFFLOAD_0"; - mhi,event-ring = <5>; - mhi,chan-dir = <2>; - mhi,data-type = <3>; - mhi,ee = <0x4>; - mhi,offload-chan; - mhi,lpm-notify; - }; - - mhi_event@0 { - mhi,num-elements = <32>; - mhi,intmod = <1>; - mhi,msi = <1>; - mhi,priority = <1>; - mhi,brstmode = <2>; - mhi,data-type = <1>; - }; - - mhi_event@1 { - mhi,num-elements = <256>; - mhi,intmod = <1>; - mhi,msi = <2>; - mhi,priority = <1>; - mhi,brstmode = <2>; - }; - - mhi_event@2 { - mhi,num-elements = <256>; - mhi,intmod = <1>; - mhi,msi = <3>; - mhi,priority = <1>; - mhi,brstmode = <2>; - }; - - mhi_event@3 { - mhi,num-elements = <256>; - mhi,intmod = <1>; - mhi,msi = <4>; - mhi,priority = <1>; - mhi,brstmode = <2>; - }; - - mhi_event@4 { - mhi,num-elements = <1024>; - mhi,intmod = <5>; - mhi,msi = <5>; - mhi,chan = <100>; - mhi,priority = <1>; - mhi,brstmode = <3>; - mhi,hw-ev; - }; - - mhi_event@5 { - mhi,num-elements = <1024>; - mhi,intmod = <5>; - mhi,msi = <6>; - mhi,chan = <101>; - mhi,priority = <1>; - mhi,brstmode = <3>; - mhi,hw-ev; - mhi,client-manage; - }; - - mhi_netdev_0: mhi_rmnet@0 { - reg = <0x0>; - mhi,chan = "IP_HW0"; - mhi,interface-name = "rmnet_mhi"; - mhi,mru = <0x4000>; - }; - - mhi_netdev_1: mhi_rmnet@1 { - reg = <0x1>; - mhi,chan = "IP_HW_ADPL"; - mhi,interface-name = "rmnet_mhi"; - mhi,mru = <0x4000>; - }; +&pcie_rc1 { + reg = <0 0 0 0 0>; + + mhi_0: qcom,mhi@0 { + reg = <0 0 0 0 0 >; + + /* controller specific configuration */ + qcom,smmu-cfg = <0x3>; + qcom,msm-bus,name = "mhi"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <100 512 0 0>, + <100 512 1200000000 650000000>; + + /* mhi bus specific settings */ + mhi,max-channels = <106>; + mhi,timeout = <2000>; + + #address-cells = <1>; + #size-cells = <0>; + + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + 
mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@2 { + reg = <2>; + label = "SAHARA"; + mhi,num-elements = <128>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x2>; + }; + + mhi_chan@3 { + reg = <3>; + label = "SAHARA"; + mhi,num-elements = <128>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x2>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@8 { + reg = <8>; + label = "QDSS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@9 { + reg = <9>; + label = "QDSS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@10 { + reg = <10>; + label = "EFS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@11 { + reg = <11>; + label = "EFS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@14 { + reg = <14>; + label = "QMI0"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + 
mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@15 { + reg = <15>; + label = "QMI0"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@16 { + reg = <16>; + label = "QMI1"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@17 { + reg = <17>; + label = "QMI1"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@18 { + reg = <18>; + label = "IP_CTRL"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@19 { + reg = <19>; + label = "IP_CTRL"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + mhi,auto-queue; + }; + + mhi_chan@20 { + reg = <20>; + label = "IPCR"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + mhi,auto-start; + }; + + mhi_chan@21 { + reg = <21>; + label = "IPCR"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + mhi,auto-queue; + mhi,auto-start; + }; + + mhi_chan@22 { + reg = <22>; + label = "TF"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@23 { + reg = <23>; + label = "TF"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@25 { + reg = <25>; + label = "BL"; + mhi,num-elements = <64>; + 
mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x2>; + mhi,auto-queue; + mhi,auto-start; + }; + + mhi_chan@26 { + reg = <26>; + label = "DCI"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@27 { + reg = <27>; + label = "DCI"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@32 { + reg = <32>; + label = "DUN"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@33 { + reg = <33>; + label = "DUN"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@100 { + reg = <100>; + label = "IP_HW0"; + mhi,num-elements = <512>; + mhi,event-ring = <4>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <3>; + mhi,ee = <0x4>; + mhi,db-mode-switch; + }; + + mhi_chan@101 { + reg = <101>; + label = "IP_HW0"; + mhi,num-elements = <512>; + mhi,event-ring = <5>; + mhi,chan-dir = <2>; + mhi,data-type = <1>; + mhi,doorbell-mode = <3>; + mhi,ee = <0x4>; + }; + + mhi_chan@104 { + reg = <104>; + label = "IP_HW_OFFLOAD_0"; + mhi,event-ring = <4>; + mhi,chan-dir = <1>; + mhi,data-type = <3>; + mhi,ee = <0x4>; + mhi,offload-chan; + }; + + mhi_chan@105 { + reg = <105>; + label = "IP_HW_OFFLOAD_0"; + mhi,event-ring = <5>; + mhi,chan-dir = <2>; + mhi,data-type = <3>; + mhi,ee = <0x4>; + mhi,offload-chan; + mhi,lpm-notify; + }; + + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode 
= <2>; + }; + + mhi_event@2 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <3>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@3 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <4>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@4 { + mhi,num-elements = <1024>; + mhi,intmod = <5>; + mhi,msi = <5>; + mhi,chan = <100>; + mhi,priority = <1>; + mhi,brstmode = <3>; + mhi,hw-ev; + }; + + mhi_event@5 { + mhi,num-elements = <1024>; + mhi,intmod = <5>; + mhi,msi = <6>; + mhi,chan = <101>; + mhi,priority = <1>; + mhi,brstmode = <3>; + mhi,hw-ev; + mhi,client-manage; + }; + + mhi_netdev_0: mhi_rmnet@0 { + reg = <0x0>; + mhi,chan = "IP_HW0"; + mhi,interface-name = "rmnet_mhi"; + mhi,mru = <0x4000>; + }; + + mhi_netdev_1: mhi_rmnet@1 { + reg = <0x1>; + mhi,chan = "IP_HW_ADPL"; + mhi,interface-name = "rmnet_mhi"; + mhi,mru = <0x4000>; }; }; }; -&pcie0 { - pci,bus@1 { - reg = <0 0 0 0 0>; - - mhi_1: qcom,mhi@0 { - reg = <0 0 0 0 0 >; - - /* controller specific configuration */ - qcom,smmu-cfg = <0x3>; - qcom,msm-bus,name = "mhi"; - qcom,msm-bus,num-cases = <2>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = - <45 512 0 0>, - <45 512 1200000000 650000000>; - - /* mhi bus specific settings */ - mhi,max-channels = <106>; - mhi,timeout = <2000>; - - #address-cells = <1>; - #size-cells = <0>; - - mhi_chan@0 { - reg = <0>; - label = "LOOPBACK"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@1 { - reg = <1>; - label = "LOOPBACK"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@2 { - reg = <2>; - label = "SAHARA"; - mhi,num-elements = <128>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x2>; - }; - - mhi_chan@3 { - reg = <3>; - 
label = "SAHARA"; - mhi,num-elements = <128>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x2>; - }; - - mhi_chan@4 { - reg = <4>; - label = "DIAG"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@5 { - reg = <5>; - label = "DIAG"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@8 { - reg = <8>; - label = "QDSS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@9 { - reg = <9>; - label = "QDSS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@10 { - reg = <10>; - label = "EFS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@11 { - reg = <11>; - label = "EFS"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@14 { - reg = <14>; - label = "QMI0"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@15 { - reg = <15>; - label = "QMI0"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@16 { - reg = <16>; - label = "QMI1"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@17 { - reg = <17>; - label = "QMI1"; - mhi,num-elements = <64>; - 
mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@18 { - reg = <18>; - label = "IP_CTRL"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@19 { - reg = <19>; - label = "IP_CTRL"; - mhi,num-elements = <64>; - mhi,event-ring = <1>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - mhi,auto-queue; - }; - - mhi_chan@20 { - reg = <20>; - label = "IPCR"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <1>; - mhi,data-type = <1>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - mhi,auto-start; - }; - - mhi_chan@21 { - reg = <21>; - label = "IPCR"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - mhi,auto-queue; - mhi,auto-start; - }; - - mhi_chan@22 { - reg = <22>; - label = "TF"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@23 { - reg = <23>; - label = "TF"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@25 { - reg = <25>; - label = "BL"; - mhi,num-elements = <64>; - mhi,event-ring = <2>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x2>; - mhi,auto-queue; - mhi,auto-start; - }; - - mhi_chan@26 { - reg = <26>; - label = "DCI"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@27 { - reg = <27>; - label = "DCI"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - 
mhi_chan@32 { - reg = <32>; - label = "DUN"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <1>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@33 { - reg = <33>; - label = "DUN"; - mhi,num-elements = <64>; - mhi,event-ring = <3>; - mhi,chan-dir = <2>; - mhi,data-type = <0>; - mhi,doorbell-mode = <2>; - mhi,ee = <0x4>; - }; - - mhi_chan@100 { - reg = <100>; - label = "IP_HW0"; - mhi,num-elements = <512>; - mhi,event-ring = <4>; - mhi,chan-dir = <1>; - mhi,data-type = <1>; - mhi,doorbell-mode = <3>; - mhi,ee = <0x4>; - mhi,db-mode-switch; - }; - - mhi_chan@101 { - reg = <101>; - label = "IP_HW0"; - mhi,num-elements = <512>; - mhi,event-ring = <5>; - mhi,chan-dir = <2>; - mhi,data-type = <1>; - mhi,doorbell-mode = <3>; - mhi,ee = <0x4>; - }; - - mhi_chan@104 { - reg = <104>; - label = "IP_HW_OFFLOAD_0"; - mhi,event-ring = <4>; - mhi,chan-dir = <1>; - mhi,data-type = <3>; - mhi,ee = <0x4>; - mhi,offload-chan; - }; - - mhi_chan@105 { - reg = <105>; - label = "IP_HW_OFFLOAD_0"; - mhi,event-ring = <5>; - mhi,chan-dir = <2>; - mhi,data-type = <3>; - mhi,ee = <0x4>; - mhi,offload-chan; - mhi,lpm-notify; - }; - - mhi_event@0 { - mhi,num-elements = <32>; - mhi,intmod = <1>; - mhi,msi = <1>; - mhi,priority = <1>; - mhi,brstmode = <2>; - mhi,data-type = <1>; - }; - - mhi_event@1 { - mhi,num-elements = <256>; - mhi,intmod = <1>; - mhi,msi = <2>; - mhi,priority = <1>; - mhi,brstmode = <2>; - }; - - mhi_event@2 { - mhi,num-elements = <256>; - mhi,intmod = <1>; - mhi,msi = <3>; - mhi,priority = <1>; - mhi,brstmode = <2>; - }; - - mhi_event@3 { - mhi,num-elements = <256>; - mhi,intmod = <1>; - mhi,msi = <4>; - mhi,priority = <1>; - mhi,brstmode = <2>; - }; - - mhi_event@4 { - mhi,num-elements = <1024>; - mhi,intmod = <5>; - mhi,msi = <5>; - mhi,chan = <100>; - mhi,priority = <1>; - mhi,brstmode = <3>; - mhi,hw-ev; - }; - - mhi_event@5 { - mhi,num-elements = <1024>; - mhi,intmod = <5>; - mhi,msi = <6>; - mhi,chan = <101>; - 
mhi,priority = <1>; - mhi,brstmode = <3>; - mhi,hw-ev; - mhi,client-manage; - }; - - mhi_netdev_2: mhi_rmnet@0 { - reg = <0x0>; - mhi,chan = "IP_HW0"; - mhi,interface-name = "rmnet_mhi"; - mhi,mru = <0x4000>; - }; - - mhi_netdev_3: mhi_rmnet@1 { - reg = <0x1>; - mhi,chan = "IP_HW_ADPL"; - mhi,interface-name = "rmnet_mhi"; - mhi,mru = <0x4000>; - }; +&pcie_rc0 { + reg = <0 0 0 0 0>; + + mhi_1: qcom,mhi@0 { + reg = <0 0 0 0 0 >; + + /* controller specific configuration */ + qcom,smmu-cfg = <0x3>; + qcom,msm-bus,name = "mhi"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <45 512 0 0>, + <45 512 1200000000 650000000>; + + /* mhi bus specific settings */ + mhi,max-channels = <106>; + mhi,timeout = <2000>; + + #address-cells = <1>; + #size-cells = <0>; + + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@2 { + reg = <2>; + label = "SAHARA"; + mhi,num-elements = <128>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x2>; + }; + + mhi_chan@3 { + reg = <3>; + label = "SAHARA"; + mhi,num-elements = <128>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x2>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@8 
{ + reg = <8>; + label = "QDSS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@9 { + reg = <9>; + label = "QDSS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@10 { + reg = <10>; + label = "EFS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@11 { + reg = <11>; + label = "EFS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@14 { + reg = <14>; + label = "QMI0"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@15 { + reg = <15>; + label = "QMI0"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@16 { + reg = <16>; + label = "QMI1"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@17 { + reg = <17>; + label = "QMI1"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@18 { + reg = <18>; + label = "IP_CTRL"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@19 { + reg = <19>; + label = "IP_CTRL"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + mhi,auto-queue; + }; + + mhi_chan@20 { + reg = <20>; + 
label = "IPCR"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + mhi,auto-start; + }; + + mhi_chan@21 { + reg = <21>; + label = "IPCR"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + mhi,auto-queue; + mhi,auto-start; + }; + + mhi_chan@22 { + reg = <22>; + label = "TF"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@23 { + reg = <23>; + label = "TF"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@25 { + reg = <25>; + label = "BL"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x2>; + mhi,auto-queue; + mhi,auto-start; + }; + + mhi_chan@26 { + reg = <26>; + label = "DCI"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@27 { + reg = <27>; + label = "DCI"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@32 { + reg = <32>; + label = "DUN"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@33 { + reg = <33>; + label = "DUN"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x4>; + }; + + mhi_chan@100 { + reg = <100>; + label = "IP_HW0"; + mhi,num-elements = <512>; + mhi,event-ring = <4>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <3>; + mhi,ee = 
<0x4>; + mhi,db-mode-switch; + }; + + mhi_chan@101 { + reg = <101>; + label = "IP_HW0"; + mhi,num-elements = <512>; + mhi,event-ring = <5>; + mhi,chan-dir = <2>; + mhi,data-type = <1>; + mhi,doorbell-mode = <3>; + mhi,ee = <0x4>; + }; + + mhi_chan@104 { + reg = <104>; + label = "IP_HW_OFFLOAD_0"; + mhi,event-ring = <4>; + mhi,chan-dir = <1>; + mhi,data-type = <3>; + mhi,ee = <0x4>; + mhi,offload-chan; + }; + + mhi_chan@105 { + reg = <105>; + label = "IP_HW_OFFLOAD_0"; + mhi,event-ring = <5>; + mhi,chan-dir = <2>; + mhi,data-type = <3>; + mhi,ee = <0x4>; + mhi,offload-chan; + mhi,lpm-notify; + }; + + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@2 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <3>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@3 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <4>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@4 { + mhi,num-elements = <1024>; + mhi,intmod = <5>; + mhi,msi = <5>; + mhi,chan = <100>; + mhi,priority = <1>; + mhi,brstmode = <3>; + mhi,hw-ev; + }; + + mhi_event@5 { + mhi,num-elements = <1024>; + mhi,intmod = <5>; + mhi,msi = <6>; + mhi,chan = <101>; + mhi,priority = <1>; + mhi,brstmode = <3>; + mhi,hw-ev; + mhi,client-manage; + }; + + mhi_netdev_2: mhi_rmnet@0 { + reg = <0x0>; + mhi,chan = "IP_HW0"; + mhi,interface-name = "rmnet_mhi"; + mhi,mru = <0x4000>; + }; + + mhi_netdev_3: mhi_rmnet@1 { + reg = <0x1>; + mhi,chan = "IP_HW_ADPL"; + mhi,interface-name = "rmnet_mhi"; + mhi,mru = <0x4000>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi index bae3dd7b4ea3cd27fd64ea392e4beca7af0c1a56..419be9122e87e175c572e5e0b506f75d3a5b6d57 100644 --- 
a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi @@ -280,6 +280,7 @@ &pm8150l_wled { qcom,string-cfg= <7>; + qcom,leds-per-string = <6>; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi index b17b9a60f5da2de836e347144007c03145e986cc..a06d5305bd8dd2cbfd8080bbe4ffa60c36957995 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi @@ -18,7 +18,7 @@ reg-names = "npu_base"; interrupts = , , - ; + ; interrupt-names = "error_irq", "wdg_bite_irq", "ipc_irq"; iommus = <&apps_smmu 0x1461 0x0>, <&apps_smmu 0x2061 0x0>; cache-slice-names = "npu"; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi index f11fc041bd2bd0c844539c399556c598039cb828..b4130aad5a2bcc0ade5b2bc5e2d3284d1f58d003 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi @@ -134,17 +134,19 @@ 0x0034 0x06 0x0 0x029c 0x12 0x0 0x0284 0x05 0x0 - 0x0c38 0x03 0x0 + 0x051c 0x03 0x0 0x0518 0x1c 0x0 0x0524 0x14 0x0 - 0x04e8 0x00 0x0 - 0x04ec 0x0e 0x0 - 0x04f0 0x4a 0x0 - 0x04f4 0x0f 0x0 + 0x04e8 0x07 0x0 + 0x04ec 0x6e 0x0 + 0x04f0 0x6e 0x0 + 0x04f4 0x4a 0x0 0x05b4 0x04 0x0 0x0434 0x7f 0x0 0x0444 0x70 0x0 0x0510 0x17 0x0 + 0x04d4 0x54 0x0 + 0x04d8 0x37 0x0 0x0598 0xd4 0x0 0x059c 0x54 0x0 0x05a0 0xdb 0x0 @@ -154,7 +156,7 @@ 0x0588 0xe4 0x0 0x058c 0xec 0x0 0x0590 0x39 0x0 - 0x0594 0x37 0x0 + 0x0594 0x36 0x0 0x0570 0x7f 0x0 0x0574 0xff 0x0 0x0578 0xff 0x0 @@ -162,31 +164,25 @@ 0x0580 0x75 0x0 0x04fc 0x00 0x0 0x04f8 0xc0 0x0 - 0x0414 0x04 0x0 + 0x0460 0xa0 0x0 + 0x0464 0xc0 0x0 + 0x05bc 0x0c 0x0 + 0x04dc 0x05 0x0 + 0x0408 0x0c 0x0 + 0x0414 0x03 0x0 0x09a4 0x01 0x0 0x0c90 0x00 0x0 0x0c40 0x01 0x0 0x0c48 0x01 0x0 0x0c50 0x00 0x0 + 0x0cbc 0x00 0x0 + 0x0ce0 0x58 0x0 0x0048 0x90 0x0 0x0c1c 0xc1 0x0 - 0x0988 0x66 0x0 - 0x0998 0x08 0x0 + 0x0988 0xaa 0x0 + 0x0998 0x0b 0x0 0x08dc 0x0d 0x0 
0x09ec 0x01 0x0 - 0x04b4 0x02 0x0 - 0x04b8 0x02 0x0 - 0x04bc 0xaa 0x0 - 0x04c0 0x00 0x0 - 0x04d4 0x54 0x0 - 0x04d8 0x07 0x0 - 0x0460 0xa0 0x0 - 0x05c4 0x0c 0x0 - 0x0464 0x00 0x0 - 0x05c0 0x10 0x0 - 0x04dc 0x05 0x0 - 0x0408 0x0c 0x0 - 0x0414 0x03 0x0 0x0800 0x00 0x0 0x0844 0x03 0x0>; @@ -282,6 +278,10 @@ reset-names = "pcie_0_core_reset", "pcie_0_phy_reset"; + + pcie_rc0: pcie_rc0 { + reg = <0 0 0 0 0>; + }; }; pcie1: qcom,pcie@1c08000 { @@ -369,7 +369,6 @@ 0x0030 0x4c 0x0 0x0034 0x06 0x0 0x0048 0x90 0x0 - 0x0050 0x07 0x0 0x0058 0x0f 0x0 0x0074 0x06 0x0 0x0078 0x06 0x0 @@ -395,25 +394,29 @@ 0x0110 0x24 0x0 0x0118 0xb4 0x0 0x011c 0x03 0x0 - 0x0154 0x32 0x0 + 0x0154 0x34 0x0 0x0158 0x01 0x0 0x016c 0x08 0x0 0x01ac 0xb9 0x0 0x01b0 0x1e 0x0 0x01b4 0x94 0x0 0x01b8 0x18 0x0 - 0x01bc 0x01 0x0 + 0x01bc 0x11 0x0 0x0284 0x05 0x0 0x029c 0x12 0x0 0x0408 0x0c 0x0 0x0414 0x03 0x0 0x0434 0x7f 0x0 0x0444 0x70 0x0 - 0x04d8 0x01 0x0 - 0x04e8 0x00 0x0 - 0x04ec 0x0e 0x0 - 0x04f0 0x4a 0x0 - 0x04f4 0x0f 0x0 + 0x0460 0xa0 0x0 + 0x0464 0xc0 0x0 + 0x04d4 0x54 0x0 + 0x04d8 0x37 0x0 + 0x04dc 0x05 0x0 + 0x04e8 0x07 0x0 + 0x04ec 0x6e 0x0 + 0x04f0 0x6e 0x0 + 0x04f4 0x4a 0x0 0x04f8 0xc0 0x0 0x04fc 0x00 0x0 0x0510 0x17 0x0 @@ -435,29 +438,22 @@ 0x05a0 0xdb 0x0 0x05a4 0x39 0x0 0x05a8 0x31 0x0 - 0x05b4 0x04 0x0 - 0x04b4 0x02 0x0 - 0x04b8 0x02 0x0 - 0x04bc 0xaa 0x0 - 0x04c0 0x00 0x0 - 0x04d4 0x54 0x0 - 0x04d8 0x07 0x0 - 0x0460 0xa0 0x0 - 0x05c4 0x0c 0x0 - 0x0464 0x00 0x0 - 0x05c0 0x10 0x0 - 0x04dc 0x05 0x0 + 0x05bc 0x0c 0x0 0x0684 0x05 0x0 0x069c 0x12 0x0 0x0808 0x0c 0x0 0x0814 0x03 0x0 0x0834 0x7f 0x0 0x0844 0x70 0x0 - 0x08d8 0x01 0x0 - 0x08e8 0x00 0x0 - 0x08ec 0x0e 0x0 - 0x08f0 0x4a 0x0 - 0x08f4 0x0f 0x0 + 0x0860 0xa0 0x0 + 0x0864 0xc0 0x0 + 0x08d4 0x54 0x0 + 0x08d8 0x37 0x0 + 0x08dc 0x05 0x0 + 0x08e8 0x07 0x0 + 0x08ec 0x6e 0x0 + 0x08f0 0x6e 0x0 + 0x08f4 0x4a 0x0 0x08f8 0xc0 0x0 0x08fc 0x00 0x0 0x0910 0x17 0x0 @@ -472,41 +468,27 @@ 0x0984 0x24 0x0 0x0988 0xe4 0x0 0x098c 0xec 0x0 - 0x0990 0x3a 0x0 + 
0x0990 0x39 0x0 0x0994 0x36 0x0 0x0998 0xd4 0x0 0x099c 0x54 0x0 0x09a0 0xdb 0x0 0x09a4 0x39 0x0 0x09a8 0x31 0x0 - 0x09b4 0x04 0x0 - 0x08b4 0x02 0x0 - 0x08b8 0x02 0x0 - 0x08bc 0xaa 0x0 - 0x08c0 0x00 0x0 - 0x08d4 0x54 0x0 - 0x08d8 0x07 0x0 - 0x0860 0xa0 0x0 - 0x09c4 0x0c 0x0 - 0x0864 0x00 0x0 - 0x09c0 0x10 0x0 - 0x08dc 0x05 0x0 - 0x0a98 0x01 0x0 - 0x0abc 0x56 0x0 - 0x0adc 0x0d 0x0 - 0x0b88 0x66 0x0 + 0x09bc 0x0c 0x0 + 0x0adc 0x05 0x0 + 0x0b88 0xaa 0x0 + 0x0b98 0x0b 0x0 0x0ba4 0x01 0x0 - 0x0b98 0x08 0x0 + 0x0bec 0x01 0x0 + 0x0e0c 0x0d 0x0 0x0e14 0x07 0x0 0x0e1c 0xc1 0x0 0x0e40 0x01 0x0 0x0e48 0x01 0x0 - 0x0e78 0x50 0x0 0x0e90 0x00 0x0 - 0x0ea0 0x11 0x0 - 0x0e38 0x03 0x0 - 0x0e50 0x00 0x0 - 0x0e20 0x01 0x0 + 0x0ebc 0x00 0x0 + 0x0ee0 0x58 0x0 0x0a00 0x00 0x0 0x0a44 0x03 0x0>; @@ -602,5 +584,9 @@ reset-names = "pcie_1_core_reset", "pcie_1_phy_reset"; + + pcie_rc1: pcie_rc1 { + reg = <0 0 0 0 0>; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi index 13e81f156c47b1067b5c5bd1cbfb595d34c07b1d..8a689c7dcd230b921533950fab20319e1a719622 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi @@ -131,7 +131,7 @@ config { pins = "sdc2_cmd"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -147,7 +147,7 @@ config { pins = "sdc2_cmd"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -155,7 +155,7 @@ config { pins = "sdc2_cmd"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -163,7 +163,7 @@ config { pins = "sdc2_cmd"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -171,7 +171,7 @@ config { pins = "sdc2_cmd"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ 
-179,7 +179,7 @@ config { pins = "sdc2_data"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -195,7 +195,7 @@ config { pins = "sdc2_data"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -203,7 +203,7 @@ config { pins = "sdc2_data"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -211,7 +211,7 @@ config { pins = "sdc2_data"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -219,7 +219,7 @@ config { pins = "sdc2_data"; bias-pull-up; /* pull up */ - drive-strength = <10>; /* 10 MA */ + drive-strength = <16>; /* 16 MA */ }; }; @@ -3879,12 +3879,12 @@ cam_sensor_active_iris: cam_sensor_active_iris { /* RESET IRIS */ mux { - pins = "gpio23"; + pins = "gpio23", "gpio26"; function = "gpio"; }; config { - pins = "gpio23"; + pins = "gpio23", "gpio26"; bias-disable; /* No PULL */ drive-strength = <2>; /* 2 MA */ }; @@ -3893,12 +3893,12 @@ cam_sensor_suspend_iris: cam_sensor_suspend_iris { /* RESET IRIS */ mux { - pins = "gpio23"; + pins = "gpio23", "gpio26"; function = "gpio"; }; config { - pins = "gpio23"; + pins = "gpio23", "gpio26"; bias-pull-down; /* PULL DOWN */ drive-strength = <2>; /* 2 MA */ output-low; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi index 1a2ac061dadb0282def391b2d6ac6eb896f38258..31dcce67f48aa07beb59243992439c6ec44ffb89 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi @@ -30,21 +30,19 @@ reg = <0>; label = "l3-wfi"; qcom,psci-mode = <0x1>; - qcom,latency-us = <51>; - qcom,ss-power = <452>; - qcom,energy-overhead = <69355>; - qcom,time-overhead = <99>; + qcom,entry-latency-us = <48>; + qcom,exit-latency-us = <51>; + qcom,min-residency-us = <99>; }; qcom,pm-cluster-level@1 { /* LLCC off, AOSS sleep */ reg = 
<1>; label = "llcc-off"; qcom,psci-mode = <0xC24>; - qcom,latency-us = <6562>; - qcom,ss-power = <108>; - qcom,energy-overhead = <4000000>; - qcom,time-overhead = <5000>; - qcom,min-child-idx = <2>; + qcom,entry-latency-us = <3263>; + qcom,exit-latency-us = <6562>; + qcom,min-residency-us = <9987>; + qcom,min-child-idx = <1>; qcom,is-reset; qcom,notify-rpm; }; @@ -63,32 +61,18 @@ reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <43>; - qcom,ss-power = <150>; - qcom,energy-overhead = <10000>; - qcom,time-overhead = <100>; + qcom,entry-latency-us = <57>; + qcom,exit-latency-us = <43>; + qcom,min-residency-us = <100>; }; - qcom,pm-cpu-level@1 { /* C3 */ + qcom,pm-cpu-level@1 { /* C4 */ reg = <1>; - label = "pc"; - qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <461>; - qcom,ss-power = <100>; - qcom,energy-overhead = <400000>; - qcom,time-overhead = <500>; - qcom,is-reset; - qcom,use-broadcast-timer; - }; - - qcom,pm-cpu-level@2 { /* C4 */ - reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <531>; - qcom,ss-power = <73>; - qcom,energy-overhead = <500000>; - qcom,time-overhead = <600>; + qcom,entry-latency-us = <360>; + qcom,exit-latency-us = <531>; + qcom,min-residency-us = <3934>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -105,32 +89,18 @@ reg = <0>; label = "wfi"; qcom,psci-cpu-mode = <0x1>; - qcom,latency-us = <43>; - qcom,ss-power = <454>; - qcom,energy-overhead = <38639>; - qcom,time-overhead = <83>; + qcom,entry-latency-us = <57>; + qcom,exit-latency-us = <43>; + qcom,min-residency-us = <83>; }; - qcom,pm-cpu-level@1 { /* C3 */ + qcom,pm-cpu-level@1 { /* C4 */ reg = <1>; - label = "pc"; - qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <621>; - qcom,ss-power = <436>; - qcom,energy-overhead = <418225>; - qcom,time-overhead = <885>; - qcom,is-reset; - qcom,use-broadcast-timer; - }; - - qcom,pm-cpu-level@2 { /* C4 */ - reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <1061>; - 
qcom,ss-power = <400>; - qcom,energy-overhead = <428225>; - qcom,time-overhead = <1000>; + qcom,entry-latency-us = <702>; + qcom,exit-latency-us = <1061>; + qcom,min-residency-us = <4488>; qcom,is-reset; qcom,use-broadcast-timer; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi index 6336bbce62d11e3a71c7be5742ae49908967ddbe..e2b84564ac8babe1115eb885ec3e9eb09d16ca0f 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi @@ -49,6 +49,13 @@ qcom,battery-data = <&qrd_batterydata>; qcom,hold-soc-while-full; qcom,linearize-soc; + /* ESR fast calibration */ + qcom,fg-esr-timer-chg-fast = <0 7>; + qcom,fg-esr-timer-dischg-fast = <0 7>; + qcom,fg-esr-timer-chg-slow = <0 96>; + qcom,fg-esr-timer-dischg-slow = <0 96>; + qcom,fg-esr-cal-soc-thresh = <26 230>; + qcom,fg-esr-cal-temp-thresh = <10 40>; }; &soc { @@ -521,6 +528,38 @@ status = "ok"; }; +&pm8150b_haptics { + qcom,vmax-mv = <2545>; + qcom,play-rate-us = <4255>; + wf_0 { + /* CLICK */ + qcom,wf-pattern = [3e 3e 3e 3e]; + qcom,wf-play-rate-us = <4255>; + }; + wf_1 { + /* DOUBLE CLICK */ + qcom,wf-play-rate-us = <7143>; + }; + wf_2 { + /* TICK */ + qcom,wf-play-rate-us = <4000>; + }; + wf_3 { + /* THUD */ + qcom,wf-pattern = [7e 7e 7e 7e]; + qcom,wf-play-rate-us = <4255>; + }; + wf_4 { + /* POP */ + qcom,wf-play-rate-us = <5000>; + }; + wf_5 { + /* HEAVY CLICK */ + qcom,wf-pattern = [7e 7e 7e 7e]; + qcom,wf-play-rate-us = <4255>; + }; +}; + &pm8150b_charger { qcom,sec-charger-config = <1>; qcom,auto-recharge-soc = <98>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi index 51a1c53c7b4e39cd213f2d7d6893144420c0c9cd..68d297e3fb6f89d2f1adfb94cc2800eeb995d219 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi @@ -30,6 +30,7 @@ #include "dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi" #include 
"dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi" #include "dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi" +#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi" #include &tlmm { @@ -345,6 +346,17 @@ qcom,dsi-panel = <&dsi_sw43404_amoled_fhd_plus_cmd>; }; + dsi_dual_nt36850_truly_cmd_display: qcom,dsi-display@19 { + label = "dsi_dual_nt36850_truly_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0 1>; + qcom,dsi-phy-num = <0 1>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_dual_nt36850_truly_cmd>; + }; + sde_dsi: qcom,dsi-display-primary { compatible = "qcom,dsi-display"; label = "primary"; @@ -390,7 +402,8 @@ &dsi_nt35695b_truly_fhd_cmd_display &dsi_nt35695b_truly_fhd_video_display &dsi_sw43404_amoled_video_display - &dsi_sw43404_amoled_fhd_plus_cmd_display>; + &dsi_sw43404_amoled_fhd_plus_cmd_display + &dsi_dual_nt36850_truly_cmd_display>; }; sde_dsi1: qcom,dsi-display-secondary { @@ -549,6 +562,7 @@ qcom,mdss-dsi-panel-status-value = <0x77>; qcom,mdss-dsi-panel-on-check-value = <0x77>; qcom,mdss-dsi-panel-status-read-length = <1>; + qcom,mdss-dsi-qsync-min-refresh-rate = <55>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08 @@ -747,6 +761,13 @@ }; &dsi_sw43404_amoled_cmd { + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-panel-status-value = <0x9c>; + qcom,mdss-dsi-panel-on-check-value = <0x9c>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0 { qcom,mdss-dsi-panel-phy-timings = [00 16 05 05 20 1f 06 @@ -792,3 +813,15 @@ }; }; }; + +&dsi_dual_nt36850_truly_cmd { + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08 + 08 05 03 04 00 1a 18]; + qcom,display-topology = <2 0 2>; + 
qcom,default-topology-index = <0>; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi index ea5861d6196319e33d6ed2ab2d73c0d803d18755..eb4318423ae74a16bbdadf59e52f8502b34fa33e 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi @@ -24,6 +24,8 @@ clock-names = "iface_clk"; clock-rate = <0>; gdsc-supply = <&mdss_core_gdsc>; + qcom,dsi-pll-ssc-en; + qcom,dsi-pll-ssc-mode = "down-spread"; qcom,platform-supply-entries { #address-cells = <1>; #size-cells = <0>; @@ -51,6 +53,8 @@ clock-names = "iface_clk"; clock-rate = <0>; gdsc-supply = <&mdss_core_gdsc>; + qcom,dsi-pll-ssc-en; + qcom,dsi-pll-ssc-mode = "down-spread"; qcom,platform-supply-entries { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi index 8fed5c7d190786395a86402fb4e43f5cdd1f7c1b..6deadd72e4d4484a83c61e23cc4ae2e56263c639 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi @@ -512,7 +512,7 @@ compatible = "qcom,dsi-phy-v4.0"; label = "dsi-phy-0"; cell-index = <0>; - reg = <0xae94400 0x7c0>; + reg = <0xae94400 0x760>; reg-names = "dsi_phy"; vdda-0p9-supply = <&pm8150_l5>; qcom,platform-strength-ctrl = [55 03 @@ -544,7 +544,7 @@ compatible = "qcom,dsi-phy-v4.0"; label = "dsi-phy-1"; cell-index = <1>; - reg = <0xae96400 0x7c0>; + reg = <0xae96400 0x760>; reg-names = "dsi_phy"; vdda-0p9-supply = <&pm8150_l5>; qcom,platform-strength-ctrl = [55 03 @@ -607,7 +607,6 @@ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>, - <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>, <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>, @@ -616,9 +615,8 @@ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>, <&clock_dispcc 
DISP_CC_MDSS_DP_PIXEL1_CLK>; clock-names = "core_aux_clk", "core_usb_ref_clk_src", - "core_usb_ref_clk", - "core_usb_pipe_clk", "ctrl_link_clk", - "ctrl_link_iface_clk", "ctrl_pixel_clk", + "core_usb_ref_clk", "core_usb_pipe_clk", + "link_clk", "link_iface_clk", "crypto_clk", "pixel_clk_rcg", "pixel_parent", "pixel1_clk_rcg", "pixel1_parent", "strm0_pixel_clk", "strm1_pixel_clk"; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi index 0f66048824b6dc6d48cd9a7aac483f4261899946..10f831205a19e7adc05773943c91a6b5a57eeb8d 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi @@ -147,11 +147,6 @@ }; }; -&pcie0 { - iommu-map = <0x0 &apps_smmu 0x1d80 0x1>, - <0x100 &apps_smmu 0x1dff 0x1>; -}; - &pcie1 { pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default @@ -167,6 +162,80 @@ qcom,mhi-chdb-base = <0x40300300>; qcom,mhi-erdb-base = <0x40300700>; }; + + qmi-tmd-devices { + modem1 { + qcom,instance-id = <0x64>; + + modem1_pa: modem1_pa { + qcom,qmi-dev-name = "pa"; + #cooling-cells = <2>; + }; + + modem1_proc: modem1_proc { + qcom,qmi-dev-name = "modem"; + #cooling-cells = <2>; + }; + + modem1_current: modem1_current { + qcom,qmi-dev-name = "modem_current"; + #cooling-cells = <2>; + }; + + modem1_skin: modem1_skin { + qcom,qmi-dev-name = "modem_skin"; + #cooling-cells = <2>; + }; + + modem1_vdd: modem1_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + }; + + qmi_sensor: qmi-ts-sensors { + compatible = "qcom,qmi-sensors"; + #thermal-sensor-cells = <1>; + + modem { + qcom,instance-id = <0x0>; + qcom,qmi-sensor-names = "pa", + "pa_1", + "qfe_pa0", + "qfe_wtr0"; + }; + }; +}; + +&thermal_zones { + modem0-pa0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor 0>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + 
}; + }; + }; + + modem0-pa1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor 1>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; }; &reserved_memory { diff --git a/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..8c7e14e42b7fb1664531d2f175da4ca7af4c509f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi @@ -0,0 +1,20 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + slpi_tlmm: slpi_pinctrl@02B40000 { + compatible = "qcom,slpi-pinctrl"; + reg = <0x2B40000 0x20000>; + qcom,num-pins = <14>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi index 64d4be3cf4c070e352e65c40de6933edf8f25250..829c52d90733d2912b0b61fccea792cf5d57f551 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi @@ -100,6 +100,12 @@ qcom,entry-name = "sleepstate"; #qcom,smem-state-cells = <1>; }; + + sleepstate_smp2p_in: qcom,sleepstate-in { + qcom,entry-name = "sleepstate_see"; + interrupt-controller; + #interrupt-cells = <2>; + }; }; qcom,smp2p-cdsp@1799000c { @@ -131,6 +137,11 @@ interrupt-controller; #interrupt-cells = <2>; }; + + smp2p_qvrexternal5_out: qcom,smp2p-qvrexternal5-out { + qcom,entry-name = "qvrexternal"; + #qcom,smem-state-cells = <1>; + }; }; /* wlan - inbound entry from mss/WLAN PD */ diff --git a/arch/arm64/boot/dts/qcom/sm8150-thermal-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm8150-thermal-overlay.dtsi index 7a874b26835b9372591831f15591184615426358..9c8b8ea200f73f3311674729b77dbee6b244012c 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-thermal-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-thermal-overlay.dtsi @@ -168,18 +168,41 @@ <&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; - vbat_cpu6 { + vbat_gpu0 { trip = <&vbat_lvl0>; + cooling-device = <&msm_gpu 2 2>; + }; + }; + }; + + pm8150b-vbat-lvl1 { + cooling-maps { + vbat_cpu6 { + trip = <&vbat_lvl1>; cooling-device = <&CPU6 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; vbat_cpu7 { - trip = <&vbat_lvl0>; + trip = <&vbat_lvl1>; cooling-device = <&CPU7 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; + vbat_gpu1 { + trip = <&vbat_lvl1>; + cooling-device = <&msm_gpu 4 4>; + }; + }; + }; + + pm8150b-vbat-lvl2 { + cooling-maps { + vbat_gpu2 { + trip = <&vbat_lvl2>; + cooling-device = <&msm_gpu THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; }; }; @@ -197,18 
+220,31 @@ <&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; - ibat_cpu6 { + ibat_gpu0 { trip = <&ibat_lvl0>; + cooling-device = <&msm_gpu 2 2>; + }; + }; + }; + + pm8150b-ibat-lvl1 { + cooling-maps { + ibat_cpu6 { + trip = <&ibat_lvl1>; cooling-device = <&CPU6 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; ibat_cpu7 { - trip = <&ibat_lvl0>; + trip = <&ibat_lvl1>; cooling-device = <&CPU7 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; + ibat_gpu1 { + trip = <&ibat_lvl1>; + cooling-device = <&msm_gpu 4 4>; + }; }; }; @@ -227,18 +263,43 @@ <&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; - vph_cpu6 { + vph_gpu0 { trip = <&vph_lvl0>; + cooling-device = <&msm_gpu 2 2>; + }; + }; + }; + + pm8150l-vph-lvl1 { + disable-thermal-zone; + cooling-maps { + vph_cpu6 { + trip = <&vph_lvl1>; cooling-device = <&CPU6 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; vph_cpu7 { - trip = <&vph_lvl0>; + trip = <&vph_lvl1>; cooling-device = <&CPU7 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; + vph_gpu1 { + trip = <&vph_lvl1>; + cooling-device = <&msm_gpu 4 4>; + }; + }; + }; + + pm8150l-vph-lvl2 { + disable-thermal-zone; + cooling-maps { + vph_gpu2 { + trip = <&vph_lvl2>; + cooling-device = <&msm_gpu THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-thermal.dtsi b/arch/arm64/boot/dts/qcom/sm8150-thermal.dtsi index 43cd13701f8cc26c015e2e970d5e4b256953eb71..456ff38abd06f3b2b73873ae4923e5c12dde8c92 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-thermal.dtsi @@ -2493,7 +2493,7 @@ cpu11_cdev { trip = <&cpu11_config>; cooling-device = - <&CPU4 THERMAL_MAX_LIMIT + <&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ -2515,7 +2515,7 @@ cpu12_cdev { trip = <&cpu12_config>; cooling-device = - <&CPU5 THERMAL_MAX_LIMIT + <&CPU6 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ -2537,7 +2537,7 @@ cpu13_cdev { trip = <&cpu13_config>; cooling-device = - <&CPU5 THERMAL_MAX_LIMIT + <&CPU7 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ 
-2559,7 +2559,7 @@ cpu14_cdev { trip = <&cpu14_config>; cooling-device = - <&CPU6 THERMAL_MAX_LIMIT + <&CPU4 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ -2581,7 +2581,7 @@ cpu15_cdev { trip = <&cpu15_config>; cooling-device = - <&CPU6 THERMAL_MAX_LIMIT + <&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ -2603,7 +2603,7 @@ cpu16_cdev { trip = <&cpu16_config>; cooling-device = - <&CPU7 THERMAL_MAX_LIMIT + <&CPU6 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi index 74d0929f0c484a474738b6c9919289759dbd661e..1351a7e09ac5528cb8b63b0baca040169d29ef0d 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi @@ -52,10 +52,17 @@ qcom,core-clk-rate = <200000000>; qcom,core-clk-rate-hs = <66666667>; qcom,num-gsi-evt-buffs = <0x3>; + qcom,gsi-reg-offset = + <0x0fc /* GSI_GENERAL_CFG */ + 0x110 /* GSI_DBL_ADDR_L */ + 0x120 /* GSI_DBL_ADDR_H */ + 0x130 /* GSI_RING_BASE_ADDR_L */ + 0x144 /* GSI_RING_BASE_ADDR_H */ + 0x1a4>; /* GSI_IF_STS */ qcom,dwc-usb3-msm-tx-fifo-size = <27696>; qcom,msm-bus,name = "usb0"; - qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-cases = <4>; qcom,msm-bus,num-paths = <3>; qcom,msm-bus,vectors-KBps = /* suspend vote */ @@ -73,7 +80,12 @@ , , - ; + , + + /* min vote */ + , + , + ; dwc3@a600000 { compatible = "snps,dwc3"; @@ -293,6 +305,7 @@ USB3_DP_PCS_POWER_DOWN_CONTROL USB3_DP_PCS_SW_RESET USB3_DP_PCS_START_CONTROL + 0xffff /* USB3_PHY_PCS_MISC_TYPEC_CTRL */ 0x2a18 /* USB3_DP_DP_PHY_PD_CTL */ USB3_DP_COM_POWER_DOWN_CTRL USB3_DP_COM_SW_RESET @@ -362,6 +375,13 @@ qcom,core-clk-rate = <200000000>; qcom,core-clk-rate-hs = <66666667>; qcom,num-gsi-evt-buffs = <0x3>; + qcom,gsi-reg-offset = + <0x0fc /* GSI_GENERAL_CFG */ + 0x110 /* GSI_DBL_ADDR_L */ + 0x120 /* GSI_DBL_ADDR_H */ + 0x130 /* GSI_RING_BASE_ADDR_L */ + 0x144 /* GSI_RING_BASE_ADDR_H */ + 0x1a4>; /* GSI_IF_STS */ qcom,dwc-usb3-msm-tx-fifo-size = <27696>; 
qcom,charging-disabled; @@ -398,6 +418,7 @@ snps,has-lpm-erratum; snps,hird-threshold = /bits/ 8 <0x10>; snps,usb3_lpm_capable; + snps,bus-suspend-enable; usb-core-id = <0>; maximum-speed = "super-speed"; dr_mode = "otg"; diff --git a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi index dc2b32403ffb0ae0e12dd2f1d4eb38a6f3fe4a36..12ef0f5e43ac8c75e85b7ba7c4c67baa0fb279ce 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi @@ -23,6 +23,12 @@ /delete-node/ &apps_smmu; /delete-node/ &kgsl_smmu; +&soc { + /delete-node/ llcc-bw-opp-table; + /delete-node/ ddr-bw-opp-table; + /delete-node/ suspendable-ddr-bw-opp-table; +}; + &mdss_mdp { qcom,fullsize-va-map; }; @@ -69,6 +75,11 @@ &clock_npucc { compatible = "qcom,npucc-sm8150-v2", "syscon"; }; + +&clock_scc { + compatible = "qcom,scc-sm8150-v2"; +}; + #include "msm-arm-smmu-sm8150-v2.dtsi" &pcie0 { @@ -80,6 +91,103 @@ <0x60100000 0x100000>, <0x60200000 0x100000>, <0x60300000 0x3d00000>; + + qcom,phy-sequence = <0x0840 0x03 0x0 + 0x0094 0x08 0x0 + 0x0154 0x34 0x0 + 0x016c 0x08 0x0 + 0x0058 0x0f 0x0 + 0x00a4 0x42 0x0 + 0x0110 0x24 0x0 + 0x011c 0x03 0x0 + 0x0118 0xb4 0x0 + 0x010c 0x02 0x0 + 0x01bc 0x11 0x0 + 0x00bc 0x82 0x0 + 0x00d4 0x03 0x0 + 0x00d0 0x55 0x0 + 0x00cc 0x55 0x0 + 0x00b0 0x1a 0x0 + 0x00ac 0x0a 0x0 + 0x00c4 0x68 0x0 + 0x00e0 0x02 0x0 + 0x00dc 0xaa 0x0 + 0x00d8 0xab 0x0 + 0x00b8 0x34 0x0 + 0x00b4 0x14 0x0 + 0x0158 0x01 0x0 + 0x0074 0x06 0x0 + 0x007c 0x16 0x0 + 0x0084 0x36 0x0 + 0x0078 0x06 0x0 + 0x0080 0x16 0x0 + 0x0088 0x36 0x0 + 0x01b0 0x1e 0x0 + 0x01ac 0xb9 0x0 + 0x01b8 0x18 0x0 + 0x01b4 0x94 0x0 + 0x0050 0x07 0x0 + 0x0010 0x00 0x0 + 0x001c 0x31 0x0 + 0x0020 0x01 0x0 + 0x0024 0xde 0x0 + 0x0028 0x07 0x0 + 0x0030 0x4c 0x0 + 0x0034 0x06 0x0 + 0x029c 0x12 0x0 + 0x0284 0x35 0x0 + 0x023c 0x11 0x0 + 0x051c 0x03 0x0 + 0x0518 0x1c 0x0 + 0x0524 0x1e 0x0 + 0x04e8 0x07 0x0 + 0x04ec 0x6e 0x0 + 0x04f0 0x6e 0x0 + 0x04f4 0x4a 0x0 + 0x05b4 0x04 0x0 
+ 0x0434 0x7f 0x0 + 0x0444 0x70 0x0 + 0x0510 0x17 0x0 + 0x04d4 0x54 0x0 + 0x04d8 0x37 0x0 + 0x0598 0xd4 0x0 + 0x059c 0x54 0x0 + 0x05a0 0xdb 0x0 + 0x05a4 0x39 0x0 + 0x05a8 0x31 0x0 + 0x0584 0x24 0x0 + 0x0588 0xe4 0x0 + 0x058c 0xec 0x0 + 0x0590 0x3b 0x0 + 0x0594 0x36 0x0 + 0x0570 0xff 0x0 + 0x0574 0xff 0x0 + 0x0578 0xff 0x0 + 0x057c 0x7f 0x0 + 0x0580 0x66 0x0 + 0x04fc 0x00 0x0 + 0x04f8 0xc0 0x0 + 0x0460 0xb0 0x0 + 0x0464 0xc0 0x0 + 0x05bc 0x0c 0x0 + 0x04dc 0x0d 0x0 + 0x0408 0x0c 0x0 + 0x0414 0x03 0x0 + 0x09a4 0x01 0x0 + 0x0c90 0x00 0x0 + 0x0c40 0x01 0x0 + 0x0c48 0x01 0x0 + 0x0c50 0x00 0x0 + 0x0cbc 0x00 0x0 + 0x0ce0 0x58 0x0 + 0x0048 0x90 0x0 + 0x0c1c 0xc1 0x0 + 0x0988 0x88 0x0 + 0x0998 0x0b 0x0 + 0x08dc 0x0d 0x0 + 0x09ec 0x01 0x0 + 0x0800 0x00 0x0 + 0x0844 0x03 0x0>; }; &pcie1 { @@ -91,6 +199,142 @@ <0x40100000 0x100000>, <0x40200000 0x100000>, <0x40300000 0x1fd00000>; + + qcom,phy-sequence = <0x0a40 0x03 0x0 + 0x0010 0x00 0x0 + 0x001c 0x31 0x0 + 0x0020 0x01 0x0 + 0x0024 0xde 0x0 + 0x0028 0x07 0x0 + 0x0030 0x4c 0x0 + 0x0034 0x06 0x0 + 0x0048 0x90 0x0 + 0x0058 0x0f 0x0 + 0x0074 0x06 0x0 + 0x0078 0x06 0x0 + 0x007c 0x16 0x0 + 0x0080 0x16 0x0 + 0x0084 0x36 0x0 + 0x0088 0x36 0x0 + 0x0094 0x08 0x0 + 0x00a4 0x42 0x0 + 0x00ac 0x0a 0x0 + 0x00b0 0x1a 0x0 + 0x00b4 0x14 0x0 + 0x00b8 0x34 0x0 + 0x00bc 0x82 0x0 + 0x00c4 0x68 0x0 + 0x00cc 0x55 0x0 + 0x00d0 0x55 0x0 + 0x00d4 0x03 0x0 + 0x00d8 0xab 0x0 + 0x00dc 0xaa 0x0 + 0x00e0 0x02 0x0 + 0x010c 0x02 0x0 + 0x0110 0x24 0x0 + 0x0118 0xb4 0x0 + 0x011c 0x03 0x0 + 0x0154 0x34 0x0 + 0x0158 0x01 0x0 + 0x016c 0x08 0x0 + 0x01ac 0xb9 0x0 + 0x01b0 0x1e 0x0 + 0x01b4 0x94 0x0 + 0x01b8 0x18 0x0 + 0x01bc 0x11 0x0 + 0x023c 0x11 0x0 + 0x0284 0x35 0x0 + 0x029c 0x12 0x0 + 0x0304 0x02 0x0 + 0x0408 0x0c 0x0 + 0x0414 0x03 0x0 + 0x0434 0x7f 0x0 + 0x0444 0x70 0x0 + 0x0460 0xb0 0x0 + 0x0464 0xc0 0x0 + 0x04d4 0x54 0x0 + 0x04d8 0x37 0x0 + 0x04dc 0x0d 0x0 + 0x04e8 0x07 0x0 + 0x04ec 0x6e 0x0 + 0x04f0 0x6e 0x0 + 0x04f4 0x4a 0x0 + 0x04f8 0xc0 0x0 + 0x04fc 0x00 
0x0 + 0x0510 0x17 0x0 + 0x0518 0x1c 0x0 + 0x051c 0x03 0x0 + 0x0524 0x1e 0x0 + 0x0570 0xff 0x0 + 0x0574 0xff 0x0 + 0x0578 0xff 0x0 + 0x057c 0x7f 0x0 + 0x0580 0x66 0x0 + 0x0584 0x24 0x0 + 0x0588 0xe4 0x0 + 0x058c 0xec 0x0 + 0x0590 0x3b 0x0 + 0x0594 0x36 0x0 + 0x0598 0xd4 0x0 + 0x059c 0x54 0x0 + 0x05a0 0xdb 0x0 + 0x05a4 0x3b 0x0 + 0x05a8 0x31 0x0 + 0x05bc 0x0c 0x0 + 0x063c 0x11 0x0 + 0x0684 0x35 0x0 + 0x069c 0x12 0x0 + 0x0704 0x20 0x0 + 0x0808 0x0c 0x0 + 0x0814 0x03 0x0 + 0x0834 0x7f 0x0 + 0x0844 0x70 0x0 + 0x0860 0xb0 0x0 + 0x0864 0xc0 0x0 + 0x08d4 0x54 0x0 + 0x08d8 0x37 0x0 + 0x08dc 0x0d 0x0 + 0x08e8 0x07 0x0 + 0x08ec 0x6e 0x0 + 0x08f0 0x6e 0x0 + 0x08f4 0x4a 0x0 + 0x08f8 0xc0 0x0 + 0x08fc 0x00 0x0 + 0x0910 0x17 0x0 + 0x0918 0x1c 0x0 + 0x091c 0x03 0x0 + 0x0924 0x1e 0x0 + 0x0970 0xff 0x0 + 0x0974 0xff 0x0 + 0x0978 0xff 0x0 + 0x097c 0x7f 0x0 + 0x0980 0x66 0x0 + 0x0984 0x24 0x0 + 0x0988 0xe4 0x0 + 0x098c 0xec 0x0 + 0x0990 0x3b 0x0 + 0x0994 0x36 0x0 + 0x0998 0xd4 0x0 + 0x099c 0x54 0x0 + 0x09a0 0xdb 0x0 + 0x09a4 0x3b 0x0 + 0x09a8 0x31 0x0 + 0x09bc 0x0c 0x0 + 0x0adc 0x05 0x0 + 0x0b88 0x88 0x0 + 0x0b98 0x0b 0x0 + 0x0ba4 0x01 0x0 + 0x0bec 0x01 0x0 + 0x0e0c 0x0d 0x0 + 0x0e14 0x07 0x0 + 0x0e1c 0xc1 0x0 + 0x0e40 0x01 0x0 + 0x0e48 0x01 0x0 + 0x0e90 0x00 0x0 + 0x0ebc 0x00 0x0 + 0x0ee0 0x58 0x0 + 0x0a00 0x00 0x0 + 0x0a44 0x03 0x0>; }; &msm_vidc { @@ -308,29 +552,8 @@ >; }; }; -&gpu_opp_table { - compatible = "operating-points-v2"; - - opp-585000000 { - opp-hz = /bits/ 64 <585000000>; - opp-microvolt = ; - }; - - opp-427000000 { - opp-hz = /bits/ 64 <427000000>; - opp-microvolt = ; - }; - opp-345000000 { - opp-hz = /bits/ 64 <345000000>; - opp-microvolt = ; - }; - - opp-257000000 { - opp-hz = /bits/ 64 <257000000>; - opp-microvolt = ; - }; -}; +#include "sm8150-gpu-v2.dtsi" /* GPU overrides */ &msm_gpu { @@ -360,6 +583,10 @@ <26 512 0 7211000>, // 10 bus=1804 <26 512 0 8363000>; // 11 bus=2092 + qcom,initial-pwrlevel = <3>; + + operating-points-v2 = <&gpu_opp_table_v2>; + 
qcom,gpu-pwrlevels { #address-cells = <1>; #size-cells = <0>; @@ -371,7 +598,7 @@ qcom,gpu-freq = <585000000>; qcom,bus-freq = <7>; qcom,bus-min = <6>; - qcom,bus-max = <8>; + qcom,bus-max = <11>; }; qcom,gpu-pwrlevel@1 { @@ -379,7 +606,7 @@ qcom,gpu-freq = <427000000>; qcom,bus-freq = <6>; qcom,bus-min = <5>; - qcom,bus-max = <7>; + qcom,bus-max = <9>; }; qcom,gpu-pwrlevel@2 { @@ -387,7 +614,7 @@ qcom,gpu-freq = <345000000>; qcom,bus-freq = <3>; qcom,bus-min = <3>; - qcom,bus-max = <5>; + qcom,bus-max = <8>; }; qcom,gpu-pwrlevel@3 { @@ -395,7 +622,7 @@ qcom,gpu-freq = <257000000>; qcom,bus-freq = <2>; qcom,bus-min = <1>; - qcom,bus-max = <3>; + qcom,bus-max = <8>; }; qcom,gpu-pwrlevel@4 { @@ -405,6 +632,8 @@ qcom,bus-min = <0>; qcom,bus-max = <0>; }; + /delete-node/ qcom,gpu-pwrlevel@5; + /delete-node/ qcom,gpu-pwrlevel@6; }; qcom,l3-pwrlevels { @@ -437,7 +666,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "qcom,npu-pwrlevels"; - initial-pwrlevel = <3>; + initial-pwrlevel = <5>; qcom,npu-pwrlevel@0 { reg = <0>; clk-freq = <300000000 @@ -509,12 +738,12 @@ }; qcom,npu-pwrlevel@3 { reg = <3>; - clk-freq = <773000000 + clk-freq = <652000000 19200000 300000000 19200000 19200000 - 773000000 + 652000000 403000000 75000000 19200000 @@ -523,7 +752,7 @@ 150000000 300000000 19200000 - 773000000 + 652000000 19200000 0 0 @@ -532,6 +761,29 @@ }; qcom,npu-pwrlevel@4 { reg = <4>; + clk-freq = <811000000 + 19200000 + 400000000 + 19200000 + 19200000 + 811000000 + 533000000 + 75000000 + 19200000 + 300000000 + 400000000 + 150000000 + 400000000 + 19200000 + 811000000 + 19200000 + 0 + 0 + 0 + 0>; + }; + qcom,npu-pwrlevel@5 { + reg = <5>; clk-freq = <908000000 19200000 400000000 @@ -553,56 +805,56 @@ 0 0>; }; - /delete-node/ qcom,npu-pwrlevel@5; }; }; -&llcc_bw_opp_table { - compatible = "operating-points-v2"; - BW_OPP_ENTRY( 150, 16); /* 2288 MB/s */ - BW_OPP_ENTRY( 300, 16); /* 4577 MB/s */ - BW_OPP_ENTRY( 466, 16); /* 7110 MB/s */ - BW_OPP_ENTRY( 600, 16); /* 9155 MB/s */ 
- BW_OPP_ENTRY( 806, 16); /* 12298 MB/s */ - BW_OPP_ENTRY( 933, 16); /* 14236 MB/s */ - BW_OPP_ENTRY(1000, 16); /* 15258 MB/s */ -}; +&soc { + llcc_bw_opp_table: llcc-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 150, 16); /* 2288 MB/s */ + BW_OPP_ENTRY( 300, 16); /* 4577 MB/s */ + BW_OPP_ENTRY( 466, 16); /* 7110 MB/s */ + BW_OPP_ENTRY( 600, 16); /* 9155 MB/s */ + BW_OPP_ENTRY( 806, 16); /* 12298 MB/s */ + BW_OPP_ENTRY( 933, 16); /* 14236 MB/s */ + BW_OPP_ENTRY(1000, 16); /* 15258 MB/s */ + }; -&ddr_bw_opp_table { - compatible = "operating-points-v2"; - BW_OPP_ENTRY( 200, 4); /* 762 MB/s */ - BW_OPP_ENTRY( 300, 4); /* 1144 MB/s */ - BW_OPP_ENTRY( 451, 4); /* 1720 MB/s */ - BW_OPP_ENTRY( 547, 4); /* 2086 MB/s */ - BW_OPP_ENTRY( 681, 4); /* 2597 MB/s */ - BW_OPP_ENTRY( 768, 4); /* 2929 MB/s */ - BW_OPP_ENTRY(1017, 4); /* 3879 MB/s */ - BW_OPP_ENTRY(1353, 4); /* 5161 MB/s */ - BW_OPP_ENTRY(1555, 4); /* 5931 MB/s */ - BW_OPP_ENTRY(1804, 4); /* 6881 MB/s */ - BW_OPP_ENTRY(2092, 4); /* 7980 MB/s */ -}; + ddr_bw_opp_table: ddr-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 200, 4); /* 762 MB/s */ + BW_OPP_ENTRY( 300, 4); /* 1144 MB/s */ + BW_OPP_ENTRY( 451, 4); /* 1720 MB/s */ + BW_OPP_ENTRY( 547, 4); /* 2086 MB/s */ + BW_OPP_ENTRY( 681, 4); /* 2597 MB/s */ + BW_OPP_ENTRY( 768, 4); /* 2929 MB/s */ + BW_OPP_ENTRY(1017, 4); /* 3879 MB/s */ + BW_OPP_ENTRY(1353, 4); /* 5161 MB/s */ + BW_OPP_ENTRY(1555, 4); /* 5931 MB/s */ + BW_OPP_ENTRY(1804, 4); /* 6881 MB/s */ + BW_OPP_ENTRY(2092, 4); /* 7980 MB/s */ + }; -&suspendable_ddr_bw_opp_table { - compatible = "operating-points-v2"; - BW_OPP_ENTRY( 0, 4); /* 0 MB/s */ - BW_OPP_ENTRY( 200, 4); /* 762 MB/s */ - BW_OPP_ENTRY( 300, 4); /* 1144 MB/s */ - BW_OPP_ENTRY( 451, 4); /* 1720 MB/s */ - BW_OPP_ENTRY( 547, 4); /* 2086 MB/s */ - BW_OPP_ENTRY( 681, 4); /* 2597 MB/s */ - BW_OPP_ENTRY( 768, 4); /* 2929 MB/s */ - BW_OPP_ENTRY(1017, 4); /* 3879 MB/s */ - BW_OPP_ENTRY(1353, 4); /* 5161 MB/s 
*/ - BW_OPP_ENTRY(1555, 4); /* 5931 MB/s */ - BW_OPP_ENTRY(1804, 4); /* 6881 MB/s */ - BW_OPP_ENTRY(2092, 4); /* 7980 MB/s */ + suspendable_ddr_bw_opp_table: suspendable-ddr-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 0, 4); /* 0 MB/s */ + BW_OPP_ENTRY( 200, 4); /* 762 MB/s */ + BW_OPP_ENTRY( 300, 4); /* 1144 MB/s */ + BW_OPP_ENTRY( 451, 4); /* 1720 MB/s */ + BW_OPP_ENTRY( 547, 4); /* 2086 MB/s */ + BW_OPP_ENTRY( 681, 4); /* 2597 MB/s */ + BW_OPP_ENTRY( 768, 4); /* 2929 MB/s */ + BW_OPP_ENTRY(1017, 4); /* 3879 MB/s */ + BW_OPP_ENTRY(1353, 4); /* 5161 MB/s */ + BW_OPP_ENTRY(1555, 4); /* 5931 MB/s */ + BW_OPP_ENTRY(1804, 4); /* 6881 MB/s */ + BW_OPP_ENTRY(2092, 4); /* 7980 MB/s */ + }; }; - &cpu4_computemon { qcom,core-dev-table = < 1920000 MHZ_TO_MBPS( 200, 4) >, - < 2841600 MHZ_TO_MBPS(1017, 4) >, + < 2793600 MHZ_TO_MBPS(1017, 4) >, < 3000000 MHZ_TO_MBPS(2092, 4) >; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi b/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi index 026725d1853af883c2c9d395d1b9ee792b609ca8..47d8127b3e81d613b5ed4410936d28ddd42ed797 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi @@ -80,7 +80,7 @@ qcom,bus-master = ; qcom,bus-slave = ; qcom,bus-governor = "msm-vidc-llcc"; - qcom,bus-range-kbps = <1000 1326000>; + qcom,bus-range-kbps = <1000 6533000>; }; /* MMUs */ diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 7d2bbd0bfffe59e2fa51af088e67805920e71543..0b077bfa980dd286f8258119ba7b98bc02a41e91 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -702,7 +703,7 @@ }; disp_rdump_memory: disp_rdump_region@9c000000 { - reg = <0x0 0x9c000000 0x0 0x00800000>; + reg = <0x0 0x9c000000 0x0 0x02400000>; label = "disp_rdump_region"; }; @@ -711,7 +712,7 @@ alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; 
reusable; alignment = <0x0 0x400000>; - size = <0x0 0xc00000>; + size = <0x0 0x1000000>; }; cdsp_mem: cdsp_region { @@ -758,7 +759,7 @@ alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; reusable; alignment = <0x0 0x400000>; - size = <0x0 0x2000000>; + size = <0x0 0x2800000>; linux,cma-default; }; }; @@ -779,6 +780,8 @@ #include "sm8150-camera.dtsi" +#include "msm-qvr-external.dtsi" + &soc { #address-cells = <1>; #size-cells = <1>; @@ -1463,6 +1466,14 @@ #reset-cells = <1>; }; + clock_scc: qcom,scc@2b10000 { + compatible = "qcom,scc-sm8150"; + reg = <0x2b10000 0x30000>; + vdd_scc_cx-supply = <&pm8150_l8_level>; + #clock-cells = <1>; + status = "disabled"; + }; + cpucc_debug: syscon@182a0018 { compatible = "syscon"; reg = <0x182a0018 0x4>; @@ -2455,7 +2466,7 @@ compatible = "qcom,msm-fastrpc-compute-cb"; label = "sdsprpc-smd"; iommus = <&apps_smmu 0x5a3 0x0>; - shared-cb = <2>; + shared-cb = <4>; dma-coherent; }; }; @@ -2885,6 +2896,9 @@ qcom,smp2p_sleepstate { compatible = "qcom,smp2p-sleepstate"; qcom,smem-states = <&sleepstate_smp2p_out 0>; + interrupt-parent = <&sleepstate_smp2p_in>; + interrupts = <0 0>; + interrupt-names = "smp2p-sleepstate-in"; }; system_pm { @@ -2969,7 +2983,7 @@ compatible = "qcom,qcedev,context-bank"; label = "secure_context"; iommus = <&apps_smmu 0x513 0>; - virtual-addr = <0x60200000>; + virtual-addr = <0xa0000000>; virtual-size = <0x40000000>; qcom,secure-context-bank; }; @@ -3013,52 +3027,52 @@ compatible = "qcom,mem-dump"; memory-region = <&dump_mem>; - rpmh_dump { + rpmh { qcom,dump-size = <0x2000000>; qcom,dump-id = <0xec>; }; - rpm_sw_dump { + rpm_sw { qcom,dump-size = <0x28000>; qcom,dump-id = <0xea>; }; - pmic_dump { + pmic { qcom,dump-size = <0x10000>; qcom,dump-id = <0xe4>; }; - fcm_dump { + fcm { qcom,dump-size = <0x8400>; qcom,dump-id = <0xee>; }; - tmc_etf_dump { + tmc_etf { qcom,dump-size = <0x10000>; qcom,dump-id = <0xf0>; }; - tmc_etf_swao_dump { + etf_swao { qcom,dump-size = <0x8400>; qcom,dump-id = <0xf1>; }; - 
tmc_etr_reg_dump { + etr_reg { qcom,dump-size = <0x1000>; qcom,dump-id = <0x100>; }; - tmc_etf_reg_dump { + etf_reg { qcom,dump-size = <0x1000>; qcom,dump-id = <0x101>; }; - tmc_etf_swao_reg_dump { + etfswao_reg { qcom,dump-size = <0x1000>; qcom,dump-id = <0x102>; }; - misc_data_dump { + misc_data { qcom,dump-size = <0x1000>; qcom,dump-id = <0xe8>; }; @@ -3588,6 +3602,106 @@ mbox-names = "aop"; }; + qcom,cnss-qca6390@a0000000 { + compatible = "qcom,cnss-qca6390"; + reg = <0xa0000000 0x10000000>, + <0xb0000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + pinctrl-names = "disabled"; + pinctrl-0 = <>; + qcom,wlan-rc-num = <0>; + qcom,wlan-ramdump-dynamic = <0x400000>; + + mhi,max-channels = <30>; + mhi,timeout = <1000>; + + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@20 { + reg = <20>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-start; + }; + + mhi_chan@21 { + reg = <21>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-queue; + mhi,auto-start; + }; + + mhi_event@0 { + 
mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + }; + icnss: qcom,icnss@18800000 { compatible = "qcom,icnss"; reg = <0x18800000 0x800000>, @@ -3635,9 +3749,8 @@ qcom,use-ext-supply; vddio-supply= <&pm8150_s5>; qcom,use-ext-clocks; - clocks = <&clock_rpmh RPMH_RF_CLK3>, - <&clock_rpmh RPMH_RF_CLK3_A>; - clock-names = "rf_clk3_clk", "rf_clk3_pin_clk"; + clocks = <&clock_rpmh RPMH_RF_CLK3>; + clock-names = "rf_clk3_clk"; qcom,smmu-support; qcom,smmu-mapping = <0x20000000 0xe0000000>; qcom,smmu-s1-en; @@ -3928,6 +4041,7 @@ status = "ok"; }; #include "sm8150-pinctrl.dtsi" +#include "sm8150-slpi-pinctrl.dtsi" #include "sm8150-regulator.dtsi" #include "sm8150-ion.dtsi" #include "sm8150-bus.dtsi" diff --git a/arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi b/arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..6340528838bfa59ad661f532621cbd23b6d621d0 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi @@ -0,0 +1,50 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&mdss_spi_display { + spi_st7789v2_qvga_cmd: qcom,mdss_spi_st7789v2_qvga_cmd { + qcom,mdss-spi-panel-name = + "st7789v2 qvga command mode spi panel"; + qcom,mdss-spi-panel-destination = "display_1"; + qcom,mdss-spi-panel-controller = <&mdss_spi_panel>; + qcom,mdss-spi-panel-framerate = <30>; + qcom,mdss-spi-panel-te-per-vsync = <2>; + qcom,mdss-spi-panel-width = <240>; + qcom,mdss-spi-panel-height = <240>; + qcom,mdss-spi-bpp = <16>; + qcom,mdss-spi-on-command = [ + 96 01 11 + 00 02 36 00 + 00 02 3A 05 + 00 02 35 00 + 00 06 B2 0C 0C 00 33 33 + 00 02 B7 75 + 00 02 BB 3D + 00 02 C2 01 + 00 02 C3 19 + 00 02 04 20 + 00 02 C6 0F + 00 03 D0 A4 A1 + 00 0F E0 70 04 08 09 09 05 2A 33 + 41 07 13 13 29 2F + 00 0F E1 70 03 09 0A 09 06 2B 34 + 41 07 12 14 28 2E + 00 01 21 + 00 01 29 + 00 05 2A 00 00 00 EF + 00 05 2B 00 00 00 EF + 00 01 2C]; + qcom,mdss-spi-off-command = [20 01 28 + 20 01 10]; + qcom,mdss-spi-reset-sequence = <1 20>, <0 1>, <1 20>; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index 9eb11a8d9edacf93227b93a37a10e9e1e010fdd9..26a978616071da1aab135ff66ae3cd31d6e1ed0f 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -93,20 +93,12 @@ regulator-always-on; }; - rsnd_ak4613: sound { - compatible = "simple-audio-card"; + sound_card: sound { + compatible = "audio-graph-card"; - simple-audio-card,format = "left_j"; - simple-audio-card,bitclock-master = <&sndcpu>; - simple-audio-card,frame-master = <&sndcpu>; + label = "rcar-sound"; - sndcpu: simple-audio-card,cpu { - sound-dai = <&rcar_sound>; - }; - - sndcodec: simple-audio-card,codec { - sound-dai = <&ak4613>; - }; + dais = <&rsnd_port0>; }; vbus0_usb2: regulator-vbus0-usb2 { @@ -320,6 +312,12 @@ asahi-kasei,out4-single-end; asahi-kasei,out5-single-end; asahi-kasei,out6-single-end; + + port { + ak4613_endpoint: endpoint { + remote-endpoint = <&rsnd_endpoint0>; + }; + 
}; }; cs2000: clk_multiplier@4f { @@ -538,10 +536,18 @@ <&audio_clk_c>, <&cpg CPG_CORE CPG_AUDIO_CLK_I>; - rcar_sound,dai { - dai0 { - playback = <&ssi0 &src0 &dvc0>; - capture = <&ssi1 &src1 &dvc1>; + ports { + rsnd_port0: port@0 { + rsnd_endpoint0: endpoint { + remote-endpoint = <&ak4613_endpoint>; + + dai-format = "left_j"; + bitclock-master = <&rsnd_endpoint0>; + frame-master = <&rsnd_endpoint0>; + + playback = <&ssi0 &src0 &dvc0>; + capture = <&ssi1 &src1 &dvc1>; + }; }; }; }; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 36e21112113e2db2c6074cd265cd3ae69fc6216d..14f170fa433c805943547cedff2b3ca6ee1f8e97 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -304,6 +304,8 @@ CONFIG_GPIO_XGENE_SB=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y CONFIG_GPIO_MAX77620=y +CONFIG_POWER_AVS=y +CONFIG_ROCKCHIP_IODOMAIN=y CONFIG_POWER_RESET_MSM=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y diff --git a/arch/arm64/configs/vendor/qcs405-perf_defconfig b/arch/arm64/configs/vendor/qcs405-perf_defconfig index 5f8f1d9159bd06fe5bb5bd22b1edf1ba9ff1d578..77719a3dc9d41ce1ea505eef914fc0a705f18ae7 100644 --- a/arch/arm64/configs/vendor/qcs405-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs405-perf_defconfig @@ -26,6 +26,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -223,7 +224,7 @@ CONFIG_DM_VERITY_FEC=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y -CONFIG_KS8851=y +CONFIG_AT803X_PHY=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -268,6 +269,7 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_QCS405=y CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig 
index 405ebbad24bff92c3ba5c89df0732b0f778a20cb..08c4fd2a25a96decaf8ab7975d5018495826a1a3 100644 --- a/arch/arm64/configs/vendor/qcs405_defconfig +++ b/arch/arm64/configs/vendor/qcs405_defconfig @@ -27,6 +27,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -279,6 +280,7 @@ CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_QCS405=y CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y @@ -322,6 +324,7 @@ CONFIG_FB=y CONFIG_FB_MSM=y CONFIG_FB_MSM_MDSS=y CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_SPI_PANEL=y CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y CONFIG_BACKLIGHT_LCD_SUPPORT=y diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index 40413a4c7c398c79ee547d9968a79f284a5bfad0..b902162ba3d4102e0a03ccf7c1c7811c33fb872c 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -472,6 +472,8 @@ CONFIG_QCOM_GENI_SE=y CONFIG_QPNP_REVID=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_EP_PCIE=y +CONFIG_EP_PCIE_HW=y CONFIG_USB_BAM=y CONFIG_IPA3=y CONFIG_IPA_WDI_UNIFIED_API=y @@ -484,6 +486,7 @@ CONFIG_SPMI_PMIC_CLKDIV=y CONFIG_MSM_CLK_AOP_QMP=y CONFIG_MSM_GCC_SM8150=y CONFIG_MSM_NPUCC_SM8150=y +CONFIG_MSM_SCC_SM8150=y CONFIG_MSM_VIDEOCC_SM8150=y CONFIG_MSM_CAMCC_SM8150=y CONFIG_CLOCK_CPU_OSM=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index ddc6887dfbbcac9201b6ceaac51564601f13c1f0..de9e0ec721004c6949d49f3aa001150480941c22 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -495,6 +495,8 @@ CONFIG_QCOM_GENI_SE=y CONFIG_QPNP_REVID=y CONFIG_SPS=y 
CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_EP_PCIE=y +CONFIG_EP_PCIE_HW=y CONFIG_USB_BAM=y CONFIG_IPA3=y CONFIG_IPA_WDI_UNIFIED_API=y @@ -507,6 +509,7 @@ CONFIG_SPMI_PMIC_CLKDIV=y CONFIG_MSM_CLK_AOP_QMP=y CONFIG_MSM_GCC_SM8150=y CONFIG_MSM_NPUCC_SM8150=y +CONFIG_MSM_SCC_SM8150=y CONFIG_MSM_VIDEOCC_SM8150=y CONFIG_MSM_CAMCC_SM8150=y CONFIG_CLOCK_CPU_OSM=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index e938ff5a29c00f0c62d53fa594068f8758a90f73..5b996869e990eb9a31bb3791423349c72606b009 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -352,6 +352,7 @@ CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y CONFIG_REGULATOR_QPNP_LCDB=y CONFIG_REGULATOR_REFGEN=y CONFIG_REGULATOR_RPMH=y @@ -403,6 +404,8 @@ CONFIG_USB_DWC3=y CONFIG_USB_DWC3_MSM=y CONFIG_USB_ISP1760=y CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_USB_LINK_LAYER_TEST=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_QCOM_EMU_PHY=y CONFIG_USB_MSM_SSPHY_QMP=y @@ -490,6 +493,8 @@ CONFIG_MSM_VIDEOCC_SM6150=y CONFIG_MSM_DEBUGCC_SM6150=y CONFIG_MSM_CAMCC_SM6150=y CONFIG_MSM_DISPCC_SM6150=y +CONFIG_MSM_CAMCC_SDMMAGPIE=y +CONFIG_MSM_DISPCC_SDMMAGPIE=y CONFIG_MSM_GCC_SDMMAGPIE=y CONFIG_MSM_VIDEOCC_SDMMAGPIE=y CONFIG_MSM_NPUCC_SDMMAGPIE=y @@ -512,6 +517,7 @@ CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM6150_LLCC=y CONFIG_QCOM_SDMMAGPIE_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y @@ -525,12 +531,15 @@ CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y CONFIG_MSM_SYSMON_QMI_COMM=y CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y +CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y CONFIG_MSM_BOOT_STATS=y CONFIG_QCOM_DCC_V2=y CONFIG_QCOM_SECURE_BUFFER=y CONFIG_ICNSS=y CONFIG_ICNSS_QMI=y 
CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_BUS_CONFIG_RPMH=y CONFIG_QCOM_COMMAND_DB=y diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig index e088e015df3a1edff54b32b0980b475147743525..2f2fefd44afc727a25accb7d0268c54283702194 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -362,6 +362,7 @@ CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y CONFIG_REGULATOR_QPNP_LCDB=y CONFIG_REGULATOR_REFGEN=y CONFIG_REGULATOR_RPMH=y @@ -415,6 +416,8 @@ CONFIG_USB_DWC3=y CONFIG_USB_DWC3_MSM=y CONFIG_USB_ISP1760=y CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_USB_LINK_LAYER_TEST=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_QCOM_EMU_PHY=y CONFIG_USB_MSM_SSPHY_QMP=y @@ -507,6 +510,8 @@ CONFIG_MSM_VIDEOCC_SM6150=y CONFIG_MSM_DEBUGCC_SM6150=y CONFIG_MSM_CAMCC_SM6150=y CONFIG_MSM_DISPCC_SM6150=y +CONFIG_MSM_CAMCC_SDMMAGPIE=y +CONFIG_MSM_DISPCC_SDMMAGPIE=y CONFIG_MSM_GCC_SDMMAGPIE=y CONFIG_MSM_VIDEOCC_SDMMAGPIE=y CONFIG_MSM_NPUCC_SDMMAGPIE=y @@ -529,6 +534,7 @@ CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM6150_LLCC=y CONFIG_QCOM_SDMMAGPIE_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y @@ -543,6 +549,8 @@ CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y CONFIG_MSM_SYSMON_QMI_COMM=y CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y +CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y CONFIG_QCOM_DCC_V2=y @@ -552,6 +560,7 @@ CONFIG_ICNSS=y CONFIG_ICNSS_DEBUG=y CONFIG_ICNSS_QMI=y CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_BUS_CONFIG_RPMH=y CONFIG_QCOM_COMMAND_DB=y @@ -633,6 +642,7 @@ CONFIG_DEBUG_PAGEALLOC=y CONFIG_SLUB_DEBUG_PANIC_ON=y 
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS_FREE=y CONFIG_DEBUG_OBJECTS_TIMERS=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index 9598ff49e9cd385832a31d40dcc452709dd0dda3..3daa53ecab0db3113841395433f2819c6f29846c 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -70,6 +70,7 @@ CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_CMA=y CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y CONFIG_SECCOMP=y # CONFIG_UNMAP_KERNEL_AT_EL0 is not set # CONFIG_HARDEN_BRANCH_PREDICTOR is not set @@ -273,6 +274,8 @@ CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -358,6 +361,7 @@ CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_AOP_REG_COOLING_DEVICE=y CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_QTI_QMI_SENSOR=y CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_QTI_BCL_PMIC5=y CONFIG_QTI_BCL_SOC_DRIVER=y @@ -412,6 +416,7 @@ CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y CONFIG_HID_PLANTRONICS=y +CONFIG_HID_QVR=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y @@ -649,6 +654,7 @@ CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y CONFIG_PFK=y +CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index b954cd674178de99af651e80a625d4787ff0de6f..9b18221f1bf2c5b147677845df9e75e7a98a6243 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -76,6 +76,7 @@ CONFIG_CLEANCACHE=y 
CONFIG_CMA=y CONFIG_CMA_DEBUGFS=y CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y CONFIG_SECCOMP=y # CONFIG_UNMAP_KERNEL_AT_EL0 is not set # CONFIG_HARDEN_BRANCH_PREDICTOR is not set @@ -286,6 +287,8 @@ CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -325,6 +328,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_SERIO_SERPORT is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set CONFIG_SERIAL_MSM_GENI=y CONFIG_SERIAL_MSM_GENI_CONSOLE=y CONFIG_SERIAL_DEV_BUS=y @@ -372,6 +376,7 @@ CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_AOP_REG_COOLING_DEVICE=y CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_QTI_QMI_SENSOR=y CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_QTI_BCL_PMIC5=y CONFIG_QTI_BCL_SOC_DRIVER=y @@ -428,6 +433,7 @@ CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y CONFIG_HID_PLANTRONICS=y +CONFIG_HID_QVR=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y @@ -668,6 +674,7 @@ CONFIG_DEBUG_PAGEALLOC=y CONFIG_SLUB_DEBUG_PANIC_ON=y CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS_FREE=y CONFIG_DEBUG_OBJECTS_TIMERS=y @@ -729,6 +736,7 @@ CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y CONFIG_PFK=y +CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 4a85c6952a221ec8e02df17b39b09ddf8f9c13d4..a91933b1e2e62ba235ef05ddf8f9d34dbb6bcf49 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -5,6 +5,8 @@ #include #include +#define ARM64_CB_PATCH ARM64_NCAPS + #ifndef __ASSEMBLY__ #include @@ -12,6 
+14,8 @@ #include #include +extern int alternatives_applied; + struct alt_instr { s32 orig_offset; /* offset to original instruction */ s32 alt_offset; /* offset to replacement instruction */ @@ -20,12 +24,19 @@ struct alt_instr { u8 alt_len; /* size of new instruction(s), <= orig_len */ }; +typedef void (*alternative_cb_t)(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); + void __init apply_alternatives_all(void); void apply_alternatives(void *start, size_t length); -#define ALTINSTR_ENTRY(feature) \ +#define ALTINSTR_ENTRY(feature,cb) \ " .word 661b - .\n" /* label */ \ + " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ + " .else\n" \ + " .word " __stringify(cb) "- .\n" /* callback */ \ + " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -43,15 +54,18 @@ void apply_alternatives(void *start, size_t length); * but most assemblers die if insn1 or insn2 have a .inst. This should * be fixed in a binutils release posterior to 2.25.51.0.2 (anything * containing commit 4e4d08cf7399b606 or c1baaddf8861). + * + * Alternatives with callbacks do not generate replacement instructions. */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature) \ + ALTINSTR_ENTRY(feature,cb) \ ".popsection\n" \ + " .if " __stringify(cb) " == 0\n" \ ".pushsection .altinstr_replacement, \"a\"\n" \ "663:\n\t" \ newinstr "\n" \ @@ -59,11 +73,17 @@ void apply_alternatives(void *start, size_t length); ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ ".org . 
- (662b-661b) + (664b-663b)\n" \ + ".else\n\t" \ + "663:\n\t" \ + "664:\n\t" \ + ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) +#define ALTERNATIVE_CB(oldinstr, cb) \ + __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) #else #include @@ -130,6 +150,14 @@ void apply_alternatives(void *start, size_t length); 661: .endm +.macro alternative_cb cb + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0 + .popsection +661: +.endm + /* * Provide the other half of the alternative code sequence. */ @@ -155,6 +183,13 @@ void apply_alternatives(void *start, size_t length); .org . - (662b-661b) + (664b-663b) .endm +/* + * Callback-based alternative epilogue + */ +.macro alternative_cb_end +662: +.endm + /* * Provides a trivial alternative or default sequence consisting solely * of NOPs. 
The number of NOPs is chosen automatically to match the diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 1851e217e2c9070cc7d4f05ffc0bb948d58994de..fe2a97ab7b92d2e3c973295a9bd87eebe6d613e7 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -272,7 +272,11 @@ lr .req x30 // link register #else adr_l \dst, \sym #endif +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs \tmp, tpidr_el1 +alternative_else + mrs \tmp, tpidr_el2 +alternative_endif add \dst, \dst, \tmp .endm @@ -283,7 +287,11 @@ lr .req x30 // link register */ .macro ldr_this_cpu dst, sym, tmp adr_l \dst, \sym +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs \tmp, tpidr_el1 +alternative_else + mrs \tmp, tpidr_el2 +alternative_endif ldr \dst, [\dst, \tmp] .endm diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index ae852add053d835cdeb98ede5458419dd2958c6a..0f2e1ab5e16669d3cc7dd27170243f5f79424a6b 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -229,7 +229,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ unsigned long tmp; \ \ asm volatile( \ - " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ + " sevl\n" \ + " wfe\n" \ + " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ " cbnz %" #w "[tmp], 1f\n" \ " wfe\n" \ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index a7ef5a0519115f9325bd33e5cbf46df8fe2a899f..1a6d02350fc68955924ff6c3a728403acf50447b 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -33,6 +33,10 @@ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0 +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT) + +/* Translate a kernel address of @sym into its equivalent linear mapping */ #define kvm_ksym_ref(sym) \ ({ \ void 
*val = &sym; \ @@ -68,6 +72,43 @@ extern u32 __init_stage2_translation(void); extern void __qcom_hyp_sanitize_btac_predictors(void); +/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ +#define __hyp_this_cpu_ptr(sym) \ + ({ \ + void *__ptr = hyp_symbol_addr(sym); \ + __ptr += read_sysreg(tpidr_el2); \ + (typeof(&sym))__ptr; \ + }) + +#define __hyp_this_cpu_read(sym) \ + ({ \ + *__hyp_this_cpu_ptr(sym); \ + }) + +#else /* __ASSEMBLY__ */ + +.macro hyp_adr_this_cpu reg, sym, tmp + adr_l \reg, \sym + mrs \tmp, tpidr_el2 + add \reg, \reg, \tmp +.endm + +.macro hyp_ldr_this_cpu reg, sym, tmp + adr_l \reg, \sym + mrs \tmp, tpidr_el2 + ldr \reg, [\reg, \tmp] +.endm + +.macro get_host_ctxt reg, tmp + hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp +.endm + +.macro get_vcpu_ptr vcpu, ctxt + get_host_ctxt \ctxt, \vcpu + ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] + kern_hyp_va \vcpu +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8abec9f7f430cde43759a92c4253a1c26a10fc2f..b01ad3489bd8dc9567d1c159833ea1069b879adc 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -194,6 +194,8 @@ struct kvm_cpu_context { u64 sys_regs[NR_SYS_REGS]; u32 copro[NR_COPRO_REGS]; }; + + struct kvm_vcpu *__hyp_running_vcpu; }; typedef struct kvm_cpu_context kvm_cpu_context_t; @@ -208,6 +210,9 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; + /* State of various workarounds, see kvm_asm.h for bit assignment */ + u64 workaround_flags; + /* Guest debug state */ u64 debug_flags; @@ -348,10 +353,15 @@ int kvm_perf_teardown(void); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); +void __kvm_set_tpidr_el2(u64 tpidr_el2); +DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); + static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, unsigned long vector_ptr) { + u64 
tpidr_el2; + /* * Call initialization code, and switch to the full blown HYP code. * If the cpucaps haven't been finalized yet, something has gone very @@ -360,6 +370,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, */ BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); + + /* + * Calculate the raw per-cpu offset without a translation from the + * kernel's mapping to the linear mapping, and store it in tpidr_el2 + * so that we can use adr_l to access per-cpu variables in EL2. + */ + tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state) + - (u64)kvm_ksym_ref(kvm_host_cpu_state); + + kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2); } static inline void kvm_arch_hardware_unsetup(void) {} @@ -392,4 +412,27 @@ static inline bool kvm_arm_harden_branch_predictor(void) return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); } +#define KVM_SSBD_UNKNOWN -1 +#define KVM_SSBD_FORCE_DISABLE 0 +#define KVM_SSBD_KERNEL 1 +#define KVM_SSBD_FORCE_ENABLE 2 +#define KVM_SSBD_MITIGATED 3 + +static inline int kvm_arm_have_ssbd(void) +{ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_FORCE_DISABLE: + return KVM_SSBD_FORCE_DISABLE; + case ARM64_SSBD_KERNEL: + return KVM_SSBD_KERNEL; + case ARM64_SSBD_FORCE_ENABLE: + return KVM_SSBD_FORCE_ENABLE; + case ARM64_SSBD_MITIGATED: + return KVM_SSBD_MITIGATED; + case ARM64_SSBD_UNKNOWN: + default: + return KVM_SSBD_UNKNOWN; + } +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index fe55b516f018d0adf8d0b31d1d93d66618931cce..e42c1f0ae6cf7febfccba956ec0eed8ab47f4717 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) +/* + * Obtain the PC-relative address of a kernel symbol + * s: symbol + * + * The goal of this 
macro is to return a symbol's address based on a + * PC-relative computation, as opposed to a loading the VA from a + * constant pool or something similar. This works well for HYP, as an + * absolute VA is guaranteed to be wrong. Only use this if trying to + * obtain the address of a symbol (i.e. not something you obtained by + * following a pointer). + */ +#define hyp_symbol_addr(s) \ + ({ \ + typeof(s) *addr; \ + asm("adrp %0, %1\n" \ + "add %0, %0, :lo12:%1\n" \ + : "=r" (addr) : "S" (&s)); \ + addr; \ + }) + /* * We currently only support a 40bit IPA. */ @@ -363,5 +383,29 @@ static inline int kvm_map_vectors(void) } #endif +#ifdef CONFIG_ARM64_SSBD +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +static inline int hyp_map_aux_data(void) +{ + int cpu, err; + + for_each_possible_cpu(cpu) { + u64 *ptr; + + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu); + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP); + if (err) + return err; + } + return 0; +} +#else +static inline int hyp_map_aux_data(void) +{ + return 0; +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 3bd498e4de4cf298c8a24bb5e97e235ebed17245..43393208229eb8d64ec476f5b7b098bc73f498f9 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -16,11 +16,15 @@ #ifndef __ASM_PERCPU_H #define __ASM_PERCPU_H +#include #include static inline void set_my_cpu_offset(unsigned long off) { - asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); + asm volatile(ALTERNATIVE("msr tpidr_el1, %0", + "msr tpidr_el2, %0", + ARM64_HAS_VIRT_HOST_EXTN) + :: "r" (off) : "memory"); } static inline unsigned long __my_cpu_offset(void) @@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void) * We want to allow caching the value, so avoid using volatile and * instead use a fake stack read to hazard against barrier(). 
*/ - asm("mrs %0, tpidr_el1" : "=r" (off) : + asm(ALTERNATIVE("mrs %0, tpidr_el1", + "mrs %0, tpidr_el2", + ARM64_HAS_VIRT_HOST_EXTN) + : "=r" (off) : "Q" (*(const unsigned long *)current_stack_pointer)); return off; diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e873dc6f40a45d72233e9685efd1acb3f4fbba2e..3f547dc09ae88cd2bf36229fc83f9ac32fdece35 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -92,8 +92,8 @@ void arch_setup_new_exec(void); #define TIF_RESTORE_SIGMASK 20 #define TIF_SINGLESTEP 21 #define TIF_32BIT 22 /* 32bit process */ +#define TIF_SSBD 23 /* Wants SSB mitigation */ #define TIF_MM_RELEASED 24 -#define TIF_SSBD 25 /* Wants SSB mitigation */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index a15a07e94182a3978b4b00639791f472f45cc923..031c211bb69c4bc6350508b2693d68cd8ed2cae8 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -55,6 +55,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o ifeq ($(CONFIG_KVM),y) arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 6dd0a3a3e5c98d447f7017c70b328a70bb82c9d3..5c4bce4ac381a4ab87107e4aa47a9b7beef7d891 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -32,6 +32,8 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) +int alternatives_applied; + struct alt_region { struct alt_instr *begin; struct alt_instr *end; @@ -105,32 +107,53 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 
*insnptr, __le32 *altinsnp return insn; } +static void patch_alternative(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + __le32 *replptr; + int i; + + replptr = ALT_REPL_PTR(alt); + for (i = 0; i < nr_inst; i++) { + u32 insn; + + insn = get_alt_insn(alt, origptr + i, replptr + i); + updptr[i] = cpu_to_le32(insn); + } +} + static void __apply_alternatives(void *alt_region, bool use_linear_alias) { struct alt_instr *alt; struct alt_region *region = alt_region; - __le32 *origptr, *replptr, *updptr; + __le32 *origptr, *updptr; + alternative_cb_t alt_cb; for (alt = region->begin; alt < region->end; alt++) { - u32 insn; - int i, nr_inst; + int nr_inst; - if (!cpus_have_cap(alt->cpufeature)) + /* Use ARM64_CB_PATCH as an unconditional patch */ + if (alt->cpufeature < ARM64_CB_PATCH && + !cpus_have_cap(alt->cpufeature)) continue; - BUG_ON(alt->alt_len != alt->orig_len); + if (alt->cpufeature == ARM64_CB_PATCH) + BUG_ON(alt->alt_len != 0); + else + BUG_ON(alt->alt_len != alt->orig_len); pr_info_once("patching kernel code\n"); origptr = ALT_ORIG_PTR(alt); - replptr = ALT_REPL_PTR(alt); updptr = use_linear_alias ? 
lm_alias(origptr) : origptr; - nr_inst = alt->alt_len / sizeof(insn); + nr_inst = alt->orig_len / AARCH64_INSN_SIZE; - for (i = 0; i < nr_inst; i++) { - insn = get_alt_insn(alt, origptr + i, replptr + i); - updptr[i] = cpu_to_le32(insn); - } + if (alt->cpufeature < ARM64_CB_PATCH) + alt_cb = patch_alternative; + else + alt_cb = ALT_REPL_PTR(alt); + + alt_cb(alt, origptr, updptr, nr_inst); flush_icache_range((uintptr_t)origptr, (uintptr_t)(origptr + nr_inst)); @@ -143,7 +166,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias) */ static int __apply_alternatives_multi_stop(void *unused) { - static int patched = 0; struct alt_region region = { .begin = (struct alt_instr *)__alt_instructions, .end = (struct alt_instr *)__alt_instructions_end, @@ -151,14 +173,14 @@ static int __apply_alternatives_multi_stop(void *unused) /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { - while (!READ_ONCE(patched)) + while (!READ_ONCE(alternatives_applied)) cpu_relax(); isb(); } else { - BUG_ON(patched); + BUG_ON(alternatives_applied); __apply_alternatives(®ion, true); /* Barriers provided by the cache flushing */ - WRITE_ONCE(patched, 1); + WRITE_ONCE(alternatives_applied, 1); } return 0; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index af247d10252f5019e8b44361bb4a8a724300d7e1..b5e43b01b396c80a0ef2252a3d835232ad277b7f 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -131,11 +131,13 @@ int main(void) BLANK(); #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); 
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index e28b8bed64172470c4b7980329abeddef781c11f..57dde8be92a4bf294bf918877a6e7a6889e97ba9 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -362,7 +362,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, return required; } -#endif /* CONFIG_ARM64_SSBD */ +#endif /* CONFIG_ARM64_SSBD */ #define MIDR_RANGE(model, min, max) \ .def_scope = SCOPE_LOCAL_CPU, \ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 5c0a83096aaa7a22b9b8d66633160ebb92d47194..5e621db943c9326931f6b6a9be0cf75c46ce0b3d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -958,6 +958,22 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, #endif +static int cpu_copy_el2regs(void *__unused) +{ + /* + * Copy register values that aren't redirected by hardware. + * + * Before code patching, we only set tpidr_el1, all CPUs need to copy + * this value to tpidr_el2 before we patch the code. Once we've done + * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to + * do anything here. 
+ */ + if (!alternatives_applied) + write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); + + return 0; +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -1027,6 +1043,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_VIRT_HOST_EXTN, .def_scope = SCOPE_SYSTEM, .matches = runs_at_el2, + .enable = cpu_copy_el2regs, }, { .desc = "32-bit EL0 Support", diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c index 3432e5ef9f41882c06462b7f3ec4ff91f02fd931..0560738c1d5ccad415d1f50d2ffec56ba7e9ff08 100644 --- a/arch/arm64/kernel/ssbd.c +++ b/arch/arm64/kernel/ssbd.c @@ -4,6 +4,7 @@ */ #include +#include #include #include @@ -11,9 +12,7 @@ /* * prctl interface for SSBD - * FIXME: Drop the below ifdefery once merged in 4.18. */ -#ifdef PR_SPEC_STORE_BYPASS static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) { int state = arm64_get_ssbd_state(); @@ -107,4 +106,3 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) return -ENODEV; } } -#endif /* PR_SPEC_STORE_BYPASS */ diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 870828c364c508f825eacc1c49c17886dc9c8cb2..dea20651a5f167e70f1d38b9728d263a0c78eb58 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -122,6 +122,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE) kern_hyp_va x2 msr vbar_el2, x2 + /* copy tpidr_el1 into tpidr_el2 for use by HYP */ + mrs x1, tpidr_el1 + msr tpidr_el2, x1 + /* Hello, World! */ eret ENDPROC(__kvm_hyp_init) diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 9c45c6af1f5828099ca00ad8ca16d17197138857..a7b3c198d4de87ff7d2f103a01971be4d40b543b 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -62,9 +62,6 @@ ENTRY(__guest_enter) // Store the host regs save_callee_saved_regs x1 - // Store the host_ctxt for use at exit time - str x1, [sp, #-16]! 
- add x18, x0, #VCPU_CONTEXT // Restore guest regs x0-x17 @@ -118,8 +115,7 @@ ENTRY(__guest_exit) // Store the guest regs x19-x29, lr save_callee_saved_regs x1 - // Restore the host_ctxt from the stack - ldr x2, [sp], #16 + get_host_ctxt x2, x3 // Now restore the host regs restore_callee_saved_regs x2 @@ -159,6 +155,10 @@ abort_guest_exit_end: ENDPROC(__guest_exit) ENTRY(__fpsimd_guest_restore) + // x0: esr + // x1: vcpu + // x2-x29,lr: vcpu regs + // vcpu x0-x1 on the stack stp x2, x3, [sp, #-16]! stp x4, lr, [sp, #-16]! @@ -173,7 +173,7 @@ alternative_else alternative_endif isb - mrs x3, tpidr_el2 + mov x3, x1 ldr x0, [x3, #VCPU_HOST_CONTEXT] kern_hyp_va x0 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index f49b53331d28118d0b3e5419c17b4f3ec1513ca3..3c283fd8c8f5a5fd258475069a4cf280845c7929 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -57,13 +57,8 @@ ENDPROC(__vhe_hyp_call) el1_sync: // Guest trapped into EL2 stp x0, x1, [sp, #-16]! -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x1, esr_el2 -alternative_else - mrs x1, esr_el1 -alternative_endif - lsr x0, x1, #ESR_ELx_EC_SHIFT - + mrs x0, esr_el2 + lsr x0, x0, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap @@ -111,14 +106,55 @@ el1_hvc_guest: */ ldr x1, [sp] // Guest's x0 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 + cbz w1, wa_epilogue + + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ + ARM_SMCCC_ARCH_WORKAROUND_2) cbnz w1, el1_trap - mov x0, x1 + +#ifdef CONFIG_ARM64_SSBD +alternative_cb arm64_enable_wa2_handling + b wa2_end +alternative_cb_end + get_vcpu_ptr x2, x0 + ldr x0, [x2, #VCPU_WORKAROUND_FLAGS] + + // Sanitize the argument and update the guest flags + ldr x1, [sp, #8] // Guest's x1 + clz w1, w1 // Murphy's device: + lsr w1, w1, #5 // w1 = !!w1 without using + eor w1, w1, #1 // the flags... 
+ bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1 + str x0, [x2, #VCPU_WORKAROUND_FLAGS] + + /* Check that we actually need to perform the call */ + hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2 + cbz x0, wa2_end + + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + smc #0 + + /* Don't leak data from the SMC call */ + mov x3, xzr +wa2_end: + mov x2, xzr + mov x1, xzr +#endif + +wa_epilogue: + mov x0, xzr add sp, sp, #16 eret el1_trap: + get_vcpu_ptr x1, x0 + + mrs x0, esr_el2 + lsr x0, x0, #ESR_ELx_EC_SHIFT /* * x0: ESR_EC + * x1: vcpu pointer */ /* @@ -132,19 +168,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD b.eq __fpsimd_guest_restore alternative_else_nop_endif - mrs x1, tpidr_el2 mov x0, #ARM_EXCEPTION_TRAP b __guest_exit el1_irq: stp x0, x1, [sp, #-16]! - mrs x1, tpidr_el2 + get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IRQ b __guest_exit el1_error: stp x0, x1, [sp, #-16]! - mrs x1, tpidr_el2 + get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit @@ -179,6 +214,11 @@ ENTRY(__hyp_do_panic) eret ENDPROC(__hyp_do_panic) +ENTRY(__hyp_panic) + get_host_ctxt x0, x1 + b hyp_panic +ENDPROC(__hyp_panic) + .macro invalid_vector label, target = __hyp_panic .align 2 \label: diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index e08ae6b6b63e9f8ad63af77b338cc9279d55cc05..b2f1992c6234cbbff1c1fbf6629160292bae4cc9 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,6 +15,7 @@ * along with this program. If not, see . 
*/ +#include #include #include #include @@ -281,6 +282,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu) write_sysreg_el2(*vcpu_pc(vcpu), elr); } +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu) +{ + if (!cpus_have_const_cap(ARM64_SSBD)) + return false; + + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); +} + +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_ARM64_SSBD + /* + * The host runs with the workaround always present. If the + * guest wants it disabled, so be it... + */ + if (__needs_ssbd_off(vcpu) && + __hyp_this_cpu_read(arm64_ssbd_callback_required)) + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL); +#endif +} + +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_ARM64_SSBD + /* + * If the guest has disabled the workaround, bring it back on. + */ + if (__needs_ssbd_off(vcpu) && + __hyp_this_cpu_read(arm64_ssbd_callback_required)) + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL); +#endif +} + int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; @@ -289,9 +323,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) u64 exit_code; vcpu = kern_hyp_va(vcpu); - write_sysreg(vcpu, tpidr_el2); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; __sysreg_save_host_state(host_ctxt); @@ -311,6 +345,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_restore_guest_state(guest_ctxt); __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); + __set_guest_arch_workaround_state(vcpu); + /* Jump in the fire! 
*/ again: exit_code = __guest_enter(vcpu, host_ctxt); @@ -367,6 +403,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* 0 falls through to be handled out of EL2 */ } + __set_host_arch_workaround_state(vcpu); + if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { u32 midr = read_cpuid_id(); @@ -406,7 +444,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; -static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) +static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, + struct kvm_vcpu *vcpu) { unsigned long str_va; @@ -420,35 +459,32 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) __hyp_do_panic(str_va, spsr, elr, read_sysreg(esr_el2), read_sysreg_el2(far), - read_sysreg(hpfar_el2), par, - (void *)read_sysreg(tpidr_el2)); + read_sysreg(hpfar_el2), par, vcpu); } -static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par) +static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, + struct kvm_vcpu *vcpu) { panic(__hyp_panic_string, spsr, elr, read_sysreg_el2(esr), read_sysreg_el2(far), - read_sysreg(hpfar_el2), par, - (void *)read_sysreg(tpidr_el2)); + read_sysreg(hpfar_el2), par, vcpu); } static hyp_alternate_select(__hyp_call_panic, __hyp_call_panic_nvhe, __hyp_call_panic_vhe, ARM64_HAS_VIRT_HOST_EXTN); -void __hyp_text __noreturn __hyp_panic(void) +void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) { + struct kvm_vcpu *vcpu = NULL; + u64 spsr = read_sysreg_el2(spsr); u64 elr = read_sysreg_el2(elr); u64 par = read_sysreg(par_el1); if (read_sysreg(vttbr_el2)) { - struct kvm_vcpu *vcpu; - struct kvm_cpu_context *host_ctxt; - - vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + vcpu = host_ctxt->__hyp_running_vcpu; __timer_save_state(vcpu); 
__deactivate_traps(vcpu); __deactivate_vm(vcpu); @@ -456,7 +492,7 @@ void __hyp_text __noreturn __hyp_panic(void) } /* Call panic for real */ - __hyp_call_panic()(spsr, elr, par); + __hyp_call_panic()(spsr, elr, par, vcpu); unreachable(); } diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 9341376478370a836c55ede6fb0a8f17481bdd14..e19d89cabf2a2e09c8a27f0f348f93f5546bd883 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { } /* * Non-VHE: Both host and guest must save everything. * - * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc, - * pstate, and guest must save everything. + * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0, + * and guest must save everything. */ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) @@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); - ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); - ctxt->gp_regs.regs.pc = read_sysreg_el2(elr); - ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); } static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) @@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair); ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl); ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr); ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); + ctxt->gp_regs.regs.pc 
= read_sysreg_el2(elr); + ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); } static hyp_alternate_select(__sysreg_call_save_host_state, @@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); - write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); - write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); } static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) @@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair); write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl); write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); write_sysreg_el1(ctxt->gp_regs.elr_el1, elr); write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); + write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); } static hyp_alternate_select(__sysreg_call_restore_host_state, @@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2); } + +void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2) +{ + asm("msr tpidr_el2, %0": : "r" (tpidr_el2)); +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 3256b9228e75801a7d91ad9c95235bd7c420d779..a74311beda35d9f54a15e5c9cb1230367f70865f 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset PMU */ kvm_pmu_vcpu_reset(vcpu); + /* Default workaround setup is 
enabled (if supported) */ + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; + /* Reset timer */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index fdf9525d375ee4c6bab292fd7cb05a01715c60e3..cc1b7acea13e1527476267c12fd32cce3c8dd213 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -721,11 +721,13 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Make sure we chose the upper bound of sizeof(struct page) - * correctly. + * correctly when sizing the VMEMMAP array. */ BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); +#endif if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 22f0c81e5504cec16d97f49c715e921d287463be..f2ca818eb7958fcfa7dc8a6bae17a647ee0f52a1 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -116,7 +116,11 @@ ENTRY(cpu_do_suspend) mrs x8, mdscr_el1 mrs x9, oslsr_el1 mrs x10, sctlr_el1 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x11, tpidr_el1 +alternative_else + mrs x11, tpidr_el2 +alternative_endif mrs x12, sp_el0 stp x2, x3, [x0] stp x4, xzr, [x0, #16] @@ -162,7 +166,11 @@ ENTRY(cpu_do_resume) msr mdscr_el1, x10 msr sctlr_el1, x12 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN msr tpidr_el1, x13 +alternative_else + msr tpidr_el2, x13 +alternative_endif msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index 47f94cc383b65746eb5698db4038c2fea0c1b522..7c2f52d4a0e45f47c381fd38a4aec4180352c8cf 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile @@ -22,17 +22,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE quiet_cmd_cp = CP $< $@$2 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) -quiet_cmd_strip = STRIP $@ +quiet_cmd_strip = STRIP 
$< $@$2 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ - -K _fdt_start vmlinux -o $@ + -K _fdt_start $< -o $@$2 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) +UIMAGE_IN = $@ +UIMAGE_OUT = $@.ub $(obj)/simpleImage.%: vmlinux FORCE $(call if_changed,cp,.unstrip) $(call if_changed,objcopy) $(call if_changed,uimage) - $(call if_changed,strip) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' + $(call if_changed,strip,.strip) + @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c index 10a405d593df3b5c64fa84ce9ae27eaa7ba222df..c782b10ddf50d6a09713edc21f23356399ce1a4b 100644 --- a/arch/mips/ath79/common.c +++ b/arch/mips/ath79/common.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init); void ath79_ddr_wb_flush(u32 reg) { - void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg; + void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4); /* Flush the DDR write buffer. */ __raw_writel(0x1, flush_reg); diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index 9632436d74d7a74b3d584ab6e87a1fc7e55827cc..c2e94cf5ecdab7c7f3263bd65e76c30cf8eb32fc 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, phys_addr_t size = resource_size(rsrc); *start = fixup_bigphys_addr(rsrc->start, size); - *end = rsrc->start + size; + *end = rsrc->start + size - 1; } diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index c7c63959ba91066f69b1147f59d4df4e20b39b1f..e582d2c880922504c6d93b3df72f655953437aae 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -76,6 +76,21 @@ do { \ ___p1; \ }) +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. 
+ */ +#define barrier_nospec_asm ori 31,31,0 + +// This also acts as a compiler barrier due to the memory clobber. +#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") + +#else /* !CONFIG_PPC_BOOK3S_64 */ +#define barrier_nospec_asm +#define barrier_nospec() +#endif + #include #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index c1d257aa4c2d350d23774420eafed6deb57ec5f9..66298461b640f06a247106af6045dfe1f2f8c963 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -9,11 +9,14 @@ #if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) #define L1_CACHE_SHIFT 4 #define MAX_COPY_PREFETCH 1 +#define IFETCH_ALIGN_SHIFT 2 #elif defined(CONFIG_PPC_E500MC) #define L1_CACHE_SHIFT 6 #define MAX_COPY_PREFETCH 4 +#define IFETCH_ALIGN_SHIFT 3 #elif defined(CONFIG_PPC32) #define MAX_COPY_PREFETCH 4 +#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */ #if defined(CONFIG_PPC_47x) #define L1_CACHE_SHIFT 7 #else diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 44fdf4786638b1fe2f21b8c15927eea8c19ee47f..6f67ff5a52672329f52f2c02c44daa655803368e 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, unsigned long ua, unsigned long entries); extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa); + unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa); + unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct 
mm_iommu_table_group_mem_t *mem); #endif diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index ca2243df9cb2ecce7a805c33aff253d6678f9252..470284f9e4f6661f6716b0be1a78b14bae9f33b3 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -450,9 +450,11 @@ static void *eeh_add_virt_device(void *data, void *userdata) driver = eeh_pcid_get(dev); if (driver) { - eeh_pcid_put(dev); - if (driver->err_handler) + if (driver->err_handler) { + eeh_pcid_put(dev); return NULL; + } + eeh_pcid_put(dev); } #ifdef CONFIG_PPC_POWERNV @@ -489,17 +491,19 @@ static void *eeh_rmv_device(void *data, void *userdata) if (eeh_dev_removed(edev)) return NULL; - driver = eeh_pcid_get(dev); - if (driver) { - eeh_pcid_put(dev); - if (removed && - eeh_pe_passed(edev->pe)) - return NULL; - if (removed && - driver->err_handler && - driver->err_handler->error_detected && - driver->err_handler->slot_reset) + if (removed) { + if (eeh_pe_passed(edev->pe)) return NULL; + driver = eeh_pcid_get(dev); + if (driver) { + if (driver->err_handler && + driver->err_handler->error_detected && + driver->err_handler->slot_reset) { + eeh_pcid_put(dev); + return NULL; + } + eeh_pcid_put(dev); + } } /* Remove it from PCI subsystem */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 4fee00d414e87c78d8a09ef3e9662b7bb7234c66..2d0d89e2cb9a8c5765583e0f2d99031dbf0c7e80 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -958,7 +958,7 @@ start_here: tovirt(r6,r6) lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ + stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) stw r6, 0(r5) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e35cebd45c35e0bfb90075a99a7d8de54f1d24ab..4efbde0984b2dece9e3c4f10893706204270db48 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ 
b/arch/powerpc/kernel/idle_book3s.S @@ -140,6 +140,8 @@ power9_restore_additional_sprs: ld r4, STOP_MMCR2(r13) mtspr SPRN_MMCR1, r3 mtspr SPRN_MMCR2, r4 + ld r4, PACA_SPRG_VDSO(r13) + mtspr SPRN_SPRG3, r4 blr /* diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 1d817f4d97d960cae8b2c75ec580588538c31282..2094f2b249fd5ecb0de9cc29fb49a390bbd7a9a8 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 02190e90c7aef4a1a71d513542ebcef556295753..f8782c7ef50f1350a58b4fb28e303cb7f8a68f2e 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -334,6 +334,7 @@ static void __init prom_print_dec(unsigned long val) call_prom("write", 3, 1, prom.stdout, buf+i, size); } +__printf(1, 2) static void __init prom_printf(const char *format, ...) { const char *p, *q, *s; @@ -1148,7 +1149,7 @@ static void __init prom_send_capabilities(void) */ cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); - prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", + prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", cores, NR_CPUS); ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); @@ -1230,7 +1231,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) if (align) base = _ALIGN_UP(base, align); - prom_debug("alloc_up(%x, %x)\n", size, align); + prom_debug("%s(%lx, %lx)\n", __func__, size, align); if (ram_top == 0) prom_panic("alloc_up() called with mem not initialized\n"); @@ -1241,7 +1242,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) for(; (base + size) <= alloc_top; base = _ALIGN_UP(base + 0x100000, align)) { - prom_debug(" trying: 0x%x\n\r", base); + prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 
0); if (addr != PROM_ERROR && addr != 0) break; @@ -1253,12 +1254,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) return 0; alloc_bottom = addr + size; - prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", alloc_bottom); - prom_debug(" alloc_top : %x\n", alloc_top); - prom_debug(" alloc_top_hi : %x\n", alloc_top_high); - prom_debug(" rmo_top : %x\n", rmo_top); - prom_debug(" ram_top : %x\n", ram_top); + prom_debug(" -> %lx\n", addr); + prom_debug(" alloc_bottom : %lx\n", alloc_bottom); + prom_debug(" alloc_top : %lx\n", alloc_top); + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); + prom_debug(" rmo_top : %lx\n", rmo_top); + prom_debug(" ram_top : %lx\n", ram_top); return addr; } @@ -1273,7 +1274,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, { unsigned long base, addr = 0; - prom_debug("alloc_down(%x, %x, %s)\n", size, align, + prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, highmem ? "(high)" : "(low)"); if (ram_top == 0) prom_panic("alloc_down() called with mem not initialized\n"); @@ -1301,7 +1302,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, base = _ALIGN_DOWN(alloc_top - size, align); for (; base > alloc_bottom; base = _ALIGN_DOWN(base - 0x100000, align)) { - prom_debug(" trying: 0x%x\n\r", base); + prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) break; @@ -1312,12 +1313,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, alloc_top = addr; bail: - prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", alloc_bottom); - prom_debug(" alloc_top : %x\n", alloc_top); - prom_debug(" alloc_top_hi : %x\n", alloc_top_high); - prom_debug(" rmo_top : %x\n", rmo_top); - prom_debug(" ram_top : %x\n", ram_top); + prom_debug(" -> %lx\n", addr); + prom_debug(" alloc_bottom : %lx\n", alloc_bottom); + prom_debug(" 
alloc_top : %lx\n", alloc_top); + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); + prom_debug(" rmo_top : %lx\n", rmo_top); + prom_debug(" ram_top : %lx\n", ram_top); return addr; } @@ -1443,7 +1444,7 @@ static void __init prom_init_mem(void) if (size == 0) continue; - prom_debug(" %x %x\n", base, size); + prom_debug(" %lx %lx\n", base, size); if (base == 0 && (of_platform & PLATFORM_LPAR)) rmo_top = size; if ((base + size) > ram_top) @@ -1463,12 +1464,12 @@ static void __init prom_init_mem(void) if (prom_memory_limit) { if (prom_memory_limit <= alloc_bottom) { - prom_printf("Ignoring mem=%x <= alloc_bottom.\n", - prom_memory_limit); + prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", + prom_memory_limit); prom_memory_limit = 0; } else if (prom_memory_limit >= ram_top) { - prom_printf("Ignoring mem=%x >= ram_top.\n", - prom_memory_limit); + prom_printf("Ignoring mem=%lx >= ram_top.\n", + prom_memory_limit); prom_memory_limit = 0; } else { ram_top = prom_memory_limit; @@ -1500,12 +1501,13 @@ static void __init prom_init_mem(void) alloc_bottom = PAGE_ALIGN(prom_initrd_end); prom_printf("memory layout at init:\n"); - prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); - prom_printf(" alloc_bottom : %x\n", alloc_bottom); - prom_printf(" alloc_top : %x\n", alloc_top); - prom_printf(" alloc_top_hi : %x\n", alloc_top_high); - prom_printf(" rmo_top : %x\n", rmo_top); - prom_printf(" ram_top : %x\n", ram_top); + prom_printf(" memory_limit : %lx (16 MB aligned)\n", + prom_memory_limit); + prom_printf(" alloc_bottom : %lx\n", alloc_bottom); + prom_printf(" alloc_top : %lx\n", alloc_top); + prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); + prom_printf(" rmo_top : %lx\n", rmo_top); + prom_printf(" ram_top : %lx\n", ram_top); } static void __init prom_close_stdin(void) @@ -1566,7 +1568,7 @@ static void __init prom_instantiate_opal(void) return; } - prom_printf("instantiating opal at 0x%x...", base); + prom_printf("instantiating opal at 
0x%llx...", base); if (call_prom_ret("call-method", 4, 3, rets, ADDR("load-opal-runtime"), @@ -1582,10 +1584,10 @@ static void __init prom_instantiate_opal(void) reserve_mem(base, size); - prom_debug("opal base = 0x%x\n", base); - prom_debug("opal align = 0x%x\n", align); - prom_debug("opal entry = 0x%x\n", entry); - prom_debug("opal size = 0x%x\n", (long)size); + prom_debug("opal base = 0x%llx\n", base); + prom_debug("opal align = 0x%llx\n", align); + prom_debug("opal entry = 0x%llx\n", entry); + prom_debug("opal size = 0x%llx\n", size); prom_setprop(opal_node, "/ibm,opal", "opal-base-address", &base, sizeof(base)); @@ -1662,7 +1664,7 @@ static void __init prom_instantiate_rtas(void) prom_debug("rtas base = 0x%x\n", base); prom_debug("rtas entry = 0x%x\n", entry); - prom_debug("rtas size = 0x%x\n", (long)size); + prom_debug("rtas size = 0x%x\n", size); prom_debug("prom_instantiate_rtas: end...\n"); } @@ -1720,7 +1722,7 @@ static void __init prom_instantiate_sml(void) if (base == 0) prom_panic("Could not allocate memory for sml\n"); - prom_printf("instantiating sml at 0x%x...", base); + prom_printf("instantiating sml at 0x%llx...", base); memset((void *)base, 0, size); @@ -1739,8 +1741,8 @@ static void __init prom_instantiate_sml(void) prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", &size, sizeof(size)); - prom_debug("sml base = 0x%x\n", base); - prom_debug("sml size = 0x%x\n", (long)size); + prom_debug("sml base = 0x%llx\n", base); + prom_debug("sml size = 0x%x\n", size); prom_debug("prom_instantiate_sml: end...\n"); } @@ -1841,7 +1843,7 @@ static void __init prom_initialize_tce_table(void) prom_debug("TCE table: %s\n", path); prom_debug("\tnode = 0x%x\n", node); - prom_debug("\tbase = 0x%x\n", base); + prom_debug("\tbase = 0x%llx\n", base); prom_debug("\tsize = 0x%x\n", minsize); /* Initialize the table to have a one-to-one mapping @@ -1928,12 +1930,12 @@ static void __init prom_hold_cpus(void) } prom_debug("prom_hold_cpus: start...\n"); - 
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); - prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); - prom_debug(" 1) acknowledge = 0x%x\n", + prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); + prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); + prom_debug(" 1) acknowledge = 0x%lx\n", (unsigned long)acknowledge); - prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge); - prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold); + prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); + prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); /* Set the common spinloop variable, so all of the secondary cpus * will block when they are awakened from their OF spinloop. @@ -1961,7 +1963,7 @@ static void __init prom_hold_cpus(void) prom_getprop(node, "reg", ®, sizeof(reg)); cpu_no = be32_to_cpu(reg); - prom_debug("cpu hw idx = %lu\n", cpu_no); + prom_debug("cpu hw idx = %u\n", cpu_no); /* Init the acknowledge var which will be reset by * the secondary cpu when it awakens from its OF @@ -1971,7 +1973,7 @@ static void __init prom_hold_cpus(void) if (cpu_no != prom.cpu) { /* Primary Thread of non-boot cpu or any thread */ - prom_printf("starting cpu hw idx %lu... ", cpu_no); + prom_printf("starting cpu hw idx %u... 
", cpu_no); call_prom("start-cpu", 3, 0, node, secondary_hold, cpu_no); @@ -1982,11 +1984,11 @@ static void __init prom_hold_cpus(void) if (*acknowledge == cpu_no) prom_printf("done\n"); else - prom_printf("failed: %x\n", *acknowledge); + prom_printf("failed: %lx\n", *acknowledge); } #ifdef CONFIG_SMP else - prom_printf("boot cpu hw idx %lu\n", cpu_no); + prom_printf("boot cpu hw idx %u\n", cpu_no); #endif /* CONFIG_SMP */ } @@ -2264,7 +2266,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, while ((*mem_start + needed) > *mem_end) { unsigned long room, chunk; - prom_debug("Chunk exhausted, claiming more at %x...\n", + prom_debug("Chunk exhausted, claiming more at %lx...\n", alloc_bottom); room = alloc_top - alloc_bottom; if (room > DEVTREE_CHUNK_SIZE) @@ -2490,7 +2492,7 @@ static void __init flatten_device_tree(void) room = alloc_top - alloc_bottom - 0x4000; if (room > DEVTREE_CHUNK_SIZE) room = DEVTREE_CHUNK_SIZE; - prom_debug("starting device tree allocs at %x\n", alloc_bottom); + prom_debug("starting device tree allocs at %lx\n", alloc_bottom); /* Now try to claim that */ mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); @@ -2553,7 +2555,7 @@ static void __init flatten_device_tree(void) int i; prom_printf("reserved memory map:\n"); for (i = 0; i < mem_reserve_cnt; i++) - prom_printf(" %x - %x\n", + prom_printf(" %llx - %llx\n", be64_to_cpu(mem_reserve_map[i].base), be64_to_cpu(mem_reserve_map[i].size)); } @@ -2563,9 +2565,9 @@ static void __init flatten_device_tree(void) */ mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; - prom_printf("Device tree strings 0x%x -> 0x%x\n", + prom_printf("Device tree strings 0x%lx -> 0x%lx\n", dt_string_start, dt_string_end); - prom_printf("Device tree struct 0x%x -> 0x%x\n", + prom_printf("Device tree struct 0x%lx -> 0x%lx\n", dt_struct_start, dt_struct_end); } @@ -2997,7 +2999,7 @@ static void __init prom_find_boot_cpu(void) prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); prom.cpu = 
be32_to_cpu(rval); - prom_debug("Booting CPU hw index = %lu\n", prom.cpu); + prom_debug("Booting CPU hw index = %d\n", prom.cpu); } static void __init prom_check_initrd(unsigned long r3, unsigned long r4) @@ -3019,8 +3021,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4) reserve_mem(prom_initrd_start, prom_initrd_end - prom_initrd_start); - prom_debug("initrd_start=0x%x\n", prom_initrd_start); - prom_debug("initrd_end=0x%x\n", prom_initrd_end); + prom_debug("initrd_start=0x%lx\n", prom_initrd_start); + prom_debug("initrd_end=0x%lx\n", prom_initrd_end); } #endif /* CONFIG_BLK_DEV_INITRD */ } @@ -3273,7 +3275,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* Don't print anything after quiesce under OPAL, it crashes OFW */ if (of_platform != PLATFORM_OPAL) { prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); - prom_debug("->dt_header_start=0x%x\n", hdr); + prom_debug("->dt_header_start=0x%lx\n", hdr); } #ifdef CONFIG_PPC32 diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 4dffa611376d67850ac4ef8730a547fcccf63491..e14cec6bc3398ef4d9d7ae2b608ae0a4a806ed26 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ return H_TOO_HARD; - if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) + if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) return H_HARDWARE; if (mm_iommu_mapped_inc(mem)) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index c32e9bfe75b1abbf6b27a574f91bf5b3fcaf5e66..648cf6c0134899b67181f30122810669387631f5 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, if (!mem) return 
H_TOO_HARD; - if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) + if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, + &hpa))) return H_HARDWARE; pua = (void *) vmalloc_to_phys(pua); @@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); if (mem) - prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0; + prereg = mm_iommu_ua_to_hpa_rm(mem, ua, + IOMMU_PAGE_SHIFT_4K, &tces) == 0; } if (!prereg) { diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S index a787776822d86639964febb518943b2e24435507..0378def28d411debec742d282991b8ef5c3ba097 100644 --- a/arch/powerpc/lib/string.S +++ b/arch/powerpc/lib/string.S @@ -12,6 +12,7 @@ #include #include #include +#include .text @@ -23,7 +24,7 @@ _GLOBAL(strncpy) mtctr r5 addi r6,r3,-1 addi r4,r4,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r0,1(r4) cmpwi 0,r0,0 stbu r0,1(r6) @@ -43,7 +44,7 @@ _GLOBAL(strncmp) mtctr r5 addi r5,r3,-1 addi r4,r4,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r3,1(r5) cmpwi 1,r3,0 lbzu r0,1(r4) @@ -77,7 +78,7 @@ _GLOBAL(memchr) beq- 2f mtctr r5 addi r3,r3,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r0,1(r3) cmpw 0,r0,r4 bdnzf 2,1b diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index e0a2d8e806edb01a3b24ab063ec41e87defb0e1f..816055927ee47ba05db7e1d3675461de66052636 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -19,6 +19,7 @@ #include #include #include +#include static DEFINE_MUTEX(mem_list_mutex); @@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t { struct rcu_head rcu; unsigned long used; atomic64_t mapped; + unsigned int pageshift; u64 ua; /* userspace address */ u64 entries; /* number of entries in hpas[] */ u64 *hpas; /* vmalloc'ed */ @@ -126,6 +128,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, { struct 
mm_iommu_table_group_mem_t *mem; long i, j, ret = 0, locked_entries = 0; + unsigned int pageshift; + unsigned long flags; struct page *page = NULL; mutex_lock(&mem_list_mutex); @@ -160,6 +164,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, goto unlock_exit; } + /* + * For a starting point for a maximum page size calculation + * we use @ua and @entries natural alignment to allow IOMMU pages + * smaller than huge pages but still bigger than PAGE_SIZE. + */ + mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); mem->hpas = vzalloc(entries * sizeof(mem->hpas[0])); if (!mem->hpas) { kfree(mem); @@ -200,6 +210,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, } } populate: + pageshift = PAGE_SHIFT; + if (PageCompound(page)) { + pte_t *pte; + struct page *head = compound_head(page); + unsigned int compshift = compound_order(head); + + local_irq_save(flags); /* disables as well */ + pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); + local_irq_restore(flags); + + /* Double check it is still the same pinned page */ + if (pte && pte_page(*pte) == head && + pageshift == compshift) + pageshift = max_t(unsigned int, pageshift, + PAGE_SHIFT); + } + mem->pageshift = min(mem->pageshift, pageshift); mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; } @@ -350,7 +377,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, EXPORT_SYMBOL_GPL(mm_iommu_find); long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa) + unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; u64 *va = &mem->hpas[entry]; @@ -358,6 +385,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, if (entry >= mem->entries) return -EFAULT; + if (pageshift > mem->pageshift) + return -EFAULT; + *hpa = *va | (ua & ~PAGE_MASK); return 0; @@ -365,7 +395,7 @@ long mm_iommu_ua_to_hpa(struct 
mm_iommu_table_group_mem_t *mem, EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa) + unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; void *va = &mem->hpas[entry]; @@ -374,6 +404,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, if (entry >= mem->entries) return -EFAULT; + if (pageshift > mem->pageshift) + return -EFAULT; + pa = (void *) vmalloc_to_phys(va); if (!pa) return -EFAULT; diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 13cfe413b40d48cf0bca57fd757dc63bc01487dc..6d9bf014b3e78173fec41ade3d33d4d8bbc97512 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -62,14 +62,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, * updating it. No write barriers are needed here, provided * we only update the current CPU's SLB shadow buffer. */ - p->save_area[index].esid = 0; - p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); - p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); + WRITE_ONCE(p->save_area[index].esid, 0); + WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); + WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); } static inline void slb_shadow_clear(enum slb_index index) { - get_slb_shadow()->save_area[index].esid = 0; + WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0); } static inline void create_shadowed_slbe(unsigned long ea, int ssize, diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index bd0786c23109b51f61ab352d873e01c32c7cd45d..254634fb3fc75198b037c3a8e68da1baf16ac424 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -203,25 +203,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) static void 
bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) { + unsigned int i, ctx_idx = ctx->idx; + + /* Load function address into r12 */ + PPC_LI64(12, func); + + /* For bpf-to-bpf function calls, the callee's address is unknown + * until the last extra pass. As seen above, we use PPC_LI64() to + * load the callee's address, but this may optimize the number of + * instructions required based on the nature of the address. + * + * Since we don't want the number of instructions emitted to change, + * we pad the optimized PPC_LI64() call with NOPs to guarantee that + * we always have a five-instruction sequence, which is the maximum + * that PPC_LI64() can emit. + */ + for (i = ctx->idx - ctx_idx; i < 5; i++) + PPC_NOP(); + #ifdef PPC64_ELF_ABI_v1 - /* func points to the function descriptor */ - PPC_LI64(b2p[TMP_REG_2], func); - /* Load actual entry point from function descriptor */ - PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); - /* ... and move it to LR */ - PPC_MTLR(b2p[TMP_REG_1]); /* * Load TOC from function descriptor at offset 8. * We can clobber r2 since we get called through a * function pointer (so caller will save/restore r2) * and since we don't use a TOC ourself. 
*/ - PPC_BPF_LL(2, b2p[TMP_REG_2], 8); -#else - /* We can clobber r12 */ - PPC_FUNC_ADDR(12, func); - PPC_MTLR(12); + PPC_BPF_LL(2, 12, 8); + /* Load actual entry point from function descriptor */ + PPC_BPF_LL(12, 12, 0); #endif + + PPC_MTLR(12); PPC_BLRL(); } diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 03d115aaa1916638d01e782326a8f3fc6412f730..acde7bbe07164ebfb1459c5fda7255863f8a30ed 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c @@ -28,6 +28,8 @@ #include #include +#include + extern spinlock_t rtc_lock; #define NVRAM_AS0 0x74 @@ -63,7 +65,7 @@ long __init chrp_time_init(void) return 0; } -int chrp_cmos_clock_read(int addr) +static int chrp_cmos_clock_read(int addr) { if (nvram_as1 != 0) outb(addr>>8, nvram_as1); @@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr) return (inb(nvram_data)); } -void chrp_cmos_clock_write(unsigned long val, int addr) +static void chrp_cmos_clock_write(unsigned long val, int addr) { if (nvram_as1 != 0) outb(addr>>8, nvram_as1); diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 89c54de88b7a0f3a13ebd851d91f481fcb82d47b..bf4a125faec66664cd8c76d29852ee37c3bee166 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -35,6 +35,8 @@ */ #define HW_BROADWAY_ICR 0x00 #define HW_BROADWAY_IMR 0x04 +#define HW_STARLET_ICR 0x08 +#define HW_STARLET_IMR 0x0c /* @@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d) void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); + + /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. 
*/ + clrbits32(io_base + HW_STARLET_IMR, 1 << irq); } diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index c3c9bbb3573ae6bcbc0ff77e73bd68305bf50f43..ba0964c1762082e6eb5b982f8d6336192f98fbc1 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4) boot_infos_t *bi = (boot_infos_t *) r4; unsigned long hdr; unsigned long space; - unsigned long ptr, x; + unsigned long ptr; char *model; unsigned long offset = reloc_offset(); @@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4) * MMU switched OFF, so this should not be useful anymore. */ if (bi->version < 4) { + unsigned long x __maybe_unused; + bootx_printf("Touching pages...\n"); /* diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index ab668cb72263ce69da918e712183c3083d2140d9..8b2eab1340f4ecb3a8d9ac7b95e46de0ac803982 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -352,6 +352,7 @@ static int pmac_late_init(void) } machine_late_initcall(powermac, pmac_late_init); +void note_bootable_part(dev_t dev, int part, int goodness); /* * This is __ref because we check for "initializing" before * touching any of the __init sensitive things and "initializing" diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 05480e4cc5cabdab4e8bcc5006b8d0691b4bfef7..bc764a674594e040989345795d37dba9512895db 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -116,7 +116,7 @@ struct hws_basic_entry { struct hws_diag_entry { unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int R:15; /* 16-19 and 20-30 reserved */ unsigned int I:1; /* 31 entry valid or invalid */ u8 data[]; /* Machine-dependent sample data */ } 
__packed; @@ -132,7 +132,9 @@ struct hws_trailer_entry { unsigned int f:1; /* 0 - Block Full Indicator */ unsigned int a:1; /* 1 - Alert request control */ unsigned int t:1; /* 2 - Timestamp format */ - unsigned long long:61; /* 3 - 63: Reserved */ + unsigned int :29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ }; unsigned long long flags; /* 0 - 63: All indicators */ }; diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index 26a42b91140b39d5ad5a4547d38d5cd3ab73a6fc..19e3a812306b028d96127c152375f3db9ca70934 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -219,7 +219,9 @@ CONFIG_DM_MIRROR=y CONFIG_DM_ZERO=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1 CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y CONFIG_NETDEVICES=y CONFIG_NETCONSOLE=y CONFIG_NETCONSOLE_DYNAMIC=y @@ -447,6 +449,12 @@ CONFIG_SECURITY_PATH=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_CRYPTO_RSA=y # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_DEV_VIRTIO=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509" diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f7bfa701219b3cd67d2891b85901be88ada912af..0fae7096ae23cc5372cc78fa43ebfaa84f0e66a7 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -933,7 +933,7 @@ ENTRY(\sym) call \do_sym - jmp error_exit /* %ebx: no swapgs flag */ + jmp error_exit .endif END(\sym) .endm @@ -1166,7 +1166,6 @@ END(paranoid_exit) /* * Save all registers in pt_regs, and switch GS if needed. 
- * Return: EBX=0: came from user mode; EBX=1: otherwise */ ENTRY(error_entry) UNWIND_HINT_FUNC @@ -1213,7 +1212,6 @@ ENTRY(error_entry) * for these here too. */ .Lerror_kernelspace: - incl %ebx leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) je .Lerror_bad_iret @@ -1247,28 +1245,20 @@ ENTRY(error_entry) /* * Pretend that the exception came from user mode: set up pt_regs - * as if we faulted immediately after IRET and clear EBX so that - * error_exit knows that we will be returning to user mode. + * as if we faulted immediately after IRET. */ mov %rsp, %rdi call fixup_bad_iret mov %rax, %rsp - decl %ebx jmp .Lerror_entry_from_usermode_after_swapgs END(error_entry) - -/* - * On entry, EBX is a "return to kernel mode" flag: - * 1: already in kernel mode, don't need SWAPGS - * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode - */ ENTRY(error_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - testl %ebx, %ebx - jnz retint_kernel + testb $3, CS(%rsp) + jz retint_kernel jmp retint_user END(error_exit) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 10b39d44981c69a2002dfc9608e7a705eaa562f0..25386be0d7576252ac1757b54d06040577747a30 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -410,9 +410,11 @@ static int alloc_bts_buffer(int cpu) ds->bts_buffer_base = (unsigned long) cea; ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); ds->bts_index = ds->bts_buffer_base; - max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE); - ds->bts_absolute_maximum = ds->bts_buffer_base + max; - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16); + max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; + ds->bts_absolute_maximum = ds->bts_buffer_base + + max * BTS_RECORD_SIZE; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - + (max / 16) * BTS_RECORD_SIZE; return 0; } diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c 
index d45e06346f14d8636f1b4348a84a6e503012c686..c56cb37b88e337dd34a46cd7ffc421062cdaddfd 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e u64 prev_count, new_count, delta; int shift; - if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) + if (event->hw.idx == UNCORE_PMC_IDX_FIXED) shift = 64 - uncore_fixed_ctr_bits(box); else shift = 64 - uncore_perf_ctr_bits(box); diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 93e7a8397cde249625a624ffa2c3f248f3b21009..173e2674be6ef24293c5113b43d738af3d3f1981 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p { struct hw_perf_event *hwc = &event->hw; - if (hwc->idx >= UNCORE_PMC_IDX_FIXED) + if (hwc->idx == UNCORE_PMC_IDX_FIXED) wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index c356098b6fb92b8ff7d42b2fd813c2a8551d3db1..4d4015ddcf2633e9e8388216f9e9c8639e2eced8 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h @@ -7,8 +7,6 @@ #ifndef _ASM_X86_MACH_DEFAULT_APM_H #define _ASM_X86_MACH_DEFAULT_APM_H -#include - #ifdef APM_ZERO_SEGS # define APM_DO_ZERO_SEGS \ "pushl %%ds\n\t" \ @@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. 
*/ - firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, "=S" (*esi) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); - firmware_restrict_branch_speculation_end(); } static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, @@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. */ - firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, "=S" (si) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); - firmware_restrict_branch_speculation_end(); return error; } diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 386a6900e206f6578e3b38ee7f085d36ac50a928..3bf87f92b932d1a023c55e5cf28bd7cd085f9198 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -46,6 +46,65 @@ #define _ASM_SI __ASM_REG(si) #define _ASM_DI __ASM_REG(di) +#ifndef __x86_64__ +/* 32 bit */ + +#define _ASM_ARG1 _ASM_AX +#define _ASM_ARG2 _ASM_DX +#define _ASM_ARG3 _ASM_CX + +#define _ASM_ARG1L eax +#define _ASM_ARG2L edx +#define _ASM_ARG3L ecx + +#define _ASM_ARG1W ax +#define _ASM_ARG2W dx +#define _ASM_ARG3W cx + +#define _ASM_ARG1B al +#define _ASM_ARG2B dl +#define _ASM_ARG3B cl + +#else +/* 64 bit */ + +#define _ASM_ARG1 _ASM_DI +#define _ASM_ARG2 _ASM_SI +#define _ASM_ARG3 _ASM_DX +#define _ASM_ARG4 _ASM_CX +#define _ASM_ARG5 r8 +#define _ASM_ARG6 r9 + +#define _ASM_ARG1Q rdi +#define _ASM_ARG2Q rsi +#define _ASM_ARG3Q rdx +#define _ASM_ARG4Q rcx +#define _ASM_ARG5Q r8 +#define _ASM_ARG6Q r9 + +#define _ASM_ARG1L edi +#define _ASM_ARG2L esi +#define _ASM_ARG3L edx +#define _ASM_ARG4L ecx +#define 
_ASM_ARG5L r8d +#define _ASM_ARG6L r9d + +#define _ASM_ARG1W di +#define _ASM_ARG2W si +#define _ASM_ARG3W dx +#define _ASM_ARG4W cx +#define _ASM_ARG5W r8w +#define _ASM_ARG6W r9w + +#define _ASM_ARG1B dil +#define _ASM_ARG2B sil +#define _ASM_ARG3B dl +#define _ASM_ARG4B cl +#define _ASM_ARG5B r8b +#define _ASM_ARG6B r9b + +#endif + /* * Macros to generate condition code outputs from inline assembly, * The output operand must be type "bool". diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 89f08955fff733c688a5ce4f4a0b8d74050ee617..c4fc17220df959f2d5feb493af6374e7dacce613 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -13,7 +13,7 @@ * Interrupt control: */ -static inline unsigned long native_save_fl(void) +extern inline unsigned long native_save_fl(void) { unsigned long flags; diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 295abaa58addefb01f99051c5b9bc32b2e48054d..4137f7ba0f881ad6478a26af3706ce36c30e1d5d 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -58,6 +58,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o +obj-y += irqflags.o obj-y += process.o obj-y += fpu/ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ebdcc368a2d3dc34c0f81b86c1afd2ac457bf1c3..f48a51335538813cdd96ba82a46e87e745d9ef3f 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -580,6 +580,9 @@ static u32 skx_deadline_rev(void) case 0x04: return 0x02000014; } + if (boot_cpu_data.x86_stepping > 4) + return 0; + return ~0U; } diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 2a7fd56e67b364abd75e4d340faba01998f57b1c..63d3e6a6b5efc463f4e2b2003bdbf032df27d5d9 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -240,6 +240,7 @@ #include #include #include +#include #if 
defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int (*console_blank_hook)(int); @@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; apm_bios_call_asm(call->func, call->ebx, call->ecx, &call->eax, &call->ebx, &call->ecx, &call->edx, &call->esi); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); @@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, &call->eax); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 58f887f5e03636de85b7785c95bf3d7d658ce573..98e4e4dc4a3bc6ddac0af7098b6a1847f83a176b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2150,9 +2150,6 @@ static ssize_t store_int_with_restart(struct device *s, if (check_interval == old_check_interval) return ret; - if (check_interval < 1) - check_interval = 1; - mutex_lock(&mce_sysfs_mutex); mce_restart(); mutex_unlock(&mce_sysfs_mutex); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index c8e0cda0f272f3b005fa7bba2726fccd41e87f80..4fc0e08a30b9981faf809132ef74a129bd1572a1 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex); /* * Serialize late loading so that CPUs get updated one-by-one. 
*/ -static DEFINE_SPINLOCK(update_lock); +static DEFINE_RAW_SPINLOCK(update_lock); struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; @@ -560,9 +560,9 @@ static int __reload_late(void *info) if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) return -1; - spin_lock(&update_lock); + raw_spin_lock(&update_lock); apply_microcode_local(&err); - spin_unlock(&update_lock); + raw_spin_unlock(&update_lock); /* siblings return UCODE_OK because their engine got updated already */ if (err > UCODE_NFOUND) { diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S new file mode 100644 index 0000000000000000000000000000000000000000..ddeeaac8addadcb0556975d31a3262e5473a0141 --- /dev/null +++ b/arch/x86/kernel/irqflags.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include + +/* + * unsigned long native_save_fl(void) + */ +ENTRY(native_save_fl) + pushf + pop %_ASM_AX + ret +ENDPROC(native_save_fl) +EXPORT_SYMBOL(native_save_fl) + +/* + * void native_restore_fl(unsigned long flags) + * %eax/%rdi: flags + */ +ENTRY(native_restore_fl) + push %_ASM_ARG1 + popf + ret +ENDPROC(native_restore_fl) +EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 43bbece92632a7bc9c1693bae2e75d99ed3d6d33..2ef2f1fe875bf7aa908876014a134d6e91e13e9e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, if (cache->nobjs >= min) return 0; while (cache->nobjs < ARRAY_SIZE(cache->objects)) { - page = (void *)__get_free_page(GFP_KERNEL); + page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); if (!page) return -ENOMEM; cache->objects[cache->nobjs++] = page; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 90747865205d2eba664bfe16ebe678893c99f1e4..8d000fde14140e43f7b535a19c2346033bdbb203 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7354,6 +7354,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) 
HRTIMER_MODE_REL_PINNED); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; + vmx->nested.vpid02 = allocate_vpid(); + vmx->nested.vmxon = true; return 0; @@ -9802,10 +9804,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto free_vmcs; } - if (nested) { + if (nested) nested_vmx_setup_ctls_msrs(vmx); - vmx->nested.vpid02 = allocate_vpid(); - } vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; @@ -9822,7 +9822,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return &vmx->vcpu; free_vmcs: - free_vpid(vmx->nested.vpid02); free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S index e1a5fbeae08d8a3bf3cb619c023bab096ad4ba2d..5d7554c025fd3b82e5cab0459030079f595a35ee 100644 --- a/arch/x86/xen/xen-pvh.S +++ b/arch/x86/xen/xen-pvh.S @@ -54,6 +54,9 @@ * charge of setting up it's own stack, GDT and IDT. */ +#define PVH_GDT_ENTRY_CANARY 4 +#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) + ENTRY(pvh_start_xen) cld @@ -98,6 +101,12 @@ ENTRY(pvh_start_xen) /* 64-bit entry point. */ .code64 1: + /* Set base address in stack canary descriptor. */ + mov $MSR_GS_BASE,%ecx + mov $_pa(canary), %eax + xor %edx, %edx + wrmsr + call xen_prepare_pvh /* startup_64 expects boot_params in %rsi. */ @@ -107,6 +116,17 @@ ENTRY(pvh_start_xen) #else /* CONFIG_X86_64 */ + /* Set base address in stack canary descriptor. 
*/ + movl $_pa(gdt_start),%eax + movl $_pa(canary),%ecx + movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax) + shrl $16, %ecx + movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax) + movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax) + + mov $PVH_CANARY_SEL,%eax + mov %eax,%gs + call mk_early_pgtbl_32 mov $_pa(initial_page_table), %eax @@ -150,9 +170,13 @@ gdt_start: .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */ #endif .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */ + .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ gdt_end: - .balign 4 + .balign 16 +canary: + .fill 48, 1, 0 + early_stack: .fill 256, 1, 0 early_stack_end: diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 56c9cd01fd1d6cc70e4c895acb292f5f80808b9e..4a4b7d3c909a846ac66144cedca8fa891e09080d 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1678,7 +1678,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, if (!RB_EMPTY_NODE(&rq->rb_node)) goto end; - spin_lock_irq(&bfqq->bfqd->lock); /* * If next and rq belong to the same bfq_queue and next is older @@ -1702,7 +1701,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, bfq_remove_request(q, next); - spin_unlock_irq(&bfqq->bfqd->lock); end: bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); } diff --git a/block/bio.c b/block/bio.c index 6328c6e71b27bab3f1ce3b070e989b2ab231c182..3fa014e609997ff693fcd3d98a565a4d05ff57be 100644 --- a/block/bio.c +++ b/block/bio.c @@ -580,8 +580,12 @@ EXPORT_SYMBOL(bio_phys_segments); static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src) { #ifdef CONFIG_PFK - dst->bi_crypt_key = src->bi_crypt_key; dst->bi_iter.bi_dun = src->bi_iter.bi_dun; +#ifdef CONFIG_DM_DEFAULT_KEY + dst->bi_crypt_key = src->bi_crypt_key; + dst->bi_crypt_skip = src->bi_crypt_skip; +#endif + dst->bi_dio_inode = src->bi_dio_inode; #endif } @@ -890,16 +894,16 @@ EXPORT_SYMBOL(bio_add_page); */ int bio_iov_iter_get_pages(struct bio *bio, 
struct iov_iter *iter) { - unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; + unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx; struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; - size_t offset, diff; + size_t offset; ssize_t size; size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); if (unlikely(size <= 0)) return size ? size : -EFAULT; - nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; + idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; /* * Deep magic below: We need to walk the pinned pages backwards @@ -912,17 +916,15 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) bio->bi_iter.bi_size += size; bio->bi_vcnt += nr_pages; - diff = (nr_pages * PAGE_SIZE - offset) - size; - while (nr_pages--) { - bv[nr_pages].bv_page = pages[nr_pages]; - bv[nr_pages].bv_len = PAGE_SIZE; - bv[nr_pages].bv_offset = 0; + while (idx--) { + bv[idx].bv_page = pages[idx]; + bv[idx].bv_len = PAGE_SIZE; + bv[idx].bv_offset = 0; } bv[0].bv_offset += offset; bv[0].bv_len -= offset; - if (diff) - bv[bio->bi_vcnt - 1].bv_len -= diff; + bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size; iov_iter_advance(iter, size); return 0; @@ -1900,6 +1902,7 @@ struct bio *bio_split(struct bio *bio, int sectors, bio_integrity_trim(split); bio_advance(bio, split->bi_iter.bi_size); + bio->bi_iter.bi_done = 0; if (bio_flagged(bio, BIO_TRACE_COMPLETION)) bio_set_flag(split, BIO_TRACE_COMPLETION); diff --git a/block/blk-core.c b/block/blk-core.c index c13b98d883a02cb721cd1bd7765ce9cf98a80aae..a8ca7d4ed992b482e872598317287e34a0652abf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -781,7 +781,6 @@ EXPORT_SYMBOL(blk_alloc_queue); int blk_queue_enter(struct request_queue *q, bool nowait) { while (true) { - int ret; if (percpu_ref_tryget_live(&q->q_usage_counter)) return 0; @@ -798,13 +797,11 @@ int blk_queue_enter(struct request_queue *q, bool nowait) */ smp_rmb(); - ret = 
wait_event_interruptible(q->mq_freeze_wq, - !atomic_read(&q->mq_freeze_depth) || - blk_queue_dying(q)); + wait_event(q->mq_freeze_wq, + !atomic_read(&q->mq_freeze_depth) || + blk_queue_dying(q)); if (blk_queue_dying(q)) return -ENODEV; - if (ret) - return ret; } } diff --git a/certs/system_keyring.c b/certs/system_keyring.c index 6251d1b27f0cbd1414287770c8510774a61ba4bc..0e1ea235c12a31363234964bac2482d4836a4928 100644 --- a/certs/system_keyring.c +++ b/certs/system_keyring.c @@ -263,5 +263,46 @@ int verify_pkcs7_signature(const void *data, size_t len, return ret; } EXPORT_SYMBOL_GPL(verify_pkcs7_signature); - #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ + +/** + * verify_signature_one - Verify a signature with keys from given keyring + * @sig: The signature to be verified + * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only, + * (void *)1UL for all trusted keys). + * @keyid: key description (not partial) + */ +int verify_signature_one(const struct public_key_signature *sig, + struct key *trusted_keys, const char *keyid) +{ + key_ref_t ref; + struct key *key; + int ret; + + if (!sig) + return -EBADMSG; + if (!trusted_keys) { + trusted_keys = builtin_trusted_keys; + } else if (trusted_keys == (void *)1UL) { +#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING + trusted_keys = secondary_trusted_keys; +#else + trusted_keys = builtin_trusted_keys; +#endif + } + + ref = keyring_search(make_key_ref(trusted_keys, 1), + &key_type_asymmetric, keyid); + if (IS_ERR(ref)) { + pr_err("Asymmetric key (%s) not found in keyring(%s)\n", + keyid, trusted_keys->description); + return -ENOKEY; + } + + key = key_ref_to_ptr(ref); + ret = verify_signature(key, sig); + key_put(key); + return ret; +} +EXPORT_SYMBOL_GPL(verify_signature_one); + diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 815ee1075574af7a03aca33b4d820e5bb4bbaa9b..42dfdd1fd6d88cc1db65b65048c707189b546a0c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1183,8 +1183,10 @@ int af_alg_get_rsgl(struct sock 
*sk, struct msghdr *msg, int flags, /* make one iovec available as scatterlist */ err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) + if (err < 0) { + rsgl->sg_num_bytes = 0; return err; + } /* chain the new scatterlist with previous one */ if (areq->last_rsgl) diff --git a/crypto/authenc.c b/crypto/authenc.c index 875470b0e026fb5b61a9e6dfea4dc6e7d9ff06a3..0db344d5a01adc68150145e9b0f32f3303b51569 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, CRYPTO_TFM_RES_MASK); out: + memzero_explicit(&keys, sizeof(keys)); return err; badkey: diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0cf5fefdb859b1158460faa278017a524f54a93a..6de852ce4cf8f24f7345376c33f0bb64053f658b 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * CRYPTO_TFM_RES_MASK); out: + memzero_explicit(&keys, sizeof(keys)); return err; badkey: diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 602ae58ee2d81a9754b9a3beda4c3084695c964d..75c3cb377b98b1073cf1e5f9d97f3385e2c05325 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -69,6 +69,10 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_SAVE_CTX BIT(4) #define LPSS_NO_D3_DELAY BIT(5) +/* Crystal Cove PMIC shares same ACPI ID between different platforms */ +#define BYT_CRC_HRV 2 +#define CHT_CRC_HRV 3 + struct lpss_private_data; struct lpss_device_desc { @@ -162,7 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata) if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) return; - if (!acpi_dev_present("INT33FD", NULL, -1)) + if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV)) pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 
6fc204a524932e97f4a2743ae7076f1e12e86ad8..eb857d6ea1fef04725c28d479e3f3268754f82a3 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) } control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL - | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL; + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; + if (pci_aer_available()) { if (aer_acpi_firmware_first()) dev_info(&device->dev, diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 7dce3795b8874ba83d29215e5c70daf7e4a3cee9..ee4880bfdcdc98d21f139bdce51bb5776f3f17ab 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -10,7 +10,7 @@ if ANDROID config ANDROID_BINDER_IPC bool "Android Binder IPC Driver" - depends on MMU + depends on MMU && !M68K default n ---help--- Binder is used in Android for both communication between processes, @@ -32,19 +32,6 @@ config ANDROID_BINDER_DEVICES created. Each binder device has its own context manager, and is therefore logically separated from the other devices. -config ANDROID_BINDER_IPC_32BIT - bool "Use old (Android 4.4 and earlier) 32-bit binder API" - depends on !64BIT && ANDROID_BINDER_IPC - default y - ---help--- - The Binder API has been changed to support both 32 and 64bit - applications in a mixed environment. - - Enable this to support an old 32-bit Android user-space (v4.4 and - earlier). - - Note that enabling this will break newer Android user-space. 
- config ANDROID_BINDER_IPC_SELFTEST bool "Android Binder IPC Driver Selftest" depends on ANDROID_BINDER_IPC diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 751777fd64bb7274376ca110c2cea32b2eaca19b..9c06e7f46d7f0f1037b6c36bccfa4a939f926946 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -72,10 +72,6 @@ #include #include -#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT -#define BINDER_IPC_32BIT 1 -#endif - #include #include #include "binder_alloc.h" @@ -142,7 +138,7 @@ enum { }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; -module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); +module_param_named(debug_mask, binder_debug_mask, uint, 0644); static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; module_param_named(devices, binder_devices_param, charp, 0444); @@ -161,7 +157,7 @@ static int binder_set_stop_on_user_error(const char *val, return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, - param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); + param_get_int, &binder_stop_on_user_error, 0644); #define binder_debug(mask, x...) 
\ do { \ @@ -250,7 +246,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( unsigned int cur = atomic_inc_return(&log->cur); if (cur >= ARRAY_SIZE(log->entry)) - log->full = 1; + log->full = true; e = &log->entry[cur % ARRAY_SIZE(log->entry)]; WRITE_ONCE(e->debug_id_done, 0); /* @@ -2212,8 +2208,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) struct binder_object_header *hdr; size_t object_size = 0; - if (offset > buffer->data_size - sizeof(*hdr) || - buffer->data_size < sizeof(*hdr) || + if (buffer->data_size < sizeof(*hdr) || + offset > buffer->data_size - sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; @@ -2802,7 +2798,7 @@ static bool binder_proc_transaction(struct binder_transaction *t, if (node->has_async_transaction) { pending_async = true; } else { - node->has_async_transaction = 1; + node->has_async_transaction = true; } } @@ -3667,7 +3663,7 @@ static int binder_thread_write(struct binder_proc *proc, w = binder_dequeue_work_head_ilocked( &buf_node->async_todo); if (!w) { - buf_node->has_async_transaction = 0; + buf_node->has_async_transaction = false; } else { binder_enqueue_work_ilocked( w, &proc->todo); @@ -4095,6 +4091,7 @@ static int binder_thread_read(struct binder_proc *proc, binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; + cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); @@ -4901,7 +4898,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) failure_string = "bad vm_flags"; goto err_bad_arg; } - vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; + vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; @@ -4914,7 +4913,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) return 0; err_bad_arg: - pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", + pr_err("%s: %d %lx-%lx %s failed %d\n", 
__func__, proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -4924,7 +4923,7 @@ static int binder_open(struct inode *nodp, struct file *filp) struct binder_proc *proc; struct binder_device *binder_dev; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", + binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); @@ -4970,7 +4969,7 @@ static int binder_open(struct inode *nodp, struct file *filp) * anyway print all contexts that a given PID has, so this * is not a problem. */ - proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, + proc->debugfs_entry = debugfs_create_file(strbuf, 0444, binder_debugfs_dir_entry_proc, (void *)(unsigned long)proc->pid, &binder_proc_fops); @@ -5801,7 +5800,9 @@ static int __init binder_init(void) struct binder_device *device; struct hlist_node *tmp; - binder_alloc_shrinker_init(); + ret = binder_alloc_shrinker_init(); + if (ret) + return ret; atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); @@ -5813,27 +5814,27 @@ static int __init binder_init(void) if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, &binder_state_fops); debugfs_create_file("stats", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, &binder_stats_fops); debugfs_create_file("transactions", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, &binder_transactions_fops); debugfs_create_file("transaction_log", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, &binder_transaction_log_fops); diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 
ba6d8d23f20658563cfdea89d850965cf371ee4d..4f382d51def11f4816694be6e7e02aa1598f720f 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -219,7 +219,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, mm = alloc->vma_vm_mm; if (mm) { - down_write(&mm->mmap_sem); + down_read(&mm->mmap_sem); vma = alloc->vma; } @@ -288,7 +288,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, /* vm_insert_page does not seem to increment the refcount */ } if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return 0; @@ -321,17 +321,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } err_no_vma: if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } -struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) +static struct binder_buffer *binder_alloc_new_buf_locked( + struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -1006,8 +1007,14 @@ void binder_alloc_init(struct binder_alloc *alloc) INIT_LIST_HEAD(&alloc->buffers); } -void binder_alloc_shrinker_init(void) +int binder_alloc_shrinker_init(void) { - list_lru_init(&binder_alloc_lru); - register_shrinker(&binder_shrinker); + int ret = list_lru_init(&binder_alloc_lru); + + if (ret == 0) { + ret = register_shrinker(&binder_shrinker); + if (ret) + list_lru_destroy(&binder_alloc_lru); + } + return ret; } diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 0b145307f1fd1cfdc76fd03c819bb3c533c28414..9ef64e56385667a53abeab2f41f67b198a8ce86e 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -130,7 +130,7 @@ extern struct 
binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t extra_buffers_size, int is_async); extern void binder_alloc_init(struct binder_alloc *alloc); -void binder_alloc_shrinker_init(void); +extern int binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 711dd91b5e2c457211a2b34044cc77123a391a0f..2651c81d1edff9e43766b01ee738b7c8c84a2f10 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2217,12 +2217,16 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (qc->err_mask & ~AC_ERR_OTHER) qc->err_mask &= ~AC_ERR_OTHER; - /* SENSE_VALID trumps dev/unknown error and revalidation */ + /* + * SENSE_VALID trumps dev/unknown error and revalidation. Upper + * layers will determine whether the command is worth retrying + * based on the sense data and device class/type. Otherwise, + * determine directly if the command is worth retrying using its + * error mask and flags. 
+ */ if (qc->flags & ATA_QCFLAG_SENSE_VALID) qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - - /* determine whether the command is worth retrying */ - if (ata_eh_worth_retry(qc)) + else if (ata_eh_worth_retry(qc)) qc->flags |= ATA_QCFLAG_RETRY; /* accumulate error info */ diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index a8d2eb0ceb8d8f78788182f81f8e1e9f9dc8fbbb..2c288d1f42bba0fcdf31ccec72c069bfa60688b9 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; + pool = array_index_nospec(pool, + ZATM_LAST_POOL + 1); if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 692d397ea1e84d8b46c51fae2c3dfbc23a6d178e..a027ef52d77d16ec14103fdf62c001f0ad9eb382 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -422,14 +422,6 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto probe_failed; } - /* - * Ensure devices are listed in devices_kset in correct order - * It's important to move Dev to the end of devices_kset before - * calling .probe, because it could be recursive and parent Dev - * should always go first - */ - devices_kset_move_last(dev); - if (dev->bus->probe) { ret = dev->bus->probe(dev); if (ret) diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index 456f981150715d09b96fa4ee7ad19bda159d5d99..7ee9e97a0b0bde511e4e67435e7707c8d23124a9 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -53,6 +53,7 @@ static bool previous; static int pwr_state; struct class *bt_class; static int bt_major; +static int soc_id; static int bt_vreg_init(struct bt_power_vreg_data *vreg) { @@ -684,9 +685,16 @@ int bt_register_slimdev(struct device *dev) return 0; } +int get_chipset_version(void) +{ + 
BT_PWR_DBG(""); + return soc_id; +} + static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0, pwr_cntrl = 0; + int chipset_version = 0; switch (cmd) { case BT_CMD_SLIM_TEST: @@ -711,6 +719,16 @@ static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = 0; } break; + case BT_CMD_CHIPSET_VERS: + chipset_version = (int)arg; + BT_PWR_ERR("BT_CMD_CHIP_VERS soc_version:%x", chipset_version); + if (chipset_version) { + soc_id = chipset_version; + } else { + BT_PWR_ERR("got invalid soc version"); + soc_id = 0; + } + break; default: return -EINVAL; } diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c index 92ea0135d363e09fabc9d11b0d1e44b08f7a3597..a52e89bf2876f433c3f1b95a44db78bbf501d6fb 100644 --- a/drivers/bluetooth/btfm_slim.c +++ b/drivers/bluetooth/btfm_slim.c @@ -307,15 +307,29 @@ static int btfm_slim_get_logical_addr(struct slim_device *slim) static int btfm_slim_alloc_port(struct btfmslim *btfmslim) { int ret = -EINVAL, i; + int chipset_ver; struct btfmslim_ch *rx_chs; struct btfmslim_ch *tx_chs; if (!btfmslim) return ret; + chipset_ver = get_chipset_version(); + BTFMSLIM_INFO("chipset soc version:%x", chipset_ver); + rx_chs = btfmslim->rx_chs; tx_chs = btfmslim->tx_chs; - + if (chipset_ver == QCA_CHEROKEE_SOC_ID_0300) { + for (i = 0; (tx_chs->port != BTFM_SLIM_PGD_PORT_LAST) && + (i < BTFM_SLIM_NUM_CODEC_DAIS); i++, tx_chs++) { + if (tx_chs->port == CHRK_SB_PGD_PORT_TX1_FM) + tx_chs->port = CHRKVER3_SB_PGD_PORT_TX1_FM; + else if (tx_chs->port == CHRK_SB_PGD_PORT_TX2_FM) + tx_chs->port = CHRKVER3_SB_PGD_PORT_TX2_FM; + BTFMSLIM_INFO("Tx port:%d", tx_chs->port); + } + tx_chs = btfmslim->tx_chs; + } if (!rx_chs || !tx_chs) return ret; diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c index 6615189350861d550fcbd45c039605ca33e5bf5f..8cebb5064ad7494ed884905f03d9e72b83992539 100644 --- a/drivers/bluetooth/btfm_slim_wcn3990.c +++ 
b/drivers/bluetooth/btfm_slim_wcn3990.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -72,6 +72,8 @@ int btfm_slim_chrk_hw_init(struct btfmslim *btfmslim) static inline int is_fm_port(uint8_t port_num) { if (port_num == CHRK_SB_PGD_PORT_TX1_FM || + port_num == CHRKVER3_SB_PGD_PORT_TX1_FM || + port_num == CHRKVER3_SB_PGD_PORT_TX2_FM || port_num == CHRK_SB_PGD_PORT_TX2_FM) return 1; else @@ -138,9 +140,16 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num, /* txport */ /* Multiple Channel Setting */ if (is_fm_port(port_num)) { - reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) | - (0x1 << CHRK_SB_PGD_PORT_TX2_FM); + if (port_num == CHRKVER3_SB_PGD_PORT_TX1_FM) + reg_val = (0x1 << CHRKVER3_SB_PGD_PORT_TX1_FM); + else if (port_num == CHRKVER3_SB_PGD_PORT_TX2_FM) + reg_val = (0x1 << CHRKVER3_SB_PGD_PORT_TX2_FM); + else + reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) | + (0x1 << CHRK_SB_PGD_PORT_TX2_FM); + reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num); + BTFMSLIM_INFO("writing reg_val (%d) to reg(%x)", reg_val, reg); ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD); if (ret) { BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg); diff --git a/drivers/bluetooth/btfm_slim_wcn3990.h b/drivers/bluetooth/btfm_slim_wcn3990.h index b2723ff961034e943ffe8054b50b9ed9d2188130..694fe75168f7ea1187bc5e470a16ec8bd9e07395 100644 --- a/drivers/bluetooth/btfm_slim_wcn3990.h +++ b/drivers/bluetooth/btfm_slim_wcn3990.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -78,9 +78,36 @@ #define CHRK_SB_PGD_PORT_TX_SCO 0 #define CHRK_SB_PGD_PORT_TX1_FM 1 #define CHRK_SB_PGD_PORT_TX2_FM 2 +#define CHRKVER3_SB_PGD_PORT_TX1_FM 4 +#define CHRKVER3_SB_PGD_PORT_TX2_FM 5 #define CHRK_SB_PGD_PORT_RX_SCO 16 #define CHRK_SB_PGD_PORT_RX_A2P 17 +enum { + QCA_CHEROKEE_SOC_ID_0100 = 0x40010100, + QCA_CHEROKEE_SOC_ID_0200 = 0x40010200, + QCA_CHEROKEE_SOC_ID_0201 = 0x40010201, + QCA_CHEROKEE_SOC_ID_0210 = 0x40010214, + QCA_CHEROKEE_SOC_ID_0211 = 0x40010224, + QCA_CHEROKEE_SOC_ID_0300 = 0x40010300, +}; + +enum{ + QCA_APACHE_SOC_ID_0005 = 0x40020100, + QCA_APACHE_SOC_ID_0006 = 0x40020110, + QCA_APACHE_SOC_ID_0100 = 0x40020120, + QCA_APACHE_SOC_ID_0101 = 0x40020121, + QCA_APACHE_SOC_ID_0102 = 0x40020122, + QCA_APACHE_SOC_ID_0103 = 0x40020123, + QCA_APACHE_SOC_ID_0110 = 0x40020130, + QCA_APACHE_SOC_ID_0111 = 0x40020140, +}; + +enum { + QCA_COMANCHE_SOC_ID_0100 = 0x40070100, + QCA_COMANCHE_SOC_ID_0101 = 0x40070101, + QCA_COMANCHE_SOC_ID_0110 = 0x40070110, +}; /* Function Prototype */ diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 86d7975afaeb84c20f94ebab1c7ed6cac8cd044c..819521d5895e1dd3b78105d0b7f52612358ed982 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -279,6 +279,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -373,6 +374,9 @@ static const struct usb_device_id blacklist_table[] = { /* Additional Realtek 8723BU Bluetooth devices */ { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + /* 
Additional Realtek 8723DE Bluetooth devices */ + { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 6f4ebd5e54c8ef3577c960a91e86f5646dfd8779..a6173ddfb5a76afaef672894bd7f130cfe3e507d 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -881,7 +881,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); - set_current_state(TASK_INTERRUPTIBLE); + set_current_state(TASK_RUNNING); return 0; } diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 72fd1750134d2d5107ffdfef9fe482977548d9e3..942d076cbb0af19c5ac8d73e8bf6dc2f6b4aab80 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -736,7 +736,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) ccn = pmu_to_arm_ccn(event->pmu); if (hw->sample_period) { - dev_warn(ccn->dev, "Sampling not supported!\n"); + dev_dbg(ccn->dev, "Sampling not supported!\n"); return -EOPNOTSUPP; } @@ -744,12 +744,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) event->attr.exclude_kernel || event->attr.exclude_hv || event->attr.exclude_idle || event->attr.exclude_host || event->attr.exclude_guest) { - dev_warn(ccn->dev, "Can't exclude execution levels!\n"); + dev_dbg(ccn->dev, "Can't exclude execution levels!\n"); return -EINVAL; } if (event->cpu < 0) { - dev_warn(ccn->dev, "Can't provide per-task data!\n"); + dev_dbg(ccn->dev, "Can't provide per-task data!\n"); return -EOPNOTSUPP; } /* @@ -771,13 +771,13 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) switch (type) { case CCN_TYPE_MN: if (node_xp != ccn->mn_id) { - dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); + 
dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp); return -EINVAL; } break; case CCN_TYPE_XP: if (node_xp >= ccn->num_xps) { - dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); + dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp); return -EINVAL; } break; @@ -785,11 +785,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) break; default: if (node_xp >= ccn->num_nodes) { - dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp); + dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp); return -EINVAL; } if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { - dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n", + dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n", type, node_xp); return -EINVAL; } @@ -808,19 +808,19 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) if (event_id != e->event) continue; if (e->num_ports && port >= e->num_ports) { - dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n", + dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n", port, node_xp); return -EINVAL; } if (e->num_vcs && vc >= e->num_vcs) { - dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", + dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n", vc, node_xp); return -EINVAL; } valid = 1; } if (!valid) { - dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", + dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", event_id, node_xp); return -EINVAL; } diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index fd86e1ba09f68be78c34238883fd7303bd5baa1b..b16ad9b705c45446a749e38e2cc7a3e3e73b5e7f 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -2035,6 +2035,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_FORCE_NON_COHERENT; err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem); if (err) goto bail; + imem->virt = NULL; fl->init_mem = imem; @@ -2576,6 +2577,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl, 1, &rbuf); if (err) goto bail; + 
rbuf->virt = NULL; err = fastrpc_mmap_on_dsp(fl, ud->flags, (uintptr_t)rbuf->virt, rbuf->phys, rbuf->size, &raddr); @@ -3092,7 +3094,7 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) * invoke any other methods without failure */ if (fl->apps->channel[cid].secure == SECURE_CHANNEL) { - err = -EPERM; + err = -EACCES; goto bail; } } diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 39e4933a0fdc225d642714b1560d77692f997f6f..4fd21d8327cc8ad54d2d773119fb80829fe69dd0 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -336,12 +336,13 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) int header_len = sizeof(struct diag_ctrl_msg_mask); uint8_t *buf = NULL, *temp = NULL; uint8_t upd = 0; - uint8_t msg_mask_tbl_count_local; + uint8_t msg_mask_tbl_count_local = 0; uint32_t mask_size = 0, pd_mask = 0; struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask = NULL; struct diag_ctrl_msg_mask header; struct diagfwd_info *fwd_info = NULL; + struct diag_md_session_t *md_session_info = NULL; if (peripheral >= NUM_PERIPHERALS) return; @@ -357,14 +358,19 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) if (driver->md_session_mask != 0) { if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) { - if (driver->md_session_map[peripheral]) + if (driver->md_session_map[peripheral]) { mask_info = driver->md_session_map[peripheral]->msg_mask; + md_session_info = + driver->md_session_map[peripheral]; + } } else if (driver->md_session_mask & pd_mask) { upd = diag_mask_to_pd_value(driver->md_session_mask); - if (upd && driver->md_session_map[upd]) + if (upd && driver->md_session_map[upd]) { mask_info = driver->md_session_map[upd]->msg_mask; + md_session_info = driver->md_session_map[upd]; + } } else { DIAG_LOG(DIAG_DEBUG_MASKS, "asking for mask update with unknown session mask\n"); @@ -383,7 +389,10 @@ static void 
diag_send_msg_mask_update(uint8_t peripheral, int first, int last) return; } buf = mask_info->update_buf; - msg_mask_tbl_count_local = driver->msg_mask_tbl_count; + if (md_session_info) + msg_mask_tbl_count_local = md_session_info->msg_mask_tbl_count; + else + msg_mask_tbl_count_local = driver->msg_mask_tbl_count; mutex_unlock(&driver->msg_mask_lock); mutex_lock(&mask_info->lock); switch (mask_info->status) { @@ -565,6 +574,7 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, { int i; int write_len = 0; + uint8_t msg_mask_tbl_count = 0; struct diag_msg_mask_t *mask_ptr = NULL; struct diag_msg_ssid_query_t rsp; struct diag_ssid_range_t ssid_range; @@ -594,15 +604,17 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, return 0; } mutex_lock(&driver->msg_mask_lock); + msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; rsp.cmd_code = DIAG_CMD_MSG_CONFIG; rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE; rsp.status = MSG_STATUS_SUCCESS; rsp.padding = 0; - rsp.count = driver->msg_mask_tbl_count; + rsp.count = msg_mask_tbl_count; memcpy(dest_buf, &rsp, sizeof(rsp)); write_len += sizeof(rsp); mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr; - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) { + for (i = 0; i < msg_mask_tbl_count; i++, mask_ptr++) { if (write_len + sizeof(ssid_range) > dest_len) { pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n", __func__); @@ -679,6 +691,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, int i; int write_len = 0; uint32_t mask_size = 0; + uint8_t msg_mask_tbl_count = 0; struct diag_msg_mask_t *mask = NULL; struct diag_build_mask_req_t *req = NULL; struct diag_msg_build_mask_t rsp; @@ -709,6 +722,8 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, } mutex_lock(&driver->msg_mask_lock); + msg_mask_tbl_count = (info) ? 
info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; req = (struct diag_build_mask_req_t *)src_buf; rsp.cmd_code = DIAG_CMD_MSG_CONFIG; rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK; @@ -724,7 +739,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&driver->md_session_lock); return -EINVAL; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { if (!mask->ptr) continue; if ((req->ssid_first < mask->ssid_first) || @@ -760,6 +775,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask_next = NULL; struct diag_md_session_t *info = NULL; + uint8_t msg_mask_tbl_count = 0; mutex_lock(&driver->md_session_lock); info = diag_md_session_get_pid(pid); @@ -792,10 +808,12 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&driver->md_session_lock); return -EINVAL; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { if (!mask->ptr) continue; - if (i < (driver->msg_mask_tbl_count - 1)) { + if (i < (msg_mask_tbl_count - 1)) { mask_next = mask; mask_next++; } else @@ -905,6 +923,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, struct diag_msg_mask_t *mask = NULL; struct diag_mask_info *mask_info = NULL; struct diag_md_session_t *info = NULL; + uint8_t msg_mask_tbl_count = 0; mutex_lock(&driver->md_session_lock); info = diag_md_session_get_pid(pid); @@ -939,9 +958,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&driver->md_session_lock); return -EINVAL; } + msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; mask_info->status = (req->rt_mask) ? 
DIAG_CTRL_MASK_ALL_ENABLED : DIAG_CTRL_MASK_ALL_DISABLED; - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { if (mask && mask->ptr) { mutex_lock(&mask->lock); memset(mask->ptr, req->rt_mask, @@ -1755,7 +1776,6 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len, } kmemleak_not_leak(mask_info->update_buf); } - mutex_init(&mask_info->lock); return 0; } @@ -1778,9 +1798,10 @@ int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) struct diag_log_mask_t *src_mask = NULL; struct diag_log_mask_t *dest_mask = NULL; - if (!src) + if (!src || !dest) return -EINVAL; + mutex_init(&dest->lock); err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -1842,9 +1863,11 @@ static int diag_msg_mask_init(void) { int err = 0, i; + mutex_init(&msg_mask.lock); err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; + err = diag_create_msg_mask_table(); if (err) { pr_err("diag: Unable to create msg masks, err: %d\n", err); @@ -1859,7 +1882,8 @@ static int diag_msg_mask_init(void) return 0; } -int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) +int diag_msg_mask_copy(struct diag_md_session_t *new_session, + struct diag_mask_info *dest, struct diag_mask_info *src) { int i, err = 0, mask_size = 0; struct diag_msg_mask_t *src_mask = NULL; @@ -1869,17 +1893,25 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) if (!src || !dest) return -EINVAL; - err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE); - if (err) - return err; + mutex_init(&dest->lock); mutex_lock(&dest->lock); mutex_lock(&driver->msg_mask_lock); + new_session->msg_mask_tbl_count = + driver->msg_mask_tbl_count; + err = __diag_mask_init(dest, + (new_session->msg_mask_tbl_count * + sizeof(struct diag_msg_mask_t)), APPS_BUF_SIZE); + if (err) { + mutex_unlock(&driver->msg_mask_lock); + 
mutex_unlock(&dest->lock); + return err; + } src_mask = (struct diag_msg_mask_t *)src->ptr; dest_mask = (struct diag_msg_mask_t *)dest->ptr; dest->mask_len = src->mask_len; dest->status = src->status; - for (i = 0; i < driver->msg_mask_tbl_count; i++) { + for (i = 0; i < new_session->msg_mask_tbl_count; i++) { range.ssid_first = src_mask->ssid_first; range.ssid_last = src_mask->ssid_last; err = diag_create_msg_mask_table_entry(dest_mask, &range); @@ -1898,10 +1930,12 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) return err; } -void diag_msg_mask_free(struct diag_mask_info *mask_info) +void diag_msg_mask_free(struct diag_mask_info *mask_info, + struct diag_md_session_t *session_info) { int i; struct diag_msg_mask_t *mask = NULL; + uint8_t msg_mask_tbl_count = 0; if (!mask_info || !mask_info->ptr) return; @@ -1915,7 +1949,10 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info) mutex_unlock(&mask_info->lock); return; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + msg_mask_tbl_count = (session_info) ? 
+ session_info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { kfree(mask->ptr); mask->ptr = NULL; } @@ -1947,6 +1984,7 @@ static int diag_build_time_mask_init(void) int err = 0; /* There is no need for update buffer for Build Time masks */ + mutex_init(&msg_bt_mask.lock); err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0); if (err) return err; @@ -1980,6 +2018,7 @@ static int diag_log_mask_init(void) { int err = 0, i; + mutex_init(&log_mask.lock); err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -2013,6 +2052,7 @@ static int diag_event_mask_init(void) { int err = 0, i; + mutex_init(&event_mask.lock); err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -2034,6 +2074,7 @@ int diag_event_mask_copy(struct diag_mask_info *dest, if (!src || !dest) return -EINVAL; + mutex_init(&dest->lock); err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -2070,6 +2111,7 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask = NULL; unsigned char *ptr = NULL; + uint8_t msg_mask_tbl_count = 0; if (!buf || count == 0) return -EINVAL; @@ -2094,7 +2136,9 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, mutex_unlock(&mask_info->lock); return -EINVAL; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + msg_mask_tbl_count = (info) ? 
info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { if (!mask->ptr) continue; ptr = mask_info->update_buf; diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h index 0e87f3835113d4496bc929b5f910d85b6b204970..0ccadf30f092b4a59833e97ad88c22f7d9c02c0b 100644 --- a/drivers/char/diag/diag_masks.h +++ b/drivers/char/diag/diag_masks.h @@ -160,12 +160,13 @@ int diag_masks_init(void); void diag_masks_exit(void); int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src); -int diag_msg_mask_copy(struct diag_mask_info *dest, - struct diag_mask_info *src); +int diag_msg_mask_copy(struct diag_md_session_t *new_session, + struct diag_mask_info *dest, struct diag_mask_info *src); int diag_event_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src); void diag_log_mask_free(struct diag_mask_info *mask_info); -void diag_msg_mask_free(struct diag_mask_info *mask_info); +void diag_msg_mask_free(struct diag_mask_info *mask_info, + struct diag_md_session_t *session_info); void diag_event_mask_free(struct diag_mask_info *mask_info); int diag_process_apps_masks(unsigned char *buf, int len, int pid); void diag_send_updates_peripheral(uint8_t peripheral); diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index 06169336dad88d106e07e5bd2d39f27262f6ca8e..e5c7aa8b9c0ef84ea26aecd78c7e152fee4a6369 100644 --- a/drivers/char/diag/diag_usb.c +++ b/drivers/char/diag/diag_usb.c @@ -286,11 +286,12 @@ static void usb_read_work_fn(struct work_struct *work) req->buf = ch->read_buf; req->length = USB_MAX_OUT_BUF; err = usb_diag_read(ch->hdl, req); - if (err && err != -EIO) { + if (err) { pr_debug("diag: In %s, error in reading from USB %s, err: %d\n", __func__, ch->name, err); atomic_set(&ch->read_pending, 0); - queue_work(ch->usb_wq, &(ch->read_work)); + if (err != -EIO) + queue_work(ch->usb_wq, &(ch->read_work)); } } else { pr_err_ratelimited("diag: In %s invalid read 
req\n", __func__); @@ -658,8 +659,8 @@ int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops) INIT_WORK(&(ch->connect_work), usb_connect_work_fn); INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn); init_waitqueue_head(&ch->wait_q); - strlcpy(wq_name, "DIAG_USB_", DIAG_USB_STRING_SZ); - strlcat(wq_name, ch->name, sizeof(ch->name)); + strlcpy(wq_name, "DIAG_USB_", sizeof(wq_name)); + strlcat(wq_name, ch->name, sizeof(wq_name)); ch->usb_wq = create_singlethread_workqueue(wq_name); if (!ch->usb_wq) goto err; diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index 083622f84eed8bd22a5afdcfd68f35226a75a79e..cb33b3619e02134862f041831045cfd3d7ef1c34 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -482,6 +482,7 @@ struct diag_md_session_t { int pid; int peripheral_mask; uint8_t hdlc_disabled; + uint8_t msg_mask_tbl_count; struct timer_list hdlc_reset_timer; struct diag_mask_info *msg_mask; struct diag_mask_info *log_mask; diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 4678e48a266a5ad4fcae3b885ddf5139ad2ec1c2..fb5bee20ef8c7347f799823bdd317b3db53f77cb 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1265,7 +1265,8 @@ static void diag_md_session_exit(void) diag_log_mask_free(session_info->log_mask); kfree(session_info->log_mask); session_info->log_mask = NULL; - diag_msg_mask_free(session_info->msg_mask); + diag_msg_mask_free(session_info->msg_mask, + session_info); kfree(session_info->msg_mask); session_info->msg_mask = NULL; diag_event_mask_free(session_info->event_mask); @@ -1336,7 +1337,9 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc) "return value of event copy. 
err %d\n", err); goto fail_peripheral; } - err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask); + new_session->msg_mask_tbl_count = 0; + err = diag_msg_mask_copy(new_session, new_session->msg_mask, + &msg_mask); if (err) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "return value of msg copy. err %d\n", err); @@ -1372,7 +1375,8 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc) diag_event_mask_free(new_session->event_mask); kfree(new_session->event_mask); new_session->event_mask = NULL; - diag_msg_mask_free(new_session->msg_mask); + diag_msg_mask_free(new_session->msg_mask, + new_session); kfree(new_session->msg_mask); new_session->msg_mask = NULL; kfree(new_session); @@ -1400,7 +1404,8 @@ static void diag_md_session_close(int pid) diag_log_mask_free(session_info->log_mask); kfree(session_info->log_mask); session_info->log_mask = NULL; - diag_msg_mask_free(session_info->msg_mask); + diag_msg_mask_free(session_info->msg_mask, + session_info); kfree(session_info->msg_mask); session_info->msg_mask = NULL; diag_event_mask_free(session_info->event_mask); @@ -4046,7 +4051,7 @@ static int __init diagchar_init(void) pr_debug("diagchar initializing ..\n"); driver->num = 1; driver->name = ((void *)driver) + sizeof(struct diagchar_dev); - strlcpy(driver->name, "diag", 4); + strlcpy(driver->name, "diag", 5); /* Get major number from kernel and initialize */ ret = alloc_chrdev_region(&dev, driver->minor_start, driver->num, driver->name); diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c index 17d26867b93e2588330ea23394103fbe73e1aba2..55a16b2a26e1a6d59af95270f01666012065244f 100644 --- a/drivers/char/diag/diagfwd_bridge.c +++ b/drivers/char/diag/diagfwd_bridge.c @@ -176,8 +176,8 @@ int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops) if (!ch->dci_read_buf) return -ENOMEM; ch->dci_read_len = 0; - strlcpy(wq_name, "diag_dci_", 10); - strlcat(wq_name, ch->name, sizeof(ch->name)); + strlcpy(wq_name, 
"diag_dci_", sizeof(wq_name)); + strlcat(wq_name, ch->name, sizeof(wq_name)); INIT_WORK(&(ch->dci_read_work), bridge_dci_read_work_fn); ch->dci_wq = create_singlethread_workqueue(wq_name); if (!ch->dci_wq) { @@ -319,7 +319,9 @@ uint16_t diag_get_remote_device_mask(void) for (i = 0; i < NUM_REMOTE_DEV; i++) { if (bridge_info[i].inited && - bridge_info[i].type == DIAG_DATA_TYPE) { + bridge_info[i].type == DIAG_DATA_TYPE && + (bridge_info[i].dev_ops->remote_proc_check && + bridge_info[i].dev_ops->remote_proc_check())) { remote_dev |= 1 << i; } } diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h index ec3eb073b7126433c60e25f620e01ef52969faf9..c8043c46d5eab53bf11dcb839d69319a29ff542b 100644 --- a/drivers/char/diag/diagfwd_bridge.h +++ b/drivers/char/diag/diagfwd_bridge.h @@ -33,6 +33,7 @@ struct diag_remote_dev_ops { int (*queue_read)(int id); int (*write)(int id, unsigned char *buf, int len, int ctxt); int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt); + int (*remote_proc_check)(void); }; struct diagfwd_bridge_info { diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 1d3de3dd1644d438c015e4e8a40a851b97a2324b..7f98b2fc0e2301541569a794322a91081124d84a 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -719,6 +719,7 @@ int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name, uint8_t pd_val, uint8_t peripheral) { struct diag_id_tbl_t *new_item = NULL; + int process_len = 0; if (!process_name || diag_id == 0) return -EINVAL; @@ -727,7 +728,8 @@ int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name, if (!new_item) return -ENOMEM; kmemleak_not_leak(new_item); - new_item->process_name = kzalloc(strlen(process_name) + 1, GFP_KERNEL); + process_len = strlen(process_name); + new_item->process_name = kzalloc(process_len + 1, GFP_KERNEL); if (!new_item->process_name) { kfree(new_item); new_item = NULL; @@ -737,7 +739,7 @@ int 
diag_add_diag_id_to_list(uint8_t diag_id, char *process_name, new_item->diag_id = diag_id; new_item->pd_val = pd_val; new_item->peripheral = peripheral; - strlcpy(new_item->process_name, process_name, strlen(process_name) + 1); + strlcpy(new_item->process_name, process_name, process_len + 1); INIT_LIST_HEAD(&new_item->link); mutex_lock(&driver->diag_id_mutex); list_add_tail(&new_item->link, &driver->diag_id_list); @@ -833,7 +835,7 @@ static void process_diagid(uint8_t *buf, uint32_t len, ctrl_pkt.pkt_id = DIAG_CTRL_MSG_DIAGID; ctrl_pkt.version = 1; strlcpy((char *)&ctrl_pkt.process_name, process_name, - strlen(process_name) + 1); + sizeof(ctrl_pkt.process_name)); ctrl_pkt.len = sizeof(ctrl_pkt.diag_id) + sizeof(ctrl_pkt.version) + strlen(process_name) + 1; err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, ctrl_pkt.len + diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c index 7be446bfdf858d7001ec1f95134914fc37d248e0..0a291bb3cb6a8bbedfcc1d78feb69e9a69addf0a 100644 --- a/drivers/char/diag/diagfwd_hsic.c +++ b/drivers/char/diag/diagfwd_hsic.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2014, 2016-2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -392,6 +392,7 @@ static struct diag_remote_dev_ops diag_hsic_fwd_ops = { .queue_read = hsic_queue_read, .write = hsic_write, .fwd_complete = hsic_fwd_complete, + .remote_proc_check = NULL, }; int diag_hsic_init(void) @@ -407,8 +408,8 @@ int diag_hsic_init(void) INIT_WORK(&(ch->read_work), hsic_read_work_fn); INIT_WORK(&(ch->open_work), hsic_open_work_fn); INIT_WORK(&(ch->close_work), hsic_close_work_fn); - strlcpy(wq_name, "DIAG_HSIC_", DIAG_HSIC_STRING_SZ); - strlcat(wq_name, ch->name, sizeof(ch->name)); + strlcpy(wq_name, "DIAG_HSIC_", sizeof(wq_name)); + strlcat(wq_name, ch->name, sizeof(wq_name)); ch->hsic_wq = create_singlethread_workqueue(wq_name); if (!ch->hsic_wq) goto fail; diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c index cd2af65ffa088b22233b5bc357df9ffe0cdde474..032d3035b07be991bdffd2384f4a9be0d37635bd 100644 --- a/drivers/char/diag/diagfwd_mhi.c +++ b/drivers/char/diag/diagfwd_mhi.c @@ -515,6 +515,11 @@ static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt) return 0; } +static int mhi_remote_proc_check(void) +{ + return diag_mhi[MHI_1].enabled; +} + static struct diag_mhi_info *diag_get_mhi_info(struct mhi_device *mhi_dev) { struct diag_mhi_info *mhi_info = NULL; @@ -635,6 +640,7 @@ static struct diag_remote_dev_ops diag_mhi_fwd_ops = { .queue_read = mhi_queue_read, .write = mhi_write, .fwd_complete = mhi_fwd_complete, + .remote_proc_check = mhi_remote_proc_check, }; static void diag_mhi_dev_exit(int dev) @@ -672,8 +678,8 @@ int diag_mhi_init(void) INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn); INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn); INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn); - strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ); - strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name)); + strlcpy(wq_name, 
"diag_mhi_", sizeof(wq_name)); + strlcat(wq_name, mhi_info->name, sizeof(wq_name)); diagmem_init(driver, mhi_info->mempool); mhi_info->mempool_init = 1; mhi_info->mhi_wq = create_singlethread_workqueue(wq_name); diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c index 3647c18ed5bd532f7abd9d7614d9c88e22fd6011..085b6062d4ce271f1922a17a93cc639fc7b23d98 100644 --- a/drivers/char/diag/diagfwd_rpmsg.c +++ b/drivers/char/diag/diagfwd_rpmsg.c @@ -500,7 +500,6 @@ static void diag_rpmsg_close_work_fn(struct work_struct *work) if (!rpmsg_info || !rpmsg_info->inited || !rpmsg_info->hdl) return; atomic_set(&rpmsg_info->opened, 0); - dev_set_drvdata(&rpmsg_info->hdl->dev, NULL); rpmsg_info->hdl = NULL; diagfwd_channel_close(rpmsg_info->fwd_ctxt); } @@ -622,8 +621,8 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info) init_waitqueue_head(&rpmsg_info->wait_q); init_waitqueue_head(&rpmsg_info->read_wait_q); mutex_init(&rpmsg_info->lock); - strlcpy(wq_name, "DIAG_RPMSG_", 12); - strlcat(wq_name, rpmsg_info->name, sizeof(rpmsg_info->name)); + strlcpy(wq_name, "DIAG_RPMSG_", sizeof(wq_name)); + strlcat(wq_name, rpmsg_info->name, sizeof(wq_name)); rpmsg_info->wq = create_singlethread_workqueue(wq_name); if (!rpmsg_info->wq) { pr_err("diag: In %s, unable to create workqueue for rpmsg ch:%s\n", diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c index 33f91d15c0b3e0031aab44f7131fa721a432af2a..4465cc470a935aa84cf6bc22db2662c974ef9a36 100644 --- a/drivers/char/diag/diagfwd_smux.c +++ b/drivers/char/diag/diagfwd_smux.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2014, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2014, 2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -275,6 +275,7 @@ static struct diag_remote_dev_ops diag_smux_fwd_ops = { .queue_read = smux_queue_read, .write = smux_write, .fwd_complete = smux_fwd_complete, + .remote_proc_check = NULL, }; int diag_smux_init(void) @@ -286,8 +287,8 @@ int diag_smux_init(void) for (i = 0; i < NUM_SMUX_DEV; i++) { ch = &diag_smux[i]; - strlcpy(wq_name, "DIAG_SMUX_", 11); - strlcat(wq_name, ch->name, sizeof(ch->name)); + strlcpy(wq_name, "DIAG_SMUX_", sizeof(wq_name)); + strlcat(wq_name, ch->name, sizeof(wq_name)); ch->smux_wq = create_singlethread_workqueue(wq_name); if (!ch->smux_wq) { err = -ENOMEM; diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c index d159db1f48b1cbdea9a477a4c6eac9206e904168..f959a332ccd3ef6ddd045e12141551c50d2f3d06 100644 --- a/drivers/char/diag/diagfwd_socket.c +++ b/drivers/char/diag/diagfwd_socket.c @@ -826,7 +826,7 @@ static int diag_socket_write(void *ctxt, unsigned char *buf, int len) * -EAGAIN means that the number of packets in flight is at * max capactity and the peripheral hasn't read the data. 
*/ - if (err != -EAGAIN) { + if (err != -EAGAIN && err != -ECONNRESET) { pr_err_ratelimited("diag: In %s, error sending data, err: %d, ch: %s\n", __func__, err, info->name); } @@ -861,7 +861,7 @@ static void __diag_socket_init(struct diag_socket_info *info) info->data_ready = 0; atomic_set(&info->flow_cnt, 0); spin_lock_init(&info->lock); - strlcpy(wq_name, info->name, sizeof(info->name)); + strlcpy(wq_name, info->name, sizeof(wq_name)); init_waitqueue_head(&info->read_wait_q); info->wq = create_singlethread_workqueue(wq_name); if (!info->wq) { diff --git a/drivers/char/random.c b/drivers/char/random.c index ddc493d976fdc5e559cac9999156649445d741f5..ea4dbfa3065747a0cd24754abc8e51e08403cbc1 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1897,14 +1897,22 @@ static int write_pool(struct entropy_store *r, const char __user *buffer, size_t count) { size_t bytes; - __u32 buf[16]; + __u32 t, buf[16]; const char __user *p = buffer; while (count > 0) { + int b, i = 0; + bytes = min(count, sizeof(buf)); if (copy_from_user(&buf, p, bytes)) return -EFAULT; + for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { + if (!arch_get_random_int(&t)) + break; + buf[i] ^= t; + } + count -= bytes; p += bytes; diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index ce3df73c66680aca18ca2bc95d1c871dcd1d44ce..74c68391051f3d63fe930d4dfc7a4a044e5ee097 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -234,6 +234,16 @@ config MSM_NPUCC_SM8150 SM8150 devices. Say Y if you want to enable use of the Network Processing Unit. +config MSM_SCC_SM8150 + tristate "SM8150 Sensor Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the sensor clock controller on Qualcomm Technologies, Inc. + SM8150 devices. The sensor controller contains several QUP clocks + which can be managed using DFS. This controller is typically owned by + the sensor processor. However, on some systems it is instead owned + by the application processor. 
+ config MSM_VIDEOCC_SM8150 tristate "SM8150 Video Clock Controller" depends on COMMON_CLK_QCOM @@ -396,6 +406,15 @@ config MSM_DISPCC_SM6150 Say Y if you want to support display devices and functionality such as splash screen. +config MSM_CAMCC_SDMMAGPIE + tristate "SDMMAGPIE Camera Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the camera clock controller on Qualcomm Technologies, Inc + SDMMAGPIE devices. + Say Y if you want to support camera devices and functionality such as + capturing pictures. + config MSM_DISPCC_SDMMAGPIE tristate "SDMMAGPIE Display Clock Controller" depends on COMMON_CLK_QCOM @@ -439,3 +458,11 @@ config MSM_GPUCC_SDMMAGPIE Support for the graphics clock controller on Qualcomm Technologies, Inc. SDMMAGPIE devices. Say Y if you want to support graphics clocks. + +config MSM_DEBUGCC_SDMMAGPIE + tristate "SDMMAGPIE Debug Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the debug clock controller on Qualcomm Technologies, Inc + SDMMAGPIE devices. + Say Y if you want to support the clock measurement functionality. 
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index a454f82e8d345335c1d77a284da34555c4929a23..fba12da98c02d071ddde65ac58f89db3cf578f4a 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_MDM_GCC_QCS405) += gcc-qcs405.o obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o obj-$(CONFIG_MSM_CAMCC_SM6150) += camcc-sm6150.o obj-$(CONFIG_MSM_CAMCC_SM8150) += camcc-sm8150.o +obj-$(CONFIG_MSM_CAMCC_SDMMAGPIE) += camcc-sdmmagpie.o obj-$(CONFIG_MSM_CAMCC_SDMSHRIKE) += camcc-sdmshrike.o obj-$(CONFIG_MSM_CLK_AOP_QMP) += clk-aop-qmp.o obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o @@ -39,6 +40,7 @@ obj-$(CONFIG_MSM_DISPCC_SM6150) += dispcc-sm6150.o obj-$(CONFIG_MSM_DISPCC_SM8150) += dispcc-sm8150.o obj-$(CONFIG_MSM_DISPCC_SDMMAGPIE) += dispcc-sdmmagpie.o obj-$(CONFIG_MDM_DEBUGCC_QCS405) += debugcc-qcs405.o +obj-$(CONFIG_MSM_DEBUGCC_SDMMAGPIE) += debugcc-sdmmagpie.o obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o @@ -58,6 +60,7 @@ obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o obj-$(CONFIG_MSM_NPUCC_SDMMAGPIE) += npucc-sdmmagpie.o obj-$(CONFIG_MSM_NPUCC_SM8150) += npucc-sm8150.o +obj-$(CONFIG_MSM_SCC_SM8150) += scc-sm8150.o obj-$(CONFIG_MSM_VIDEOCC_SDMMAGPIE) += videocc-sdmmagpie.o obj-$(CONFIG_MSM_VIDEOCC_SM6150) += videocc-sm6150.o obj-$(CONFIG_MSM_VIDEOCC_SM8150) += videocc-sm8150.o diff --git a/drivers/clk/qcom/camcc-sdmmagpie.c b/drivers/clk/qcom/camcc-sdmmagpie.c new file mode 100644 index 0000000000000000000000000000000000000000..50593202ec8b1980d354375f3a347032f84d9b17 --- /dev/null +++ b/drivers/clk/qcom/camcc-sdmmagpie.c @@ -0,0 +1,2260 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner); + +enum { + P_BI_TCXO, + P_BI_TCXO_MX, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL0_OUT_MAIN, + P_CAM_CC_PLL0_OUT_ODD, + P_CAM_CC_PLL1_OUT_EVEN, + P_CAM_CC_PLL2_OUT_AUX, + P_CAM_CC_PLL2_OUT_EARLY, + P_CAM_CC_PLL2_OUT_MAIN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CAM_CC_PLL4_OUT_EVEN, + P_CHIP_SLEEP_CLK, + P_CORE_BI_PLL_TEST_SE, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL2_OUT_MAIN, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_0[] = { + "bi_tcxo", + "cam_cc_pll0", + "cam_cc_pll0_out_even", + "cam_cc_pll0_out_odd", + "cam_cc_pll2_out_main", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL1_OUT_EVEN, 4 }, + { P_CAM_CC_PLL2_OUT_EARLY, 5 }, + { 
P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_1[] = { + "bi_tcxo", + "cam_cc_pll0", + "cam_cc_pll0_out_even", + "cam_cc_pll0_out_odd", + "cam_cc_pll1_out_even", + "cam_cc_pll2_out_early", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_2[] = { + { P_BI_TCXO_MX, 0 }, + { P_CAM_CC_PLL2_OUT_AUX, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_2[] = { + "bi_tcxo", + "cam_cc_pll2_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL2_OUT_EARLY, 5 }, + { P_CAM_CC_PLL4_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_3[] = { + "bi_tcxo", + "cam_cc_pll0", + "cam_cc_pll0_out_even", + "cam_cc_pll0_out_odd", + "cam_cc_pll2_out_early", + "cam_cc_pll4_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL3_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_4[] = { + "bi_tcxo", + "cam_cc_pll3_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL4_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_5[] = { + "bi_tcxo", + "cam_cc_pll4_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL1_OUT_EVEN, 4 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_6[] = { + "bi_tcxo", + "cam_cc_pll1_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_7[] = { + { P_CHIP_SLEEP_CLK, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * 
const cam_cc_parent_names_7[] = { + "chip_sleep_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_8[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_8[] = { + "bi_tcxo", + "cam_cc_pll0_out_odd", + "core_bi_pll_test_se", +}; + +static const struct parent_map cam_cc_parent_map_9[] = { + { P_BI_TCXO, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_9[] = { + "bi_tcxo", + "core_bi_pll_test_se", +}; + +static struct pll_vco fabia_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 1200MHz configuration */ +static const struct alpha_pll_config cam_cc_pll0_config = { + .l = 0x3E, + .frac = 0x8000, + .post_div_mask = 0xff << 8, + .post_div_val = 0x31 << 8, + .test_ctl_val = 0x40000000, +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .vco_table = fabia_vco, + .num_vco = ARRAY_SIZE(fabia_vco), + .type = FABIA_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_fabia_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_fixed_factor cam_cc_pll0_out_even = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0_out_even", + .parent_names = (const char *[]){ "cam_cc_pll0" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_fixed_factor cam_cc_pll0_out_odd = { + .mult = 1, + .div = 3, + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0_out_odd", + .parent_names = (const char *[]){ "cam_cc_pll0" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +/* 680MHz configuration */ +static const struct 
alpha_pll_config cam_cc_pll1_config = { + .l = 0x23, + .frac = 0x6AAA, + .post_div_mask = 0xf << 8, + .post_div_val = 0x1 << 8, + .test_ctl_val = 0x40000000, +}; + +static struct clk_alpha_pll cam_cc_pll1 = { + .offset = 0x1000, + .vco_table = fabia_vco, + .num_vco = ARRAY_SIZE(fabia_vco), + .type = FABIA_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll1", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_fabia_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_fixed_factor cam_cc_pll1_out_even = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll1_out_even", + .parent_names = (const char *[]){ "cam_cc_pll1" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +/* 1920MHz configuration */ +static const struct alpha_pll_config cam_cc_pll2_config = { + .l = 0x64, + .post_div_val = 0x3 << 8, + .post_div_mask = 0x3 << 8, + .early_output_mask = BIT(3), + .aux_output_mask = BIT(1), + .main_output_mask = BIT(0), + .config_ctl_hi_val = 0x400003d6, + .config_ctl_val = 0x20000954, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x2000, + .type = AGERA_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_agera_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 1200000000, + [VDD_LOWER] = 1800000000, + [VDD_LOW] = 2400000000, + [VDD_NOMINAL] = 3000000000, + [VDD_HIGH] = 3300000000}, + }, + }, +}; + +static struct clk_fixed_factor cam_cc_pll2_out_early = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2_out_early", + 
.parent_names = (const char *[]){ "cam_cc_pll2" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = { + .offset = 0x2000, + .post_div_shift = 8, + .width = 2, + .type = AGERA_PLL, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2_out_aux", + .parent_names = (const char *[]){ "cam_cc_pll2" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = { + .offset = 0x2000, + .post_div_shift = 8, + .width = 2, + .type = AGERA_PLL, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2_out_main", + .parent_names = (const char *[]){ "cam_cc_pll2" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +/* 760MHz configuration */ +static const struct alpha_pll_config cam_cc_pll3_config = { + .l = 0x27, + .frac = 0x9555, + .post_div_mask = 0xf << 8, + .post_div_val = 0x1 << 8, + .test_ctl_val = 0x40000000, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x3000, + .vco_table = fabia_vco, + .num_vco = ARRAY_SIZE(fabia_vco), + .type = FABIA_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll3", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_fabia_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_fixed_factor cam_cc_pll3_out_even = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll3_out_even", + .parent_names = (const char *[]){ "cam_cc_pll3" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll cam_cc_pll4 = { + .offset = 0x4000, + .vco_table = fabia_vco, + 
.num_vco = ARRAY_SIZE(fabia_vco), + .type = FABIA_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll4", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_fabia_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_fixed_factor cam_cc_pll4_out_even = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll4_out_even", + .parent_names = (const char *[]){ "cam_cc_pll4" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_bps_clk_src = { + .cmd_rcgr = 0x7010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 400000000, + [VDD_LOW_L1] = 480000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, 
P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { + .cmd_rcgr = 0xc12c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_axi_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 150000000, + [VDD_LOW] = 240000000, + [VDD_LOW_L1] = 320000000, + [VDD_NOMINAL] = 400000000, + [VDD_HIGH] = 480000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .cmd_rcgr = 0xc0c4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_0_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000}, + }, +}; + +static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .cmd_rcgr = 0xc0e0, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_1_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0), + 
F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0xa064, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cphy_rx_clk_src", + .parent_names = cam_cc_parent_names_1, + .num_parents = 7, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 384000000, + [VDD_LOW_L1] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x6004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi0phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x6028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi1phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x604c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct 
clk_init_data){ + .name = "cam_cc_csi2phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .cmd_rcgr = 0x6070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi3phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0x703c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_fast_ahb_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW] = 200000000, + [VDD_LOW_L1] = 300000000, + [VDD_NOMINAL] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + 
{ } +}; + +static struct clk_rcg2 cam_cc_fd_core_clk_src = { + .cmd_rcgr = 0xc09c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_fd_core_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_fd_core_clk_src", + .parent_names = cam_cc_parent_names_3, + .num_parents = 7, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 380000000, + [VDD_LOW] = 384000000, + [VDD_LOW_L1] = 480000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0xc074, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000, + [VDD_LOW_L1] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(380000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(510000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .cmd_rcgr = 0xa010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_4, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_clk_src", + .parent_names = 
cam_cc_parent_names_4, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 380000000, + [VDD_LOW] = 510000000, + [VDD_LOW_L1] = 637000000, + [VDD_NOMINAL] = 760000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { + .cmd_rcgr = 0xa03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_csid_clk_src", + .parent_names = cam_cc_parent_names_1, + .num_parents = 7, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 384000000, + [VDD_LOW_L1] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(510000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .cmd_rcgr = 0xb010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_5, + .freq_tbl = ftbl_cam_cc_ife_1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_clk_src", + .parent_names = cam_cc_parent_names_5, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned 
long[VDD_NUM]) { + [VDD_LOWER] = 380000000, + [VDD_LOW] = 510000000, + [VDD_LOW_L1] = 637000000, + [VDD_NOMINAL] = 760000000}, + }, +}; + +static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { + .cmd_rcgr = 0xb034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_csid_clk_src", + .parent_names = cam_cc_parent_names_1, + .num_parents = 7, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 384000000, + [VDD_LOW_L1] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_lite_clk_src = { + .cmd_rcgr = 0xc004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_lite_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 320000000, + [VDD_LOW] = 400000000, + [VDD_LOW_L1] = 480000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { + .cmd_rcgr = 0xc020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_csid_clk_src", + .parent_names = cam_cc_parent_names_1, + .num_parents = 7, + .ops = &clk_rcg2_ops, + .vdd_class 
= &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 384000000, + [VDD_LOW_L1] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(340000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(430000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ipe_0_clk_src = { + .cmd_rcgr = 0x8010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_6, + .freq_tbl = ftbl_cam_cc_ipe_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_clk_src", + .parent_names = cam_cc_parent_names_6, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 340000000, + [VDD_LOW] = 430000000, + [VDD_LOW_L1] = 520000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static struct clk_rcg2 cam_cc_jpeg_clk_src = { + .cmd_rcgr = 0xc048, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 400000000, + [VDD_LOW_L1] = 480000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0), + F(400000000, 
P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_lrme_clk_src = { + .cmd_rcgr = 0xc100, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_lrme_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_lrme_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 240000000, + [VDD_LOW] = 300000000, + [VDD_LOW_L1] = 320000000, + [VDD_NOMINAL] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(19200000, P_BI_TCXO_MX, 1, 0, 0), + F(24000000, P_CAM_CC_PLL2_OUT_AUX, 1, 1, 20), + F(34285714, P_CAM_CC_PLL2_OUT_AUX, 14, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x5004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk0_clk_src", + .parent_names = cam_cc_parent_names_2, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 34285714}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x5024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk1_clk_src", + .parent_names = cam_cc_parent_names_2, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 34285714}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x5044, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = 
&(struct clk_init_data){ + .name = "cam_cc_mclk2_clk_src", + .parent_names = cam_cc_parent_names_2, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 34285714}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x5064, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk3_clk_src", + .parent_names = cam_cc_parent_names_2, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 34285714}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = { + F(32000, P_CHIP_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sleep_clk_src = { + .cmd_rcgr = 0xc1a4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_7, + .freq_tbl = ftbl_cam_cc_sleep_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_sleep_clk_src", + .parent_names = cam_cc_parent_names_7, + .num_parents = 2, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 32000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(80000000, P_CAM_CC_PLL0_OUT_ODD, 5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0x7058, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_8, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_slow_ahb_clk_src", + .parent_names = cam_cc_parent_names_8, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 
80000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_xo_clk_src = { + .cmd_rcgr = 0xc188, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_9, + .freq_tbl = ftbl_cam_cc_xo_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_xo_clk_src", + .parent_names = cam_cc_parent_names_9, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch cam_cc_bps_ahb_clk = { + .halt_reg = 0x7070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7070, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_areg_clk = { + .halt_reg = 0x7054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_areg_clk", + .parent_names = (const char *[]){ + "cam_cc_fast_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_axi_clk = { + .halt_reg = 0x7038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_axi_clk", + .parent_names = (const char *[]){ + "cam_cc_camnoc_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_clk = { + .halt_reg = 0x7028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_clk", + .parent_names = (const char *[]){ + "cam_cc_bps_clk_src", + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_clk = { + .halt_reg = 0xc148, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc148, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_axi_clk", + .parent_names = (const char *[]){ + "cam_cc_camnoc_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_dcd_xo_clk = { + .halt_reg = 0xc150, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc150, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_dcd_xo_clk", + .parent_names = (const char *[]){ + "cam_cc_xo_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_0_clk = { + .halt_reg = 0xc0dc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0dc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_0_clk", + .parent_names = (const char *[]){ + "cam_cc_cci_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_1_clk = { + .halt_reg = 0xc0f8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0f8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_1_clk", + .parent_names = (const char *[]){ + "cam_cc_cci_1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_core_ahb_clk = { + .halt_reg = 0xc184, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xc184, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_core_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0xc124, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0xc124, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x601c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x601c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi0phytimer_clk", + .parent_names = (const char *[]){ + "cam_cc_csi0phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x6040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi1phytimer_clk", + .parent_names = (const char *[]){ + "cam_cc_csi1phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x6064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi2phytimer_clk", + .parent_names = (const char *[]){ + "cam_cc_csi2phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi3phytimer_clk = { + .halt_reg = 0x6088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi3phytimer_clk", + .parent_names = (const char *[]){ 
+ "cam_cc_csi3phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x6020, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x6020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy0_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x6044, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x6044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy1_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x6068, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x6068, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy2_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy3_clk = { + .halt_reg = 0x608c, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x608c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy3_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_fd_core_clk = { + .halt_reg = 0xc0b4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0b4, + .enable_mask = BIT(0), + 
.hw.init = &(struct clk_init_data){ + .name = "cam_cc_fd_core_clk", + .parent_names = (const char *[]){ + "cam_cc_fd_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_fd_core_uar_clk = { + .halt_reg = 0xc0bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0bc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_fd_core_uar_clk", + .parent_names = (const char *[]){ + "cam_cc_fd_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_gdsc_clk = { + .halt_reg = 0xc1a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc1a0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_gdsc_clk", + .parent_names = (const char *[]){ + "cam_cc_xo_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ahb_clk = { + .halt_reg = 0xc094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc094, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0xc08c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc08c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_clk", + .parent_names = (const char *[]){ + "cam_cc_icp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_axi_clk = { + .halt_reg = 0xa080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa080, + .enable_mask = BIT(0), + .hw.init = 
&(struct clk_init_data){ + .name = "cam_cc_ife_0_axi_clk", + .parent_names = (const char *[]){ + "cam_cc_camnoc_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_clk = { + .halt_reg = 0xa028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_cphy_rx_clk = { + .halt_reg = 0xa07c, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0xa07c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_cphy_rx_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_csid_clk = { + .halt_reg = 0xa054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_csid_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_0_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_dsp_clk = { + .halt_reg = 0xa038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_dsp_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_axi_clk = { + .halt_reg = 0xb058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb058, + 
.enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_axi_clk", + .parent_names = (const char *[]){ + "cam_cc_camnoc_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_clk = { + .halt_reg = 0xb028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_cphy_rx_clk = { + .halt_reg = 0xb054, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0xb054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_cphy_rx_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_csid_clk = { + .halt_reg = 0xb04c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb04c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_csid_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_1_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_dsp_clk = { + .halt_reg = 0xb030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_dsp_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_clk = { + .halt_reg = 0xc01c, + .halt_check = BRANCH_HALT, + .clkr = 
{ + .enable_reg = 0xc01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_lite_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = { + .halt_reg = 0xc040, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0xc040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_cphy_rx_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_csid_clk = { + .halt_reg = 0xc038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_csid_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_lite_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_ahb_clk = { + .halt_reg = 0x8040, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x8040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_areg_clk = { + .halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_areg_clk", + .parent_names = (const char *[]){ + "cam_cc_fast_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch cam_cc_ipe_0_axi_clk = { + .halt_reg = 0x8038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_axi_clk", + .parent_names = (const char *[]){ + "cam_cc_camnoc_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_clk = { + .halt_reg = 0x8028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_clk", + .parent_names = (const char *[]){ + "cam_cc_ipe_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_ahb_clk = { + .halt_reg = 0x9028, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x9028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_areg_clk = { + .halt_reg = 0x9024, + .halt_check = BRANCH_HALT, + .aggr_sibling_rates = true, + .clkr = { + .enable_reg = 0x9024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_areg_clk", + .parent_names = (const char *[]){ + "cam_cc_fast_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_axi_clk = { + .halt_reg = 0x9020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_axi_clk", + .parent_names = (const char *[]){ + "cam_cc_camnoc_axi_clk_src", + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_clk = { + .halt_reg = 0x9010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_clk", + .parent_names = (const char *[]){ + "cam_cc_ipe_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_clk = { + .halt_reg = 0xc060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc060, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_clk", + .parent_names = (const char *[]){ + "cam_cc_jpeg_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_lrme_clk = { + .halt_reg = 0xc118, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc118, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_lrme_clk", + .parent_names = (const char *[]){ + "cam_cc_lrme_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x501c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk0_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x503c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x503c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk1_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x505c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x505c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk2_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x507c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x507c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk3_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sleep_clk = { + .halt_reg = 0xc1bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc1bc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_sleep_clk", + .parent_names = (const char *[]){ + "cam_cc_sleep_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +struct clk_hw *cam_cc_sdmmagpie_hws[] = { + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.hw, + [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.hw, + [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.hw, + [CAM_CC_PLL2_OUT_EARLY] = &cam_cc_pll2_out_early.hw, + [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.hw, + [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.hw, +}; + +static struct clk_regmap *cam_cc_sdmmagpie_clocks[] = { + [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr, + [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr, + [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr, + [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr, + [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr, + [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr, + [CAM_CC_CAMNOC_DCD_XO_CLK] = 
&cam_cc_camnoc_dcd_xo_clk.clkr, + [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr, + [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr, + [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr, + [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr, + [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr, + [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr, + [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr, + [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr, + [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr, + [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr, + [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr, + [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr, + [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr, + [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr, + [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr, + [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr, + [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr, + 
[CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr, + [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr, + [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr, + [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr, + [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr, + [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr, + [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr, + [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr, + [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr, + [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr, + [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr, + [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr, + [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr, + [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr, + [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr, + [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr, + [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr, + [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr, + [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr, + [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr, + [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr, + [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL1] = &cam_cc_pll1.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr, + [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr, + [CAM_CC_PLL3] = &cam_cc_pll3.clkr, 
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr, + [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr, + [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, + [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr, +}; + +static const struct regmap_config cam_cc_sdmmagpie_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xd024, + .fast_io = true, +}; + +static const struct qcom_cc_desc cam_cc_sdmmagpie_desc = { + .config = &cam_cc_sdmmagpie_regmap_config, + .clks = cam_cc_sdmmagpie_clocks, + .num_clks = ARRAY_SIZE(cam_cc_sdmmagpie_clocks), + .hwclks = cam_cc_sdmmagpie_hws, + .num_hwclks = ARRAY_SIZE(cam_cc_sdmmagpie_hws), +}; + +static const struct of_device_id cam_cc_sdmmagpie_match_table[] = { + { .compatible = "qcom,camcc-sdmmagpie" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_sdmmagpie_match_table); + +static int cam_cc_sdmmagpie_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER) + dev_err(&pdev->dev, + "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx"); + if (IS_ERR(vdd_mx.regulator[0])) { + if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER) + dev_err(&pdev->dev, + "Unable to get vdd_mx regulator\n"); + return PTR_ERR(vdd_mx.regulator[0]); + } + + regmap = qcom_cc_map(pdev, &cam_cc_sdmmagpie_desc); + if (IS_ERR(regmap)) { + pr_err("Failed to map the cam_cc registers\n"); + return PTR_ERR(regmap); + } + + clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); + clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); + clk_agera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + 
clk_fabia_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll3_config); + + ret = qcom_cc_really_probe(pdev, &cam_cc_sdmmagpie_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register Camera CC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered Camera CC clocks\n"); + return ret; +} + +static struct platform_driver cam_cc_sdmmagpie_driver = { + .probe = cam_cc_sdmmagpie_probe, + .driver = { + .name = "cam_cc-sdmmagpie", + .of_match_table = cam_cc_sdmmagpie_match_table, + }, +}; + +static int __init cam_cc_sdmmagpie_init(void) +{ + return platform_driver_register(&cam_cc_sdmmagpie_driver); +} +subsys_initcall(cam_cc_sdmmagpie_init); + +static void __exit cam_cc_sdmmagpie_exit(void) +{ + platform_driver_unregister(&cam_cc_sdmmagpie_driver); +} +module_exit(cam_cc_sdmmagpie_exit); + +MODULE_DESCRIPTION("QTI CAM_CC sdmmagpie Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:cam_cc-sdmmagpie"); diff --git a/drivers/clk/qcom/camcc-sm6150.c b/drivers/clk/qcom/camcc-sm6150.c index c281f244a493659b9721ebfe369c03c03ee25c26..59dac4e1eb90d943c6d82e34b3fd0d6b292cdf46 100644 --- a/drivers/clk/qcom/camcc-sm6150.c +++ b/drivers/clk/qcom/camcc-sm6150.c @@ -855,6 +855,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = cam_cc_parent_map_0, + .enable_safe_config = true, .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "cam_cc_slow_ahb_clk_src", @@ -981,7 +982,7 @@ static struct clk_branch cam_cc_cci_clk = { static struct clk_branch cam_cc_core_ahb_clk = { .halt_reg = 0xb144, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0xb144, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c index d1ef105fc75212e7b370d008b50986a1fdfc09b3..0e10133e04bcb2aec1eed3dee5e4e95e63dad9f0 100644 --- a/drivers/clk/qcom/camcc-sm8150.c +++ b/drivers/clk/qcom/camcc-sm8150.c @@ -38,7 +38,7 @@ 
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } -static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner); enum { @@ -137,7 +137,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = { .config_ctl_hi1_val = 0x00000024, .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000002, - .test_ctl_hi1_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -149,6 +149,9 @@ static const struct alpha_pll_config cam_cc_pll0_config_sm8150_v2 = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -223,14 +226,14 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = { }; static const struct alpha_pll_config cam_cc_pll1_config = { - .l = 0x27, - .alpha = 0x1000, + .l = 0x1F, + .alpha = 0x4000, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000002, - .test_ctl_hi1_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -242,6 +245,9 @@ static const struct alpha_pll_config cam_cc_pll1_config_sm8150_v2 = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -287,7 +293,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { static const struct alpha_pll_config 
cam_cc_pll2_config = { .l = 0x32, .alpha = 0x0, - .config_ctl_val = 0x10000927, + .config_ctl_val = 0x10000807, .config_ctl_hi_val = 0x00000011, .config_ctl_hi1_val = 0x04300142, .test_ctl_val = 0x04000400, @@ -349,7 +355,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = { .config_ctl_hi1_val = 0x00000024, .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000002, - .test_ctl_hi1_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -361,6 +367,9 @@ static const struct alpha_pll_config cam_cc_pll3_config_sm8150_v2 = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -411,7 +420,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = { .config_ctl_hi1_val = 0x00000024, .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000002, - .test_ctl_hi1_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -423,6 +432,9 @@ static const struct alpha_pll_config cam_cc_pll4_config_sm8150_v2 = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -1029,8 +1041,8 @@ static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = { static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), - F(375000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), - F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(450000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), 
F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), { } @@ -1128,8 +1140,8 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = { }; static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { - F(19200000, P_BI_TCXO_MX, 1, 0, 0), F(12000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 8), + F(19200000, P_BI_TCXO_MX, 1, 0, 0), F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4), F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0), { } diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index eda181fc6219d7e484f32ef089d4395069e6b34a..c0add060eda32565fca078ce102488952117666c 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -69,6 +69,7 @@ #define ALPHA_BITWIDTH 32 #define ALPHA_16BIT_MASK 0xffff #define ALPHA_REG_16BITWIDTH 16 +#define ALPHA_16_BIT_PLL_RATE_MARGIN 500 /* TRION PLL specific settings and offsets */ #define TRION_PLL_CAL_L_VAL 0x8 @@ -90,7 +91,6 @@ #define TRION_PLL_RUN 0x1 #define TRION_PLL_OUT_MASK 0x7 #define TRION_PCAL_DONE BIT(26) -#define TRION_PLL_RATE_MARGIN 500 #define TRION_PLL_ACK_LATCH BIT(29) #define TRION_PLL_UPDATE BIT(22) #define TRION_PLL_HW_UPDATE_LOGIC_BYPASS BIT(23) @@ -112,6 +112,7 @@ #define REGERA_PLL_OUT_MASK 0x9 /* FABIA PLL specific settings and offsets */ +#define FABIA_CAL_L_VAL 0x8 #define FABIA_USER_CTL_LO 0xc #define FABIA_USER_CTL_HI 0x10 #define FABIA_CONFIG_CTL_LO 0x14 @@ -123,11 +124,18 @@ #define FABIA_PLL_STANDBY 0x0 #define FABIA_PLL_RUN 0x1 #define FABIA_PLL_OUT_MASK 0x7 -#define FABIA_PLL_RATE_MARGIN 500 #define FABIA_PLL_ACK_LATCH BIT(29) #define FABIA_PLL_UPDATE BIT(22) #define FABIA_PLL_HW_UPDATE_LOGIC_BYPASS BIT(23) +/* AGERA PLL specific settings and offsets */ +#define AGERA_PLL_USER_CTL 0xc +#define AGERA_PLL_CONFIG_CTL 0x10 +#define AGERA_PLL_CONFIG_CTL_U 0x14 +#define AGERA_PLL_TEST_CTL 0x18 +#define AGERA_PLL_TEST_CTL_U 0x1c +#define AGERA_PLL_POST_DIV_MASK 0x3 + #define to_clk_alpha_pll(_hw) 
container_of(to_clk_regmap(_hw), \ struct clk_alpha_pll, clkr) @@ -444,7 +452,7 @@ static unsigned long alpha_pll_calc_rate(const struct clk_alpha_pll *pll, int alpha_bw = ALPHA_BITWIDTH; if (pll->type == TRION_PLL || pll->type == REGERA_PLL - || pll->type == FABIA_PLL) + || pll->type == FABIA_PLL || pll->type == AGERA_PLL) alpha_bw = ALPHA_REG_16BITWIDTH; return (prate * l) + ((prate * a) >> alpha_bw); @@ -481,7 +489,7 @@ alpha_pll_round_rate(const struct clk_alpha_pll *pll, unsigned long rate, * the fractional divider. */ if (pll->type == TRION_PLL || pll->type == REGERA_PLL - || pll->type == FABIA_PLL) + || pll->type == FABIA_PLL || pll->type == AGERA_PLL) alpha_bw = ALPHA_REG_16BITWIDTH; /* Upper ALPHA_BITWIDTH bits of Alpha */ @@ -666,7 +674,8 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate, rate = alpha_pll_round_rate(pll, rate, *prate, &l, &a); if ((pll->type == ALPHA_PLL && alpha_pll_find_vco(pll, rate)) || - (pll->type == FABIA_PLL || alpha_pll_find_vco(pll, rate))) + (pll->type == FABIA_PLL || alpha_pll_find_vco(pll, rate)) + || pll->type == AGERA_PLL) return rate; min_freq = pll->vco_table[0].min_freq; @@ -972,7 +981,7 @@ static int clk_trion_pll_set_rate(struct clk_hw *hw, unsigned long rate, * Due to a limited number of bits for fractional rate programming, the * rounded up rate could be marginally higher than the requested rate. */ - if (rrate > (rate + TRION_PLL_RATE_MARGIN) || rrate < rate) { + if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) { pr_err("Call set rate on the PLL with rounded rates!\n"); return -EINVAL; } @@ -1223,7 +1232,7 @@ static int clk_regera_pll_set_rate(struct clk_hw *hw, unsigned long rate, * Due to a limited number of bits for fractional rate programming, the * rounded up rate could be marginally higher than the requested rate. 
*/ - if (rrate > (rate + TRION_PLL_RATE_MARGIN) || rrate < rate) { + if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) { pr_err("Requested rate (%lu) not matching the PLL's supported frequency (%lu)\n", rate, rrate); return -EINVAL; @@ -1350,12 +1359,17 @@ static unsigned long clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); - u32 ctl; + u32 ctl, user_ctl = PLL_USER_CTL, post_div_mask = PLL_POST_DIV_MASK; - regmap_read(pll->clkr.regmap, pll->offset + PLL_USER_CTL, &ctl); + if (pll->type == AGERA_PLL) { + user_ctl = AGERA_PLL_USER_CTL; + post_div_mask = AGERA_PLL_POST_DIV_MASK; + } + + regmap_read(pll->clkr.regmap, pll->offset + user_ctl, &ctl); ctl >>= PLL_POST_DIV_SHIFT; - ctl &= PLL_POST_DIV_MASK; + ctl &= post_div_mask; return parent_rate >> fls(ctl); } @@ -1384,12 +1398,18 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); int div; + u32 user_ctl = PLL_USER_CTL, post_div_mask = PLL_POST_DIV_MASK; + + if (pll->type == AGERA_PLL) { + user_ctl = AGERA_PLL_USER_CTL; + post_div_mask = AGERA_PLL_POST_DIV_MASK; + } /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1; - return regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL, - PLL_POST_DIV_MASK << PLL_POST_DIV_SHIFT, + return regmap_update_bits(pll->clkr.regmap, pll->offset + user_ctl, + post_div_mask << PLL_POST_DIV_SHIFT, div << PLL_POST_DIV_SHIFT); } @@ -1875,6 +1895,70 @@ static void clk_fabia_pll_disable(struct clk_hw *hw) regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY); } +/* + * Fabia PLL requires power-on self calibration which happen when the PLL comes + * out of reset. 
Calibration frequency is calculated by below relation: + * + * calibration freq = ((pll_l_valmax + pll_l_valmin) * 0.54) + */ +static int clk_fabia_pll_prepare(struct clk_hw *hw) +{ + unsigned long calibration_freq, freq_hz; + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + const struct pll_vco *vco; + struct clk_hw *parent; + u64 a; + u32 cal_l, regval, off = pll->offset; + int ret; + + /* Check if calibration needs to be done i.e. PLL is in reset */ + ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, ®val); + if (ret) + return ret; + + /* Return early if calibration is not needed. */ + if (regval & PLL_RESET_N) + return 0; + + vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw)); + if (!vco) { + pr_err("alpha pll: not in a valid vco range\n"); + return -EINVAL; + } + + calibration_freq = ((pll->vco_table[0].min_freq + + pll->vco_table[0].max_freq) * 54)/100; + + parent = clk_hw_get_parent(hw); + if (!parent) + return -EINVAL; + + freq_hz = alpha_pll_round_rate(pll, calibration_freq, + clk_hw_get_rate(parent), &cal_l, &a); + /* + * Due to a limited number of bits for fractional rate programming, the + * rounded up rate could be marginally higher than the requested rate. 
+ */ + if (freq_hz > (calibration_freq + ALPHA_16_BIT_PLL_RATE_MARGIN) || + freq_hz < calibration_freq) { + pr_err("fabia_pll: Call set rate with rounded rates!\n"); + return -EINVAL; + } + + /* Setup PLL for calibration frequency */ + regmap_write(pll->clkr.regmap, pll->offset + FABIA_CAL_L_VAL, cal_l); + + /* Bringup the pll at calibration frequency */ + ret = clk_fabia_pll_enable(hw); + if (ret) { + pr_err("alpha pll calibration failed\n"); + return ret; + } + + clk_fabia_pll_disable(hw); + return 0; +} + static unsigned long clk_fabia_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -1907,7 +1991,7 @@ static int clk_fabia_pll_set_rate(struct clk_hw *hw, unsigned long rate, * Due to limited number of bits for fractional rate programming, the * rounded up rate could be marginally higher than the requested rate. */ - if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) { + if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) { pr_err("Call set rate on the PLL with rounded rates!\n"); return -EINVAL; } @@ -1956,6 +2040,7 @@ static void clk_fabia_pll_list_registers(struct seq_file *f, struct clk_hw *hw) } const struct clk_ops clk_fabia_pll_ops = { + .prepare = clk_fabia_pll_prepare, .enable = clk_fabia_pll_enable, .disable = clk_fabia_pll_disable, .recalc_rate = clk_fabia_pll_recalc_rate, @@ -2054,3 +2139,140 @@ const struct clk_ops clk_generic_pll_postdiv_ops = { .set_rate = clk_generic_pll_postdiv_set_rate, }; EXPORT_SYMBOL(clk_generic_pll_postdiv_ops); + +void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + u32 val, mask; + + if (config->l) + regmap_write(regmap, pll->offset + PLL_L_VAL, + config->l); + + if (config->alpha) + regmap_write(regmap, pll->offset + PLL_ALPHA_VAL, + config->alpha); + if (config->post_div_mask) { + mask = config->post_div_mask; + val = config->post_div_val; + regmap_update_bits(regmap, pll->offset + AGERA_PLL_USER_CTL, + mask, 
val); + } + + if (config->main_output_mask || config->aux_output_mask || + config->aux2_output_mask || config->early_output_mask) { + + val = config->main_output_mask; + val |= config->aux_output_mask; + val |= config->aux2_output_mask; + val |= config->early_output_mask; + + mask = config->main_output_mask; + mask |= config->aux_output_mask; + mask |= config->aux2_output_mask; + mask |= config->early_output_mask; + + regmap_update_bits(regmap, pll->offset + AGERA_PLL_USER_CTL, + mask, val); + } + + if (config->config_ctl_val) + regmap_write(regmap, pll->offset + AGERA_PLL_CONFIG_CTL, + config->config_ctl_val); + + if (config->config_ctl_hi_val) + regmap_write(regmap, pll->offset + AGERA_PLL_CONFIG_CTL_U, + config->config_ctl_hi_val); + + if (config->test_ctl_val) + regmap_write(regmap, pll->offset + AGERA_PLL_TEST_CTL, + config->test_ctl_val); + + if (config->test_ctl_hi_val) + regmap_write(regmap, pll->offset + AGERA_PLL_TEST_CTL_U, + config->test_ctl_hi_val); +} + +static unsigned long +clk_agera_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + u32 l, a; + u64 prate = parent_rate; + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 off = pll->offset; + + regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l); + regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &a); + + return alpha_pll_calc_rate(pll, prate, l, a); +} + +static int clk_agera_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + unsigned long rrate; + int ret; + u32 l, off = pll->offset; + u64 a; + + rrate = alpha_pll_round_rate(pll, rate, prate, &l, &a); + /* + * Due to limited number of bits for fractional rate programming, the + * rounded up rate could be marginally higher than the requested rate. 
+ */ + if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) { + pr_err("Call set rate on the PLL with rounded rates!\n"); + return -EINVAL; + } + + /* change L_VAL without having to go through the power on sequence */ + regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l); + regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL, a); + + /* Ensure that the write above goes through before proceeding. */ + mb(); + + if (clk_hw_is_enabled(hw)) { + ret = wait_for_pll_enable_lock(pll); + if (ret) { + pr_err("Failed to lock after L_VAL update\n"); + return ret; + } + } + + return 0; +} + +static void clk_agera_pll_list_registers(struct seq_file *f, struct clk_hw *hw) +{ + static struct clk_register_data pll_regs[] = { + {"PLL_MODE", 0x0}, + {"PLL_L_VAL", 0x4}, + {"PLL_ALPHA_VAL", 0x8}, + {"PLL_USER_CTL", 0xc}, + {"PLL_CONFIG_CTL", 0x10}, + {"PLL_CONFIG_CTL_U", 0x14}, + {"PLL_TEST_CTL", 0x18}, + {"PLL_TEST_CTL_U", 0x1c}, + {"PLL_STATUS", 0x2c}, + }; + + static struct clk_register_data pll_vote_reg = { + "APSS_PLL_VOTE", 0x0 + }; + + print_pll_registers(f, hw, pll_regs, ARRAY_SIZE(pll_regs), + &pll_vote_reg); +} + +const struct clk_ops clk_agera_pll_ops = { + .enable = clk_alpha_pll_enable, + .disable = clk_alpha_pll_disable, + .is_enabled = clk_alpha_pll_is_enabled, + .recalc_rate = clk_agera_pll_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, + .set_rate = clk_agera_pll_set_rate, + .list_registers = clk_agera_pll_list_registers, +}; +EXPORT_SYMBOL(clk_agera_pll_ops); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 9a6b0ba768c21b7f5321e99f388bae9dcba536c7..5157742c173682750e4c25ae10cdb0e3e13c2143 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -33,6 +33,7 @@ enum pll_type { TRION_PLL, REGERA_PLL, FABIA_PLL, + AGERA_PLL, }; /** @@ -97,6 +98,7 @@ struct clk_alpha_pll_postdiv { const struct clk_div_table *post_div_table; size_t num_post_div; struct clk_regmap clkr; + enum pll_type 
type; }; struct alpha_pll_config { @@ -140,6 +142,7 @@ extern const struct clk_ops clk_pll_sleep_vote_ops; extern const struct clk_ops clk_fabia_pll_ops; extern const struct clk_ops clk_fabia_fixed_pll_ops; extern const struct clk_ops clk_generic_pll_postdiv_ops; +extern const struct clk_ops clk_agera_pll_ops; void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); @@ -149,5 +152,7 @@ int clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); #endif diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c index a5a18082066780dfbb4a4339db61557e6013493e..d8b54b841d3819aa5164a75429258fd994eca3c2 100644 --- a/drivers/clk/qcom/clk-aop-qmp.c +++ b/drivers/clk/qcom/clk-aop-qmp.c @@ -157,7 +157,8 @@ static int clk_aop_qmp_prepare(struct clk_hw *hw) ret = mbox_send_message(clk->mbox, &pkt); if (ret < 0) { pr_err("Failed to send clk prepare request for %s, ret %d\n", - clk_hw_get_name(hw), ret); + hw->core ? 
clk_hw_get_name(hw) : hw->init->name, + ret); goto err; } diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 325be5ed1a049d8439ec5b27812464e8f95c15c5..475ef6b0c12ea03174c6febf712ed81bb784d3d3 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -83,6 +83,7 @@ struct clk_osm { static bool is_sdmshrike; static bool is_sm6150; +static bool is_sdmmagpie; static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw) { @@ -1007,7 +1008,7 @@ static int clk_osm_resources_init(struct platform_device *pdev) return -ENOMEM; } - if (is_sdmshrike || is_sm6150) + if (is_sdmshrike || is_sm6150 || is_sdmmagpie) return 0; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -1068,6 +1069,9 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) .get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter, }; + is_sdmmagpie = of_device_is_compatible(pdev->dev.of_node, + "qcom,clk-cpu-osm-sdmmagpie"); + is_sm6150 = of_device_is_compatible(pdev->dev.of_node, "qcom,clk-cpu-osm-sm6150"); @@ -1075,7 +1079,7 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) "qcom,clk-cpu-osm-sdmshrike"); if (is_sdmshrike) clk_cpu_osm_driver_sdmshrike_fixup(); - else if (is_sm6150) + else if (is_sm6150 || is_sdmmagpie) clk_cpu_osm_driver_sm6150_fixup(); clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data), @@ -1128,7 +1132,7 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) return rc; } - if (!is_sdmshrike && !is_sm6150) { + if (!is_sdmshrike && !is_sm6150 && !is_sdmmagpie) { rc = clk_osm_read_lut(pdev, &perfpcl_clk); if (rc) { dev_err(&pdev->dev, "Unable to read OSM LUT for perf plus cluster, rc=%d\n", @@ -1201,6 +1205,7 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) static const struct of_device_id match_table[] = { { .compatible = "qcom,clk-cpu-osm" }, { .compatible = "qcom,clk-cpu-osm-sm6150" }, + { .compatible = "qcom,clk-cpu-osm-sdmmagpie" }, { 
.compatible = "qcom,clk-cpu-osm-sdmshrike" }, {} }; diff --git a/drivers/clk/qcom/clk-cpu-qcs405.c b/drivers/clk/qcom/clk-cpu-qcs405.c index dc2e03bac40cb8b8683a4bab2d65453e90557b01..331f3e195e3cb3ec91587eff31830f15bbe0c05e 100644 --- a/drivers/clk/qcom/clk-cpu-qcs405.c +++ b/drivers/clk/qcom/clk-cpu-qcs405.c @@ -267,8 +267,7 @@ static struct clk_pll apcs_cpu_pll = { .ops = &clk_pll_hf_ops, .vdd_class = &vdd_hf_pll, .rate_max = (unsigned long[VDD_HF_PLL_NUM]) { - [VDD_HF_PLL_SVS] = 1000000000, - [VDD_HF_PLL_NOM] = 2000000000, + [VDD_HF_PLL_SVS] = 2000000000, }, .num_rate_max = VDD_HF_PLL_NUM, }, diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c index 76982b79a804bd35c84562cb8b9b15059b72d95b..2250099af90526c073ae9fbc4562e5f2a79f7b59 100644 --- a/drivers/clk/qcom/clk-debug.c +++ b/drivers/clk/qcom/clk-debug.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "clk-regmap.h" #include "clk-debug.h" @@ -253,6 +254,13 @@ static int clk_debug_measure_get(void *data, u64 *val) mutex_lock(&clk_debug_lock); + /* + * Vote for bandwidth to re-connect config ports + * to multimedia clock controllers. 
+ */ + if (meas->bus_cl_id) + msm_bus_scale_client_update_request(meas->bus_cl_id, 1); + ret = clk_set_parent(measure->clk, hw->clk); if (!ret) { par = measure; @@ -290,6 +298,8 @@ static int clk_debug_measure_get(void *data, u64 *val) exit1: disable_debug_clks(meas, index); exit: + if (meas->bus_cl_id) + msm_bus_scale_client_update_request(meas->bus_cl_id, 0); mutex_unlock(&clk_debug_lock); return ret; } @@ -345,11 +355,13 @@ int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry) } meas = to_clk_measure(measure); + if (meas->bus_cl_id) + msm_bus_scale_client_update_request(meas->bus_cl_id, 1); ret = clk_set_parent(measure->clk, hw->clk); if (ret) { pr_debug("Unable to set %s as %s's parent, ret=%d\n", clk_hw_get_name(hw), clk_hw_get_name(measure), ret); - return 0; + goto err; } index = clk_debug_mux_get_parent(measure); @@ -359,6 +371,9 @@ int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry) else debugfs_create_file("clk_measure", 0444, dentry, hw, &clk_measure_fops); +err: + if (meas->bus_cl_id) + msm_bus_scale_client_update_request(meas->bus_cl_id, 0); return 0; } EXPORT_SYMBOL(clk_debug_measure_add); diff --git a/drivers/clk/qcom/clk-debug.h b/drivers/clk/qcom/clk-debug.h index ba25ee6118199f9fa6632fcc1a093177bc8a4ccd..d423d23997c227bbd784199a5b89622395c3c4d5 100644 --- a/drivers/clk/qcom/clk-debug.h +++ b/drivers/clk/qcom/clk-debug.h @@ -130,6 +130,7 @@ struct clk_debug_mux { u32 post_div_mask; u32 post_div_shift; u32 period_offset; + u32 bus_cl_id; struct clk_hw hw; }; diff --git a/drivers/clk/qcom/debugcc-sdmmagpie.c b/drivers/clk/qcom/debugcc-sdmmagpie.c new file mode 100644 index 0000000000000000000000000000000000000000..3efe77f77c73d6bd7dcdaf647e927189f6fe0327 --- /dev/null +++ b/drivers/clk/qcom/debugcc-sdmmagpie.c @@ -0,0 +1,860 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-debug.h" + +static struct measure_clk_data debug_mux_priv = { + .ctl_reg = 0x62024, + .status_reg = 0x62028, + .xo_div4_cbcr = 0x43008, +}; + +static const char *const debug_mux_parent_names[] = { + "cam_cc_bps_ahb_clk", + "cam_cc_bps_areg_clk", + "cam_cc_bps_axi_clk", + "cam_cc_bps_clk", + "cam_cc_camnoc_axi_clk", + "cam_cc_camnoc_dcd_xo_clk", + "cam_cc_cci_0_clk", + "cam_cc_cci_1_clk", + "cam_cc_core_ahb_clk", + "cam_cc_cpas_ahb_clk", + "cam_cc_csi0phytimer_clk", + "cam_cc_csi1phytimer_clk", + "cam_cc_csi2phytimer_clk", + "cam_cc_csi3phytimer_clk", + "cam_cc_csiphy0_clk", + "cam_cc_csiphy1_clk", + "cam_cc_csiphy2_clk", + "cam_cc_csiphy3_clk", + "cam_cc_fd_core_clk", + "cam_cc_fd_core_uar_clk", + "cam_cc_gdsc_clk", + "cam_cc_icp_ahb_clk", + "cam_cc_icp_clk", + "cam_cc_ife_0_axi_clk", + "cam_cc_ife_0_clk", + "cam_cc_ife_0_cphy_rx_clk", + "cam_cc_ife_0_csid_clk", + "cam_cc_ife_0_dsp_clk", + "cam_cc_ife_1_axi_clk", + "cam_cc_ife_1_clk", + "cam_cc_ife_1_cphy_rx_clk", + "cam_cc_ife_1_csid_clk", + "cam_cc_ife_1_dsp_clk", + "cam_cc_ife_lite_clk", + "cam_cc_ife_lite_cphy_rx_clk", + "cam_cc_ife_lite_csid_clk", + "cam_cc_ipe_0_ahb_clk", + "cam_cc_ipe_0_areg_clk", + "cam_cc_ipe_0_axi_clk", + "cam_cc_ipe_0_clk", + "cam_cc_ipe_1_ahb_clk", + "cam_cc_ipe_1_areg_clk", + "cam_cc_ipe_1_axi_clk", + "cam_cc_ipe_1_clk", + "cam_cc_jpeg_clk", + 
"cam_cc_lrme_clk", + "cam_cc_mclk0_clk", + "cam_cc_mclk1_clk", + "cam_cc_mclk2_clk", + "cam_cc_mclk3_clk", + "cam_cc_sleep_clk", + "disp_cc_mdss_ahb_clk", + "disp_cc_mdss_byte0_clk", + "disp_cc_mdss_byte0_intf_clk", + "disp_cc_mdss_byte1_clk", + "disp_cc_mdss_byte1_intf_clk", + "disp_cc_mdss_dp_aux_clk", + "disp_cc_mdss_dp_crypto_clk", + "disp_cc_mdss_dp_link_clk", + "disp_cc_mdss_dp_link_intf_clk", + "disp_cc_mdss_dp_pixel1_clk", + "disp_cc_mdss_dp_pixel_clk", + "disp_cc_mdss_esc0_clk", + "disp_cc_mdss_esc1_clk", + "disp_cc_mdss_mdp_clk", + "disp_cc_mdss_mdp_lut_clk", + "disp_cc_mdss_non_gdsc_ahb_clk", + "disp_cc_mdss_pclk0_clk", + "disp_cc_mdss_pclk1_clk", + "disp_cc_mdss_rot_clk", + "disp_cc_mdss_rscc_ahb_clk", + "disp_cc_mdss_rscc_vsync_clk", + "disp_cc_mdss_vsync_clk", + "disp_cc_xo_clk", + "gcc_aggre_noc_pcie_tbu_clk", + "gcc_aggre_ufs_phy_axi_clk", + "gcc_aggre_usb3_prim_axi_clk", + "gcc_apc_vs_clk", + "gcc_boot_rom_ahb_clk", + "gcc_camera_ahb_clk", + "gcc_camera_hf_axi_clk", + "gcc_camera_sf_axi_clk", + "gcc_camera_xo_clk", + "gcc_ce1_ahb_clk", + "gcc_ce1_axi_clk", + "gcc_ce1_clk", + "gcc_cfg_noc_usb3_prim_axi_clk", + "gcc_cpuss_ahb_clk", + "gcc_cpuss_gnoc_clk", + "gcc_cpuss_rbcpr_clk", + "gcc_ddrss_gpu_axi_clk", + "gcc_disp_ahb_clk", + "gcc_disp_gpll0_clk_src", + "gcc_disp_gpll0_div_clk_src", + "gcc_disp_hf_axi_clk", + "gcc_disp_sf_axi_clk", + "gcc_disp_xo_clk", + "gcc_gp1_clk", + "gcc_gp2_clk", + "gcc_gp3_clk", + "gcc_gpu_cfg_ahb_clk", + "gcc_gpu_gpll0_clk_src", + "gcc_gpu_gpll0_div_clk_src", + "gcc_gpu_memnoc_gfx_clk", + "gcc_gpu_snoc_dvm_gfx_clk", + "gcc_gpu_vs_clk", + "gcc_npu_axi_clk", + "gcc_npu_cfg_ahb_clk", + "gcc_npu_gpll0_clk_src", + "gcc_npu_gpll0_div_clk_src", + "gcc_pcie_0_aux_clk", + "gcc_pcie_0_cfg_ahb_clk", + "gcc_pcie_0_mstr_axi_clk", + "gcc_pcie_0_pipe_clk", + "gcc_pcie_0_slv_axi_clk", + "gcc_pcie_0_slv_q2a_axi_clk", + "gcc_pcie_phy_aux_clk", + "gcc_pcie_phy_refgen_clk", + "gcc_pdm2_clk", + "gcc_pdm_ahb_clk", + "gcc_pdm_xo4_clk", + 
"gcc_prng_ahb_clk", + "gcc_qupv3_wrap0_core_2x_clk", + "gcc_qupv3_wrap0_core_clk", + "gcc_qupv3_wrap0_s0_clk", + "gcc_qupv3_wrap0_s1_clk", + "gcc_qupv3_wrap0_s2_clk", + "gcc_qupv3_wrap0_s3_clk", + "gcc_qupv3_wrap0_s4_clk", + "gcc_qupv3_wrap0_s5_clk", + "gcc_qupv3_wrap0_s6_clk", + "gcc_qupv3_wrap0_s7_clk", + "gcc_qupv3_wrap1_core_2x_clk", + "gcc_qupv3_wrap1_core_clk", + "gcc_qupv3_wrap1_s0_clk", + "gcc_qupv3_wrap1_s1_clk", + "gcc_qupv3_wrap1_s2_clk", + "gcc_qupv3_wrap1_s3_clk", + "gcc_qupv3_wrap1_s4_clk", + "gcc_qupv3_wrap1_s5_clk", + "gcc_qupv3_wrap1_s6_clk", + "gcc_qupv3_wrap1_s7_clk", + "gcc_qupv3_wrap_0_m_ahb_clk", + "gcc_qupv3_wrap_0_s_ahb_clk", + "gcc_qupv3_wrap_1_m_ahb_clk", + "gcc_qupv3_wrap_1_s_ahb_clk", + "gcc_sdcc1_ahb_clk", + "gcc_sdcc1_apps_clk", + "gcc_sdcc1_ice_core_clk", + "gcc_sdcc2_ahb_clk", + "gcc_sdcc2_apps_clk", + "gcc_sdcc4_ahb_clk", + "gcc_sdcc4_apps_clk", + "gcc_sys_noc_cpuss_ahb_clk", + "gcc_tsif_ahb_clk", + "gcc_tsif_inactivity_timers_clk", + "gcc_tsif_ref_clk", + "gcc_ufs_phy_ahb_clk", + "gcc_ufs_phy_axi_clk", + "gcc_ufs_phy_ice_core_clk", + "gcc_ufs_phy_phy_aux_clk", + "gcc_ufs_phy_rx_symbol_0_clk", + "gcc_ufs_phy_tx_symbol_0_clk", + "gcc_ufs_phy_unipro_core_clk", + "gcc_usb30_prim_master_clk", + "gcc_usb30_prim_mock_utmi_clk", + "gcc_usb30_prim_sleep_clk", + "gcc_usb3_prim_phy_aux_clk", + "gcc_usb3_prim_phy_com_aux_clk", + "gcc_usb3_prim_phy_pipe_clk", + "gcc_usb_phy_cfg_ahb2phy_clk", + "gcc_vdda_vs_clk", + "gcc_vddcx_vs_clk", + "gcc_vddmx_vs_clk", + "gcc_video_ahb_clk", + "gcc_video_axi_clk", + "gcc_video_xo_clk", + "gcc_vs_ctrl_ahb_clk", + "gcc_vs_ctrl_clk", + "measure_only_mccc_clk", + "measure_only_cnoc_clk", + "measure_only_ipa_2x_clk", + "measure_only_snoc_clk", + "gpu_cc_acd_ahb_clk", + "gpu_cc_acd_cxo_clk", + "gpu_cc_ahb_clk", + "gpu_cc_crc_ahb_clk", + "gpu_cc_cx_apb_clk", + "gpu_cc_cx_gfx3d_clk", + "gpu_cc_cx_gfx3d_slv_clk", + "gpu_cc_cx_gmu_clk", + "gpu_cc_cx_snoc_dvm_clk", + "gpu_cc_cxo_aon_clk", + "gpu_cc_cxo_clk", + 
"gpu_cc_gx_cxo_clk", + "gpu_cc_gx_gfx3d_clk", + "gpu_cc_gx_gmu_clk", + "gpu_cc_gx_vsense_clk", + "npu_cc_armwic_core_clk", + "npu_cc_bto_core_clk", + "npu_cc_bwmon_clk", + "npu_cc_cal_dp_cdc_clk", + "npu_cc_cal_dp_clk", + "npu_cc_comp_noc_axi_clk", + "npu_cc_conf_noc_ahb_clk", + "npu_cc_npu_core_apb_clk", + "npu_cc_npu_core_atb_clk", + "npu_cc_npu_core_clk", + "npu_cc_npu_core_cti_clk", + "npu_cc_npu_cpc_clk", + "npu_cc_npu_cpc_timer_clk", + "npu_cc_perf_cnt_clk", + "npu_cc_qtimer_core_clk", + "npu_cc_sleep_clk", + "npu_cc_xo_clk", + "video_cc_apb_clk", + "video_cc_at_clk", + "video_cc_iris_ahb_clk", + "video_cc_mvs0_axi_clk", + "video_cc_mvs0_core_clk", + "video_cc_mvs1_axi_clk", + "video_cc_mvs1_core_clk", + "video_cc_mvsc_core_clk", + "video_cc_mvsc_ctl_axi_clk", + "video_cc_venus_ahb_clk", + "video_cc_xo_clk", + "l3_clk", + "pwrcl_clk", + "perfcl_clk", +}; + +static struct clk_debug_mux gcc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x62008, + .post_div_offset = 0x62000, + .cbcr_offset = 0x62004, + .src_sel_mask = 0x3FF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + MUX_SRC_LIST( + { "cam_cc_bps_ahb_clk", 0x46, 4, CAM_CC, + 0xE, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_bps_areg_clk", 0x46, 4, CAM_CC, + 0xD, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_bps_axi_clk", 0x46, 4, CAM_CC, + 0xC, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_bps_clk", 0x46, 4, CAM_CC, + 0xB, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_camnoc_axi_clk", 0x46, 4, CAM_CC, + 0x27, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_camnoc_dcd_xo_clk", 0x46, 4, CAM_CC, + 0x33, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_cci_0_clk", 0x46, 4, CAM_CC, + 0x2A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_cci_1_clk", 0x46, 4, CAM_CC, + 0x3B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_core_ahb_clk", 0x46, 4, CAM_CC, + 0x2E, 0xFF, 0, 0xF, 0, 4, 0xD000, 
0xD004, 0xD008 }, + { "cam_cc_cpas_ahb_clk", 0x46, 4, CAM_CC, + 0x2C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csi0phytimer_clk", 0x46, 4, CAM_CC, + 0x5, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csi1phytimer_clk", 0x46, 4, CAM_CC, + 0x7, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csi2phytimer_clk", 0x46, 4, CAM_CC, + 0x9, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csi3phytimer_clk", 0x46, 4, CAM_CC, + 0x35, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csiphy0_clk", 0x46, 4, CAM_CC, + 0x6, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csiphy1_clk", 0x46, 4, CAM_CC, + 0x8, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csiphy2_clk", 0x46, 4, CAM_CC, + 0xA, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_csiphy3_clk", 0x46, 4, CAM_CC, + 0x36, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_fd_core_clk", 0x46, 4, CAM_CC, + 0x28, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_fd_core_uar_clk", 0x46, 4, CAM_CC, + 0x29, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_gdsc_clk", 0x46, 4, CAM_CC, + 0x3C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_icp_ahb_clk", 0x46, 4, CAM_CC, + 0x37, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_icp_clk", 0x46, 4, CAM_CC, + 0x26, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_0_axi_clk", 0x46, 4, CAM_CC, + 0x1B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_0_clk", 0x46, 4, CAM_CC, + 0x17, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_0_cphy_rx_clk", 0x46, 4, CAM_CC, + 0x1A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_0_csid_clk", 0x46, 4, CAM_CC, + 0x19, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_0_dsp_clk", 0x46, 4, CAM_CC, + 0x18, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_1_axi_clk", 0x46, 4, CAM_CC, + 0x21, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { 
"cam_cc_ife_1_clk", 0x46, 4, CAM_CC, + 0x1D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_1_cphy_rx_clk", 0x46, 4, CAM_CC, + 0x20, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_1_csid_clk", 0x46, 4, CAM_CC, + 0x1F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_1_dsp_clk", 0x46, 4, CAM_CC, + 0x1E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_lite_clk", 0x46, 4, CAM_CC, + 0x22, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_lite_cphy_rx_clk", 0x46, 4, CAM_CC, + 0x24, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ife_lite_csid_clk", 0x46, 4, CAM_CC, + 0x23, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_0_ahb_clk", 0x46, 4, CAM_CC, + 0x12, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_0_areg_clk", 0x46, 4, CAM_CC, + 0x11, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_0_axi_clk", 0x46, 4, CAM_CC, + 0x10, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_0_clk", 0x46, 4, CAM_CC, + 0xF, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_1_ahb_clk", 0x46, 4, CAM_CC, + 0x16, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_1_areg_clk", 0x46, 4, CAM_CC, + 0x15, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_1_axi_clk", 0x46, 4, CAM_CC, + 0x14, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_ipe_1_clk", 0x46, 4, CAM_CC, + 0x13, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_jpeg_clk", 0x46, 4, CAM_CC, + 0x25, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_lrme_clk", 0x46, 4, CAM_CC, + 0x2B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_mclk0_clk", 0x46, 4, CAM_CC, + 0x1, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_mclk1_clk", 0x46, 4, CAM_CC, + 0x2, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_mclk2_clk", 0x46, 4, CAM_CC, + 0x3, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_mclk3_clk", 0x46, 4, CAM_CC, + 
0x4, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "cam_cc_sleep_clk", 0x46, 4, CAM_CC, + 0x3F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 }, + { "disp_cc_mdss_ahb_clk", 0x47, 4, DISP_CC, + 0x1F, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_byte0_clk", 0x47, 4, DISP_CC, + 0x13, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_byte0_intf_clk", 0x47, 4, DISP_CC, + 0x14, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_byte1_clk", 0x47, 4, DISP_CC, + 0x15, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_byte1_intf_clk", 0x47, 4, DISP_CC, + 0x16, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_dp_aux_clk", 0x47, 4, DISP_CC, + 0x1E, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_dp_crypto_clk", 0x47, 4, DISP_CC, + 0x1B, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_dp_link_clk", 0x47, 4, DISP_CC, + 0x19, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_dp_link_intf_clk", 0x47, 4, DISP_CC, + 0x1A, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_dp_pixel1_clk", 0x47, 4, DISP_CC, + 0x1D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_dp_pixel_clk", 0x47, 4, DISP_CC, + 0x1C, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_esc0_clk", 0x47, 4, DISP_CC, + 0x17, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_esc1_clk", 0x47, 4, DISP_CC, + 0x18, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_mdp_clk", 0x47, 4, DISP_CC, + 0xF, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_mdp_lut_clk", 0x47, 4, DISP_CC, + 0x11, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_non_gdsc_ahb_clk", 0x47, 4, DISP_CC, + 0x20, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_pclk0_clk", 0x47, 4, DISP_CC, + 0xD, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_pclk1_clk", 0x47, 4, DISP_CC, + 0xE, 0xFF, 0, 0x3, 0, 4, 0x7000, 
0x5008, 0x500C }, + { "disp_cc_mdss_rot_clk", 0x47, 4, DISP_CC, + 0x10, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_rscc_ahb_clk", 0x47, 4, DISP_CC, + 0x22, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_rscc_vsync_clk", 0x47, 4, DISP_CC, + 0x21, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC, + 0x12, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "disp_cc_xo_clk", 0x47, 4, DISP_CC, + 0x2A, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C }, + { "gcc_aggre_noc_pcie_tbu_clk", 0x2D, 4, GCC, + 0x2D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_aggre_ufs_phy_axi_clk", 0x11D, 4, GCC, + 0x11D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_aggre_usb3_prim_axi_clk", 0x11B, 4, GCC, + 0x11B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_apc_vs_clk", 0x113, 4, GCC, + 0x113, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_boot_rom_ahb_clk", 0x94, 4, GCC, + 0x94, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_camera_ahb_clk", 0x3A, 4, GCC, + 0x3A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_camera_hf_axi_clk", 0x40, 4, GCC, + 0x40, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_camera_sf_axi_clk", 0x176, 4, GCC, + 0x176, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_camera_xo_clk", 0x43, 4, GCC, + 0x43, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ce1_ahb_clk", 0xA9, 4, GCC, + 0xA9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ce1_axi_clk", 0xA8, 4, GCC, + 0xA8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ce1_clk", 0xA7, 4, GCC, + 0xA7, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_cfg_noc_usb3_prim_axi_clk", 0x1D, 4, GCC, + 0x1D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_cpuss_ahb_clk", 0xCE, 4, GCC, + 0xCE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_cpuss_gnoc_clk", 0xCF, 4, GCC, + 0xCF, 0x3FF, 0, 0xF, 0, 4, 
0x62008, 0x62000, 0x62004 }, + { "gcc_cpuss_rbcpr_clk", 0xD0, 4, GCC, + 0xD0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ddrss_gpu_axi_clk", 0xBB, 4, GCC, + 0xBB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_disp_ahb_clk", 0x3B, 4, GCC, + 0x3B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_disp_gpll0_clk_src", 0x4C, 4, GCC, + 0x4C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_disp_gpll0_div_clk_src", 0x4D, 4, GCC, + 0x4D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_disp_hf_axi_clk", 0x41, 4, GCC, + 0x41, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_disp_sf_axi_clk", 0x177, 4, GCC, + 0x177, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_disp_xo_clk", 0x44, 4, GCC, + 0x44, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gp1_clk", 0xDE, 4, GCC, + 0xDE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gp2_clk", 0xDF, 4, GCC, + 0xDF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gp3_clk", 0xE0, 4, GCC, + 0xE0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gpu_cfg_ahb_clk", 0x142, 4, GCC, + 0x142, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gpu_gpll0_clk_src", 0x148, 4, GCC, + 0x148, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gpu_gpll0_div_clk_src", 0x149, 4, GCC, + 0x149, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gpu_memnoc_gfx_clk", 0x145, 4, GCC, + 0x145, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gpu_snoc_dvm_gfx_clk", 0x147, 4, GCC, + 0x147, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_gpu_vs_clk", 0x112, 4, GCC, + 0x112, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_npu_axi_clk", 0x16A, 4, GCC, + 0x16A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_npu_cfg_ahb_clk", 0x169, 4, GCC, + 0x169, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_npu_gpll0_clk_src", 0x16D, 4, GCC, + 0x16D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 
0x62000, 0x62004 }, + { "gcc_npu_gpll0_div_clk_src", 0x16E, 4, GCC, + 0x16E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_0_aux_clk", 0xE5, 4, GCC, + 0xE5, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_0_cfg_ahb_clk", 0xE4, 4, GCC, + 0xE4, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_0_mstr_axi_clk", 0xE3, 4, GCC, + 0xE3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_0_pipe_clk", 0xE6, 4, GCC, + 0xE6, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_0_slv_axi_clk", 0xE2, 4, GCC, + 0xE2, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_0_slv_q2a_axi_clk", 0xE1, 4, GCC, + 0xE1, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_phy_aux_clk", 0xEF, 4, GCC, + 0xEF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pcie_phy_refgen_clk", 0x160, 4, GCC, + 0x160, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pdm2_clk", 0x8E, 4, GCC, + 0x8E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pdm_ahb_clk", 0x8C, 4, GCC, + 0x8C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_pdm_xo4_clk", 0x8D, 4, GCC, + 0x8D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_prng_ahb_clk", 0x8F, 4, GCC, + 0x8F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_core_2x_clk", 0x77, 4, GCC, + 0x77, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_core_clk", 0x76, 4, GCC, + 0x76, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s0_clk", 0x78, 4, GCC, + 0x78, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s1_clk", 0x79, 4, GCC, + 0x79, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s2_clk", 0x7A, 4, GCC, + 0x7A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s3_clk", 0x7B, 4, GCC, + 0x7B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s4_clk", 0x7C, 4, GCC, + 0x7C, 0x3FF, 
0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s5_clk", 0x7D, 4, GCC, + 0x7D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s6_clk", 0x7E, 4, GCC, + 0x7E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap0_s7_clk", 0x7F, 4, GCC, + 0x7F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_core_2x_clk", 0x80, 4, GCC, + 0x80, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_core_clk", 0x81, 4, GCC, + 0x81, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s0_clk", 0x84, 4, GCC, + 0x84, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s1_clk", 0x85, 4, GCC, + 0x85, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s2_clk", 0x86, 4, GCC, + 0x86, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s3_clk", 0x87, 4, GCC, + 0x87, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s4_clk", 0x88, 4, GCC, + 0x88, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s5_clk", 0x89, 4, GCC, + 0x89, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s6_clk", 0x8A, 4, GCC, + 0x8A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap1_s7_clk", 0x8B, 4, GCC, + 0x8B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap_0_m_ahb_clk", 0x74, 4, GCC, + 0x74, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap_0_s_ahb_clk", 0x75, 4, GCC, + 0x75, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap_1_m_ahb_clk", 0x82, 4, GCC, + 0x82, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_qupv3_wrap_1_s_ahb_clk", 0x83, 4, GCC, + 0x83, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sdcc1_ahb_clk", 0x15C, 4, GCC, + 0x15C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sdcc1_apps_clk", 0x15B, 4, GCC, + 0x15B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, 
+ { "gcc_sdcc1_ice_core_clk", 0x15D, 4, GCC, + 0x15D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sdcc2_ahb_clk", 0x71, 4, GCC, + 0x71, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sdcc2_apps_clk", 0x70, 4, GCC, + 0x70, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sdcc4_ahb_clk", 0x73, 4, GCC, + 0x73, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sdcc4_apps_clk", 0x72, 4, GCC, + 0x72, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_sys_noc_cpuss_ahb_clk", 0xC, 4, GCC, + 0xC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_tsif_ahb_clk", 0x90, 4, GCC, + 0x90, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_tsif_inactivity_timers_clk", 0x92, 4, GCC, + 0x92, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_tsif_ref_clk", 0x91, 4, GCC, + 0x91, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_ahb_clk", 0xFC, 4, GCC, + 0xFC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_axi_clk", 0xFB, 4, GCC, + 0xFB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_ice_core_clk", 0x102, 4, GCC, + 0x102, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_phy_aux_clk", 0x103, 4, GCC, + 0x103, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_rx_symbol_0_clk", 0xFE, 4, GCC, + 0xFE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_tx_symbol_0_clk", 0xFD, 4, GCC, + 0xFD, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_ufs_phy_unipro_core_clk", 0x101, 4, GCC, + 0x101, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb30_prim_master_clk", 0x5F, 4, GCC, + 0x5F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb30_prim_mock_utmi_clk", 0x61, 4, GCC, + 0x61, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb30_prim_sleep_clk", 0x60, 4, GCC, + 0x60, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb3_prim_phy_aux_clk", 0x62, 4, GCC, + 
0x62, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb3_prim_phy_com_aux_clk", 0x63, 4, GCC, + 0x63, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb3_prim_phy_pipe_clk", 0x64, 4, GCC, + 0x64, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_usb_phy_cfg_ahb2phy_clk", 0x6F, 4, GCC, + 0x6F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_vdda_vs_clk", 0x10E, 4, GCC, + 0x10E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_vddcx_vs_clk", 0x10C, 4, GCC, + 0x10C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_vddmx_vs_clk", 0x10D, 4, GCC, + 0x10D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_video_ahb_clk", 0x39, 4, GCC, + 0x39, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_video_axi_clk", 0x3F, 4, GCC, + 0x3F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_video_xo_clk", 0x42, 4, GCC, + 0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_vs_ctrl_ahb_clk", 0x110, 4, GCC, + 0x110, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "gcc_vs_ctrl_clk", 0x10F, 4, GCC, + 0x10F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 }, + { "measure_only_mccc_clk", 0xC2, 1, MC_CC, + 0xC2, 0x3FF, 0, 0xF, 0, 1, 0x62008, 0x62000, 0x62004 }, + { "measure_only_cnoc_clk", 0x15, 1, GCC, + 0x15, 0x3FF, 0, 0xF, 0, 1, 0x62008, 0x62000, 0x62004 }, + { "measure_only_ipa_2x_clk", 0x128, 1, GCC, + 0x128, 0x3FF, 0, 0xF, 0, 1, 0x62008, 0x62000, 0x62004 }, + { "measure_only_snoc_clk", 0x7, 1, GCC, + 0x7, 0x3FF, 0, 0xF, 0, 1, 0x62008, 0x62000, 0x62004 }, + { "gpu_cc_acd_ahb_clk", 0x144, 4, GPU_CC, + 0x20, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_acd_cxo_clk", 0x144, 4, GPU_CC, + 0x1F, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_ahb_clk", 0x144, 4, GPU_CC, + 0x11, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_crc_ahb_clk", 0x144, 4, GPU_CC, + 0x12, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cx_apb_clk", 0x144, 4, GPU_CC, + 
0x15, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cx_gfx3d_clk", 0x144, 4, GPU_CC, + 0x1A, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cx_gfx3d_slv_clk", 0x144, 4, GPU_CC, + 0x1B, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cx_gmu_clk", 0x144, 4, GPU_CC, + 0x19, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cx_snoc_dvm_clk", 0x144, 4, GPU_CC, + 0x16, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cxo_aon_clk", 0x144, 4, GPU_CC, + 0xB, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_cxo_clk", 0x144, 4, GPU_CC, + 0xA, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_gx_cxo_clk", 0x144, 4, GPU_CC, + 0xF, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_gx_gfx3d_clk", 0x144, 4, GPU_CC, + 0xC, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_gx_gmu_clk", 0x144, 4, GPU_CC, + 0x10, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "gpu_cc_gx_vsense_clk", 0x144, 4, GPU_CC, + 0xD, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 }, + { "npu_cc_armwic_core_clk", 0x16F, 4, NPU_CC, + 0x4, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_bto_core_clk", 0x16F, 4, NPU_CC, + 0x12, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_bwmon_clk", 0x16F, 4, NPU_CC, + 0xF, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_cal_dp_cdc_clk", 0x16F, 4, NPU_CC, + 0x8, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_cal_dp_clk", 0x16F, 4, NPU_CC, + 0x1, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_comp_noc_axi_clk", 0x16F, 4, NPU_CC, + 0x9, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_conf_noc_ahb_clk", 0x16F, 4, NPU_CC, + 0xA, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_npu_core_apb_clk", 0x16F, 4, NPU_CC, + 0xE, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_npu_core_atb_clk", 0x16F, 4, NPU_CC, + 0xB, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_npu_core_clk", 0x16F, 4, NPU_CC, + 0x2, 0xFF, 0, 0x3, 
0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_npu_core_cti_clk", 0x16F, 4, NPU_CC, + 0xC, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_npu_cpc_clk", 0x16F, 4, NPU_CC, + 0x3, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_npu_cpc_timer_clk", 0x16F, 4, NPU_CC, + 0x5, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_perf_cnt_clk", 0x16F, 4, NPU_CC, + 0x10, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_qtimer_core_clk", 0x16F, 4, NPU_CC, + 0x6, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_sleep_clk", 0x16F, 4, NPU_CC, + 0x7, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "npu_cc_xo_clk", 0x16F, 4, NPU_CC, + 0x11, 0xFF, 0, 0x3, 0, 2, 0x4000, 0x3004, 0x3008 }, + { "video_cc_apb_clk", 0x48, 4, VIDEO_CC, + 0xE, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_at_clk", 0x48, 4, VIDEO_CC, + 0x11, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_iris_ahb_clk", 0x48, 4, VIDEO_CC, + 0x7, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_mvs0_axi_clk", 0x48, 4, VIDEO_CC, + 0xB, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_mvs0_core_clk", 0x48, 4, VIDEO_CC, + 0x3, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_mvs1_axi_clk", 0x48, 4, VIDEO_CC, + 0xC, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_mvs1_core_clk", 0x48, 4, VIDEO_CC, + 0x5, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_mvsc_core_clk", 0x48, 4, VIDEO_CC, + 0x1, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_mvsc_ctl_axi_clk", 0x48, 4, VIDEO_CC, + 0xA, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_venus_ahb_clk", 0x48, 4, VIDEO_CC, + 0xF, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "video_cc_xo_clk", 0x48, 4, VIDEO_CC, + 0x8, 0x3F, 0, 0x7, 0, 5, 0xACC, 0x938, 0x940 }, + { "l3_clk", 0xD6, 4, CPU_CC, + 0x46, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 }, + { "pwrcl_clk", 0xD6, 4, CPU_CC, + 0x44, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 }, + { "perfcl_clk", 0xD6, 4, CPU_CC, 
+ 0x45, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 }, + ), + .hw.init = &(struct clk_init_data){ + .name = "gcc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = debug_mux_parent_names, + .num_parents = ARRAY_SIZE(debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static const struct of_device_id clk_debug_match_table[] = { + { .compatible = "qcom,debugcc-sdmmagpie" }, + { } +}; + +static int map_debug_bases(struct platform_device *pdev, char *base, int cc) +{ + if (!of_get_property(pdev->dev.of_node, base, NULL)) + return -ENODEV; + + gcc_debug_mux.regmap[cc] = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, base); + if (IS_ERR(gcc_debug_mux.regmap[cc])) { + pr_err("Failed to map %s (ret=%ld)\n", base, + PTR_ERR(gcc_debug_mux.regmap[cc])); + return PTR_ERR(gcc_debug_mux.regmap[cc]); + } + return 0; +} + +static int clk_debug_sdmmagpie_probe(struct platform_device *pdev) +{ + struct clk *clk; + int ret = 0, count; + + clk = devm_clk_get(&pdev->dev, "xo_clk_src"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get xo clock\n"); + return PTR_ERR(clk); + } + + debug_mux_priv.cxo = clk; + + ret = of_property_read_u32(pdev->dev.of_node, "qcom,cc-count", + &count); + if (ret < 0) { + dev_err(&pdev->dev, "Num of debug clock controller not specified\n"); + return ret; + } + + if (!count) { + dev_err(&pdev->dev, "Count of CC cannot be zero\n"); + return -EINVAL; + } + + gcc_debug_mux.regmap = devm_kzalloc(&pdev->dev, + sizeof(struct regmap *) * count, GFP_KERNEL); + if (!gcc_debug_mux.regmap) + return -ENOMEM; + + ret = map_debug_bases(pdev, "qcom,gcc", GCC); + if (ret) + return ret; + + ret = map_debug_bases(pdev, "qcom,dispcc", DISP_CC); + if (ret) + return ret; + + ret = map_debug_bases(pdev, "qcom,videocc", VIDEO_CC); + if (ret) + return ret; + + ret = map_debug_bases(pdev, "qcom,camcc", CAM_CC); + if (ret) + return ret; + + ret = map_debug_bases(pdev, "qcom,npucc", NPU_CC); + if (ret) + return ret; 
+ + ret = map_debug_bases(pdev, "qcom,gpucc", GPU_CC); + if (ret) + return ret; + + ret = map_debug_bases(pdev, "qcom,cpucc", CPU_CC); + if (ret) + return ret; + + ret = map_debug_bases(pdev, "qcom,mccc", MC_CC); + if (ret) + return ret; + + clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register GCC debug mux\n"); + return PTR_ERR(clk); + } + + ret = clk_debug_measure_register(&gcc_debug_mux.hw); + if (ret) + dev_err(&pdev->dev, "Could not register Measure clock\n"); + else + dev_info(&pdev->dev, "Registered debug mux successfully\n"); + + return ret; +} + +static struct platform_driver clk_debug_driver = { + .probe = clk_debug_sdmmagpie_probe, + .driver = { + .name = "debugcc-sdmmagpie", + .of_match_table = clk_debug_match_table, + .owner = THIS_MODULE, + }, +}; + +int __init clk_debug_sdmmagpie_init(void) +{ + return platform_driver_register(&clk_debug_driver); +} +fs_initcall(clk_debug_sdmmagpie_init); + +MODULE_DESCRIPTION("QTI DEBUG CC SDMMAGPIE Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:debugcc-sdmmagpie"); diff --git a/drivers/clk/qcom/debugcc-sm8150.c b/drivers/clk/qcom/debugcc-sm8150.c index 34333df7dbbd03ed7f33e834d7fd89595122e4eb..60625bf3d4c29d660479601b3f02b57f05a21e2f 100644 --- a/drivers/clk/qcom/debugcc-sm8150.c +++ b/drivers/clk/qcom/debugcc-sm8150.c @@ -23,9 +23,51 @@ #include #include #include +#include +#include #include "clk-debug.h" +#define MSM_BUS_VECTOR(_src, _dst, _ab, _ib) \ +{ \ + .src = _src, \ + .dst = _dst, \ + .ab = _ab, \ + .ib = _ib, \ +} + +static struct msm_bus_vectors clk_measure_vectors[] = { + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_CAMERA_CFG, 0, 0), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_VENUS_CFG, 0, 0), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_DISPLAY_CFG, 0, 0), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_CAMERA_CFG, 0, 1), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + 
MSM_BUS_SLAVE_VENUS_CFG, 0, 1), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_DISPLAY_CFG, 0, 1), +}; + +static struct msm_bus_paths clk_measure_usecases[] = { + { + .num_paths = 3, + .vectors = &clk_measure_vectors[0], + }, + { + .num_paths = 3, + .vectors = &clk_measure_vectors[3], + } +}; + +static struct msm_bus_scale_pdata clk_measure_scale_table = { + .usecase = clk_measure_usecases, + .num_usecases = ARRAY_SIZE(clk_measure_usecases), + .name = "clk_measure", +}; + static struct measure_clk_data debug_mux_priv = { .ctl_reg = 0x62038, .status_reg = 0x6203C, @@ -878,6 +920,12 @@ static int clk_debug_sm8150_probe(struct platform_device *pdev) if (ret) return ret; + gcc_debug_mux.bus_cl_id = + msm_bus_scale_register_client(&clk_measure_scale_table); + + if (!gcc_debug_mux.bus_cl_id) + return -EPROBE_DEFER; + clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Unable to register GCC debug mux\n"); diff --git a/drivers/clk/qcom/dispcc-sm8150.c b/drivers/clk/qcom/dispcc-sm8150.c index 46475d7e1761df8d9d0bac4877869ba474fd122b..2a702f58226bd21d867106948ed97a38273f1627 100644 --- a/drivers/clk/qcom/dispcc-sm8150.c +++ b/drivers/clk/qcom/dispcc-sm8150.c @@ -38,7 +38,7 @@ #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } -static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); #define DISP_CC_MISC_CMD 0x8000 diff --git a/drivers/clk/qcom/gcc-sdmmagpie.c b/drivers/clk/qcom/gcc-sdmmagpie.c index 81a05f4c00e68de57b2f5ec9c8739e5bbc38e12d..f5c25883b1c2c901bc740b9fdd7aec2a4d3f4057 100644 --- a/drivers/clk/qcom/gcc-sdmmagpie.c +++ b/drivers/clk/qcom/gcc-sdmmagpie.c @@ -1698,109 +1698,6 @@ static struct clk_branch gcc_gpu_vs_clk = { }, }; -static struct clk_branch gcc_mss_axis2_clk = { - .halt_reg = 0x8a008, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8a008, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - 
.name = "gcc_mss_axis2_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mss_cfg_ahb_clk = { - .halt_reg = 0x8a000, - .halt_check = BRANCH_HALT, - .hwcg_reg = 0x8a000, - .hwcg_bit = 1, - .clkr = { - .enable_reg = 0x8a000, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_mss_cfg_ahb_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mss_gpll0_div_clk_src = { - .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x52004, - .enable_mask = BIT(17), - .hw.init = &(struct clk_init_data){ - .name = "gcc_mss_gpll0_div_clk_src", - .parent_names = (const char *[]){ - "gcc_pll0_main_div_cdiv", - }, - .num_parents = 1, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mss_mfab_axis_clk = { - .halt_reg = 0x8a004, - .halt_check = BRANCH_VOTED, - .hwcg_reg = 0x8a004, - .hwcg_bit = 1, - .clkr = { - .enable_reg = 0x8a004, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_mss_mfab_axis_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mss_q6_memnoc_axi_clk = { - .halt_reg = 0x8a154, - .halt_check = BRANCH_VOTED, - .clkr = { - .enable_reg = 0x8a154, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_mss_q6_memnoc_axi_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mss_snoc_axi_clk = { - .halt_reg = 0x8a150, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8a150, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_mss_snoc_axi_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mss_vs_clk = { - .halt_reg = 0x7a048, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x7a048, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_mss_vs_clk", - .parent_names = (const char *[]){ - "gcc_vsensor_clk_src", - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = 
&clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_npu_axi_clk = { .halt_reg = 0x4d008, .halt_check = BRANCH_HALT, @@ -3112,14 +3009,15 @@ static struct clk_branch gcc_cpuss_gnoc_clk = { }; /* Measure-only clock for ddrss_gcc_debug_clk. */ -static struct clk_dummy measure_only_bimc_clk = { +static struct clk_dummy measure_only_mccc_clk = { .rrate = 1000, .hw.init = &(struct clk_init_data){ - .name = "measure_only_bimc_clk", + .name = "measure_only_mccc_clk", .ops = &clk_dummy_ops, }, }; + /* Measure-only clock for gcc_cfg_noc_ahb_clk. */ static struct clk_dummy measure_only_cnoc_clk = { .rrate = 1000, @@ -3148,7 +3046,7 @@ static struct clk_dummy measure_only_snoc_clk = { }; struct clk_hw *gcc_sdmmagpie_hws[] = { - [MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw, + [MEASURE_ONLY_BIMC_CLK] = &measure_only_mccc_clk.hw, [MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw, [MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw, [MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw, @@ -3195,13 +3093,6 @@ static struct clk_regmap *gcc_sdmmagpie_clocks[] = { [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr, - [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr, - [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, - [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr, - [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr, - [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr, - [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, - [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr, [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr, [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr, [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr, @@ -3336,6 +3227,7 @@ static const struct qcom_reset_map gcc_sdmmagpie_resets[] = { [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 }, [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 }, [GCC_USB3_PHY_SEC_BCR] = { 0x5000c }, + 
[GCC_QUSB2PHY_PRIM_BCR] = { 0x26000 }, }; static struct clk_dfs gcc_dfs_clocks[] = { diff --git a/drivers/clk/qcom/gcc-sm6150.c b/drivers/clk/qcom/gcc-sm6150.c index ccab586d913cdc7b57f633f2700e0089077ef9ae..c62f870b57eb9b2834273d318930937a9ad9d073 100644 --- a/drivers/clk/qcom/gcc-sm6150.c +++ b/drivers/clk/qcom/gcc-sm6150.c @@ -3424,8 +3424,8 @@ static const struct qcom_reset_map gcc_sm6150_resets[] = { [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 }, [GCC_UFS_PHY_BCR] = { 0x77000 }, [GCC_USB20_SEC_BCR] = { 0xa6000 }, - [GCC_USB3_DP_PHY_PRIM_SP0_BCR] = { 0x50010 }, [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x50008 }, + [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 }, }; static struct clk_dfs gcc_dfs_clocks[] = { diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 7d40149d840964ab217530b57ad0da917abe58a5..8edc1364458336bf0815f122ba863e7243f6496d 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -43,7 +43,7 @@ static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner); -static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); enum { P_AUD_REF_CLK, diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c index a579a5d1f0287c9df7be0177e356b90ba2b981c4..f7796e9e40d3e81201d309ddcd3df394680b08d6 100644 --- a/drivers/clk/qcom/gdsc-regulator.c +++ b/drivers/clk/qcom/gdsc-regulator.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include @@ -52,6 +54,8 @@ /* Timeout Delay */ #define TIMEOUT_US 100 +#define MBOX_TOUT_MS 100 + struct gdsc { struct regulator_dev *rdev; struct regulator_desc rdesc; @@ -64,6 +68,8 @@ struct gdsc { struct regulator *parent_regulator; struct reset_control **reset_clocks; struct msm_bus_scale_pdata *bus_pdata; + struct mbox_client mbox_client; + struct 
mbox_chan *mbox; u32 bus_handle; bool toggle_mem; bool toggle_periph; @@ -73,6 +79,7 @@ struct gdsc { bool force_root_en; bool no_status_check_on_disable; bool skip_disable; + bool bypass_skip_disable; bool is_gdsc_enabled; bool allow_clear; bool reset_aon; @@ -218,6 +225,35 @@ static int gdsc_is_enabled(struct regulator_dev *rdev) return is_enabled; } +#define MAX_LEN 96 + +static int gdsc_qmp_enable(struct gdsc *sc) +{ + char buf[MAX_LEN] = "{class: clock, res: gpu_noc_wa}"; + struct qmp_pkt pkt; + uint32_t regval; + int ret; + + regmap_read(sc->regmap, REG_OFFSET, ®val); + if (!(regval & SW_COLLAPSE_MASK)) { + /* + * Do not enable via a QMP request if the GDSC is already + * enabled by software. + */ + return 0; + } + + pkt.size = MAX_LEN; + pkt.data = buf; + + ret = mbox_send_message(sc->mbox, &pkt); + if (ret < 0) + dev_err(&sc->rdev->dev, "qmp message send failed, ret=%d\n", + ret); + + return ret; +} + static int gdsc_enable(struct regulator_dev *rdev) { struct gdsc *sc = rdev_get_drvdata(rdev); @@ -308,9 +344,15 @@ static int gdsc_enable(struct regulator_dev *rdev) gdsc_mb(sc); } - regmap_read(sc->regmap, REG_OFFSET, ®val); - regval &= ~SW_COLLAPSE_MASK; - regmap_write(sc->regmap, REG_OFFSET, regval); + if (sc->mbox) { + ret = gdsc_qmp_enable(sc); + if (ret < 0) + goto end; + } else { + regmap_read(sc->regmap, REG_OFFSET, ®val); + regval &= ~SW_COLLAPSE_MASK; + regmap_write(sc->regmap, REG_OFFSET, regval); + } /* Wait for 8 XO cycles before polling the status bit. */ gdsc_mb(sc); @@ -426,7 +468,7 @@ static int gdsc_disable(struct regulator_dev *rdev) /* Delay to account for staggered memory powerdown. */ udelay(1); - if (sc->skip_disable) { + if (sc->skip_disable && !sc->bypass_skip_disable) { /* * Don't change the GDSCR register state on disable. AOP will * handle this during system sleep. 
@@ -497,6 +539,12 @@ static unsigned int gdsc_get_mode(struct regulator_dev *rdev) uint32_t regval; int ret; + if (sc->skip_disable) { + if (sc->bypass_skip_disable) + return REGULATOR_MODE_IDLE; + return REGULATOR_MODE_NORMAL; + } + mutex_lock(&gdsc_seq_lock); if (sc->parent_regulator) { @@ -556,6 +604,23 @@ static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode) mutex_lock(&gdsc_seq_lock); + if (sc->skip_disable) { + switch (mode) { + case REGULATOR_MODE_IDLE: + sc->bypass_skip_disable = true; + break; + case REGULATOR_MODE_NORMAL: + sc->bypass_skip_disable = false; + break; + default: + ret = -EINVAL; + break; + } + + mutex_unlock(&gdsc_seq_lock); + return ret; + } + if (sc->parent_regulator) { ret = regulator_set_voltage(sc->parent_regulator, RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX); @@ -817,6 +882,23 @@ static int gdsc_probe(struct platform_device *pdev) sc->is_bus_enabled = true; } + if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) { + sc->mbox_client.dev = &pdev->dev; + sc->mbox_client.tx_block = true; + sc->mbox_client.tx_tout = MBOX_TOUT_MS; + sc->mbox_client.knows_txdone = false; + + sc->mbox = mbox_request_channel(&sc->mbox_client, 0); + if (IS_ERR(sc->mbox)) { + ret = PTR_ERR(sc->mbox); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "mailbox channel request failed, ret=%d\n", + ret); + sc->mbox = NULL; + goto err; + } + } + sc->rdesc.id = atomic_inc_return(&gdsc_count); sc->rdesc.ops = &gdsc_ops; sc->rdesc.type = REGULATOR_VOLTAGE; @@ -862,6 +944,17 @@ static int gdsc_probe(struct platform_device *pdev) REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST; } + if (sc->skip_disable) { + /* + * If the disable skipping feature is allowed, then use mode + * control to enable and disable the feature at runtime instead + * of using it to enable and disable hardware triggering. 
+ */ + init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE; + init_data->constraints.valid_modes_mask = + REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE; + } + if (!sc->toggle_logic) { sc->reset_count = of_property_count_strings(pdev->dev.of_node, "reset-names"); @@ -955,6 +1048,9 @@ static int gdsc_probe(struct platform_device *pdev) return 0; err: + if (sc->mbox) + mbox_free_channel(sc->mbox); + if (sc->bus_handle) { if (sc->is_bus_enabled) msm_bus_scale_client_update_request(sc->bus_handle, 0); @@ -970,6 +1066,9 @@ static int gdsc_remove(struct platform_device *pdev) regulator_unregister(sc->rdev); + if (sc->mbox) + mbox_free_channel(sc->mbox); + if (sc->bus_handle) { if (sc->is_bus_enabled) msm_bus_scale_client_update_request(sc->bus_handle, 0); diff --git a/drivers/clk/qcom/gpucc-sm6150.c b/drivers/clk/qcom/gpucc-sm6150.c index 7da9ad021419da0003b2962b07a3079099147115..84191466c5a9eea355555ca96e7f3e34a837e697 100644 --- a/drivers/clk/qcom/gpucc-sm6150.c +++ b/drivers/clk/qcom/gpucc-sm6150.c @@ -396,12 +396,13 @@ static struct clk_branch gpu_cc_sleep_clk = { static struct clk_branch gpu_cc_ahb_clk = { .halt_reg = 0x1078, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x1078, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_cc_ahb_clk", + .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c index a11b2b5856b7f9b12ccadf8f20251322d19a53a3..4043e593d5f66f5e922157242e354d008abf3eb4 100644 --- a/drivers/clk/qcom/gpucc-sm8150.c +++ b/drivers/clk/qcom/gpucc-sm8150.c @@ -145,12 +145,13 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { static struct clk_branch gpu_cc_ahb_clk = { .halt_reg = 0x1078, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x1078, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_cc_ahb_clk", + .flags = CLK_IS_CRITICAL, .ops = 
&clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/npucc-sm8150.c b/drivers/clk/qcom/npucc-sm8150.c index ed342ecfb27c9220db97a977ab24e6b0e47705a1..4b563519ab2ad891f74034eb75ef22dcd865eac6 100644 --- a/drivers/clk/qcom/npucc-sm8150.c +++ b/drivers/clk/qcom/npucc-sm8150.c @@ -111,11 +111,14 @@ static const struct alpha_pll_config npu_cc_pll0_config = { }; static const struct alpha_pll_config npu_cc_pll0_config_sm8150_v2 = { - .l = 0xD, - .alpha = 0x555, + .l = 0x1F, + .alpha = 0x4000, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -186,6 +189,9 @@ static const struct alpha_pll_config npu_cc_pll1_config_sm8150_v2 = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -255,7 +261,8 @@ static const struct freq_tbl ftbl_npu_cc_cal_dp_clk_src_sm8150_v2[] = { F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0), F(400000000, P_NPU_CC_CRC_DIV, 1, 0, 0), F(487000000, P_NPU_CC_CRC_DIV, 1, 0, 0), - F(773000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(652000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(811000000, P_NPU_CC_CRC_DIV, 1, 0, 0), F(908000000, P_NPU_CC_CRC_DIV, 1, 0, 0), { } }; @@ -448,6 +455,10 @@ static struct clk_branch npu_cc_npu_core_apb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "npu_cc_npu_core_apb_clk", + .parent_names = (const char *[]){ + "qdss_qmp_clk", + }, + .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -461,6 +472,10 @@ static struct clk_branch npu_cc_npu_core_atb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "npu_cc_npu_core_atb_clk", + 
.parent_names = (const char *[]){ + "gcc_npu_at_clk", + }, + .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -492,6 +507,10 @@ static struct clk_branch npu_cc_npu_core_cti_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "npu_cc_npu_core_cti_clk", + .parent_names = (const char *[]){ + "gcc_npu_trig_clk", + }, + .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -650,8 +669,9 @@ static void npu_cc_sm8150_fixup_sm8150v2(struct regmap *regmap) npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 0; npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 400000000; npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 487000000; - npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 773000000; - npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 908000000; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 652000000; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 811000000; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_HIGH_L1] = 908000000; npu_cc_npu_core_clk_src.freq_tbl = ftbl_npu_cc_npu_core_clk_src_sm8150_v2; npu_cc_npu_core_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 0; diff --git a/drivers/clk/qcom/scc-sm8150.c b/drivers/clk/qcom/scc-sm8150.c new file mode 100644 index 0000000000000000000000000000000000000000..6519d6bd4eb2b673515751c519d7ca2dd44d8f47 --- /dev/null +++ b/drivers/clk/qcom/scc-sm8150.c @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "common.h" +#include "clk-regmap.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "clk-alpha-pll.h" +#include "vdd-level.h" + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static DEFINE_VDD_REGULATORS(vdd_scc_cx, VDD_NUM, 1, vdd_corner); + +enum { + P_AON_SLEEP_CLK, + P_AOSS_CC_RO_CLK, + P_QDSP6SS_PLL_OUT_ODD, + P_SCC_PLL_OUT_EVEN, + P_SCC_PLL_OUT_MAIN, + P_SCC_PLL_OUT_ODD, + P_SSC_BI_PLL_TEST_SE, + P_SSC_BI_TCXO, +}; + +static const struct parent_map scc_parent_map_0[] = { + { P_AOSS_CC_RO_CLK, 0 }, + { P_AON_SLEEP_CLK, 1 }, + { P_SCC_PLL_OUT_EVEN, 2 }, + { P_SSC_BI_TCXO, 3 }, + { P_SCC_PLL_OUT_ODD, 4 }, + { P_QDSP6SS_PLL_OUT_ODD, 5 }, + { P_SCC_PLL_OUT_MAIN, 6 }, + { P_SSC_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const scc_parent_names_0[] = { + "bi_tcxo", + "aon_sleep_clk", + "scc_pll_out_even", + "bi_tcxo", + "scc_pll_out_odd", + "qdsp6ss_pll_out_odd", + "scc_pll", + "ssc_bi_pll_test_se", +}; + +static struct pll_vco trion_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static const struct alpha_pll_config scc_pll_config = { + .l = 0x1F, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000002, + .test_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static const struct alpha_pll_config scc_pll_config_sm8150_v2 = { + .l = 0x1E, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static struct clk_alpha_pll scc_pll = { + .offset = 0x0, + .vco_table = 
trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .type = TRION_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "scc_pll", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_trion_pll_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_trion_even[] = { + { 0x0, 1 }, + { 0x1, 2 }, + { 0x3, 4 }, + { 0x7, 8 }, + { } +}; + +static struct clk_alpha_pll_postdiv scc_pll_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_trion_even, + .num_post_div = ARRAY_SIZE(post_div_table_trion_even), + .width = 4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_pll_out_even", + .parent_names = (const char *[]){ "scc_pll" }, + .num_parents = 1, + .ops = &clk_trion_pll_postdiv_ops, + }, +}; + +static const struct freq_tbl ftbl_scc_main_rcg_clk_src[] = { + F(100000000, P_SCC_PLL_OUT_EVEN, 3, 0, 0), + { } +}; + +static const struct freq_tbl ftbl_scc_main_rcg_clk_src_sm8150_v2[] = { + F(96000000, P_SCC_PLL_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 scc_main_rcg_clk_src = { + .cmd_rcgr = 0x1000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_main_rcg_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_main_rcg_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_scc_qupv3_se0_clk_src[] = { + F(7372800, P_SCC_PLL_OUT_EVEN, 1, 384, 15625), + F(14745600, P_SCC_PLL_OUT_EVEN, 1, 768, 15625), + F(19200000, P_SSC_BI_TCXO, 1, 0, 0), + F(29491200, 
P_SCC_PLL_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_SCC_PLL_OUT_EVEN, 1, 8, 75), + F(48000000, P_SCC_PLL_OUT_EVEN, 1, 4, 25), + F(64000000, P_SCC_PLL_OUT_EVEN, 1, 16, 75), + F(80000000, P_SCC_PLL_OUT_MAIN, 7.5, 0, 0), + F(96000000, P_SCC_PLL_OUT_EVEN, 1, 8, 25), + F(100000000, P_SCC_PLL_OUT_EVEN, 3, 0, 0), + F(102400000, P_SCC_PLL_OUT_EVEN, 1, 128, 375), + F(112000000, P_SCC_PLL_OUT_EVEN, 1, 28, 75), + F(117964800, P_SCC_PLL_OUT_EVEN, 1, 6144, 15625), + F(120000000, P_SCC_PLL_OUT_EVEN, 2.5, 0, 0), + F(128000000, P_SCC_PLL_OUT_EVEN, 1, 32, 75), + { } +}; + +static const struct freq_tbl ftbl_scc_qupv3_se0_clk_src_sm8150_v2[] = { + F(7372800, P_SCC_PLL_OUT_EVEN, 1, 16, 625), + F(14745600, P_SCC_PLL_OUT_EVEN, 1, 32, 625), + F(19200000, P_SSC_BI_TCXO, 1, 0, 0), + F(29491200, P_SCC_PLL_OUT_EVEN, 1, 64, 625), + F(32000000, P_SCC_PLL_OUT_EVEN, 9, 0, 0), + F(48000000, P_SCC_PLL_OUT_EVEN, 6, 0, 0), + F(64000000, P_SCC_PLL_OUT_EVEN, 4.5, 0, 0), + F(96000000, P_SCC_PLL_OUT_MAIN, 6, 0, 0), + F(100000000, P_SCC_PLL_OUT_MAIN, 1, 25, 144), + F(102400000, P_SCC_PLL_OUT_MAIN, 1, 8, 45), + F(112000000, P_SCC_PLL_OUT_MAIN, 1, 7, 36), + F(117964800, P_SCC_PLL_OUT_MAIN, 1, 128, 625), + F(120000000, P_SCC_PLL_OUT_MAIN, 1, 5, 24), + F(128000000, P_SCC_PLL_OUT_MAIN, 4.5, 0, 0), + F(144000000, P_SCC_PLL_OUT_MAIN, 4, 0, 0), + F(192000000, P_SCC_PLL_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 scc_qupv3_se0_clk_src = { + .cmd_rcgr = 0x2004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_qupv3_se0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se0_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 50000000, + [VDD_LOWER] = 80000000, + [VDD_LOW] = 120000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static struct clk_rcg2 
scc_qupv3_se1_clk_src = { + .cmd_rcgr = 0x3004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_qupv3_se0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se1_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 50000000, + [VDD_LOWER] = 80000000, + [VDD_LOW] = 120000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static struct clk_rcg2 scc_qupv3_se2_clk_src = { + .cmd_rcgr = 0x4004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_qupv3_se0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se2_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 50000000, + [VDD_LOWER] = 80000000, + [VDD_LOW] = 120000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static struct clk_rcg2 scc_qupv3_se3_clk_src = { + .cmd_rcgr = 0xb004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_qupv3_se0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se3_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 50000000, + [VDD_LOWER] = 80000000, + [VDD_LOW] = 120000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static struct clk_rcg2 scc_qupv3_se4_clk_src = { + .cmd_rcgr = 0xc004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_qupv3_se0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = 
"scc_qupv3_se4_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 50000000, + [VDD_LOWER] = 80000000, + [VDD_LOW] = 120000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static struct clk_rcg2 scc_qupv3_se5_clk_src = { + .cmd_rcgr = 0xd004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = scc_parent_map_0, + .freq_tbl = ftbl_scc_qupv3_se0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se5_clk_src", + .parent_names = scc_parent_names_0, + .num_parents = 8, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_scc_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 50000000, + [VDD_LOWER] = 80000000, + [VDD_LOW] = 120000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static struct clk_branch scc_qupv3_2xcore_clk = { + .halt_reg = 0x5008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_2xcore_clk", + .parent_names = (const char *[]){ + "scc_main_rcg_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_core_clk = { + .halt_reg = 0x5010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_core_clk", + .parent_names = (const char *[]){ + "scc_main_rcg_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_m_hclk_clk = { + .halt_reg = 0x9064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + 
.name = "scc_qupv3_m_hclk_clk", + .parent_names = (const char *[]){ + "scc_main_rcg_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_s_hclk_clk = { + .halt_reg = 0x9060, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9060, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_s_hclk_clk", + .parent_names = (const char *[]){ + "scc_main_rcg_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_se0_clk = { + .halt_reg = 0x2130, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se0_clk", + .parent_names = (const char *[]){ + "scc_qupv3_se0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_se1_clk = { + .halt_reg = 0x3130, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se1_clk", + .parent_names = (const char *[]){ + "scc_qupv3_se1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_se2_clk = { + .halt_reg = 0x4130, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se2_clk", + .parent_names = (const char *[]){ + "scc_qupv3_se2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_se3_clk = { + .halt_reg = 0xb130, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = 
BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se3_clk", + .parent_names = (const char *[]){ + "scc_qupv3_se3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_se4_clk = { + .halt_reg = 0xc130, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se4_clk", + .parent_names = (const char *[]){ + "scc_qupv3_se4_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch scc_qupv3_se5_clk = { + .halt_reg = 0xd130, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x21000, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "scc_qupv3_se5_clk", + .parent_names = (const char *[]){ + "scc_qupv3_se5_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *scc_sm8150_clocks[] = { + [SCC_MAIN_RCG_CLK_SRC] = &scc_main_rcg_clk_src.clkr, + [SCC_PLL] = &scc_pll.clkr, + [SCC_PLL_OUT_EVEN] = &scc_pll_out_even.clkr, + [SCC_QUPV3_2XCORE_CLK] = &scc_qupv3_2xcore_clk.clkr, + [SCC_QUPV3_CORE_CLK] = &scc_qupv3_core_clk.clkr, + [SCC_QUPV3_M_HCLK_CLK] = &scc_qupv3_m_hclk_clk.clkr, + [SCC_QUPV3_S_HCLK_CLK] = &scc_qupv3_s_hclk_clk.clkr, + [SCC_QUPV3_SE0_CLK] = &scc_qupv3_se0_clk.clkr, + [SCC_QUPV3_SE0_CLK_SRC] = &scc_qupv3_se0_clk_src.clkr, + [SCC_QUPV3_SE1_CLK] = &scc_qupv3_se1_clk.clkr, + [SCC_QUPV3_SE1_CLK_SRC] = &scc_qupv3_se1_clk_src.clkr, + [SCC_QUPV3_SE2_CLK] = &scc_qupv3_se2_clk.clkr, + [SCC_QUPV3_SE2_CLK_SRC] = &scc_qupv3_se2_clk_src.clkr, + [SCC_QUPV3_SE3_CLK] = &scc_qupv3_se3_clk.clkr, + [SCC_QUPV3_SE3_CLK_SRC] = &scc_qupv3_se3_clk_src.clkr, + [SCC_QUPV3_SE4_CLK] = &scc_qupv3_se4_clk.clkr, + [SCC_QUPV3_SE4_CLK_SRC] = &scc_qupv3_se4_clk_src.clkr, + [SCC_QUPV3_SE5_CLK] = 
&scc_qupv3_se5_clk.clkr, + [SCC_QUPV3_SE5_CLK_SRC] = &scc_qupv3_se5_clk_src.clkr, +}; + +static struct clk_dfs scc_dfs_clocks[] = { + { &scc_qupv3_se0_clk_src, DFS_ENABLE_RCG }, + { &scc_qupv3_se1_clk_src, DFS_ENABLE_RCG }, + { &scc_qupv3_se2_clk_src, DFS_ENABLE_RCG }, + { &scc_qupv3_se3_clk_src, DFS_ENABLE_RCG }, + { &scc_qupv3_se4_clk_src, DFS_ENABLE_RCG }, + { &scc_qupv3_se5_clk_src, DFS_ENABLE_RCG }, +}; + +static const struct qcom_cc_dfs_desc scc_sm8150_dfs_desc = { + .clks = scc_dfs_clocks, + .num_clks = ARRAY_SIZE(scc_dfs_clocks), +}; + +static const struct regmap_config scc_sm8150_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x23000, + .fast_io = true, +}; + +static const struct qcom_cc_desc scc_sm8150_desc = { + .config = &scc_sm8150_regmap_config, + .clks = scc_sm8150_clocks, + .num_clks = ARRAY_SIZE(scc_sm8150_clocks), +}; + +static const struct of_device_id scc_sm8150_match_table[] = { + { .compatible = "qcom,scc-sm8150" }, + { .compatible = "qcom,scc-sm8150-v2" }, + { } +}; +MODULE_DEVICE_TABLE(of, scc_sm8150_match_table); + +static void scc_sm8150_fixup_sm8150v2(struct regmap *regmap) +{ + clk_trion_pll_configure(&scc_pll, regmap, &scc_pll_config_sm8150_v2); + scc_main_rcg_clk_src.freq_tbl = ftbl_scc_main_rcg_clk_src_sm8150_v2; + scc_main_rcg_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 96000000; + scc_main_rcg_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 576000000; + scc_main_rcg_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 576000000; + scc_qupv3_se0_clk_src.freq_tbl = ftbl_scc_qupv3_se0_clk_src_sm8150_v2; + scc_qupv3_se0_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 48000000; + scc_qupv3_se0_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 96000000; + scc_qupv3_se0_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 128000000; + scc_qupv3_se0_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 144000000; + scc_qupv3_se0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 192000000; + scc_qupv3_se1_clk_src.freq_tbl = 
ftbl_scc_qupv3_se0_clk_src_sm8150_v2; + scc_qupv3_se1_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 48000000; + scc_qupv3_se1_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 96000000; + scc_qupv3_se1_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 128000000; + scc_qupv3_se1_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 144000000; + scc_qupv3_se1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 192000000; + scc_qupv3_se2_clk_src.freq_tbl = ftbl_scc_qupv3_se0_clk_src_sm8150_v2; + scc_qupv3_se2_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 48000000; + scc_qupv3_se2_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 96000000; + scc_qupv3_se2_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 128000000; + scc_qupv3_se2_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 144000000; + scc_qupv3_se2_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 192000000; + scc_qupv3_se3_clk_src.freq_tbl = ftbl_scc_qupv3_se0_clk_src_sm8150_v2; + scc_qupv3_se3_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 48000000; + scc_qupv3_se3_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 96000000; + scc_qupv3_se3_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 128000000; + scc_qupv3_se3_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 144000000; + scc_qupv3_se3_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 192000000; + scc_qupv3_se4_clk_src.freq_tbl = ftbl_scc_qupv3_se0_clk_src_sm8150_v2; + scc_qupv3_se4_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 48000000; + scc_qupv3_se4_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 96000000; + scc_qupv3_se4_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 128000000; + scc_qupv3_se4_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 144000000; + scc_qupv3_se4_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 192000000; + scc_qupv3_se5_clk_src.freq_tbl = ftbl_scc_qupv3_se0_clk_src_sm8150_v2; + scc_qupv3_se5_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 48000000; + scc_qupv3_se5_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 96000000; + scc_qupv3_se5_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 128000000; + 
scc_qupv3_se5_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 144000000; + scc_qupv3_se5_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 192000000; +} + +static int scc_sm8150_fixup(struct platform_device *pdev, struct regmap *regmap) +{ + const char *compat = NULL; + int compatlen = 0; + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if (!strcmp(compat, "qcom,scc-sm8150-v2")) + scc_sm8150_fixup_sm8150v2(regmap); + + return 0; +} + +static int scc_sm8150_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &scc_sm8150_desc); + if (IS_ERR(regmap)) { + pr_err("Failed to map the scc registers\n"); + return PTR_ERR(regmap); + } + + vdd_scc_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_scc_cx"); + if (IS_ERR(vdd_scc_cx.regulator[0])) { + ret = PTR_ERR(vdd_scc_cx.regulator[0]); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get vdd_scc_cx regulator, ret=%d\n", + ret); + return ret; + } + + ret = scc_sm8150_fixup(pdev, regmap); + if (ret) + return ret; + + clk_trion_pll_configure(&scc_pll, regmap, &scc_pll_config); + + ret = qcom_cc_really_probe(pdev, &scc_sm8150_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register SCC clocks\n"); + return ret; + } + + ret = qcom_cc_register_rcg_dfs(pdev, &scc_sm8150_dfs_desc); + if (ret) { + dev_err(&pdev->dev, "Failed to register with DFS\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered SCC clocks\n"); + + return 0; +} + +static struct platform_driver scc_sm8150_driver = { + .probe = scc_sm8150_probe, + .driver = { + .name = "scc-sm8150", + .of_match_table = scc_sm8150_match_table, + }, +}; + +static int __init scc_sm8150_init(void) +{ + return platform_driver_register(&scc_sm8150_driver); +} +subsys_initcall(scc_sm8150_init); + +static void __exit scc_sm8150_exit(void) +{ + platform_driver_unregister(&scc_sm8150_driver); +} +module_exit(scc_sm8150_exit); + 
+MODULE_DESCRIPTION("QTI SCC SM8150 Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:scc-sm8150"); diff --git a/drivers/clk/qcom/vdd-level.h b/drivers/clk/qcom/vdd-level.h index b11589776e33c8696b2604fc6e8e78c62ed4f6c9..100205a96d918b0683872c45e06364d69b3611ed 100644 --- a/drivers/clk/qcom/vdd-level.h +++ b/drivers/clk/qcom/vdd-level.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -25,6 +25,8 @@ enum vdd_levels { VDD_LOW_L1, /* SVSL1 */ VDD_NOMINAL, /* NOM */ VDD_HIGH, /* TURBO */ + VDD_HIGH_L1, /* TURBOL1 */ + VDD_MM_NUM = VDD_HIGH_L1, VDD_NUM, }; @@ -36,6 +38,7 @@ static int vdd_corner[] = { RPMH_REGULATOR_LEVEL_SVS_L1, /* VDD_LOW_L1 */ RPMH_REGULATOR_LEVEL_NOM, /* VDD_NOMINAL */ RPMH_REGULATOR_LEVEL_TURBO, /* VDD_HIGH */ + RPMH_REGULATOR_LEVEL_TURBO_L1, /* VDD_HIGH_L1 */ }; #endif diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c index 4c43d8579868b6bec44dbc690e491513b55e0866..444ae14c100eb4ee73307a3ad0cfe28a5463b76f 100644 --- a/drivers/clk/qcom/videocc-sm8150.c +++ b/drivers/clk/qcom/videocc-sm8150.c @@ -38,7 +38,7 @@ #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } -static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); enum { P_BI_TCXO, diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8b432d6e846d9677e03c21604b6e495f453a14fa..c9ce716247c1550b06ebcae6b35b11103f16edee 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) cpu->perf_caps.lowest_perf, cpu_num, ret); } +/* + * The PCC subspace describes the rate at which platform can 
accept commands + * on the shared PCC channel (including READs which do not count towards freq + * trasition requests), so ideally we need to use the PCC values as a fallback + * if we don't have a platform specific transition_delay_us + */ +#ifdef CONFIG_ARM64 +#include + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_num = read_cpuid_part_number(); + unsigned int delay_us = 0; + + switch (implementor) { + case ARM_CPU_IMP_QCOM: + switch (part_num) { + case QCOM_CPU_PART_FALKOR_V1: + case QCOM_CPU_PART_FALKOR: + delay_us = 10000; + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + + return delay_us; +} + +#else + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; +} +#endif + static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu; @@ -163,8 +206,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.max_freq = cppc_dmi_max_khz; policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num); - policy->transition_delay_us = cppc_get_transition_latency(cpu_num) / - NSEC_PER_USEC; + policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index a905bbb45667b55ed0ba9e1fa614e6e26c2c5ddb..114dfe67015b264ba1a998875f350724272637cf 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2188,6 +2188,18 @@ static bool __init intel_pstate_no_acpi_pss(void) return true; } +static bool __init intel_pstate_no_acpi_pcch(void) +{ + acpi_status status; + acpi_handle handle; + 
+ status = acpi_get_handle(NULL, "\\_SB", &handle); + if (ACPI_FAILURE(status)) + return true; + + return !acpi_has_method(handle, "PCCH"); +} + static bool __init intel_pstate_has_acpi_ppc(void) { int i; @@ -2247,7 +2259,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) switch (plat_info[idx].data) { case PSS: - return intel_pstate_no_acpi_pss(); + if (!intel_pstate_no_acpi_pss()) + return false; + + return intel_pstate_no_acpi_pcch(); case PPC: return intel_pstate_has_acpi_ppc() && !force_load; } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 3f0ce2ae35ee432637c28e7dddd2851a20e29f16..0c56c97596725edf323a2abf471bbaf69582b903 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void) { int ret; + /* Skip initialization if another cpufreq driver is there. */ + if (cpufreq_get_current_driver()) + return 0; + if (acpi_disabled) return 0; diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c index 149e7ab3e17ac5d8b9aba64dfa772cf5f122eaec..5309df474bd177813e5f95d63f6cb9c3efe0f4b6 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -39,11 +39,9 @@ struct lpm_type_str { static const struct lpm_type_str lpm_types[] = { {IDLE, "idle_enabled"}, {SUSPEND, "suspend_enabled"}, - {LATENCY, "latency_us"}, + {LATENCY, "exit_latency_us"}, }; -static DEFINE_PER_CPU(uint32_t *, max_residency); -static DEFINE_PER_CPU(uint32_t *, min_residency); static struct lpm_level_avail *cpu_level_available[NR_CPUS]; static struct platform_device *lpm_pdev; @@ -78,99 +76,6 @@ static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj, return avail; } -static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id, - bool probe_time) -{ - int i, j; - bool mode_avail; - uint32_t *maximum_residency = per_cpu(max_residency, cpu_id); - uint32_t *minimum_residency = per_cpu(min_residency, cpu_id); - 
- for (i = 0; i < cpu->nlevels; i++) { - struct power_params *pwr = &cpu->levels[i].pwr; - - mode_avail = probe_time || - lpm_cpu_mode_allow(cpu_id, i, true); - - if (!mode_avail) { - maximum_residency[i] = 0; - minimum_residency[i] = 0; - continue; - } - - maximum_residency[i] = ~0; - for (j = i + 1; j < cpu->nlevels; j++) { - mode_avail = probe_time || - lpm_cpu_mode_allow(cpu_id, j, true); - - if (mode_avail && - (maximum_residency[i] > pwr->residencies[j]) && - (pwr->residencies[j] != 0)) - maximum_residency[i] = pwr->residencies[j]; - } - - minimum_residency[i] = pwr->time_overhead_us; - for (j = i-1; j >= 0; j--) { - if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) { - minimum_residency[i] = maximum_residency[j] + 1; - break; - } - } - } -} - -static void set_optimum_cluster_residency(struct lpm_cluster *cluster, - bool probe_time) -{ - int i, j; - bool mode_avail; - - for (i = 0; i < cluster->nlevels; i++) { - struct power_params *pwr = &cluster->levels[i].pwr; - - mode_avail = probe_time || - lpm_cluster_mode_allow(cluster, i, - true); - - if (!mode_avail) { - pwr->max_residency = 0; - pwr->min_residency = 0; - continue; - } - - pwr->max_residency = ~0; - for (j = i+1; j < cluster->nlevels; j++) { - mode_avail = probe_time || - lpm_cluster_mode_allow(cluster, j, - true); - if (mode_avail && - (pwr->max_residency > pwr->residencies[j]) && - (pwr->residencies[j] != 0)) - pwr->max_residency = pwr->residencies[j]; - } - - pwr->min_residency = pwr->time_overhead_us; - for (j = i-1; j >= 0; j--) { - if (probe_time || - lpm_cluster_mode_allow(cluster, j, true)) { - pwr->min_residency = - cluster->levels[j].pwr.max_residency + 1; - break; - } - } - } -} - -uint32_t *get_per_cpu_max_residency(int cpu) -{ - return per_cpu(max_residency, cpu); -} - -uint32_t *get_per_cpu_min_residency(int cpu) -{ - return per_cpu(min_residency, cpu); -} - static ssize_t lpm_latency_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -181,7 +86,7 @@ static 
ssize_t lpm_latency_show(struct kobject *kobj, if (WARN_ON(!avail)) return -EINVAL; - kp.arg = &avail->latency_us; + kp.arg = &avail->exit_latency; ret = param_get_uint(buf, &kp); if (ret > 0) { @@ -229,11 +134,6 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr, kp.arg = get_enabled_ptr(attr, avail); ret = param_set_bool(buf, &kp); - if (avail->cpu_node) - set_optimum_cpu_residency(avail->data, avail->idx, false); - else - set_optimum_cluster_residency(avail->data, false); - return ret ? ret : len; } @@ -349,8 +249,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent) * be available at all times. */ for (i = 1; i < lpm_cpu->nlevels; i++) { - level_list[i].latency_us = - p->levels[i].pwr.latency_us; + level_list[i].exit_latency = + p->levels[i].pwr.exit_latency; ret = create_lvl_avail_nodes( lpm_cpu->levels[i].name, cpu_kobj[cpu_idx], @@ -389,7 +289,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj) return -ENOMEM; for (i = 0; i < p->nlevels; i++) { - p->levels[i].available.latency_us = p->levels[i].pwr.latency_us; + p->levels[i].available.exit_latency = + p->levels[i].pwr.exit_latency; ret = create_lvl_avail_nodes(p->levels[i].level_name, cluster_kobj, &p->levels[i].available, (void *)p, 0, false); @@ -487,23 +388,18 @@ static int parse_power_params(struct device_node *node, char *key; int ret; - key = "qcom,latency-us"; - ret = of_property_read_u32(node, key, &pwr->latency_us); + key = "qcom,entry-latency-us"; + ret = of_property_read_u32(node, key, &pwr->entry_latency); if (ret) goto fail; - key = "qcom,ss-power"; - ret = of_property_read_u32(node, key, &pwr->ss_power); + key = "qcom,exit-latency-us"; + ret = of_property_read_u32(node, key, &pwr->exit_latency); if (ret) goto fail; - key = "qcom,energy-overhead"; - ret = of_property_read_u32(node, key, &pwr->energy_overhead); - if (ret) - goto fail; - - key = "qcom,time-overhead"; - ret = of_property_read_u32(node, key, 
&pwr->time_overhead_us); + key = "qcom,min-residency-us"; + ret = of_property_read_u32(node, key, &pwr->min_residency); if (ret) goto fail; @@ -621,30 +517,11 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask) return 0; } -static int calculate_residency(struct power_params *base_pwr, - struct power_params *next_pwr) -{ - int32_t residency = (int32_t)(next_pwr->energy_overhead - - base_pwr->energy_overhead) - - ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us) - - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us)); - - residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power); - - if (residency < 0) { - pr_err("Residency < 0 for LPM\n"); - return next_pwr->time_overhead_us; - } - - return residency < next_pwr->time_overhead_us ? - next_pwr->time_overhead_us : residency; -} - static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu) { struct device_node *n; - int ret, i, j; + int ret, i; const char *key; for_each_child_of_node(node, n) { @@ -678,36 +555,11 @@ static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu) of_node_put(n); } - for (i = 0; i < cpu->nlevels; i++) { - for (j = 0; j < cpu->nlevels; j++) { - if (i >= j) { - cpu->levels[i].pwr.residencies[j] = 0; - continue; - } - - cpu->levels[i].pwr.residencies[j] = - calculate_residency(&cpu->levels[i].pwr, - &cpu->levels[j].pwr); + for (i = 1; i < cpu->nlevels; i++) + cpu->levels[i-1].pwr.max_residency = + cpu->levels[i].pwr.min_residency - 1; - pr_info("idx %d %u\n", j, - cpu->levels[i].pwr.residencies[j]); - } - } - - for_each_cpu(i, &cpu->related_cpus) { - - per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev, - sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL); - if (!per_cpu(max_residency, i)) - return -ENOMEM; - - per_cpu(min_residency, i) = devm_kzalloc(&lpm_pdev->dev, - sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL); - if (!per_cpu(min_residency, i)) - return -ENOMEM; - - set_optimum_cpu_residency(cpu, i, true); - } + 
cpu->levels[i-1].pwr.max_residency = UINT_MAX; return 0; } @@ -801,8 +653,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node, struct lpm_cluster *c; struct device_node *n; char *key; - int ret = 0; - int i, j; + int ret = 0, i; c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL); if (!c) @@ -868,17 +719,12 @@ struct lpm_cluster *parse_cluster(struct device_node *node, else c->last_level = c->nlevels-1; - for (i = 0; i < c->nlevels; i++) { - for (j = 0; j < c->nlevels; j++) { - if (i >= j) { - c->levels[i].pwr.residencies[j] = 0; - continue; - } - c->levels[i].pwr.residencies[j] = calculate_residency( - &c->levels[i].pwr, &c->levels[j].pwr); - } - } - set_optimum_cluster_residency(c, true); + for (i = 1; i < c->nlevels; i++) + c->levels[i-1].pwr.max_residency = + c->levels[i].pwr.min_residency - 1; + + c->levels[i-1].pwr.max_residency = UINT_MAX; + return c; failed_parse_cluster: diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 4011f05e9676ebcd9cfb1b43877bd515b7b66fc6..42dc0f5b49048b0fa0cb3b2ca5cf6652ea36e329 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -162,9 +162,9 @@ static uint32_t least_cluster_latency(struct lpm_cluster *cluster, level = &cluster->levels[i]; pwr_params = &level->pwr; if (lat_level->reset_level == level->reset_level) { - if ((latency > pwr_params->latency_us) + if ((latency > pwr_params->exit_latency) || (!latency)) - latency = pwr_params->latency_us; + latency = pwr_params->exit_latency; break; } } @@ -181,10 +181,10 @@ static uint32_t least_cluster_latency(struct lpm_cluster *cluster, pwr_params = &level->pwr; if (lat_level->reset_level == level->reset_level) { - if ((latency > pwr_params->latency_us) + if ((latency > pwr_params->exit_latency) || (!latency)) latency = - pwr_params->latency_us; + pwr_params->exit_latency; break; } } @@ -216,9 +216,9 @@ static uint32_t least_cpu_latency(struct list_head *child, pwr_params = &level->pwr; if (lat_level->reset_level 
== level->reset_level) { - if ((lat > pwr_params->latency_us) + if ((lat > pwr_params->exit_latency) || (!lat)) - lat = pwr_params->latency_us; + lat = pwr_params->exit_latency; break; } } @@ -427,8 +427,6 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev, uint64_t max, avg, stddev; int64_t thresh = LLONG_MAX; struct lpm_history *history = &per_cpu(hist, dev->cpu); - uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu); - uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu); if (!lpm_prediction || !cpu->lpm_prediction) return 0; @@ -505,12 +503,16 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev, */ if (history->htmr_wkup != 1) { for (j = 1; j < cpu->nlevels; j++) { + struct lpm_cpu_level *level = &cpu->levels[j]; + uint32_t min_residency = level->pwr.min_residency; + uint32_t max_residency = 0; + struct lpm_cpu_level *lvl; uint32_t failed = 0; uint64_t total = 0; for (i = 0; i < MAXSAMPLES; i++) { if ((history->mode[i] == j) && - (history->resi[i] < min_residency[j])) { + (history->resi[i] < min_residency)) { failed++; total += history->resi[i]; } @@ -519,9 +521,11 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev, *idx_restrict = j; do_div(total, failed); for (i = 0; i < j; i++) { - if (total < max_residency[i]) { - *idx_restrict = i+1; - total = max_residency[i]; + lvl = &cpu->levels[i]; + max_residency = lvl->pwr.max_residency; + if (total < max_residency) { + *idx_restrict = i + 1; + total = max_residency; break; } } @@ -600,11 +604,11 @@ static int cpu_power_select(struct cpuidle_device *dev, uint64_t predicted = 0; uint32_t htime = 0, idx_restrict_time = 0; uint32_t next_wakeup_us = (uint32_t)sleep_us; - uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu); - uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu); + uint32_t min_residency, max_residency; + struct power_params *pwr_params; if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0) - return 0; + return 
best_level; idx_restrict = cpu->nlevels + 1; @@ -614,8 +618,6 @@ static int cpu_power_select(struct cpuidle_device *dev, goto done_select; for (i = 0; i < cpu->nlevels; i++) { - struct lpm_cpu_level *level = &cpu->levels[i]; - struct power_params *pwr_params = &level->pwr; bool allow; allow = i ? lpm_cpu_mode_allow(dev->cpu, i, true) : true; @@ -623,7 +625,10 @@ static int cpu_power_select(struct cpuidle_device *dev, if (!allow) continue; - lvl_latency_us = pwr_params->latency_us; + pwr_params = &cpu->levels[i].pwr; + lvl_latency_us = pwr_params->exit_latency; + min_residency = pwr_params->min_residency; + max_residency = pwr_params->max_residency; if (latency_us < lvl_latency_us) break; @@ -643,11 +648,11 @@ static int cpu_power_select(struct cpuidle_device *dev, * deeper low power modes than clock gating do not * call prediction. */ - if (next_wakeup_us > max_residency[i]) { + if (next_wakeup_us > max_residency) { predicted = lpm_cpuidle_predict(dev, cpu, &idx_restrict, &idx_restrict_time); - if (predicted && (predicted < min_residency[i])) - predicted = min_residency[i]; + if (predicted && (predicted < min_residency)) + predicted = min_residency; } else invalidate_predict_history(dev); } @@ -662,8 +667,8 @@ static int cpu_power_select(struct cpuidle_device *dev, else modified_time_us = 0; - if (predicted ? (predicted <= max_residency[i]) - : (next_wakeup_us <= max_residency[i])) + if (predicted ? 
(predicted <= max_residency) + : (next_wakeup_us <= max_residency)) break; } @@ -674,17 +679,21 @@ static int cpu_power_select(struct cpuidle_device *dev, * Start timer to avoid staying in shallower mode forever * incase of misprediciton */ + + pwr_params = &cpu->levels[best_level].pwr; + min_residency = pwr_params->min_residency; + max_residency = pwr_params->max_residency; + if ((predicted || (idx_restrict != (cpu->nlevels + 1))) - && ((best_level >= 0) - && (best_level < (cpu->nlevels-1)))) { + && (best_level < (cpu->nlevels-1))) { htime = predicted + cpu->tmr_add; if (htime == cpu->tmr_add) htime = idx_restrict_time; - else if (htime > max_residency[best_level]) - htime = max_residency[best_level]; + else if (htime > max_residency) + htime = max_residency; if ((next_wakeup_us > htime) && - ((next_wakeup_us - htime) > max_residency[best_level])) + ((next_wakeup_us - htime) > max_residency)) histtimer_start(htime); } @@ -961,10 +970,11 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle, &level->num_cpu_votes)) continue; - if (from_idle && latency_us < pwr_params->latency_us) + if (from_idle && latency_us < pwr_params->exit_latency) break; - if (sleep_us < pwr_params->time_overhead_us) + if (sleep_us < (pwr_params->exit_latency + + pwr_params->entry_latency)) break; if (suspend_in_progress && from_idle && level->notify_rpm) @@ -1503,8 +1513,7 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl) snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name); st->flags = 0; - st->exit_latency = cpu_level->pwr.latency_us; - st->power_usage = cpu_level->pwr.ss_power; + st->exit_latency = cpu_level->pwr.exit_latency; st->target_residency = 0; st->enter = lpm_cpuidle_enter; if (i == lpm_cpu->nlevels - 1) diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h index a6c7c5b5986cdb35f3c74f2d59783552278acd16..ae336d26c37b1f6a08bd90fcb9d575ad83e4dab1 100644 --- a/drivers/cpuidle/lpm-levels.h +++ b/drivers/cpuidle/lpm-levels.h @@ 
-26,11 +26,8 @@ #define PREMATURE_CNT_HIGH 5 struct power_params { - uint32_t latency_us; /* Enter + Exit latency */ - uint32_t ss_power; /* Steady state power */ - uint32_t energy_overhead; /* Enter + exit over head */ - uint32_t time_overhead_us; /* Enter + exit overhead */ - uint32_t residencies[NR_LPM_LEVELS]; + uint32_t entry_latency; /* Entry latency */ + uint32_t exit_latency; /* Exit latency */ uint32_t min_residency; uint32_t max_residency; }; @@ -62,7 +59,7 @@ struct lpm_cpu { struct lpm_level_avail { bool idle_enabled; bool suspend_enabled; - uint32_t latency_us; + uint32_t exit_latency; struct kobject *kobj; struct kobj_attribute idle_enabled_attr; struct kobj_attribute suspend_enabled_attr; diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 65dc78b91dea0147e454a5f9fdb20ed4bf024510..3f9eee7e555fe921a8e4af9d3d5fee7063e5a711 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) dev->pdr_pa); return -ENOMEM; } - memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); + memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, &dev->shadow_sa_pool_pa, @@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) { - if (dev->pdr != NULL) + if (dev->pdr) dma_free_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, dev->pdr, dev->pdr_pa); + if (dev->shadow_sa_pool) dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, dev->shadow_sa_pool, dev->shadow_sa_pool_pa); + if (dev->shadow_sr_pool) dma_free_coherent(dev->core_dev->device, sizeof(struct sa_state_record) * PPC4XX_NUM_PD, @@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) static void crypto4xx_destroy_sdr(struct 
crypto4xx_device *dev) { - if (dev->sdr != NULL) + if (dev->sdr) dma_free_coherent(dev->core_dev->device, sizeof(struct ce_sd) * PPC4XX_NUM_SD, dev->sdr, dev->sdr_pa); - if (dev->scatter_buffer_va != NULL) + if (dev->scatter_buffer_va) dma_free_coherent(dev->core_dev->device, dev->scatter_buffer_size * PPC4XX_NUM_SD, dev->scatter_buffer_va, @@ -1033,12 +1035,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, break; } - if (rc) { - list_del(&alg->entry); + if (rc) kfree(alg); - } else { + else list_add_tail(&alg->entry, &sec_dev->alg_list); - } } return 0; @@ -1193,7 +1193,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) rc = crypto4xx_build_gdr(core_dev->dev); if (rc) - goto err_build_gdr; + goto err_build_pdr; rc = crypto4xx_build_sdr(core_dev->dev); if (rc) @@ -1236,12 +1236,11 @@ static int crypto4xx_probe(struct platform_device *ofdev) err_request_irq: irq_dispose_mapping(core_dev->irq); tasklet_kill(&core_dev->tasklet); - crypto4xx_destroy_sdr(core_dev->dev); err_build_sdr: + crypto4xx_destroy_sdr(core_dev->dev); crypto4xx_destroy_gdr(core_dev->dev); -err_build_gdr: - crypto4xx_destroy_pdr(core_dev->dev); err_build_pdr: + crypto4xx_destroy_pdr(core_dev->dev); kfree(core_dev->dev); err_alloc_dev: kfree(core_dev); diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index d3a9f62201e949addb559fdc471bfe7575d78888..d37d18a5dc948355050e106854b8197694c6c4d3 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -2443,6 +2443,9 @@ static int _qce_sps_add_sg_data(struct qce_device *pce_dev, struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; + if (!sg_src) + return -ENOENT; + while (nbytes > 0) { len = min(nbytes, sg_dma_len(sg_src)); nbytes -= len; @@ -5983,6 +5986,7 @@ static int qce_smmu_init(struct qce_device *pce_dev) struct dma_iommu_mapping *mapping; int attr = 1; int ret = 0; + struct device *dev = pce_dev->pdev; mapping = arm_iommu_create_mapping(&platform_bus_type, 
CRYPTO_SMMU_IOVA_START, CRYPTO_SMMU_IOVA_SIZE); @@ -6004,6 +6008,13 @@ static int qce_smmu_init(struct qce_device *pce_dev) pr_err("Attach device failed, err = %d\n", ret); goto ext_fail_set_attr; } + + if (!dev->dma_parms) + dev->dma_parms = devm_kzalloc(dev, + sizeof(*dev->dma_parms), GFP_KERNEL); + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, DMA_BIT_MASK(64)); + pce_dev->smmu_mapping = mapping; return ret; diff --git a/drivers/crypto/msm/qcedev_smmu.c b/drivers/crypto/msm/qcedev_smmu.c index 2fe9de8114eb7a3e70dc46794945a912cf8c2d01..08c4468f0a0b5dca3f56930fbb45d5421566ff99 100644 --- a/drivers/crypto/msm/qcedev_smmu.c +++ b/drivers/crypto/msm/qcedev_smmu.c @@ -57,6 +57,11 @@ static int qcedev_setup_context_bank(struct context_bank_info *cb, goto release_mapping; } } + if (!dev->dma_parms) + dev->dma_parms = devm_kzalloc(dev, + sizeof(*dev->dma_parms), GFP_KERNEL); + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, DMA_BIT_MASK(64)); rc = arm_iommu_attach_device(cb->dev, cb->mapping); if (rc) { diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index c939f18f70cc0be197dc4c805b58d5d933cd7072..7685f557dcc0799a2a7869b0bf152d1eac8bcc9d 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, return; } + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) @@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) - : "d"(control_word), "b"(key), "c"(count - initial)); + : "d"(control_word), "b"(key), "c"(count)); } static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, @@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void 
*key, if (count < cbc_fetch_blocks) return cbc_crypt(input, output, key, iv, control_word, count); + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) @@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count-initial)); + : "d" (control_word), "b" (key), "c" (count)); return iv; } diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c index 35e5e87abab7bca5f5a79ea2fcec222ee258de4e..ecaa8f003a77a0269d085c4d3bac4d17407d4814 100644 --- a/drivers/devfreq/governor_bw_hwmon.c +++ b/drivers/devfreq/governor_bw_hwmon.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -74,6 +74,7 @@ struct hwmon_node { struct bw_hwmon *hw; struct devfreq_governor *gov; struct attribute_group *attr_grp; + struct mutex mon_lock; }; #define UP_WAKE 1 @@ -511,9 +512,11 @@ int update_bw_hwmon(struct bw_hwmon *hwmon) if (!node) return -ENODEV; - if (!node->mon_started) + mutex_lock(&node->mon_lock); + if (!node->mon_started) { + mutex_unlock(&node->mon_lock); return -EBUSY; - + } dev_dbg(df->dev.parent, "Got update request\n"); devfreq_monitor_stop(df); @@ -525,6 +528,7 @@ int update_bw_hwmon(struct bw_hwmon *hwmon) mutex_unlock(&df->lock); devfreq_monitor_start(df); + mutex_unlock(&node->mon_lock); return 0; } @@ -572,7 +576,9 @@ static void stop_monitor(struct devfreq *df, bool init) struct hwmon_node *node = df->data; struct bw_hwmon *hw = node->hw; + mutex_lock(&node->mon_lock); node->mon_started = false; + mutex_unlock(&node->mon_lock); 
if (init) { devfreq_monitor_stop(df); @@ -932,6 +938,7 @@ int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon) node->mbps_zones[0] = 0; node->hw = hwmon; + mutex_init(&node->mon_lock); mutex_lock(&list_lock); list_add_tail(&node->list, &hwmon_list); mutex_unlock(&list_lock); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 3a92f3eba813d171c991e5d1db34c6b16d217597..fb05cc57d102634f2a52d832b73f6f8e413906a6 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -36,6 +36,9 @@ #include #include #include +#include +#include +#include #include @@ -48,6 +51,19 @@ struct dma_buf_list { struct mutex lock; }; +struct dma_info { + struct dma_buf *dmabuf; + struct list_head head; +}; + +struct dma_proc { + char name[TASK_COMM_LEN]; + pid_t pid; + size_t size; + struct list_head dma_bufs; + struct list_head head; +}; + static struct dma_buf_list db_list; static int dma_buf_release(struct inode *inode, struct file *file) @@ -458,6 +474,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; dmabuf->name = bufname; + getnstimeofday(&dmabuf->ctime); if (!resv) { resv = (struct reservation_object *)&dmabuf[1]; @@ -1272,6 +1289,155 @@ static const struct file_operations dma_buf_debug_fops = { .release = single_release, }; +static bool list_contains(struct list_head *list, struct dma_buf *info) +{ + struct dma_info *curr; + + list_for_each_entry(curr, list, head) + if (curr->dmabuf == info) + return true; + + return false; +} + +static int get_dma_info(const void *data, struct file *file, unsigned int n) +{ + struct dma_proc *dma_proc; + struct dma_info *dma_info; + + dma_proc = (struct dma_proc *)data; + if (!is_dma_buf_file(file)) + return 0; + + if (list_contains(&dma_proc->dma_bufs, file->private_data)) + return 0; + + dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC); + if (!dma_info) 
+ return -ENOMEM; + + get_file(file); + dma_info->dmabuf = file->private_data; + dma_proc->size += dma_info->dmabuf->size / SZ_1K; + list_add(&dma_info->head, &dma_proc->dma_bufs); + return 0; +} + +static void write_proc(struct seq_file *s, struct dma_proc *proc) +{ + struct dma_info *tmp; + struct timespec curr_time; + + getnstimeofday(&curr_time); + seq_printf(s, "\n%s (PID %ld) size: %ld\nDMA Buffers:\n", + proc->name, proc->pid, proc->size); + seq_printf(s, "%-8s\t%-8s\t%-8s\n", + "Name", "Size (KB)", "Time Alive (sec)"); + + list_for_each_entry(tmp, &proc->dma_bufs, head) { + struct dma_buf *dmabuf; + struct timespec mtime; + __kernel_time_t elap_mtime; + + dmabuf = tmp->dmabuf; + mtime = dmabuf->ctime; + elap_mtime = curr_time.tv_sec - mtime.tv_sec; + seq_printf(s, "%-8s\t%-8ld\t%-8ld\n", + dmabuf->name, dmabuf->size / SZ_1K, elap_mtime); + } +} + +static void free_proc(struct dma_proc *proc) +{ + struct dma_info *tmp, *n; + + list_for_each_entry_safe(tmp, n, &proc->dma_bufs, head) { + dma_buf_put(tmp->dmabuf); + list_del(&tmp->head); + kfree(tmp); + } + kfree(proc); +} + +static int dmacmp(void *unused, struct list_head *a, struct list_head *b) +{ + struct dma_info *a_buf, *b_buf; + + a_buf = list_entry(a, struct dma_info, head); + b_buf = list_entry(b, struct dma_info, head); + return b_buf->dmabuf->size - a_buf->dmabuf->size; +} + +static int proccmp(void *unused, struct list_head *a, struct list_head *b) +{ + struct dma_proc *a_proc, *b_proc; + + a_proc = list_entry(a, struct dma_proc, head); + b_proc = list_entry(b, struct dma_proc, head); + return b_proc->size - a_proc->size; +} + +static int dma_procs_debug_show(struct seq_file *s, void *unused) +{ + struct task_struct *task, *thread; + struct files_struct *files; + int ret = 0; + struct dma_proc *tmp, *n; + LIST_HEAD(plist); + + read_lock(&tasklist_lock); + for_each_process(task) { + tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); + if (!tmp) { + ret = -ENOMEM; + read_unlock(&tasklist_lock); + goto mem_err; 
+ } + INIT_LIST_HEAD(&tmp->dma_bufs); + for_each_thread(task, thread) { + files = get_files_struct(task); + if (!files) + continue; + ret = iterate_fd(files, 0, get_dma_info, tmp); + put_files_struct(files); + } + if (ret || list_empty(&tmp->dma_bufs)) + goto skip; + list_sort(NULL, &tmp->dma_bufs, dmacmp); + get_task_comm(tmp->name, task); + tmp->pid = task->tgid; + list_add(&tmp->head, &plist); + continue; +skip: + free_proc(tmp); + } + read_unlock(&tasklist_lock); + + list_sort(NULL, &plist, proccmp); + list_for_each_entry(tmp, &plist, head) + write_proc(s, tmp); + + ret = 0; +mem_err: + list_for_each_entry_safe(tmp, n, &plist, head) { + list_del(&tmp->head); + free_proc(tmp); + } + return ret; +} + +static int dma_procs_debug_open(struct inode *f_inode, struct file *file) +{ + return single_open(file, dma_procs_debug_show, NULL); +} + +static const struct file_operations dma_procs_debug_fops = { + .open = dma_procs_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + static struct dentry *dma_buf_debugfs_dir; static int dma_buf_init_debugfs(void) @@ -1292,6 +1458,17 @@ static int dma_buf_init_debugfs(void) debugfs_remove_recursive(dma_buf_debugfs_dir); dma_buf_debugfs_dir = NULL; err = PTR_ERR(d); + return err; + } + + d = debugfs_create_file("dmaprocs", 0444, dma_buf_debugfs_dir, + NULL, &dma_procs_debug_fops); + + if (IS_ERR(d)) { + pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n"); + debugfs_remove_recursive(dma_buf_debugfs_dir); + dma_buf_debugfs_dir = NULL; + err = PTR_ERR(d); } return err; diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 346c4987b2848782336db887150b88db1a9b91a4..38983f56ad0ddd210703636088d5ccacac6ecbc7 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1106,7 +1106,7 @@ static void *ocram_alloc_mem(size_t size, void **other) static void ocram_free_mem(void *p, size_t size, void *other) { - gen_pool_free((struct gen_pool *)other, (u32)p, 
size); + gen_pool_free((struct gen_pool *)other, (unsigned long)p, size); } static const struct edac_device_prv_data ocramecc_data = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 1360a24d2ede1de8afa739a87f8405df5e768246..f08624f2f20945c60d5d6d82726fb1c9e707e691 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -683,8 +683,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) - return -EINVAL; + if (bo->prime_shared_count) { + if (domain & AMDGPU_GEM_DOMAIN_GTT) + domain = AMDGPU_GEM_DOMAIN_GTT; + else + return -EINVAL; + } if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 0d8a417e2cd67abd15f631ec4f54cead209d285b..bb5cc15fa0b927c09ac20d7ecb9dac9f1c154e2e 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1355,7 +1355,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, { struct drm_plane *plane = plane_state->plane; struct drm_crtc_state *crtc_state; - + /* Nothing to do for same crtc*/ + if (plane_state->crtc == crtc) + return 0; if (plane_state->crtc) { crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0028591f3f959ced1ad520ee280fb481d7a52898..1f08d597b87af472b824cffd0ea858c362824f1b 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2683,31 +2683,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, return 0; } -/** - * drm_atomic_helper_disable_all - disable all currently active outputs - * @dev: DRM device - * @ctx: lock acquisition context - * - * Loops through all connectors, finding 
those that aren't turned off and then - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC - * that they are connected to. - * - * This is used for example in suspend/resume to disable all currently active - * functions when suspending. If you just want to shut down everything at e.g. - * driver unload, look at drm_atomic_helper_shutdown(). - * - * Note that if callers haven't already acquired all modeset locks this might - * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). - * - * Returns: - * 0 on success or a negative error code on failure. - * - * See also: - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and - * drm_atomic_helper_shutdown(). - */ -int drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx) +static int __drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx, + bool clean_old_fbs) { struct drm_atomic_state *state; struct drm_connector_state *conn_state; @@ -2759,8 +2737,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, goto free; drm_atomic_set_fb_for_plane(plane_state, NULL); - plane_mask |= BIT(drm_plane_index(plane)); - plane->old_fb = plane->fb; + + if (clean_old_fbs) { + plane->old_fb = plane->fb; + plane_mask |= BIT(drm_plane_index(plane)); + } } ret = drm_atomic_commit(state); @@ -2771,6 +2752,34 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, return ret; } +/** + * drm_atomic_helper_disable_all - disable all currently active outputs + * @dev: DRM device + * @ctx: lock acquisition context + * + * Loops through all connectors, finding those that aren't turned off and then + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC + * that they are connected to. + * + * This is used for example in suspend/resume to disable all currently active + * functions when suspending. If you just want to shut down everything at e.g. 
+ * driver unload, look at drm_atomic_helper_shutdown(). + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * 0 on success or a negative error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and + * drm_atomic_helper_shutdown(). + */ +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + return __drm_atomic_helper_disable_all(dev, ctx, false); +} EXPORT_SYMBOL(drm_atomic_helper_disable_all); /** @@ -2793,7 +2802,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) while (1) { ret = drm_modeset_lock_all_ctx(dev, &ctx); if (!ret) - ret = drm_atomic_helper_disable_all(dev, &ctx); + ret = __drm_atomic_helper_disable_all(dev, &ctx, true); if (ret != -EDEADLK) break; @@ -2897,16 +2906,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, struct drm_connector_state *new_conn_state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; - unsigned plane_mask = 0; - struct drm_device *dev = state->dev; - int ret; state->acquire_ctx = ctx; - for_each_new_plane_in_state(state, plane, new_plane_state, i) { - plane_mask |= BIT(drm_plane_index(plane)); + for_each_new_plane_in_state(state, plane, new_plane_state, i) state->planes[i].old_state = plane->state; - } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) state->crtcs[i].old_state = crtc->state; @@ -2914,11 +2918,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, for_each_new_connector_in_state(state, connector, new_conn_state, i) state->connectors[i].old_state = connector->state; - ret = drm_atomic_commit(state); - if (plane_mask) - drm_atomic_clean_old_fb(dev, plane_mask, ret); - - return ret; + return drm_atomic_commit(state); } EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); diff --git 
a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 41b492f99955f8d6778197e7102c854e41530b8f..c022ab6e84bdd714901a50bf539ddee849fabeff 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2862,12 +2862,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m, } } +#define DP_PAYLOAD_TABLE_SIZE 64 + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf) { int i; - for (i = 0; i < 64; i += 16) { + for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { if (drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + i, &buf[i], 16) != 16) @@ -2936,7 +2938,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, mutex_lock(&mgr->lock); if (mgr->mst_primary) { - u8 buf[64]; + u8 buf[DP_PAYLOAD_TABLE_SIZE]; int ret; ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); @@ -2954,8 +2956,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); if (dump_dp_payload_table(mgr, buf)) - seq_printf(m, "payload table: %*ph\n", 63, buf); - + seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); } mutex_unlock(&mgr->lock); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a85861438dc2bfa513a3d8fed6e4b0f648939433..a89fc651f3dfb244605924bb0f3e3e59bce4d7bc 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -194,7 +194,8 @@ static const struct drm_display_mode drm_dmt_modes[] = { /* 0x05 - 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 72, }, /* 0x06 - 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, @@ -551,7 +552,8 @@ static const struct drm_display_mode 
edid_est_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 72, }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 768, 864, 0, 480, 483, 486, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index e8e4ea14b12ba4f2bb8474e7941b9eec858f7cfe..e05e5399af2db518959143992fbe21dc16dd0c11 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev, extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, +extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); extern int psb_intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index be3eefec5152aa0cdf9dd891bf4d4fe423ff87d4..8baf6325c6e46cfbc7d839607f8536d27ee5e7c0 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) } } -int psb_intel_lvds_mode_valid(struct drm_connector *connector, +enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 
b63893eeca73ddf78070bbecf055f6560f8636b7..20a471ad0ad27652447f9c6e33e957a4b2b43a2a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1786,10 +1786,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_status = 0, hotplug_status_mask; + int i; + + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; + else + hotplug_status_mask = HOTPLUG_INT_STATUS_I915; - if (hotplug_status) + /* + * We absolutely have to clear all the pending interrupt + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port + * interrupt bit won't have an edge, and the i965/g4x + * edge triggered IIR will not notice that an interrupt + * is still pending. We can't use PORT_HOTPLUG_EN to + * guarantee the edge as the act of toggling the enable + * bits can itself generate a new hotplug interrupt :( + */ + for (i = 0; i < 10; i++) { + u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; + + if (tmp == 0) + return hotplug_status; + + hotplug_status |= tmp; I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + } + + WARN_ONCE(1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + I915_READ(PORT_HOTPLUG_STAT)); return hotplug_status; } diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 1b75535b2468590d402ac23da3472af60ddb02ea..b5c4108b849176b5f1fb1d4838febb0f9d34b7f1 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -13,6 +13,7 @@ msm_drm-y := \ dp/dp_power.o \ dp/dp_catalog.o \ dp/dp_catalog_v420.o \ + dp/dp_catalog_v200.o \ dp/dp_aux.o \ dp/dp_panel.o \ dp/dp_link.o \ diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 7cd3456112196a351e0718abc45995ed75accffa..7515ba4eddd2ff4f6e11b57a9748159f233c0cc4 
100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -41,6 +41,7 @@ struct dp_aux_private { bool read; bool no_send_addr; bool no_send_stop; + bool enabled; u32 offset; u32 segment; @@ -623,12 +624,16 @@ static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + if (aux->enabled) + return; + dp_aux_reset_phy_config_indices(aux_cfg); aux->catalog->setup(aux->catalog, aux_cfg); aux->catalog->reset(aux->catalog); aux->catalog->enable(aux->catalog, true); atomic_set(&aux->aborted, 0); aux->retry_cnt = 0; + aux->enabled = true; } static void dp_aux_deinit(struct dp_aux *dp_aux) @@ -642,8 +647,12 @@ static void dp_aux_deinit(struct dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + if (!aux->enabled) + return; + atomic_set(&aux->aborted, 1); aux->catalog->enable(aux->catalog, false); + aux->enabled = false; } static int dp_aux_register(struct dp_aux *dp_aux) diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 7a014dce9430b8b0239dd33bb66f3d833aefb655..7fd51ad69ee8822ce6ef65a6ee4d8bb0fa25fa99 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -86,6 +86,7 @@ struct dp_catalog_io { struct dp_io_data *usb3_dp_com; struct dp_io_data *hdcp_physical; struct dp_io_data *dp_p1; + struct dp_io_data *dp_tcsr; }; /* audio related catalog functions */ @@ -100,36 +101,6 @@ struct dp_catalog_private { char exe_mode[SZ_4]; }; -static u32 dp_read(struct dp_catalog_private *catalog, - struct dp_io_data *io_data, u32 offset) -{ - u32 data = 0; - - if (!strcmp(catalog->exe_mode, "hw") || - !strcmp(catalog->exe_mode, "all")) { - data = readl_relaxed(io_data->io.base + offset); - } else if (!strcmp(catalog->exe_mode, "sw")) { - if (io_data->buf) - memcpy(&data, io_data->buf + offset, sizeof(offset)); - } - - return data; -} - -static void dp_write(struct dp_catalog_private 
*catalog, - struct dp_io_data *io_data, u32 offset, u32 data) -{ - if (!strcmp(catalog->exe_mode, "hw") || - !strcmp(catalog->exe_mode, "all")) - writel_relaxed(data, io_data->io.base + offset); - - if (!strcmp(catalog->exe_mode, "sw") || - !strcmp(catalog->exe_mode, "all")) { - if (io_data->buf) - memcpy(io_data->buf + offset, &data, sizeof(data)); - } -} - /* aux related catalog functions */ static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux) { @@ -144,7 +115,7 @@ static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux) catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_aux; - return dp_read(catalog, io_data, DP_AUX_DATA); + return dp_read(catalog->exe_mode, io_data, DP_AUX_DATA); end: return 0; } @@ -164,7 +135,7 @@ static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux) catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_aux; - dp_write(catalog, io_data, DP_AUX_DATA, aux->data); + dp_write(catalog->exe_mode, io_data, DP_AUX_DATA, aux->data); end: return rc; } @@ -184,7 +155,7 @@ static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux) catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_aux; - dp_write(catalog, io_data, DP_AUX_TRANS_CTRL, aux->data); + dp_write(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL, aux->data); end: return rc; } @@ -206,11 +177,11 @@ static int dp_catalog_aux_clear_trans(struct dp_catalog_aux *aux, bool read) io_data = catalog->io.dp_aux; if (read) { - data = dp_read(catalog, io_data, DP_AUX_TRANS_CTRL); + data = dp_read(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL); data &= ~BIT(9); - dp_write(catalog, io_data, DP_AUX_TRANS_CTRL, data); + dp_write(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL, data); } else { - dp_write(catalog, io_data, DP_AUX_TRANS_CTRL, 0); + dp_write(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL, 0); } end: return rc; @@ -230,13 +201,13 @@ static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux) catalog = dp_catalog_get_priv(aux); 
io_data = catalog->io.dp_phy; - data = dp_read(catalog, io_data, DP_PHY_AUX_INTERRUPT_STATUS); + data = dp_read(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_STATUS); - dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); wmb(); /* make sure 0x1f is written before next write */ - dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); wmb(); /* make sure 0x9f is written before next write */ - dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0); + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0); wmb(); /* make sure register is cleared */ } @@ -254,15 +225,15 @@ static void dp_catalog_aux_reset(struct dp_catalog_aux *aux) catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_aux; - aux_ctrl = dp_read(catalog, io_data, DP_AUX_CTRL); + aux_ctrl = dp_read(catalog->exe_mode, io_data, DP_AUX_CTRL); aux_ctrl |= BIT(1); - dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl); + dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl); usleep_range(1000, 1010); /* h/w recommended delay */ aux_ctrl &= ~BIT(1); - dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl); + dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl); wmb(); /* make sure AUX reset is done here */ } @@ -279,18 +250,18 @@ static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable) catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_aux; - aux_ctrl = dp_read(catalog, io_data, DP_AUX_CTRL); + aux_ctrl = dp_read(catalog->exe_mode, io_data, DP_AUX_CTRL); if (enable) { aux_ctrl |= BIT(0); - dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl); + dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl); wmb(); /* make sure AUX module is enabled */ - dp_write(catalog, io_data, DP_TIMEOUT_COUNT, 0xffff); - dp_write(catalog, io_data, DP_AUX_LIMITS, 0xffff); + dp_write(catalog->exe_mode, io_data, 
DP_TIMEOUT_COUNT, 0xffff); + dp_write(catalog->exe_mode, io_data, DP_AUX_LIMITS, 0xffff); } else { aux_ctrl &= ~BIT(0); - dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl); + dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl); } } @@ -316,7 +287,8 @@ static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux, dp_phy_aux_config_type_to_string(type), cfg[type].lut[current_index], cfg[type].lut[new_index]); - dp_write(catalog, io_data, cfg[type].offset, cfg[type].lut[new_index]); + dp_write(catalog->exe_mode, io_data, cfg[type].offset, + cfg[type].lut[new_index]); cfg[type].current_index = new_index; } @@ -335,29 +307,31 @@ static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_phy; - dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x65); + dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x65); wmb(); /* make sure PD programming happened */ /* Turn on BIAS current for PHY/PLL */ io_data = catalog->io.dp_pll; - dp_write(catalog, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1b); + dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, + 0x1b); io_data = catalog->io.dp_phy; - dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x02); + dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x02); wmb(); /* make sure PD programming happened */ - dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x7d); + dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x7d); /* Turn on BIAS current for PHY/PLL */ io_data = catalog->io.dp_pll; - dp_write(catalog, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f); + dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, + 0x3f); /* DP AUX CFG register programming */ io_data = catalog->io.dp_phy; for (i = 0; i < PHY_AUX_CFG_MAX; i++) - dp_write(catalog, io_data, cfg[i].offset, + dp_write(catalog->exe_mode, io_data, cfg[i].offset, cfg[i].lut[cfg[i].current_index]); - dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_MASK, 0x1F); + 
dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK, 0x1F); wmb(); /* make sure AUX configuration is done before enabling it */ } @@ -375,12 +349,12 @@ static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy) catalog = dp_catalog_get_priv(aux); io_data = catalog->io.dp_ahb; - aux->isr = dp_read(catalog, io_data, DP_INTR_STATUS); + aux->isr = dp_read(catalog->exe_mode, io_data, DP_INTR_STATUS); aux->isr &= ~DP_INTR_MASK1; ack = aux->isr & DP_INTERRUPT_STATUS1; ack <<= 1; ack |= DP_INTR_MASK1; - dp_write(catalog, io_data, DP_INTR_STATUS, ack); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS, ack); } /* controller related catalog functions */ @@ -397,7 +371,7 @@ static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_ahb; - return dp_read(catalog, io_data, DP_HDCP_STATUS); + return dp_read(catalog->exe_mode, io_data, DP_HDCP_STATUS); } static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) @@ -422,7 +396,7 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) parity = dp_header_get_parity(header); data = ((header << HEADER_BYTE_1_BIT) | (parity << PARITY_BYTE_1_BIT)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_0, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_0, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -431,22 +405,22 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) parity = dp_header_get_parity(header); data = ((header << HEADER_BYTE_2_BIT) | (parity << PARITY_BYTE_2_BIT)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_1, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_1, data); /* HEADER BYTE 3 */ header = panel->hdr_data.vscext_header_byte3; parity = dp_header_get_parity(header); data = ((header << HEADER_BYTE_3_BIT) | (parity << PARITY_BYTE_3_BIT)); - data |= dp_read(catalog, io_data, MMSS_DP_VSCEXT_1); 
- dp_write(catalog, io_data, MMSS_DP_VSCEXT_1, data); + data |= dp_read(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_1); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_1, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); data = panel->hdr_data.version; data |= panel->hdr_data.length << 8; data |= hdr->eotf << 16; - dp_write(catalog, io_data, MMSS_DP_VSCEXT_2, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_2, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -454,7 +428,7 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) (DP_GET_MSB(hdr->display_primaries_x[0]) << 8) | (DP_GET_LSB(hdr->display_primaries_y[0]) << 16) | (DP_GET_MSB(hdr->display_primaries_y[0]) << 24)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_3, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_3, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -462,7 +436,7 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) (DP_GET_MSB(hdr->display_primaries_x[1]) << 8) | (DP_GET_LSB(hdr->display_primaries_y[1]) << 16) | (DP_GET_MSB(hdr->display_primaries_y[1]) << 24)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_4, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_4, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -470,7 +444,7 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) (DP_GET_MSB(hdr->display_primaries_x[2]) << 8) | (DP_GET_LSB(hdr->display_primaries_y[2]) << 16) | (DP_GET_MSB(hdr->display_primaries_y[2]) << 24)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_5, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_5, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -478,7 +452,7 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) (DP_GET_MSB(hdr->white_point_x) << 8) | (DP_GET_LSB(hdr->white_point_y) << 16) | 
(DP_GET_MSB(hdr->white_point_y) << 24)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_6, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_6, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -486,7 +460,7 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) (DP_GET_MSB(hdr->max_luminance) << 8) | (DP_GET_LSB(hdr->min_luminance) << 16) | (DP_GET_MSB(hdr->min_luminance) << 24)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_7, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_7, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -494,12 +468,12 @@ static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel) (DP_GET_MSB(hdr->max_content_light_level) << 8) | (DP_GET_LSB(hdr->max_average_light_level) << 16) | (DP_GET_MSB(hdr->max_average_light_level) << 24)); - dp_write(catalog, io_data, MMSS_DP_VSCEXT_8, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_8, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); data = 0; - dp_write(catalog, io_data, MMSS_DP_VSCEXT_9, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_9, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -528,7 +502,7 @@ static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel) parity = dp_header_get_parity(header); data = ((header << HEADER_BYTE_1_BIT) | (parity << PARITY_BYTE_1_BIT)); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_0, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_0, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -537,32 +511,32 @@ static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel) parity = dp_header_get_parity(header); data = ((header << HEADER_BYTE_2_BIT) | (parity << PARITY_BYTE_2_BIT)); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_1, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_1, data); /* HEADER BYTE 3 */ header = 
panel->hdr_data.vsc_header_byte3; parity = dp_header_get_parity(header); data = ((header << HEADER_BYTE_3_BIT) | (parity << PARITY_BYTE_3_BIT)); - data |= dp_read(catalog, io_data, MMSS_DP_GENERIC0_1); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_1, data); + data |= dp_read(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_1); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_1, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); data = 0; - dp_write(catalog, io_data, MMSS_DP_GENERIC0_2, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_2, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_3, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_3, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_4, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_4, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_5, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_5, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -585,20 +559,20 @@ static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel) ((panel->hdr_data.dynamic_range & 0x1) << 15) | ((panel->hdr_data.content_type & 0x7) << 16); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_6, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_6, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); data = 0; - dp_write(catalog, io_data, MMSS_DP_GENERIC0_7, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_7, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_8, data); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_8, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); - dp_write(catalog, io_data, MMSS_DP_GENERIC0_9, data); 
+ dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_9, data); memcpy(buf + off, &data, sizeof(data)); off += sizeof(data); @@ -636,19 +610,23 @@ static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en) misc1_misc0_off = DP1_MISC1_MISC0 - DP_MISC1_MISC0; } - cfg = dp_read(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off); - cfg2 = dp_read(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off); - misc = dp_read(catalog, io_data, DP_MISC1_MISC0 + misc1_misc0_off); + cfg = dp_read(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG + sdp_cfg_off); + cfg2 = dp_read(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + misc = dp_read(catalog->exe_mode, io_data, + DP_MISC1_MISC0 + misc1_misc0_off); if (en) { /* VSCEXT_SDP_EN, GEN0_SDP_EN */ cfg |= BIT(16) | BIT(17); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + dp_write(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); /* EXTN_SDPSIZE GENERIC0_SDPSIZE */ cfg2 |= BIT(15) | BIT(16); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off, - cfg2); + dp_write(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); dp_catalog_panel_setup_vsc_sdp(panel); dp_catalog_panel_setup_infoframe_sdp(panel); @@ -663,12 +641,13 @@ static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en) } else { /* VSCEXT_SDP_EN, GEN0_SDP_EN */ cfg &= ~BIT(16) & ~BIT(17); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + dp_write(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); /* EXTN_SDPSIZE GENERIC0_SDPSIZE */ cfg2 &= ~BIT(15) & ~BIT(16); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off, - cfg2); + dp_write(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); /* switch back to MSA */ misc &= ~BIT(14); @@ -676,10 +655,13 @@ static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en) pr_debug("Disabled\n"); } - dp_write(catalog, io_data, DP_MISC1_MISC0 + 
misc1_misc0_off, misc); + dp_write(catalog->exe_mode, io_data, DP_MISC1_MISC0 + misc1_misc0_off, + misc); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x01); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x00); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, + 0x01); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, + 0x00); } static void dp_catalog_panel_update_transfer_unit( @@ -696,9 +678,11 @@ static void dp_catalog_panel_update_transfer_unit( catalog = dp_catalog_get_priv(panel); io_data = catalog->io.dp_link; - dp_write(catalog, io_data, DP_VALID_BOUNDARY, panel->valid_boundary); - dp_write(catalog, io_data, DP_TU, panel->dp_tu); - dp_write(catalog, io_data, DP_VALID_BOUNDARY_2, panel->valid_boundary2); + dp_write(catalog->exe_mode, io_data, DP_VALID_BOUNDARY, + panel->valid_boundary); + dp_write(catalog->exe_mode, io_data, DP_TU, panel->dp_tu); + dp_write(catalog->exe_mode, io_data, DP_VALID_BOUNDARY_2, + panel->valid_boundary2); } static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state) @@ -714,7 +698,7 @@ static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_link; - dp_write(catalog, io_data, DP_STATE_CTRL, state); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, state); /* make sure to change the hw state */ wmb(); } @@ -733,13 +717,13 @@ static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_link; - cfg = dp_read(catalog, io_data, DP_MAINLINK_CTRL); + cfg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL); cfg |= 0x02000000; - dp_write(catalog, io_data, DP_MAINLINK_CTRL, cfg); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, cfg); pr_debug("DP_MAINLINK_CTRL=0x%x\n", cfg); - dp_write(catalog, io_data, DP_MAINLINK_LEVELS, 0xa08); + dp_write(catalog->exe_mode, io_data, 
DP_MAINLINK_LEVELS, 0xa08); } static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel, @@ -767,9 +751,10 @@ static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel, pr_debug("DP_CONFIGURATION_CTRL=0x%x\n", cfg); - dp_write(catalog, io_data, DP_CONFIGURATION_CTRL + strm_reg_off, cfg); + dp_write(catalog->exe_mode, io_data, + DP_CONFIGURATION_CTRL + strm_reg_off, cfg); - mainlink_ctrl = dp_read(catalog, io_data, DP_MAINLINK_CTRL); + mainlink_ctrl = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL); if (panel->stream_id == DP_STREAM_0) io_data = catalog->io.dp_p0; @@ -777,9 +762,11 @@ static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel, io_data = catalog->io.dp_p1; if (mainlink_ctrl & BIT(8)) - dp_write(catalog, io_data, MMSS_DP_ASYNC_FIFO_CONFIG, 0x01); + dp_write(catalog->exe_mode, io_data, MMSS_DP_ASYNC_FIFO_CONFIG, + 0x01); else - dp_write(catalog, io_data, MMSS_DP_ASYNC_FIFO_CONFIG, 0x00); + dp_write(catalog->exe_mode, io_data, MMSS_DP_ASYNC_FIFO_CONFIG, + 0x00); } static void dp_catalog_panel_config_dto(struct dp_catalog_panel *panel, @@ -813,10 +800,11 @@ static void dp_catalog_panel_config_dto(struct dp_catalog_panel *panel, return; } - dp_write(catalog, io_data, MMSS_DP_DSC_DTO, ack << 1); + dp_write(catalog->exe_mode, io_data, MMSS_DP_DSC_DTO, ack << 1); } -static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl) +static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl, + bool flipped, char *lane_map) { struct dp_catalog_private *catalog; struct dp_io_data *io_data; @@ -829,7 +817,8 @@ static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_link; - dp_write(catalog, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4); + dp_write(catalog->exe_mode, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING, + 0xe4); } static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl, @@ -848,23 +837,29 @@ static 
void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl, io_data = catalog->io.dp_link; if (enable) { - reg = dp_read(catalog, io_data, DP_MAINLINK_CTRL); + reg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL); mainlink_ctrl = reg & ~(0x03); - dp_write(catalog, io_data, DP_MAINLINK_CTRL, mainlink_ctrl); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, + mainlink_ctrl); wmb(); /* make sure mainlink is turned off before reset */ mainlink_ctrl = reg | 0x02; - dp_write(catalog, io_data, DP_MAINLINK_CTRL, mainlink_ctrl); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, + mainlink_ctrl); wmb(); /* make sure mainlink entered reset */ mainlink_ctrl = reg & ~(0x03); - dp_write(catalog, io_data, DP_MAINLINK_CTRL, mainlink_ctrl); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, + mainlink_ctrl); wmb(); /* make sure mainlink reset done */ mainlink_ctrl = reg | 0x01; - dp_write(catalog, io_data, DP_MAINLINK_CTRL, mainlink_ctrl); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, + mainlink_ctrl); wmb(); /* make sure mainlink turned on */ } else { - mainlink_ctrl = dp_read(catalog, io_data, DP_MAINLINK_CTRL); + mainlink_ctrl = dp_read(catalog->exe_mode, io_data, + DP_MAINLINK_CTRL); mainlink_ctrl &= ~BIT(0); - dp_write(catalog, io_data, DP_MAINLINK_CTRL, mainlink_ctrl); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, + mainlink_ctrl); } } @@ -891,7 +886,7 @@ static void dp_catalog_panel_config_misc(struct dp_catalog_panel *panel) reg_offset = DP1_MISC1_MISC0 - DP_MISC1_MISC0; pr_debug("misc settings = 0x%x\n", panel->misc_val); - dp_write(catalog, io_data, DP_MISC1_MISC0 + reg_offset, + dp_write(catalog->exe_mode, io_data, DP_MISC1_MISC0 + reg_offset, panel->misc_val); } @@ -946,9 +941,9 @@ static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel, if (panel->stream_id == DP_STREAM_1) strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M; - pixel_m = dp_read(catalog, io_data, + pixel_m = dp_read(catalog->exe_mode, 
io_data, MMSS_DP_PIXEL_M + strm_reg_off); - pixel_n = dp_read(catalog, io_data, + pixel_n = dp_read(catalog->exe_mode, io_data, MMSS_DP_PIXEL_N + strm_reg_off); pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); @@ -972,8 +967,10 @@ static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel, } pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid); - dp_write(catalog, io_data, DP_SOFTWARE_MVID + mvid_reg_off, mvid); - dp_write(catalog, io_data, DP_SOFTWARE_NVID + nvid_reg_off, nvid); + dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_reg_off, + mvid); + dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_reg_off, + nvid); } static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl, @@ -995,13 +992,13 @@ static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl, bit = 1; bit <<= (pattern - 1); pr_debug("hw: bit=%d train=%d\n", bit, pattern); - dp_write(catalog, io_data, DP_STATE_CTRL, bit); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, bit); bit = 8; bit <<= (pattern - 1); while (cnt--) { - data = dp_read(catalog, io_data, DP_MAINLINK_READY); + data = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_READY); if (data & bit) break; } @@ -1024,24 +1021,27 @@ static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip) io_data = catalog->io.usb3_dp_com; - dp_write(catalog, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x0a); - dp_write(catalog, io_data, USB3_DP_COM_PHY_MODE_CTRL, 0x02); - dp_write(catalog, io_data, USB3_DP_COM_SW_RESET, 0x01); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x0a); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_PHY_MODE_CTRL, 0x02); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_SW_RESET, 0x01); /* make sure usb3 com phy software reset is done */ wmb(); - if (!flip) /* CC1 */ - dp_write(catalog, io_data, USB3_DP_COM_TYPEC_CTRL, 0x02); - else /* CC2 */ - dp_write(catalog, io_data, USB3_DP_COM_TYPEC_CTRL, 0x03); + if (!flip) { /* CC1 */ + 
dp_write(catalog->exe_mode, io_data, USB3_DP_COM_TYPEC_CTRL, + 0x02); + } else { /* CC2 */ + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_TYPEC_CTRL, + 0x03); + } - dp_write(catalog, io_data, USB3_DP_COM_SWI_CTRL, 0x00); - dp_write(catalog, io_data, USB3_DP_COM_SW_RESET, 0x00); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_SWI_CTRL, 0x00); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_SW_RESET, 0x00); /* make sure the software reset is done */ wmb(); - dp_write(catalog, io_data, USB3_DP_COM_POWER_DOWN_CTRL, 0x01); - dp_write(catalog, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x00); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_POWER_DOWN_CTRL, 0x01); + dp_write(catalog->exe_mode, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x00); /* make sure phy is brought out of reset */ wmb(); } @@ -1070,42 +1070,46 @@ static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel, io_data = catalog->io.dp_p1; if (!enable) { - dp_write(catalog, io_data, MMSS_DP_TPG_MAIN_CONTROL, 0x0); - dp_write(catalog, io_data, MMSS_DP_BIST_ENABLE, 0x0); - dp_write(catalog, io_data, MMSS_DP_TIMING_ENGINE_EN, 0x0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_TPG_MAIN_CONTROL, + 0x0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_BIST_ENABLE, 0x0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_TIMING_ENGINE_EN, + 0x0); wmb(); /* ensure Timing generator is turned off */ return; } - dp_write(catalog, io_data, MMSS_DP_INTF_CONFIG, 0x0); - dp_write(catalog, io_data, MMSS_DP_INTF_HSYNC_CTL, panel->hsync_ctl); - dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F0, + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_CONFIG, 0x0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_HSYNC_CTL, + panel->hsync_ctl); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F0, panel->vsync_period * panel->hsync_period); - dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, panel->v_sync_width * 
panel->hsync_period); - dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_DISPLAY_HCTL, + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, + 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_DISPLAY_HCTL, panel->display_hctl); - dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_HCTL, 0); - dp_write(catalog, io_data, MMSS_INTF_DISPLAY_V_START_F0, + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_HCTL, 0); + dp_write(catalog->exe_mode, io_data, MMSS_INTF_DISPLAY_V_START_F0, panel->display_v_start); - dp_write(catalog, io_data, MMSS_DP_INTF_DISPLAY_V_END_F0, + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_DISPLAY_V_END_F0, panel->display_v_end); - dp_write(catalog, io_data, MMSS_INTF_DISPLAY_V_START_F1, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); - dp_write(catalog, io_data, MMSS_DP_INTF_POLARITY_CTL, 0); + dp_write(catalog->exe_mode, io_data, MMSS_INTF_DISPLAY_V_START_F1, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_POLARITY_CTL, 0); wmb(); /* ensure TPG registers are programmed */ - dp_write(catalog, io_data, MMSS_DP_TPG_MAIN_CONTROL, 0x100); - dp_write(catalog, io_data, 
MMSS_DP_TPG_VIDEO_CONFIG, 0x5); + dp_write(catalog->exe_mode, io_data, MMSS_DP_TPG_MAIN_CONTROL, 0x100); + dp_write(catalog->exe_mode, io_data, MMSS_DP_TPG_VIDEO_CONFIG, 0x5); wmb(); /* ensure TPG config is programmed */ - dp_write(catalog, io_data, MMSS_DP_BIST_ENABLE, 0x1); - dp_write(catalog, io_data, MMSS_DP_TIMING_ENGINE_EN, 0x1); + dp_write(catalog->exe_mode, io_data, MMSS_DP_BIST_ENABLE, 0x1); + dp_write(catalog->exe_mode, io_data, MMSS_DP_TIMING_ENGINE_EN, 0x1); wmb(); /* ensure Timing generator is turned on */ } @@ -1123,14 +1127,14 @@ static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_ahb; - sw_reset = dp_read(catalog, io_data, DP_SW_RESET); + sw_reset = dp_read(catalog->exe_mode, io_data, DP_SW_RESET); sw_reset |= BIT(0); - dp_write(catalog, io_data, DP_SW_RESET, sw_reset); + dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset); usleep_range(1000, 1010); /* h/w recommended delay */ sw_reset &= ~BIT(0); - dp_write(catalog, io_data, DP_SW_RESET, sw_reset); + dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset); } static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl) @@ -1150,7 +1154,7 @@ static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl) while (--cnt) { /* DP_MAINLINK_READY */ - data = dp_read(catalog, io_data, DP_MAINLINK_READY); + data = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_READY); if (data & BIT(0)) return true; @@ -1176,13 +1180,16 @@ static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl, io_data = catalog->io.dp_ahb; if (enable) { - dp_write(catalog, io_data, DP_INTR_STATUS, DP_INTR_MASK1); - dp_write(catalog, io_data, DP_INTR_STATUS2, DP_INTR_MASK2); - dp_write(catalog, io_data, DP_INTR_STATUS5, DP_INTR_MASK5); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS, + DP_INTR_MASK1); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS2, + DP_INTR_MASK2); + dp_write(catalog->exe_mode, io_data, 
DP_INTR_STATUS5, + DP_INTR_MASK5); } else { - dp_write(catalog, io_data, DP_INTR_STATUS, 0x00); - dp_write(catalog, io_data, DP_INTR_STATUS2, 0x00); - dp_write(catalog, io_data, DP_INTR_STATUS5, 0x00); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS, 0x00); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS2, 0x00); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS5, 0x00); } } @@ -1200,18 +1207,20 @@ static void dp_catalog_ctrl_hpd_config(struct dp_catalog_ctrl *ctrl, bool en) io_data = catalog->io.dp_aux; if (en) { - u32 reftimer = dp_read(catalog, io_data, DP_DP_HPD_REFTIMER); + u32 reftimer = dp_read(catalog->exe_mode, io_data, + DP_DP_HPD_REFTIMER); - dp_write(catalog, io_data, DP_DP_HPD_INT_ACK, 0xF); - dp_write(catalog, io_data, DP_DP_HPD_INT_MASK, 0xF); + dp_write(catalog->exe_mode, io_data, DP_DP_HPD_INT_ACK, 0xF); + dp_write(catalog->exe_mode, io_data, DP_DP_HPD_INT_MASK, 0xF); /* Enabling REFTIMER */ reftimer |= BIT(16); - dp_write(catalog, io_data, DP_DP_HPD_REFTIMER, 0xF); + dp_write(catalog->exe_mode, io_data, DP_DP_HPD_REFTIMER, + reftimer); /* Enable HPD */ - dp_write(catalog, io_data, DP_DP_HPD_CTRL, 0x1); + dp_write(catalog->exe_mode, io_data, DP_DP_HPD_CTRL, 0x1); } else { /*Disable HPD */ - dp_write(catalog, io_data, DP_DP_HPD_CTRL, 0x0); + dp_write(catalog->exe_mode, io_data, DP_DP_HPD_CTRL, 0x0); } } @@ -1229,19 +1238,19 @@ static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_ahb; - ctrl->isr = dp_read(catalog, io_data, DP_INTR_STATUS2); + ctrl->isr = dp_read(catalog->exe_mode, io_data, DP_INTR_STATUS2); ctrl->isr &= ~DP_INTR_MASK2; ack = ctrl->isr & DP_INTERRUPT_STATUS2; ack <<= 1; ack |= DP_INTR_MASK2; - dp_write(catalog, io_data, DP_INTR_STATUS2, ack); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS2, ack); - ctrl->isr5 = dp_read(catalog, io_data, DP_INTR_STATUS5); + ctrl->isr5 = dp_read(catalog->exe_mode, io_data, DP_INTR_STATUS5); 
ctrl->isr5 &= ~DP_INTR_MASK5; ack = ctrl->isr5 & DP_INTERRUPT_STATUS5; ack <<= 1; ack |= DP_INTR_MASK5; - dp_write(catalog, io_data, DP_INTR_STATUS5, ack); + dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS5, ack); } static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl) @@ -1257,9 +1266,9 @@ static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl) catalog = dp_catalog_get_priv(ctrl); io_data = catalog->io.dp_ahb; - dp_write(catalog, io_data, DP_PHY_CTRL, 0x5); /* bit 0 & 2 */ + dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x5); /* bit 0 & 2 */ usleep_range(1000, 1010); /* h/w recommended delay */ - dp_write(catalog, io_data, DP_PHY_CTRL, 0x0); + dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x0); wmb(); /* make sure PHY reset done */ } @@ -1284,7 +1293,7 @@ static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl, info |= ((orientation & 0x0F) << 4); pr_debug("Shared Info = 0x%x\n", info); - dp_write(catalog, io_data, DP_PHY_SPARE0, info); + dp_write(catalog->exe_mode, io_data, DP_PHY_SPARE0, info); } static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl, @@ -1309,12 +1318,12 @@ static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl, /* program default setting first */ io_data = catalog->io.dp_ln_tx0; - dp_write(catalog, io_data, TXn_TX_DRV_LVL, 0x2A); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, 0x20); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, 0x2A); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20); io_data = catalog->io.dp_ln_tx1; - dp_write(catalog, io_data, TXn_TX_DRV_LVL, 0x2A); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, 0x20); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, 0x2A); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20); /* Enable MUX to use Cursor values from these registers */ value0 |= BIT(5); @@ -1323,12 +1332,14 @@ static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl, /* Configure 
host and panel only if both values are allowed */ if (value0 != 0xFF && value1 != 0xFF) { io_data = catalog->io.dp_ln_tx0; - dp_write(catalog, io_data, TXn_TX_DRV_LVL, value0); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, value1); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, value0); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, + value1); io_data = catalog->io.dp_ln_tx1; - dp_write(catalog, io_data, TXn_TX_DRV_LVL, value0); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, value1); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, value0); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, + value1); pr_debug("hw: vx_value=0x%x px_value=0x%x\n", value0, value1); @@ -1354,50 +1365,50 @@ static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl, io_data = catalog->io.dp_link; - dp_write(catalog, io_data, DP_STATE_CTRL, 0x0); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x0); switch (pattern) { case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING: - dp_write(catalog, io_data, DP_STATE_CTRL, 0x1); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x1); break; case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT: value &= ~(1 << 16); - dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, - value); + dp_write(catalog->exe_mode, io_data, + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= 0xFC; - dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, - value); - dp_write(catalog, io_data, DP_MAINLINK_LEVELS, 0x2); - dp_write(catalog, io_data, DP_STATE_CTRL, 0x10); + dp_write(catalog->exe_mode, io_data, + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_LEVELS, 0x2); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x10); break; case DP_TEST_PHY_PATTERN_PRBS7: - dp_write(catalog, io_data, DP_STATE_CTRL, 0x20); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x20); break; case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN: - 
dp_write(catalog, io_data, DP_STATE_CTRL, 0x40); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x40); /* 00111110000011111000001111100000 */ - dp_write(catalog, io_data, DP_TEST_80BIT_CUSTOM_PATTERN_REG0, - 0x3E0F83E0); + dp_write(catalog->exe_mode, io_data, + DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0); /* 00001111100000111110000011111000 */ - dp_write(catalog, io_data, DP_TEST_80BIT_CUSTOM_PATTERN_REG1, - 0x0F83E0F8); + dp_write(catalog->exe_mode, io_data, + DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8); /* 1111100000111110 */ - dp_write(catalog, io_data, DP_TEST_80BIT_CUSTOM_PATTERN_REG2, - 0x0000F83E); + dp_write(catalog->exe_mode, io_data, + DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E); break; case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1: value = BIT(16); - dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, - value); + dp_write(catalog->exe_mode, io_data, + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= 0xFC; - dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, - value); - dp_write(catalog, io_data, DP_MAINLINK_LEVELS, 0x2); - dp_write(catalog, io_data, DP_STATE_CTRL, 0x10); + dp_write(catalog->exe_mode, io_data, + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_LEVELS, 0x2); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x10); break; case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3: - dp_write(catalog, io_data, DP_MAINLINK_CTRL, 0x11); - dp_write(catalog, io_data, DP_STATE_CTRL, 0x8); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, 0x11); + dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x8); break; default: pr_debug("No valid test pattern requested: 0x%x\n", pattern); @@ -1422,7 +1433,7 @@ static u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog_ctrl *ctrl) io_data = catalog->io.dp_link; - return dp_read(catalog, io_data, DP_MAINLINK_READY); + return dp_read(catalog->exe_mode, io_data, DP_MAINLINK_READY); } static int dp_catalog_reg_dump(struct 
dp_catalog *dp_catalog, @@ -1502,13 +1513,13 @@ static void dp_catalog_ctrl_mst_config(struct dp_catalog_ctrl *ctrl, io_data = catalog->io.dp_link; - reg = dp_read(catalog, io_data, DP_MAINLINK_CTRL); + reg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL); if (enable) reg |= (0x04000100); else reg &= ~(0x04000100); - dp_write(catalog, io_data, DP_MAINLINK_CTRL, reg); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, reg); /* make sure mainlink MST configuration is updated */ wmb(); } @@ -1527,7 +1538,7 @@ static void dp_catalog_ctrl_trigger_act(struct dp_catalog_ctrl *ctrl) io_data = catalog->io.dp_link; - dp_write(catalog, io_data, DP_MST_ACT, 0x1); + dp_write(catalog->exe_mode, io_data, DP_MST_ACT, 0x1); /* make sure ACT signal is performed */ wmb(); } @@ -1550,7 +1561,7 @@ static void dp_catalog_ctrl_read_act_complete_sts(struct dp_catalog_ctrl *ctrl, io_data = catalog->io.dp_link; - reg = dp_read(catalog, io_data, DP_MST_ACT); + reg = dp_read(catalog->exe_mode, io_data, DP_MST_ACT); if (!reg) *sts = true; @@ -1606,8 +1617,10 @@ static void dp_catalog_ctrl_channel_alloc(struct dp_catalog_ctrl *ctrl, pr_debug("ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, slot_reg_1, slot_reg_2); - dp_write(catalog, io_data, DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); - dp_write(catalog, io_data, DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); + dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_1_32 + reg_off, + slot_reg_1); + dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_33_63 + reg_off, + slot_reg_2); } static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl, @@ -1640,8 +1653,10 @@ static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl, if (ch == DP_STREAM_1) reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32; - slot_reg_1 = dp_read(catalog, io_data, DP_DP0_TIMESLOT_1_32 + reg_off); - slot_reg_2 = dp_read(catalog, io_data, DP_DP0_TIMESLOT_33_63 + reg_off); + slot_reg_1 = dp_read(catalog->exe_mode, io_data, + 
DP_DP0_TIMESLOT_1_32 + reg_off); + slot_reg_2 = dp_read(catalog->exe_mode, io_data, + DP_DP0_TIMESLOT_33_63 + reg_off); ch_start_slot = ch_start_slot - 1; for (i = 0; i < tot_slot_cnt; i++) { @@ -1657,8 +1672,10 @@ static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl, pr_debug("dealloc ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, slot_reg_1, slot_reg_2); - dp_write(catalog, io_data, DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); - dp_write(catalog, io_data, DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); + dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_1_32 + reg_off, + slot_reg_1); + dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_33_63 + reg_off, + slot_reg_2); } static void dp_catalog_ctrl_update_rg(struct dp_catalog_ctrl *ctrl, u32 ch, @@ -1686,7 +1703,7 @@ static void dp_catalog_ctrl_update_rg(struct dp_catalog_ctrl *ctrl, u32 ch, if (ch == DP_STREAM_1) reg_off = DP_DP1_RG - DP_DP0_RG; - dp_write(catalog, io_data, DP_DP0_RG + reg_off, rg); + dp_write(catalog->exe_mode, io_data, DP_DP0_RG + reg_off, rg); } /* panel related catalog functions */ @@ -1712,12 +1729,13 @@ static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel) if (panel->stream_id == DP_STREAM_1) offset = DP1_TOTAL_HOR_VER - DP_TOTAL_HOR_VER; - dp_write(catalog, io_data, DP_TOTAL_HOR_VER + offset, panel->total); - dp_write(catalog, io_data, DP_START_HOR_VER_FROM_SYNC + offset, - panel->sync_start); - dp_write(catalog, io_data, DP_HSYNC_VSYNC_WIDTH_POLARITY + offset, - panel->width_blanking); - dp_write(catalog, io_data, DP_ACTIVE_HOR_VER + offset, + dp_write(catalog->exe_mode, io_data, DP_TOTAL_HOR_VER + offset, + panel->total); + dp_write(catalog->exe_mode, io_data, + DP_START_HOR_VER_FROM_SYNC + offset, panel->sync_start); + dp_write(catalog->exe_mode, io_data, + DP_HSYNC_VSYNC_WIDTH_POLARITY + offset, panel->width_blanking); + dp_write(catalog->exe_mode, io_data, DP_ACTIVE_HOR_VER + offset, panel->dp_active); end: return 0; @@ -1785,7 +1803,8 @@ static void 
dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio) catalog = dp_catalog_get_priv(audio); io_data = catalog->io.dp_link; - sdp_cfg = dp_read(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off); + sdp_cfg = dp_read(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG + sdp_cfg_off); /* AUDIO_TIMESTAMP_SDP_EN */ sdp_cfg |= BIT(1); @@ -1799,16 +1818,19 @@ static void dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio) sdp_cfg |= BIT(20); pr_debug("sdp_cfg = 0x%x\n", sdp_cfg); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off, sdp_cfg); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off, + sdp_cfg); - sdp_cfg2 = dp_read(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg_off); + sdp_cfg2 = dp_read(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG2 + sdp_cfg_off); /* IFRM_REGSRC -> Do not use reg values */ sdp_cfg2 &= ~BIT(0); /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ sdp_cfg2 &= ~BIT(1); pr_debug("sdp_cfg2 = 0x%x\n", sdp_cfg2); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg_off, sdp_cfg2); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg_off, + sdp_cfg2); } static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio) @@ -1829,7 +1851,7 @@ static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio) sdp = audio->sdp_type; header = audio->sdp_header; - audio->data = dp_read(catalog, io_data, sdp_map[sdp][header]); + audio->data = dp_read(catalog->exe_mode, io_data, sdp_map[sdp][header]); } static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio) @@ -1852,7 +1874,7 @@ static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio) header = audio->sdp_header; data = audio->data; - dp_write(catalog, io_data, sdp_map[sdp][header], data); + dp_write(catalog->exe_mode, io_data, sdp_map[sdp][header], data); } static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio) @@ -1870,7 +1892,7 @@ static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio) 
pr_debug("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); - dp_write(catalog, io_data, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); + dp_write(catalog->exe_mode, io_data, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); } static void dp_catalog_audio_safe_to_exit_level(struct dp_catalog_audio *audio) @@ -1884,14 +1906,16 @@ static void dp_catalog_audio_safe_to_exit_level(struct dp_catalog_audio *audio) io_data = catalog->io.dp_link; safe_to_exit_level = audio->data; - mainlink_levels = dp_read(catalog, io_data, DP_MAINLINK_LEVELS); + mainlink_levels = dp_read(catalog->exe_mode, io_data, + DP_MAINLINK_LEVELS); mainlink_levels &= 0xFE0; mainlink_levels |= safe_to_exit_level; pr_debug("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", mainlink_levels, safe_to_exit_level); - dp_write(catalog, io_data, DP_MAINLINK_LEVELS, mainlink_levels); + dp_write(catalog->exe_mode, io_data, DP_MAINLINK_LEVELS, + mainlink_levels); } static void dp_catalog_audio_enable(struct dp_catalog_audio *audio) @@ -1906,7 +1930,7 @@ static void dp_catalog_audio_enable(struct dp_catalog_audio *audio) io_data = catalog->io.dp_link; enable = !!audio->data; - audio_ctrl = dp_read(catalog, io_data, MMSS_DP_AUDIO_CFG); + audio_ctrl = dp_read(catalog->exe_mode, io_data, MMSS_DP_AUDIO_CFG); if (enable) audio_ctrl |= BIT(0); @@ -1914,7 +1938,7 @@ static void dp_catalog_audio_enable(struct dp_catalog_audio *audio) audio_ctrl &= ~BIT(0); pr_debug("dp_audio_cfg = 0x%x\n", audio_ctrl); - dp_write(catalog, io_data, MMSS_DP_AUDIO_CFG, audio_ctrl); + dp_write(catalog->exe_mode, io_data, MMSS_DP_AUDIO_CFG, audio_ctrl); /* make sure audio engine is disabled */ wmb(); @@ -1937,7 +1961,8 @@ static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel) offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; /* Config header and parity byte 1 */ - value = dp_read(catalog, io_data, MMSS_DP_GENERIC1_0 + offset); + value = dp_read(catalog->exe_mode, io_data, + MMSS_DP_GENERIC1_0 + offset); new_value = 0x83; parity_byte = 
dp_header_get_parity(new_value); @@ -1945,10 +1970,12 @@ static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel) | (parity_byte << PARITY_BYTE_1_BIT)); pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_0 + offset, value); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_0 + offset, + value); /* Config header and parity byte 2 */ - value = dp_read(catalog, io_data, MMSS_DP_GENERIC1_1 + offset); + value = dp_read(catalog->exe_mode, io_data, + MMSS_DP_GENERIC1_1 + offset); new_value = 0x1b; parity_byte = dp_header_get_parity(new_value); @@ -1956,10 +1983,12 @@ static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel) | (parity_byte << PARITY_BYTE_2_BIT)); pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_1 + offset, value); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_1 + offset, + value); /* Config header and parity byte 3 */ - value = dp_read(catalog, io_data, MMSS_DP_GENERIC1_1 + offset); + value = dp_read(catalog->exe_mode, io_data, + MMSS_DP_GENERIC1_1 + offset); new_value = (0x0 | (0x12 << 2)); parity_byte = dp_header_get_parity(new_value); @@ -1967,7 +1996,8 @@ static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel) | (parity_byte << PARITY_BYTE_3_BIT)); pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", new_value, parity_byte); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_1 + offset, value); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_1 + offset, + value); } static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel) @@ -2015,38 +2045,39 @@ static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel) vendor = panel->spd_vendor_name; product = panel->spd_product_description; - dp_write(catalog, io_data, MMSS_DP_GENERIC1_2 + offset, + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_2 + 
offset, ((vendor[0] & 0x7f) | ((vendor[1] & 0x7f) << 8) | ((vendor[2] & 0x7f) << 16) | ((vendor[3] & 0x7f) << 24))); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_3 + offset, + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_3 + offset, ((vendor[4] & 0x7f) | ((vendor[5] & 0x7f) << 8) | ((vendor[6] & 0x7f) << 16) | ((vendor[7] & 0x7f) << 24))); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_4 + offset, + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_4 + offset, ((product[0] & 0x7f) | ((product[1] & 0x7f) << 8) | ((product[2] & 0x7f) << 16) | ((product[3] & 0x7f) << 24))); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_5 + offset, + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_5 + offset, ((product[4] & 0x7f) | ((product[5] & 0x7f) << 8) | ((product[6] & 0x7f) << 16) | ((product[7] & 0x7f) << 24))); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_6 + offset, + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_6 + offset, ((product[8] & 0x7f) | ((product[9] & 0x7f) << 8) | ((product[10] & 0x7f) << 16) | ((product[11] & 0x7f) << 24))); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_7 + offset, + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_7 + offset, ((product[12] & 0x7f) | ((product[13] & 0x7f) << 8) | ((product[14] & 0x7f) << 16) | ((product[15] & 0x7f) << 24))); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_8 + offset, device_type); - dp_write(catalog, io_data, MMSS_DP_GENERIC1_9 + offset, 0x00); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_8 + offset, + device_type); + dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_9 + offset, 0x00); if (panel->stream_id == DP_STREAM_1) { sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; @@ -2054,18 +2085,24 @@ static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel) sdp_cfg3_off = MMSS_DP1_SDP_CFG3 - MMSS_DP_SDP_CFG; } - spd_cfg = dp_read(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off); + spd_cfg = dp_read(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG + 
sdp_cfg_off); /* GENERIC1_SDP for SPD Infoframe */ spd_cfg |= BIT(18); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off, spd_cfg); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off, + spd_cfg); - spd_cfg2 = dp_read(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + spd_cfg2 = dp_read(catalog->exe_mode, io_data, + MMSS_DP_SDP_CFG2 + sdp_cfg2_off); /* 28 data bytes for SPD Infoframe with GENERIC1 set */ spd_cfg2 |= BIT(17); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off, spd_cfg2); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off, + spd_cfg2); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x1); - dp_write(catalog, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x0); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, + 0x1); + dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off, + 0x0); } static void dp_catalog_get_io_buf(struct dp_catalog_private *catalog) @@ -2084,6 +2121,7 @@ static void dp_catalog_get_io_buf(struct dp_catalog_private *catalog) dp_catalog_fill_io_buf(dp_mmss_cc); dp_catalog_fill_io_buf(hdcp_physical); dp_catalog_fill_io_buf(dp_p1); + dp_catalog_fill_io_buf(dp_tcsr); } static void dp_catalog_get_io(struct dp_catalog_private *catalog) @@ -2102,6 +2140,7 @@ static void dp_catalog_get_io(struct dp_catalog_private *catalog) dp_catalog_fill_io(dp_mmss_cc); dp_catalog_fill_io(hdcp_physical); dp_catalog_fill_io(dp_p1); + dp_catalog_fill_io(dp_tcsr); } static void dp_catalog_set_exe_mode(struct dp_catalog *dp_catalog, char *mode) @@ -2138,6 +2177,8 @@ static int dp_catalog_init(struct device *dev, struct dp_catalog *catalog, if (parser->hw_cfg.phy_version == DP_PHY_VERSION_4_2_0) rc = dp_catalog_get_v420(dev, catalog, &catalog_priv->io); + else if (parser->hw_cfg.phy_version == DP_PHY_VERSION_2_0_0) + rc = dp_catalog_get_v200(dev, catalog, &catalog_priv->io); return rc; } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h 
b/drivers/gpu/drm/msm/dp/dp_catalog.h index ac6c18e3466a6e1a48ca84aad8503dee717c87d8..6b09595bdfb10e9f358e42750abf7bdf6ce4c51d 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -100,7 +100,8 @@ struct dp_catalog_ctrl { void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state); void (*config_ctrl)(struct dp_catalog_ctrl *ctrl); - void (*lane_mapping)(struct dp_catalog_ctrl *ctrl); + void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped, + char *lane_map); void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable); void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern); void (*reset)(struct dp_catalog_ctrl *ctrl); @@ -293,10 +294,40 @@ static inline u8 dp_header_get_parity(u32 data) return parity_byte; } +static inline u32 dp_read(char *exe_mode, struct dp_io_data *io_data, + u32 offset) +{ + u32 data = 0; + + if (!strcmp(exe_mode, "hw") || !strcmp(exe_mode, "all")) { + data = readl_relaxed(io_data->io.base + offset); + } else if (!strcmp(exe_mode, "sw")) { + if (io_data->buf) + memcpy(&data, io_data->buf + offset, sizeof(offset)); + } + + return data; +} + +static inline void dp_write(char *exe_mode, struct dp_io_data *io_data, + u32 offset, u32 data) +{ + if (!strcmp(exe_mode, "hw") || !strcmp(exe_mode, "all")) + writel_relaxed(data, io_data->io.base + offset); + + if (!strcmp(exe_mode, "sw") || !strcmp(exe_mode, "all")) { + if (io_data->buf) + memcpy(io_data->buf + offset, &data, sizeof(data)); + } +} + struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser); void dp_catalog_put(struct dp_catalog *catalog); int dp_catalog_get_v420(struct device *dev, struct dp_catalog *catalog, void *io); +int dp_catalog_get_v200(struct device *dev, struct dp_catalog *catalog, + void *io); + #endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c new file mode 100644 index 
0000000000000000000000000000000000000000..eb116df448ea0a9e106e59ee7f07c31f0b512794 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include + +#include "dp_catalog.h" +#include "dp_reg.h" + +#define dp_catalog_get_priv_v200(x) ({ \ + struct dp_catalog *dp_catalog; \ + dp_catalog = container_of(x, struct dp_catalog, x); \ + dp_catalog->priv.data; \ +}) + +struct dp_catalog_io { + struct dp_io_data *dp_ahb; + struct dp_io_data *dp_aux; + struct dp_io_data *dp_link; + struct dp_io_data *dp_p0; + struct dp_io_data *dp_phy; + struct dp_io_data *dp_ln_tx0; + struct dp_io_data *dp_ln_tx1; + struct dp_io_data *dp_mmss_cc; + struct dp_io_data *dp_pll; + struct dp_io_data *usb3_dp_com; + struct dp_io_data *hdcp_physical; + struct dp_io_data *dp_p1; + struct dp_io_data *dp_tcsr; +}; + +struct dp_catalog_private_v200 { + struct device *dev; + struct dp_catalog_io *io; + + char exe_mode[SZ_4]; +}; + +static void dp_catalog_aux_clear_hw_interrupts_v200(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u32 data = 0; + + if (!aux) { + pr_err("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(aux); + io_data = catalog->io->dp_phy; + + data = dp_read(catalog->exe_mode, io_data, + DP_PHY_AUX_INTERRUPT_STATUS_V200); + + dp_write(catalog->exe_mode, io_data, 
DP_PHY_AUX_INTERRUPT_CLEAR_V200, + 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200, + 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200, + 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_aux_setup_v200(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + int i = 0, sw_reset = 0; + + if (!aux || !cfg) { + pr_err("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(aux); + + io_data = catalog->io->dp_ahb; + sw_reset = dp_read(catalog->exe_mode, io_data, DP_SW_RESET); + + sw_reset |= BIT(0); + dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset); + usleep_range(1000, 1010); /* h/w recommended delay */ + + sw_reset &= ~BIT(0); + dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset); + + dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x4); /* bit 2 */ + udelay(1000); + dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x0); /* bit 2 */ + wmb(); /* make sure programming happened */ + + io_data = catalog->io->dp_tcsr; + dp_write(catalog->exe_mode, io_data, 0x4c, 0x1); /* bit 0 & 2 */ + wmb(); /* make sure programming happened */ + + io_data = catalog->io->dp_phy; + dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x3c); + wmb(); /* make sure PD programming happened */ + dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x3d); + wmb(); /* make sure PD programming happened */ + + /* DP AUX CFG register programming */ + io_data = catalog->io->dp_phy; + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp_write(catalog->exe_mode, io_data, cfg[i].offset, + cfg[i].lut[cfg[i].current_index]); + + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK_V200, + 0x1F); + wmb(); /* make sure AUX configuration is done before enabling it */ +} + +static void 
dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz, + bool fixed_nvid) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid; + u64 mvid_calc; + u32 const nvid_fixed = 0x8000; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0; + u32 mvid_reg_off = 0, nvid_reg_off = 0; + + if (!panel) { + pr_err("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + pr_err("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv_v200(panel); + if (fixed_nvid) { + pr_debug("use fixed NVID=0x%x\n", nvid_fixed); + nvid = nvid_fixed; + + pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz", + rate, stream_rate_khz); + + /* + * For intermediate results, use 64 bit arithmetic to avoid + * loss of precision. + */ + mvid_calc = (u64) stream_rate_khz * nvid; + mvid_calc = div_u64(mvid_calc, rate); + + /* + * truncate back to 32 bits as this final divided value will + * always be within the range of a 32 bit unsigned int. 
+ */ + mvid = (u32) mvid_calc; + } else { + io_data = catalog->io->dp_mmss_cc; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = MMSS_DP_PIXEL1_M_V200 - + MMSS_DP_PIXEL_M_V200; + + pixel_m = dp_read(catalog->exe_mode, io_data, + MMSS_DP_PIXEL_M_V200 + strm_reg_off); + pixel_n = dp_read(catalog->exe_mode, io_data, + MMSS_DP_PIXEL_N_V200 + strm_reg_off); + pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + pr_debug("rate = %d\n", rate); + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + } + + io_data = catalog->io->dp_link; + + if (panel->stream_id == DP_STREAM_1) { + mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID; + nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID; + } + + pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_reg_off, + mvid); + dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_reg_off, + nvid); +} + +static void dp_catalog_ctrl_lane_mapping_v200(struct dp_catalog_ctrl *ctrl, + bool flipped, char *lane_map) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u8 l_map[4] = { 0 }, i = 0, j = 0; + u32 lane_map_reg = 0; + + if (!ctrl) { + pr_err("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(ctrl); + io_data = catalog->io->dp_link; + + /* For flip case, swap phy lanes with ML0 and ML3, ML1 and ML2 */ + if (flipped) { + for (i = 0; i < DP_MAX_PHY_LN; i++) { + if (lane_map[i] == DP_ML0) { + for (j = 0; j < DP_MAX_PHY_LN; j++) { + if (lane_map[j] == DP_ML3) { + l_map[i] = DP_ML3; + l_map[j] = DP_ML0; + break; + } + } + } else if (lane_map[i] == DP_ML1) { + for (j = 0; j < DP_MAX_PHY_LN; j++) { + if (lane_map[j] == DP_ML2) { + l_map[i] = DP_ML2; + l_map[j] = DP_ML1; + break; + } + } + } + } + } else { + /* Normal orientation */ + for (i = 0; i < DP_MAX_PHY_LN; i++) + l_map[i] = 
lane_map[i]; + } + + lane_map_reg = ((l_map[3]&3)<<6)|((l_map[2]&3)<<4)|((l_map[1]&3)<<2) + |(l_map[0]&3); + + dp_write(catalog->exe_mode, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING, + lane_map_reg); +} + +static void dp_catalog_ctrl_usb_reset_v200(struct dp_catalog_ctrl *ctrl, + bool flip) +{ +} + +static void dp_catalog_put_v200(struct dp_catalog *catalog) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!catalog || !catalog->priv.data) + return; + + catalog_priv = catalog->priv.data; + devm_kfree(catalog_priv->dev, catalog_priv); +} + +static void dp_catalog_set_exe_mode_v200(struct dp_catalog *catalog, char *mode) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!catalog || !catalog->priv.data) + return; + + catalog_priv = catalog->priv.data; + + strlcpy(catalog_priv->exe_mode, mode, sizeof(catalog_priv->exe_mode)); +} + +int dp_catalog_get_v200(struct device *dev, struct dp_catalog *catalog, + void *io) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!dev || !catalog) { + pr_err("invalid input\n"); + return -EINVAL; + } + + catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL); + if (!catalog_priv) + return -ENOMEM; + + catalog_priv->dev = dev; + catalog_priv->io = io; + catalog->priv.data = catalog_priv; + + catalog->priv.put = dp_catalog_put_v200; + catalog->priv.set_exe_mode = dp_catalog_set_exe_mode_v200; + + catalog->aux.clear_hw_interrupts = + dp_catalog_aux_clear_hw_interrupts_v200; + catalog->aux.setup = dp_catalog_aux_setup_v200; + + catalog->panel.config_msa = dp_catalog_panel_config_msa_v200; + + catalog->ctrl.lane_mapping = dp_catalog_ctrl_lane_mapping_v200; + catalog->ctrl.usb_reset = dp_catalog_ctrl_usb_reset_v200; + + /* Set the default execution mode to hardware mode */ + dp_catalog_set_exe_mode_v200(catalog, "hw"); + + return 0; +} diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c index 
54b5f14d965f6a3100a24ff2845e259e152a28cb..5337835a3a2067fb46ba28cf2f2fa7f95485a3af 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c @@ -63,36 +63,6 @@ struct dp_catalog_private_v420 { char exe_mode[SZ_4]; }; -static u32 dp_read(struct dp_catalog_private_v420 *catalog, - struct dp_io_data *io_data, u32 offset) -{ - u32 data = 0; - - if (!strcmp(catalog->exe_mode, "hw") || - !strcmp(catalog->exe_mode, "all")) { - data = readl_relaxed(io_data->io.base + offset); - } else if (!strcmp(catalog->exe_mode, "sw")) { - if (io_data->buf) - memcpy(&data, io_data->buf + offset, sizeof(offset)); - } - - return data; -} - -static void dp_write(struct dp_catalog_private_v420 *catalog, - struct dp_io_data *io_data, u32 offset, u32 data) -{ - if (!strcmp(catalog->exe_mode, "hw") || - !strcmp(catalog->exe_mode, "all")) - writel_relaxed(data, io_data->io.base + offset); - - if (!strcmp(catalog->exe_mode, "sw") || - !strcmp(catalog->exe_mode, "all")) { - if (io_data->buf) - memcpy(io_data->buf + offset, &data, sizeof(data)); - } -} - static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux, struct dp_aux_cfg *cfg) { @@ -108,12 +78,13 @@ static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux, catalog = dp_catalog_get_priv_v420(aux); io_data = catalog->io->dp_phy; - dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x67); + dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x67); wmb(); /* make sure PD programming happened */ /* Turn on BIAS current for PHY/PLL */ io_data = catalog->io->dp_pll; - dp_write(catalog, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17); + dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, + 0x17); wmb(); /* make sure BIAS programming happened */ io_data = catalog->io->dp_phy; @@ -122,12 +93,13 @@ static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux, pr_debug("%s: offset=0x%08x, value=0x%08x\n", dp_phy_aux_config_type_to_string(i), cfg[i].offset, 
cfg[i].lut[cfg[i].current_index]); - dp_write(catalog, io_data, cfg[i].offset, + dp_write(catalog->exe_mode, io_data, cfg[i].offset, cfg[i].lut[cfg[i].current_index]); } wmb(); /* make sure DP AUX CFG programming happened */ - dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_MASK_V420, 0x1F); + dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK_V420, + 0x1F); } static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel, @@ -179,9 +151,9 @@ static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel, if (panel->stream_id == DP_STREAM_1) reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420; - pixel_m = dp_read(catalog, io_data, + pixel_m = dp_read(catalog->exe_mode, io_data, MMSS_DP_PIXEL_M_V420 + reg_off); - pixel_n = dp_read(catalog, io_data, + pixel_n = dp_read(catalog->exe_mode, io_data, MMSS_DP_PIXEL_N_V420 + reg_off); pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); @@ -205,8 +177,8 @@ static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel, } pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid); - dp_write(catalog, io_data, DP_SOFTWARE_MVID + mvid_off, mvid); - dp_write(catalog, io_data, DP_SOFTWARE_NVID + nvid_off, nvid); + dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_off, mvid); + dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_off, nvid); } static void dp_catalog_ctrl_phy_lane_cfg_v420(struct dp_catalog_ctrl *ctrl, @@ -229,7 +201,7 @@ static void dp_catalog_ctrl_phy_lane_cfg_v420(struct dp_catalog_ctrl *ctrl, info |= ((orientation & 0x0F) << 4); pr_debug("Shared Info = 0x%x\n", info); - dp_write(catalog, io_data, DP_PHY_SPARE0_V420, info); + dp_write(catalog->exe_mode, io_data, DP_PHY_SPARE0_V420, info); } static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl, @@ -254,12 +226,12 @@ static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl, /* program default setting first */ io_data = catalog->io->dp_ln_tx0; - 
dp_write(catalog, io_data, TXn_TX_DRV_LVL_V420, 0x2A); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, 0x20); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, 0x2A); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20); io_data = catalog->io->dp_ln_tx1; - dp_write(catalog, io_data, TXn_TX_DRV_LVL_V420, 0x2A); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, 0x20); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, 0x2A); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20); /* Enable MUX to use Cursor values from these registers */ value0 |= BIT(5); @@ -268,12 +240,16 @@ static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl, /* Configure host and panel only if both values are allowed */ if (value0 != 0xFF && value1 != 0xFF) { io_data = catalog->io->dp_ln_tx0; - dp_write(catalog, io_data, TXn_TX_DRV_LVL_V420, value0); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, value1); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, + value0); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, + value1); io_data = catalog->io->dp_ln_tx1; - dp_write(catalog, io_data, TXn_TX_DRV_LVL_V420, value0); - dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, value1); + dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, + value0); + dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, + value1); pr_debug("hw: vx_value=0x%x px_value=0x%x\n", value0, value1); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index f20acc3af5e9a56b1160f402f67490ebfa716009..6128222f289ff95cd86fd82a88a8af0de338737b 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -122,19 +122,12 @@ static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state) ctrl->catalog->state_ctrl(ctrl->catalog, state); } -static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl, enum dp_stream_id strm) +static void dp_ctrl_push_idle(struct dp_ctrl_private 
*ctrl, + enum dp_stream_id strm) { - int const idle_pattern_completion_timeout_ms = 3 * HZ / 100; - struct dp_ctrl_private *ctrl; + int const idle_pattern_completion_timeout_ms = HZ / 10; u32 state = 0x0; - if (!dp_ctrl) { - pr_err("Invalid input data\n"); - return; - } - - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); - if (!ctrl->power_on) { pr_err("CTRL off, return\n"); return; @@ -158,7 +151,7 @@ static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl, enum dp_stream_id strm) if (!wait_for_completion_timeout(&ctrl->idle_comp, idle_pattern_completion_timeout_ms)) - pr_warn("PUSH_IDLE time out\n"); + pr_warn("time out\n"); pr_debug("mainlink off done\n"); } @@ -175,7 +168,8 @@ static void dp_ctrl_configure_source_link_params(struct dp_ctrl_private *ctrl, bool enable) { if (enable) { - ctrl->catalog->lane_mapping(ctrl->catalog); + ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation, + ctrl->parser->l_map); ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode); ctrl->catalog->config_ctrl(ctrl->catalog); ctrl->catalog->mainlink_ctrl(ctrl->catalog, true); @@ -301,7 +295,12 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl) tries = 0; old_v_level = ctrl->link->phy_params.v_level; - while (!atomic_read(&ctrl->aborted)) { + while (1) { + if (atomic_read(&ctrl->aborted)) { + ret = -EINVAL; + break; + } + drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd); ret = dp_ctrl_read_link_status(ctrl, link_status); @@ -417,6 +416,11 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl) } do { + if (atomic_read(&ctrl->aborted)) { + ret = -EINVAL; + break; + } + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); ret = dp_ctrl_read_link_status(ctrl, link_status); @@ -439,7 +443,7 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl) ret = -EINVAL; break; } - } while (!atomic_read(&ctrl->aborted)); + } while (1); end: ctrl->aux->state &= ~DP_STATE_TRAIN_2_STARTED; @@ -467,8 +471,10 @@ static int 
dp_ctrl_link_train(struct dp_ctrl_private *ctrl) link_info.capabilities = ctrl->panel->link_info.capabilities; ret = drm_dp_link_configure(ctrl->aux->drm_aux, &link_info); - if (ret) + if (ret) { + pr_err_ratelimited("link_configure failed, rc=%d\n", ret); goto end; + } ret = drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &encoding, 1); @@ -504,7 +510,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train) +static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl) { int ret = 0; @@ -513,9 +519,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train) if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) goto end; - if (!train) - goto end; - /* * As part of previous calls, DP controller state might have * transitioned to PUSH_IDLE. In order to start transmitting a link @@ -548,14 +551,17 @@ static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl, pr_err("%s clock could not be set with rate %d\n", name, rate); } -static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) +static int dp_ctrl_enable_link_clock(struct dp_ctrl_private *ctrl) { int ret = 0; + u32 rate = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code); + enum dp_pm_type type = DP_LINK_PM; + + pr_debug("rate=%d\n", rate); - dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk", DP_CTRL_PM, - drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code)); + dp_ctrl_set_clock_rate(ctrl, "link_clk", type, rate); - ret = ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, true); + ret = ctrl->power->clk_enable(ctrl->power, type, true); if (ret) { pr_err("Unabled to start link clocks\n"); ret = -EINVAL; @@ -564,9 +570,46 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_disable_mainlink_clocks(struct dp_ctrl_private *ctrl) +static void dp_ctrl_disable_link_clock(struct 
dp_ctrl_private *ctrl) +{ + ctrl->power->clk_enable(ctrl->power, DP_LINK_PM, false); +} + +static int dp_ctrl_link_setup(struct dp_ctrl_private *ctrl) { - return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false); + int rc = -EINVAL; + u32 link_train_max_retries = 100; + struct dp_catalog_ctrl *catalog; + struct dp_link_params *link_params; + + catalog = ctrl->catalog; + link_params = &ctrl->link->link_params; + + catalog->hpd_config(catalog, true); + catalog->phy_lane_cfg(catalog, ctrl->orientation, + link_params->lane_count); + + while (--link_train_max_retries || !atomic_read(&ctrl->aborted)) { + pr_debug("bw_code=%d, lane_count=%d\n", + link_params->bw_code, link_params->lane_count); + + dp_ctrl_enable_link_clock(ctrl); + dp_ctrl_configure_source_link_params(ctrl, true); + + rc = dp_ctrl_setup_main_link(ctrl); + if (!rc) + break; + + dp_ctrl_link_rate_down_shift(ctrl); + + dp_ctrl_configure_source_link_params(ctrl, false); + dp_ctrl_disable_link_clock(ctrl); + + /* hw recommended delays before retrying link training */ + msleep(20); + } + + return rc; } static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl, @@ -645,6 +688,7 @@ static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset) catalog->phy_reset(ctrl->catalog); } catalog->enable_irq(ctrl->catalog, true); + atomic_set(&ctrl->aborted, 0); return 0; } @@ -686,51 +730,24 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl *dp_ctrl) if (!ctrl->power_on || atomic_read(&ctrl->aborted)) { pr_err("CTRL off, return\n"); - return -EINVAL; + ret = -EINVAL; + goto end; } ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_COMPLETED; ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_FAILED; ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_STARTED; - ctrl->dp_ctrl.reset(&ctrl->dp_ctrl); - - do { - if (ret == -EAGAIN) { - /* try with lower link rate */ - dp_ctrl_link_rate_down_shift(ctrl); - - dp_ctrl_configure_source_link_params(ctrl, false); - } - - 
ctrl->catalog->phy_lane_cfg(ctrl->catalog, - ctrl->orientation, ctrl->link->link_params.lane_count); - - /* - * Disable and re-enable the mainlink clock since the - * link clock might have been adjusted as part of the - * link maintenance. - */ - dp_ctrl_disable_mainlink_clocks(ctrl); - - ret = dp_ctrl_enable_mainlink_clocks(ctrl); - if (ret) - continue; - - dp_ctrl_configure_source_link_params(ctrl, true); - - reinit_completion(&ctrl->idle_comp); - - ret = dp_ctrl_setup_main_link(ctrl, true); - } while (ret == -EAGAIN); + ctrl->catalog->reset(ctrl->catalog); + dp_ctrl_disable_link_clock(ctrl); + ret = dp_ctrl_link_setup(ctrl); ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_STARTED; - if (ret) ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_FAILED; else ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_COMPLETED; - +end: return ret; } @@ -753,13 +770,13 @@ static void dp_ctrl_process_phy_test_request(struct dp_ctrl *dp_ctrl) pr_debug("start\n"); - ctrl->dp_ctrl.push_idle(&ctrl->dp_ctrl, DP_STREAM_0); /* * The global reset will need DP link ralated clocks to be * running. Add the global reset just before disabling the * link clocks and core clocks. 
*/ - ctrl->dp_ctrl.reset(&ctrl->dp_ctrl); + ctrl->catalog->reset(ctrl->catalog); + ctrl->dp_ctrl.stream_pre_off(&ctrl->dp_ctrl, ctrl->panel); ctrl->dp_ctrl.stream_off(&ctrl->dp_ctrl, ctrl->panel); ctrl->dp_ctrl.off(&ctrl->dp_ctrl); @@ -826,19 +843,6 @@ static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) dp_link_get_phy_test_pattern(pattern_requested)); } -static void dp_ctrl_reset(struct dp_ctrl *dp_ctrl) -{ - struct dp_ctrl_private *ctrl; - - if (!dp_ctrl) { - pr_err("invalid params\n"); - return; - } - - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); - ctrl->catalog->reset(ctrl->catalog); -} - static void dp_ctrl_send_video(struct dp_ctrl_private *ctrl) { ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO); @@ -1052,12 +1056,16 @@ static void dp_ctrl_mst_stream_pre_off(struct dp_ctrl *dp_ctrl, static void dp_ctrl_stream_pre_off(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) { + struct dp_ctrl_private *ctrl; + if (!dp_ctrl || !panel) { pr_err("invalid input\n"); return; } - dp_ctrl_push_idle(dp_ctrl, panel->stream_id); + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + dp_ctrl_push_idle(ctrl, panel->stream_id); dp_ctrl_mst_stream_pre_off(dp_ctrl, panel); } @@ -1081,7 +1089,6 @@ static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode) int rc = 0; struct dp_ctrl_private *ctrl; u32 rate = 0; - u32 link_train_max_retries = 100; if (!dp_ctrl) { rc = -EINVAL; @@ -1090,13 +1097,12 @@ static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode) ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); - atomic_set(&ctrl->aborted, 0); + if (ctrl->power_on) + goto end; ctrl->mst_mode = mst_mode; rate = ctrl->panel->link_info.rate; - ctrl->catalog->hpd_config(ctrl->catalog, true); - if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { pr_debug("using phy test link parameters\n"); } else { @@ -1110,38 +1116,11 @@ static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode) 
ctrl->link->link_params.bw_code, ctrl->link->link_params.lane_count); - ctrl->catalog->phy_lane_cfg(ctrl->catalog, - ctrl->orientation, ctrl->link->link_params.lane_count); - - rc = dp_ctrl_enable_mainlink_clocks(ctrl); + rc = dp_ctrl_link_setup(ctrl); if (rc) goto end; - reinit_completion(&ctrl->idle_comp); - - dp_ctrl_configure_source_link_params(ctrl, true); - - while (--link_train_max_retries && !atomic_read(&ctrl->aborted)) { - rc = dp_ctrl_setup_main_link(ctrl, true); - if (!rc) - break; - - /* try with lower link rate */ - dp_ctrl_link_rate_down_shift(ctrl); - - dp_ctrl_configure_source_link_params(ctrl, false); - - dp_ctrl_disable_mainlink_clocks(ctrl); - /* hw recommended delay before re-enabling clocks */ - msleep(20); - - dp_ctrl_enable_mainlink_clocks(ctrl); - } - ctrl->power_on = true; - - pr_debug("End-\n"); - end: return rc; } @@ -1155,13 +1134,16 @@ static void dp_ctrl_off(struct dp_ctrl *dp_ctrl) ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + if (!ctrl->power_on) + return; + dp_ctrl_configure_source_link_params(ctrl, false); ctrl->catalog->reset(ctrl->catalog); /* Make sure DP is disabled before clk disable */ wmb(); - dp_ctrl_disable_mainlink_clocks(ctrl); + dp_ctrl_disable_link_clock(ctrl); ctrl->mst_mode = false; ctrl->power_on = false; @@ -1249,10 +1231,8 @@ struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in) dp_ctrl->deinit = dp_ctrl_host_deinit; dp_ctrl->on = dp_ctrl_on; dp_ctrl->off = dp_ctrl_off; - dp_ctrl->push_idle = dp_ctrl_push_idle; dp_ctrl->abort = dp_ctrl_abort; dp_ctrl->isr = dp_ctrl_isr; - dp_ctrl->reset = dp_ctrl_reset; dp_ctrl->link_maintenance = dp_ctrl_link_maintenance; dp_ctrl->process_phy_test_request = dp_ctrl_process_phy_test_request; dp_ctrl->stream_on = dp_ctrl_stream_on; diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index 886be1ef12bd517e4662febe9f4a06c79c4daf8f..b1ae96300dade83730d6e42e301dcf705be4a396 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ 
b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -27,8 +27,6 @@ struct dp_ctrl { void (*deinit)(struct dp_ctrl *dp_ctrl); int (*on)(struct dp_ctrl *dp_ctrl, bool mst_mode); void (*off)(struct dp_ctrl *dp_ctrl); - void (*reset)(struct dp_ctrl *dp_ctrl); - void (*push_idle)(struct dp_ctrl *dp_ctrl, enum dp_stream_id strm); void (*abort)(struct dp_ctrl *dp_ctrl); void (*isr)(struct dp_ctrl *dp_ctrl); bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl); diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index 39a5d86a2531c2666286a2b7a505994a59cf3c31..73d2f75527ef11b9332dd0d7f8ff09529b523ec1 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -35,8 +35,6 @@ struct dp_debug_private { u8 *dpcd; u32 dpcd_size; - int vdo; - char exe_mode[SZ_32]; char reg_dump[SZ_32]; @@ -47,7 +45,6 @@ struct dp_debug_private { struct dp_catalog *catalog; struct drm_connector **connector; struct device *dev; - struct work_struct sim_work; struct dp_debug dp_debug; struct dp_parser *parser; }; @@ -120,8 +117,24 @@ static ssize_t dp_debug_write_edid(struct file *file, goto bail; if (edid_size != debug->edid_size) { - pr_debug("clearing debug edid\n"); - goto bail; + pr_debug("realloc debug edid\n"); + + if (debug->edid) { + devm_kfree(debug->dev, debug->edid); + + debug->edid = devm_kzalloc(debug->dev, + edid_size, GFP_KERNEL); + if (!debug->edid) { + rc = -ENOMEM; + goto bail; + } + + debug->edid_size = edid_size; + + debug->aux->set_sim_mode(debug->aux, + debug->dp_debug.sim_mode, + debug->edid, debug->dpcd); + } } while (edid_size--) { @@ -1068,9 +1081,7 @@ static ssize_t dp_debug_write_attention(struct file *file, if (kstrtoint(buf, 10, &vdo) != 0) goto end; - debug->vdo = vdo; - - schedule_work(&debug->sim_work); + debug->hpd->simulate_attention(debug->hpd, vdo); end: return len; } @@ -1371,6 +1382,7 @@ static int dp_debug_init(struct dp_debug *dp_debug) rc = PTR_ERR(file); pr_err("[%s] debugfs max_bw_code failed, rc=%d\n", 
DEBUG_NAME, rc); + goto error_remove_dir; } file = debugfs_create_file("mst_sideband_mode", 0644, dir, @@ -1379,6 +1391,7 @@ static int dp_debug_init(struct dp_debug *dp_debug) rc = PTR_ERR(file); pr_err("[%s] debugfs max_bw_code failed, rc=%d\n", DEBUG_NAME, rc); + goto error_remove_dir; } file = debugfs_create_file("max_pclk_khz", 0644, dir, @@ -1387,6 +1400,16 @@ static int dp_debug_init(struct dp_debug *dp_debug) rc = PTR_ERR(file); pr_err("[%s] debugfs max_pclk_khz failed, rc=%d\n", DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_bool("force_encryption", 0644, dir, + &debug->dp_debug.force_encryption); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + pr_err("[%s] debugfs force_encryption failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; } return 0; @@ -1399,14 +1422,6 @@ static int dp_debug_init(struct dp_debug *dp_debug) return rc; } -static void dp_debug_sim_work(struct work_struct *work) -{ - struct dp_debug_private *debug = - container_of(work, typeof(*debug), sim_work); - - debug->hpd->simulate_attention(debug->hpd, debug->vdo); -} - u8 *dp_debug_get_edid(struct dp_debug *dp_debug) { struct dp_debug_private *debug; @@ -1419,39 +1434,33 @@ u8 *dp_debug_get_edid(struct dp_debug *dp_debug) return debug->edid; } -struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, - struct dp_hpd *hpd, struct dp_link *link, - struct dp_aux *aux, struct drm_connector **connector, - struct dp_catalog *catalog, - struct dp_parser *parser) +struct dp_debug *dp_debug_get(struct dp_debug_in *in) { int rc = 0; struct dp_debug_private *debug; struct dp_debug *dp_debug; - if (!dev || !panel || !hpd || !link || !catalog) { + if (!in->dev || !in->panel || !in->hpd || !in->link || !in->catalog) { pr_err("invalid input\n"); rc = -EINVAL; goto error; } - debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL); + debug = devm_kzalloc(in->dev, sizeof(*debug), GFP_KERNEL); if (!debug) { rc = -ENOMEM; goto error; } - 
INIT_WORK(&debug->sim_work, dp_debug_sim_work); - debug->dp_debug.debug_en = false; - debug->hpd = hpd; - debug->link = link; - debug->panel = panel; - debug->aux = aux; - debug->dev = dev; - debug->connector = connector; - debug->catalog = catalog; - debug->parser = parser; + debug->hpd = in->hpd; + debug->link = in->link; + debug->panel = in->panel; + debug->aux = in->aux; + debug->dev = in->dev; + debug->connector = in->connector; + debug->catalog = in->catalog; + debug->parser = in->parser; dp_debug = &debug->dp_debug; dp_debug->vdisplay = 0; @@ -1460,7 +1469,7 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, rc = dp_debug_init(dp_debug); if (rc) { - devm_kfree(dev, debug); + devm_kfree(in->dev, debug); goto error; } diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index c304c27fbe5f758179726c14d4e9705dfad89f1c..2ca6824e4653d66561a5a7d788bb2cef3fe88be8 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -28,6 +28,7 @@ * @vrefresh: used to filter out vrefresh value * @tpg_state: specifies whether tpg feature is enabled * @max_pclk_khz: max pclk supported + * @force_encryption: enable/disable forced encryption for HDCP 2.2 */ struct dp_debug { bool debug_en; @@ -39,30 +40,44 @@ struct dp_debug { int vrefresh; bool tpg_state; u32 max_pclk_khz; + bool force_encryption; u8 *(*get_edid)(struct dp_debug *dp_debug); }; /** - * dp_debug_get() - configure and get the DisplayPlot debug module data - * + * struct dp_debug_in * @dev: device instance of the caller * @panel: instance of panel module * @hpd: instance of hpd module * @link: instance of link module + * @aux: instance of aux module * @connector: double pointer to display connector * @catalog: instance of catalog module * @parser: instance of parser module + */ +struct dp_debug_in { + struct device *dev; + struct dp_panel *panel; + struct dp_hpd *hpd; + struct dp_link *link; + struct dp_aux *aux; + struct 
drm_connector **connector; + struct dp_catalog *catalog; + struct dp_parser *parser; +}; + +/** + * dp_debug_get() - configure and get the DisplayPlot debug module data + * + * @in: input structure containing data to initialize the debug module * return: pointer to allocated debug module data * * This function sets up the debug module and provides a way * for debugfs input to be communicated with existing modules */ -struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, - struct dp_hpd *hpd, struct dp_link *link, - struct dp_aux *aux, struct drm_connector **connector, - struct dp_catalog *catalog, - struct dp_parser *parser); +struct dp_debug *dp_debug_get(struct dp_debug_in *in); + /** * dp_debug_put() * diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index dbafaa28aeb973fc50754b22c56e942e4aee76e1..d025a461dcb354d306d27432d9358a6fae46af33 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -67,6 +67,7 @@ struct dp_display_private { /* state variables */ bool core_initialized; bool power_on; + bool is_connected; atomic_t aborted; @@ -74,7 +75,6 @@ struct dp_display_private { struct device_node *aux_switch_node; struct dentry *root; struct completion notification_comp; - struct completion disconnect_comp; struct dp_hpd *hpd; struct dp_parser *parser; @@ -144,12 +144,31 @@ static irqreturn_t dp_display_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static bool dp_display_is_ds_bridge(struct dp_panel *panel) +{ + return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT); +} + +static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) +{ + return dp_display_is_ds_bridge(dp->panel) && + (dp->link->sink_count.count == 0); +} + +static bool dp_display_is_ready(struct dp_display_private *dp) +{ + return dp->hpd->hpd_high && dp->is_connected && + !dp_display_is_sink_count_zero(dp) && + dp->hpd->alt_mode_cfg_done; +} static void 
dp_display_hdcp_cb_work(struct work_struct *work) { struct dp_display_private *dp; struct delayed_work *dw = to_delayed_work(work); struct sde_hdcp_ops *ops; + void *data; int rc = 0; u32 hdcp_auth_state; @@ -162,20 +181,24 @@ static void dp_display_hdcp_cb_work(struct work_struct *work) } ops = dp->hdcp.ops; + data = dp->hdcp.data; pr_debug("%s: %s\n", sde_hdcp_version(dp->link->hdcp_status.hdcp_version), sde_hdcp_state_name(dp->link->hdcp_status.hdcp_state)); + if (dp->debug->force_encryption && ops && ops->force_encryption) + ops->force_encryption(data, dp->debug->force_encryption); + switch (dp->link->hdcp_status.hdcp_state) { case HDCP_STATE_AUTHENTICATING: if (dp->hdcp.ops && dp->hdcp.ops->authenticate) - rc = dp->hdcp.ops->authenticate(dp->hdcp.data); + rc = dp->hdcp.ops->authenticate(data); break; case HDCP_STATE_AUTH_FAIL: - if (dp->power_on) { + if (dp_display_is_ready(dp) && dp->power_on) { if (ops && ops->reauthenticate) { - rc = ops->reauthenticate(dp->hdcp.data); + rc = ops->reauthenticate(data); if (rc) pr_err("failed rc=%d\n", rc); } @@ -200,7 +223,7 @@ static void dp_display_notify_hdcp_status_cb(void *ptr, dp->link->hdcp_status.hdcp_state = state; - if (dp->dp_display.is_connected) + if (dp->is_connected) queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4); } @@ -390,18 +413,6 @@ static const struct component_ops dp_display_comp_ops = { .unbind = dp_display_unbind, }; -static bool dp_display_is_ds_bridge(struct dp_panel *panel) -{ - return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT); -} - -static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) -{ - return dp_display_is_ds_bridge(dp->panel) && - (dp->link->sink_count.count == 0); -} - static void dp_display_send_hpd_event(struct dp_display_private *dp) { struct drm_device *dev = NULL; @@ -475,23 +486,27 @@ static void dp_display_post_open(struct dp_display *dp_display) dp_display->post_open = NULL; } -static int dp_display_send_hpd_notification(struct 
dp_display_private *dp, - bool hpd) +static int dp_display_send_hpd_notification(struct dp_display_private *dp) { u32 timeout_sec; int ret = 0; + bool hpd = dp->is_connected; - dp->dp_display.is_connected = hpd; - - if (dp_display_framework_ready(dp)) + if (dp_display_framework_ready(dp)) timeout_sec = 5; else timeout_sec = 10; dp->aux->state |= DP_STATE_NOTIFICATION_SENT; - if (!dp->mst.mst_active) + if (!dp->mst.mst_active) { + if (dp->dp_display.is_sst_connected == hpd) { + pr_debug("SKIPPED:hpd:%d\n", hpd); + goto skip_wait; + } + dp->dp_display.is_sst_connected = hpd; + } reinit_completion(&dp->notification_comp); dp_display_send_hpd_event(dp); @@ -504,11 +519,9 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, pr_warn("%s timeout\n", hpd ? "connect" : "disconnect"); ret = -EINVAL; } - -skip_wait: - dp->aux->state &= ~DP_STATE_NOTIFICATION_SENT; - return ret; +skip_wait: + return 0; } static void dp_display_process_mst_hpd_high(struct dp_display_private *dp) @@ -552,10 +565,56 @@ static void dp_display_host_init(struct dp_display_private *dp) dp->power->init(dp->power, flip); dp->ctrl->init(dp->ctrl, flip, reset); + dp->aux->init(dp->aux, dp->parser->aux_cfg); enable_irq(dp->irq); dp->core_initialized = true; } +static int dp_display_update_pclk(struct dp_display_private *dp) +{ + int rc = 0; + u32 rate, max_pclk_khz; + u32 const enc_factx10 = 8; + u32 const default_bpp = 30; + + if (dp->debug->max_pclk_khz) { + dp->dp_display.max_pclk_khz = dp->debug->max_pclk_khz; + goto end; + } + + rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code); + rate /= default_bpp; + + max_pclk_khz = dp->link->link_params.lane_count * rate * enc_factx10; + + dp->dp_display.max_pclk_khz = min(dp->parser->max_pclk_khz, + max_pclk_khz); + + pr_debug("dp max_pclk_khz = %d\n", dp->dp_display.max_pclk_khz); +end: + return rc; +} + +static void dp_display_host_deinit(struct dp_display_private *dp) +{ + if (!dp->core_initialized) { + pr_debug("DP 
core already off\n"); + return; + } + + if (dp->active_stream_cnt) { + pr_debug("active stream present\n"); + return; + } + + dp->aux->deinit(dp->aux); + dp->ctrl->deinit(dp->ctrl); + dp->power->deinit(dp->power); + disable_irq(dp->irq); + dp->core_initialized = false; + dp->aux->state = 0; +} + static int dp_display_process_hpd_high(struct dp_display_private *dp) { int rc = 0; @@ -566,9 +625,9 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) goto end; } - dp_display_host_init(dp); + dp->is_connected = true; - dp->aux->init(dp->aux, dp->parser->aux_cfg); + dp_display_host_init(dp); if (dp->debug->psm_enabled) { dp->link->psm_config(dp->link, &dp->panel->link_info, false); @@ -576,51 +635,39 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) } if (!dp->dp_display.base_connector) - return 0; + goto end; rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.base_connector, dp->hpd->multi_func); - if (rc) { - /* - * ETIMEDOUT --> cable may have been removed - * ENOTCONN --> no downstream device connected - */ - if (rc == -ETIMEDOUT || rc == -ENOTCONN) - goto end; - else - goto notify; - } + /* + * ETIMEDOUT --> cable may have been removed + * ENOTCONN --> no downstream device connected + */ + if (rc == -ETIMEDOUT || rc == -ENOTCONN) + goto end; dp->link->process_request(dp->link); dp->panel->handle_sink_request(dp->panel); - if (dp->debug->max_pclk_khz) - dp->dp_display.max_pclk_khz = dp->debug->max_pclk_khz; - else - dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz; - - pr_debug("dp max_pclk_khz = %d\n", dp->dp_display.max_pclk_khz); - dp_display_process_mst_hpd_high(dp); -notify: - dp_display_send_hpd_notification(dp, true); -end: - return rc; -} + mutex_lock(&dp->session_lock); + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active); + if (rc) { + mutex_unlock(&dp->session_lock); + goto end; + } -static void dp_display_host_deinit(struct dp_display_private *dp) -{ - if (!dp->core_initialized) { - pr_debug("DP core 
already off\n"); - return; + rc = dp_display_update_pclk(dp); + if (rc) { + mutex_unlock(&dp->session_lock); + goto end; } + mutex_unlock(&dp->session_lock); - dp->ctrl->deinit(dp->ctrl); - dp->power->deinit(dp->power); - disable_irq(dp->irq); - dp->core_initialized = false; - dp->aux->state = 0; + dp_display_send_hpd_notification(dp); +end: + return rc; } static void dp_display_process_mst_hpd_low(struct dp_display_private *dp) @@ -648,11 +695,7 @@ static int dp_display_process_hpd_low(struct dp_display_private *dp) mutex_lock(&dp->session_lock); - if (!dp->dp_display.is_connected) { - pr_debug("HPD already off\n"); - mutex_unlock(&dp->session_lock); - return 0; - } + dp->is_connected = false; if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->off) dp->hdcp.ops->off(dp->hdcp.data); @@ -673,7 +716,12 @@ static int dp_display_process_hpd_low(struct dp_display_private *dp) dp_display_process_mst_hpd_low(dp); - rc = dp_display_send_hpd_notification(dp, false); + rc = dp_display_send_hpd_notification(dp); + + mutex_lock(&dp->session_lock); + if (!dp->active_stream_cnt) + dp->ctrl->off(dp->ctrl); + mutex_unlock(&dp->session_lock); dp->panel->video_test = false; @@ -699,7 +747,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev) } /* check for hpd high and framework ready */ - if (dp->hpd->hpd_high && dp_display_framework_ready(dp)) + if (dp->hpd->hpd_high && dp_display_framework_ready(dp)) queue_delayed_work(dp->wq, &dp->connect_work, 0); end: return rc; @@ -707,6 +755,9 @@ static int dp_display_usbpd_configure_cb(struct device *dev) static void dp_display_clean(struct dp_display_private *dp) { + int idx; + struct dp_panel *dp_panel; + if (dp_display_is_hdcp_enabled(dp)) { dp->link->hdcp_status.hdcp_state = HDCP_STATE_INACTIVE; @@ -715,10 +766,16 @@ static void dp_display_clean(struct dp_display_private *dp) dp->hdcp.ops->off(dp->hdcp.data); } - dp->ctrl->push_idle(dp->ctrl, DP_STREAM_0); - dp->ctrl->off(dp->ctrl); - dp->panel->deinit(dp->panel); - 
dp->aux->deinit(dp->aux); + for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) { + if (!dp->active_panels[idx]) + continue; + + dp_panel = dp->active_panels[idx]; + + dp->ctrl->stream_pre_off(dp->ctrl, dp_panel); + dp->ctrl->stream_off(dp->ctrl, dp_panel); + } + dp->power_on = false; } @@ -737,17 +794,8 @@ static int dp_display_handle_disconnect(struct dp_display_private *dp) if (rc && dp->power_on) dp_display_clean(dp); - /* - * De-initialize the display core only if the display controller has - * been turned off either through the DRM bridge disable call or - * through the error handling code. This ensures that the power - * resource vote is still present in cases when the bridge disable is - * delayed. - */ - if (!dp->power_on && !dp->hpd->alt_mode_cfg_done) - dp_display_host_deinit(dp); + dp_display_host_deinit(dp); - complete_all(&dp->disconnect_comp); mutex_unlock(&dp->session_lock); return rc; @@ -791,11 +839,11 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev) cancel_work(&dp->attention_work); flush_workqueue(dp->wq); - dp_display_handle_disconnect(dp); - if (!dp->debug->sim_mode && !dp->parser->no_aux_switch) dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE); + dp_display_handle_disconnect(dp); + /* Reset abort value to allow future connections */ atomic_set(&dp->aborted, 0); @@ -857,25 +905,16 @@ static void dp_display_attention_work(struct work_struct *work) { struct dp_display_private *dp = container_of(work, struct dp_display_private, attention_work); - bool is_sink_cnt_zero; - - if (!dp->power_on) - goto mst_attention; if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) { - is_sink_cnt_zero = dp_display_is_sink_count_zero(dp); - - if (dp->mst.mst_active && !is_sink_cnt_zero) - goto mst_attention; - - dp_display_handle_disconnect(dp); - - if (is_sink_cnt_zero) { - pr_debug("sink count is zero, nothing to do\n"); - goto mst_attention; + if (dp_display_is_sink_count_zero(dp)) { + dp_display_handle_disconnect(dp); + } else { + if 
(!dp->mst.mst_active) + queue_delayed_work(dp->wq, + &dp->connect_work, 0); } - queue_delayed_work(dp->wq, &dp->connect_work, 0); goto mst_attention; } @@ -883,8 +922,7 @@ static void dp_display_attention_work(struct work_struct *work) dp_display_handle_disconnect(dp); dp->panel->video_test = true; - dp_display_send_hpd_notification(dp, true); - dp->link->send_test_response(dp->link); + queue_delayed_work(dp->wq, &dp->connect_work, 0); goto mst_attention; } @@ -924,28 +962,11 @@ static int dp_display_usbpd_attention_cb(struct device *dev) return -ENODEV; } - /* check if framework is ready */ - if (!dp_display_framework_ready(dp)) { - pr_err("framework not ready\n"); - return -ENODEV; - } - DP_MST_DEBUG("mst: hpd_irq:%d, hpd_high:%d, power_on:%d\n", dp->hpd->hpd_irq, dp->hpd->hpd_high, dp->power_on); - if (dp->hpd->hpd_irq && dp->hpd->hpd_high && - dp->power_on) { - dp->link->process_request(dp->link); - queue_work(dp->wq, &dp->attention_work); - } else if (dp->hpd->hpd_high && dp->hpd->hpd_irq) { - queue_delayed_work(dp->wq, &dp->connect_work, 0); - queue_work(dp->wq, &dp->attention_work); - } else if (dp->hpd->hpd_high) { - queue_delayed_work(dp->wq, &dp->connect_work, 0); - } else if (dp->hpd->hpd_irq) { - queue_work(dp->wq, &dp->attention_work); - } else { + if (!dp->hpd->hpd_high) { /* cancel any pending request */ atomic_set(&dp->aborted, 1); dp->ctrl->abort(dp->ctrl); @@ -958,6 +979,11 @@ static int dp_display_usbpd_attention_cb(struct device *dev) dp_display_handle_disconnect(dp); atomic_set(&dp->aborted, 0); + } else if (dp->hpd->hpd_irq && dp->core_initialized) { + dp->link->process_request(dp->link); + queue_work(dp->wq, &dp->attention_work); + } else { + queue_delayed_work(dp->wq, &dp->connect_work, 0); } return 0; @@ -965,35 +991,10 @@ static int dp_display_usbpd_attention_cb(struct device *dev) static void dp_display_connect_work(struct work_struct *work) { + int rc = 0; struct delayed_work *dw = to_delayed_work(work); struct dp_display_private *dp = 
container_of(dw, struct dp_display_private, connect_work); - u32 const disconnect_timeout_sec = 20; - - - if (dp->dp_display.is_connected && dp_display_framework_ready(dp)) { - pr_debug("HPD already on\n"); - return; - } - - if (atomic_read(&dp->aborted)) { - /* - * If we receive a connection event while processing a - * disconnect event from the previous session then we have to - * wait until that disconnect event completes or expires. We - * give the highest priority to handling disconnect events - * since they represent a state change triggered by a physical - * removal of the cable/plug. - */ - pr_warn("disconnect pending, waiting for %d sec\n", - disconnect_timeout_sec); - reinit_completion(&dp->disconnect_comp); - if (!wait_for_completion_timeout(&dp->disconnect_comp, - HZ * disconnect_timeout_sec)) { - pr_warn("disconnect completion timeout\n"); - return; - } - } if (atomic_read(&dp->aborted)) { pr_warn("HPD off requested\n"); @@ -1005,7 +1006,10 @@ static void dp_display_connect_work(struct work_struct *work) return; } - dp_display_process_hpd_high(dp); + rc = dp_display_process_hpd_high(dp); + + if (!rc && dp->panel->video_test) + dp->link->send_test_response(dp->link); } static void dp_display_deinit_sub_modules(struct dp_display_private *dp) @@ -1034,6 +1038,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp) struct dp_panel_in panel_in = { .dev = dev, }; + struct dp_debug_in debug_in = { + .dev = dev, + }; mutex_init(&dp->session_lock); @@ -1149,11 +1156,17 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_hpd; } - dp->debug = dp_debug_get(dev, dp->panel, dp->hpd, - dp->link, dp->aux, - &dp->dp_display.base_connector, - dp->catalog, dp->parser); + dp_display_initialize_hdcp(dp); + + debug_in.panel = dp->panel; + debug_in.hpd = dp->hpd; + debug_in.link = dp->link; + debug_in.aux = dp->aux; + debug_in.connector = &dp->dp_display.base_connector; + debug_in.catalog = dp->catalog; + debug_in.parser = dp->parser; + 
dp->debug = dp_debug_get(&debug_in); if (IS_ERR(dp->debug)) { rc = PTR_ERR(dp->debug); pr_err("failed to initialize debug, rc = %d\n", rc); @@ -1207,8 +1220,6 @@ static int dp_display_post_init(struct dp_display *dp_display) if (rc) goto end; - dp_display_initialize_hdcp(dp); - dp_display->post_init = NULL; end: pr_debug("%s\n", rc ? "failed" : "success"); @@ -1255,6 +1266,7 @@ static int dp_display_prepare(struct dp_display *dp_display, void *panel) { struct dp_display_private *dp; struct dp_panel *dp_panel; + int rc = 0; if (!dp_display || !panel) { pr_err("invalid input\n"); @@ -1277,7 +1289,14 @@ static int dp_display_prepare(struct dp_display *dp_display, void *panel) if (dp->power_on) goto end; - dp->aux->init(dp->aux, dp->parser->aux_cfg); + if (!dp_display_is_ready(dp)) + goto end; + + dp_display_host_init(dp); + + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active); + if (rc) + goto end; if (dp->debug->psm_enabled) { dp->link->psm_config(dp->link, &dp->panel->link_info, false); @@ -1367,29 +1386,21 @@ static int dp_display_enable(struct dp_display *dp_display, void *panel) mutex_lock(&dp->session_lock); - if (dp->power_on) { - pr_debug("Link already setup, perform setup stream\n"); - goto stream_setup; - } - if (atomic_read(&dp->aborted)) { pr_err("aborted\n"); goto end; } - rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active); - if (rc) + if (!dp_display_is_ready(dp) || !dp->core_initialized) { + pr_err("display not ready\n"); goto end; + } - dp->power_on = true; - -stream_setup: rc = dp_display_stream_enable(dp, panel); - if (rc && (dp->active_stream_cnt == 0)) { - dp->ctrl->off(dp->ctrl); - dp->power_on = false; - } + if (rc) + goto end; + dp->power_on = true; end: mutex_unlock(&dp->session_lock); return rc; @@ -1421,7 +1432,7 @@ static int dp_display_post_enable(struct dp_display *dp_display, void *panel) mutex_lock(&dp->session_lock); if (!dp->power_on) { - pr_debug("Link not setup, return\n"); + pr_debug("stream not setup, return\n"); goto end; } @@ -1430,6 
+1441,11 @@ static int dp_display_post_enable(struct dp_display *dp_display, void *panel) goto end; } + if (!dp_display_is_ready(dp) || !dp->core_initialized) { + pr_err("display not ready\n"); + goto end; + } + dp_display_stream_post_enable(dp, dp_panel); edid = dp_panel->edid_ctrl->edid; @@ -1485,7 +1501,7 @@ static int dp_display_pre_disable(struct dp_display *dp_display, void *panel) mutex_lock(&dp->session_lock); if (!dp->power_on) { - pr_debug("Link already powered off, return\n"); + pr_debug("stream already powered off, return\n"); goto end; } @@ -1502,8 +1518,7 @@ static int dp_display_pre_disable(struct dp_display *dp_display, void *panel) rc = dp_display_stream_pre_disable(dp, dp_panel); - if (dp->hpd->hpd_high && !dp_display_is_sink_count_zero(dp) && - dp->hpd->alt_mode_cfg_done && !dp->mst.mst_active) { + if (dp_display_is_ready(dp) && !dp->mst.mst_active) { dp->link->psm_config(dp->link, &dp->panel->link_info, true); dp->debug->psm_enabled = true; } @@ -1556,17 +1571,17 @@ static int dp_display_disable(struct dp_display *dp_display, void *panel) goto end; } - dp->ctrl->off(dp->ctrl); - /* * In case of framework reboot, the DP off sequence is executed without * any notification from driver. Initialize post_open callback to notify * DP connection once framework restarts. 
*/ - if (dp->hpd->hpd_high && !dp_display_is_sink_count_zero(dp) && - dp->hpd->alt_mode_cfg_done && !dp->mst.mst_active) { + if (dp_display_is_ready(dp) && !dp->mst.mst_active) { dp_display->post_open = dp_display_post_open; dp->dp_display.is_sst_connected = false; + + dp->ctrl->off(dp->ctrl); + dp_display_host_deinit(dp); } dp->power_on = false; @@ -1637,10 +1652,7 @@ static int dp_display_unprepare(struct dp_display *dp_display, void *panel) if (dp->active_stream_cnt) goto end; - if (!dp->mst.mst_active) { - dp->aux->deinit(dp->aux); - dp->aux->state = DP_STATE_CTRL_POWERED_OFF; - } + dp->aux->state = DP_STATE_CTRL_POWERED_OFF; complete_all(&dp->notification_comp); @@ -1650,46 +1662,70 @@ static int dp_display_unprepare(struct dp_display *dp_display, void *panel) return 0; } -static int dp_display_validate_mode(struct dp_display *dp, void *panel, - u32 mode_pclk_khz) +static enum drm_mode_status dp_display_validate_mode( + struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode) { const u32 num_components = 3, default_bpp = 24; - struct dp_display_private *dp_display; + struct dp_display_private *dp; struct drm_dp_link *link_info; u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; struct dp_panel *dp_panel; + struct dp_debug *debug; + enum drm_mode_status mode_status = MODE_BAD; - if (!dp || !mode_pclk_khz || !panel) { + if (!dp_display || !mode || !panel) { pr_err("invalid params\n"); - return -EINVAL; + return mode_status; } + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + dp_panel = panel; if (!dp_panel->connector) { pr_err("invalid connector\n"); - return -EINVAL; + goto end; } - dp_display = container_of(dp, struct dp_display_private, dp_display); - link_info = &dp_display->panel->link_info; + link_info = &dp->panel->link_info; + + debug = dp->debug; + if (!debug) + goto end; mode_bpp = dp_panel->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = 
default_bpp; - mode_bpp = dp_panel->get_mode_bpp(dp_panel, mode_bpp, mode_pclk_khz); + mode_bpp = dp_panel->get_mode_bpp(dp_panel, mode_bpp, mode->clock); - mode_rate_khz = mode_pclk_khz * mode_bpp; + mode_rate_khz = mode->clock * mode_bpp; supported_rate_khz = link_info->num_lanes * link_info->rate * 8; if (mode_rate_khz > supported_rate_khz) { DP_MST_DEBUG("pclk:%d, supported_rate:%d\n", - mode_pclk_khz, supported_rate_khz); + mode->clock, supported_rate_khz); + goto end; + } - return MODE_BAD; + if (mode->clock > dp_display->max_pclk_khz) { + DP_MST_DEBUG("clk:%d, max:%d\n", mode->clock, + dp_display->max_pclk_khz); + goto end; } - return MODE_OK; + if (debug->debug_en && (mode->hdisplay != debug->hdisplay || + mode->vdisplay != debug->vdisplay || + mode->vrefresh != debug->vrefresh || + mode->picture_aspect_ratio != debug->aspect_ratio)) + goto end; + + mode_status = MODE_OK; +end: + mutex_unlock(&dp->session_lock); + return mode_status; } static int dp_display_get_modes(struct dp_display *dp, void *panel, @@ -2034,7 +2070,6 @@ static int dp_display_probe(struct platform_device *pdev) } init_completion(&dp->notification_comp); - init_completion(&dp->disconnect_comp); dp->pdev = pdev; dp->name = "drm_dp"; diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 4e66f99e68b9745834158a7e0a3d604b4eb051aa..0c293a70d560a5443ada0c2db4a1c859a4796e54 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -48,7 +48,6 @@ struct dp_display { struct dp_bridge *bridge; struct drm_connector *base_connector; void *base_dp_panel; - bool is_connected; bool is_sst_connected; u32 max_pclk_khz; void *dp_mst_prv_info; @@ -61,8 +60,8 @@ struct dp_display { int (*set_mode)(struct dp_display *dp_display, void *panel, struct dp_display_mode *mode); - int (*validate_mode)(struct dp_display *dp_display, void *panel, - u32 mode_pclk_khz); + enum drm_mode_status (*validate_mode)(struct dp_display *dp_display, + void 
*panel, struct drm_display_mode *mode); int (*get_modes)(struct dp_display *dp_display, void *panel, struct dp_display_mode *dp_mode); int (*prepare)(struct dp_display *dp_display, void *panel); diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 416786cd69c6ab263ee2634f9dd8ea257d6f0bc4..563b0e0f7ba716658574f0655c8f6eb6d7bf92dd 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -568,11 +568,9 @@ void dp_drm_bridge_deinit(void *data) } enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode, - void *display) + struct drm_display_mode *mode, void *display) { struct dp_display *dp_disp; - struct dp_debug *debug; struct sde_connector *sde_conn; if (!mode || !display || !connector) { @@ -587,22 +585,7 @@ enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector, } dp_disp = display; - debug = dp_disp->get_debug(dp_disp); - mode->vrefresh = drm_mode_vrefresh(mode); - if (mode->clock > dp_disp->max_pclk_khz) { - DP_MST_DEBUG("clk:%d, max:%d\n", mode->clock, - dp_disp->max_pclk_khz); - return MODE_BAD; - } - - if (debug->debug_en && (mode->hdisplay != debug->hdisplay || - mode->vdisplay != debug->vdisplay || - mode->vrefresh != debug->vrefresh || - mode->picture_aspect_ratio != debug->aspect_ratio)) - return MODE_BAD; - - return dp_disp->validate_mode(dp_disp, sde_conn->drv_panel, - mode->clock); + return dp_disp->validate_mode(dp_disp, sde_conn->drv_panel, mode); } diff --git a/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c b/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c index c21090774fccb10f955c3b1889b7b3667da05f1a..7e4ce42c1d0c13bb66e87cee6d4ee24e1475720f 100644 --- a/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c +++ b/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c @@ -246,7 +246,9 @@ struct dp_hpd *dp_gpio_hpd_get(struct device *dev, gpio_hpd->base.simulate_connect = dp_gpio_hpd_simulate_connect; gpio_hpd->base.simulate_attention = dp_gpio_hpd_simulate_attention; - 
gpio_hpd->base.hpd_high = gpio_hpd->hpd; + + if (gpio_hpd->hpd) + queue_delayed_work(system_wq, &gpio_hpd->work, 0); return &gpio_hpd->base; diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c index 456ce8ddc166c5d79580c18a6362c892aadff3d2..0b2f798c1258b8dde6fb72820faee9feba1ec1fd 100644 --- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c +++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c @@ -438,6 +438,26 @@ static bool dp_hdcp2p2_feature_supported(void *input) return supported; } +static void dp_hdcp2p2_force_encryption(void *data, bool enable) +{ + struct dp_hdcp2p2_ctrl *ctrl = data; + struct sde_hdcp_2x_ops *lib = NULL; + + if (!ctrl) { + pr_err("invalid input\n"); + return; + } + + lib = ctrl->lib; + if (!lib) { + pr_err("invalid lib ops data\n"); + return; + } + + if (lib->force_encryption) + lib->force_encryption(ctrl->lib_ctx, enable); +} + static void dp_hdcp2p2_send_msg_work(struct kthread_work *work) { int rc = 0; @@ -788,6 +808,7 @@ void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data) .reauthenticate = dp_hdcp2p2_reauthenticate, .authenticate = dp_hdcp2p2_authenticate, .feature_supported = dp_hdcp2p2_feature_supported, + .force_encryption = dp_hdcp2p2_force_encryption, .off = dp_hdcp2p2_off, .cp_irq = dp_hdcp2p2_cp_irq, }; diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 7183db6801025a358f2778484d0d77a3b40e0838..6cef92d02417f94858306f7418cf652a191f0c7e 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -1206,6 +1206,7 @@ static int dp_panel_set_stream_info(struct dp_panel *dp_panel, static int dp_panel_init_panel_info(struct dp_panel *dp_panel) { int rc = 0; + struct dp_panel_private *panel; struct dp_panel_info *pinfo; if (!dp_panel) { @@ -1214,27 +1215,22 @@ static int dp_panel_init_panel_info(struct dp_panel *dp_panel) goto end; } + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); pinfo = &dp_panel->pinfo; /* * print resolution 
info as this is a result * of user initiated action of cable connection */ - pr_info("SET NEW RESOLUTION:\n"); - pr_info("%dx%d@%dfps\n", pinfo->h_active, - pinfo->v_active, pinfo->refresh_rate); - pr_info("h_porches(back|front|width) = (%d|%d|%d)\n", - pinfo->h_back_porch, - pinfo->h_front_porch, - pinfo->h_sync_width); - pr_info("v_porches(back|front|width) = (%d|%d|%d)\n", - pinfo->v_back_porch, - pinfo->v_front_porch, - pinfo->v_sync_width); - pr_info("pixel clock (KHz)=(%d)\n", pinfo->pixel_clk_khz); - pr_info("bpp = %d\n", pinfo->bpp); - pr_info("active low (h|v)=(%d|%d)\n", pinfo->h_active_low, - pinfo->v_active_low); + pr_info("DP RESOLUTION: active(back|front|width|low)\n"); + pr_info("%d(%d|%d|%d|%d)x%d(%d|%d|%d|%d)@%dfps %dbpp %dKhz %dLR %dLn\n", + pinfo->h_active, pinfo->h_back_porch, pinfo->h_front_porch, + pinfo->h_sync_width, pinfo->h_active_low, + pinfo->v_active, pinfo->v_back_porch, pinfo->v_front_porch, + pinfo->v_sync_width, pinfo->v_active_low, + pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz, + panel->link->link_params.bw_code, + panel->link->link_params.lane_count); end: return rc; } @@ -1271,6 +1267,7 @@ static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel) connector->hdr_max_luminance = 0; connector->hdr_avg_luminance = 0; connector->hdr_min_luminance = 0; + connector->hdr_supported = false; memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta)); diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c index a0e62c6d7afa21cd33a5ea1401bbdb0010e2ef8d..3d688d594d1540687f1a64d9a719aeca5ebce513 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.c +++ b/drivers/gpu/drm/msm/dp/dp_parser.c @@ -149,9 +149,17 @@ static int dp_parser_aux(struct dp_parser *parser) static int dp_parser_misc(struct dp_parser *parser) { - int rc = 0; + int rc = 0, len = 0, i = 0; + const char *data = NULL; + struct device_node *of_node = parser->pdev->dev.of_node; + data = of_get_property(of_node, 
"qcom,logical2physical-lane-map", &len); + if (data && (len == DP_MAX_PHY_LN)) { + for (i = 0; i < len; i++) + parser->l_map[i] = data[i]; + } + rc = of_property_read_u32(of_node, "qcom,max-pclk-frequency-khz", &parser->max_pclk_khz); if (rc) @@ -245,8 +253,8 @@ static int dp_parser_gpio(struct dp_parser *parser) dp_gpios[i], 0); if (!gpio_is_valid(mp->gpio_config[i].gpio)) { - pr_err("%s gpio not specified\n", dp_gpios[i]); - return -EINVAL; + pr_debug("%s gpio not specified\n", dp_gpios[i]); + continue; } strlcpy(mp->gpio_config[i].gpio_name, dp_gpios[i], @@ -455,18 +463,18 @@ static void dp_parser_put_gpio_data(struct device *dev, static int dp_parser_init_clk_data(struct dp_parser *parser) { int num_clk = 0, i = 0, rc = 0; - int core_clk_count = 0, ctrl_clk_count = 0; + int core_clk_count = 0, link_clk_count = 0; int strm0_clk_count = 0, strm1_clk_count = 0; const char *core_clk = "core"; - const char *ctrl_clk = "ctrl"; const char *strm0_clk = "strm0"; const char *strm1_clk = "strm1"; + const char *link_clk = "link"; const char *clk_name; struct device *dev = &parser->pdev->dev; struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; - struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM]; struct dss_module_power *strm0_power = &parser->mp[DP_STREAM0_PM]; struct dss_module_power *strm1_power = &parser->mp[DP_STREAM1_PM]; + struct dss_module_power *link_power = &parser->mp[DP_LINK_PM]; num_clk = of_property_count_strings(dev->of_node, "clock-names"); if (num_clk <= 0) { @@ -482,16 +490,14 @@ static int dp_parser_init_clk_data(struct dp_parser *parser) if (dp_parser_check_prefix(core_clk, clk_name)) core_clk_count++; - if (dp_parser_check_prefix(ctrl_clk, clk_name)) { - if (strcmp(clk_name, "ctrl_pixel_clk")) - ctrl_clk_count++; - } - if (dp_parser_check_prefix(strm0_clk, clk_name)) strm0_clk_count++; if (dp_parser_check_prefix(strm1_clk, clk_name)) strm1_clk_count++; + + if (dp_parser_check_prefix(link_clk, clk_name)) + link_clk_count++; } /* 
Initialize the CORE power module */ @@ -510,23 +516,6 @@ static int dp_parser_init_clk_data(struct dp_parser *parser) goto exit; } - /* Initialize the CTRL power module */ - if (ctrl_clk_count <= 0) { - pr_err("no ctrl clocks are defined\n"); - rc = -EINVAL; - goto ctrl_clock_error; - } - - ctrl_power->num_clk = ctrl_clk_count; - ctrl_power->clk_config = devm_kzalloc(dev, - sizeof(struct dss_clk) * ctrl_power->num_clk, - GFP_KERNEL); - if (!ctrl_power->clk_config) { - ctrl_power->num_clk = 0; - rc = -EINVAL; - goto ctrl_clock_error; - } - /* Initialize the STREAM0 power module */ if (strm0_clk_count <= 0) { pr_debug("no strm0 clocks are defined\n"); @@ -557,13 +546,30 @@ static int dp_parser_init_clk_data(struct dp_parser *parser) } } + /* Initialize the link power module */ + if (link_clk_count <= 0) { + pr_err("no link clocks are defined\n"); + rc = -EINVAL; + goto link_clock_error; + } + + link_power->num_clk = link_clk_count; + link_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * link_power->num_clk, + GFP_KERNEL); + if (!link_power->clk_config) { + link_power->num_clk = 0; + rc = -EINVAL; + goto link_clock_error; + } + return rc; +link_clock_error: + dp_parser_put_clk_data(dev, strm1_power); strm1_clock_error: dp_parser_put_clk_data(dev, strm0_power); strm0_clock_error: - dp_parser_put_clk_data(dev, ctrl_power); -ctrl_clock_error: dp_parser_put_clk_data(dev, core_power); exit: return rc; @@ -573,25 +579,25 @@ static int dp_parser_clock(struct dp_parser *parser) { int rc = 0, i = 0; int num_clk = 0; - int core_clk_index = 0, ctrl_clk_index = 0; - int core_clk_count = 0, ctrl_clk_count = 0; + int core_clk_index = 0, link_clk_index = 0; + int core_clk_count = 0, link_clk_count = 0; int strm0_clk_index = 0, strm1_clk_index = 0; int strm0_clk_count = 0, strm1_clk_count = 0; const char *clk_name; const char *core_clk = "core"; - const char *ctrl_clk = "ctrl"; const char *strm0_clk = "strm0"; const char *strm1_clk = "strm1"; + const char *link_clk = 
"link"; struct device *dev = &parser->pdev->dev; struct dss_module_power *core_power; - struct dss_module_power *ctrl_power; struct dss_module_power *strm0_power; struct dss_module_power *strm1_power; + struct dss_module_power *link_power; core_power = &parser->mp[DP_CORE_PM]; - ctrl_power = &parser->mp[DP_CTRL_PM]; strm0_power = &parser->mp[DP_STREAM0_PM]; strm1_power = &parser->mp[DP_STREAM1_PM]; + link_power = &parser->mp[DP_LINK_PM]; rc = dp_parser_init_clk_data(parser); if (rc) { @@ -601,7 +607,7 @@ static int dp_parser_clock(struct dp_parser *parser) } core_clk_count = core_power->num_clk; - ctrl_clk_count = ctrl_power->num_clk; + link_clk_count = link_power->num_clk; strm0_clk_count = strm0_power->num_clk; strm1_clk_count = strm1_power->num_clk; @@ -618,15 +624,14 @@ static int dp_parser_clock(struct dp_parser *parser) strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); clk->type = DSS_CLK_AHB; core_clk_index++; - } else if (dp_parser_check_prefix(ctrl_clk, clk_name) && - ctrl_clk_index < ctrl_clk_count) { + } else if (dp_parser_check_prefix(link_clk, clk_name) && + link_clk_index < link_clk_count) { struct dss_clk *clk = - &ctrl_power->clk_config[ctrl_clk_index]; + &link_power->clk_config[link_clk_index]; strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); - ctrl_clk_index++; + link_clk_index++; - if (!strcmp(clk_name, "ctrl_link_clk") || - !strcmp(clk_name, "ctrl_pixel_clk")) + if (!strcmp(clk_name, "link_clk")) clk->type = DSS_CLK_PCLK; else clk->type = DSS_CLK_AHB; @@ -663,8 +668,8 @@ static int dp_parser_catalog(struct dp_parser *parser) rc = of_property_read_u32(dev->of_node, "qcom,phy-version", &version); - if (!rc && (version == 0x420)) - parser->hw_cfg.phy_version = DP_PHY_VERSION_4_2_0; + if (!rc) + parser->hw_cfg.phy_version = version; return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h index 0bf17bbd7c9462ddd7da8b5d6218053b943e31e2..3c459718d09d87dd6da5aa14525767e4df4e7192 100644 --- 
a/drivers/gpu/drm/msm/dp/dp_parser.h +++ b/drivers/gpu/drm/msm/dp/dp_parser.h @@ -27,6 +27,7 @@ enum dp_pm_type { DP_PHY_PM, DP_STREAM0_PM, DP_STREAM1_PM, + DP_LINK_PM, DP_MAX_PM }; @@ -38,6 +39,7 @@ static inline const char *dp_parser_pm_name(enum dp_pm_type module) case DP_PHY_PM: return "DP_PHY_PM"; case DP_STREAM0_PM: return "DP_STREAM0_PM"; case DP_STREAM1_PM: return "DP_STREAM1_PM"; + case DP_LINK_PM: return "DP_LINK_PM"; default: return "???"; } } @@ -136,7 +138,8 @@ enum dp_phy_aux_config_type { */ enum dp_phy_version { DP_PHY_VERSION_UNKNOWN, - DP_PHY_VERSION_4_2_0, + DP_PHY_VERSION_2_0_0 = 0x200, + DP_PHY_VERSION_4_2_0 = 0x420, DP_PHY_VERSION_MAX }; @@ -213,6 +216,21 @@ struct dp_parser { void (*clear_io_buf)(struct dp_parser *parser); }; +enum dp_phy_lane_num { + DP_PHY_LN0 = 0, + DP_PHY_LN1 = 1, + DP_PHY_LN2 = 2, + DP_PHY_LN3 = 3, + DP_MAX_PHY_LN = 4, +}; + +enum dp_mainlink_lane_num { + DP_ML0 = 0, + DP_ML1 = 1, + DP_ML2 = 2, + DP_ML3 = 3, +}; + /** * dp_parser_get() - get the DP's device tree parser module * diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c index 280f7accb7e12699b26387145457532cc8a8890e..b326fce482b0efeb063fe18488234f83c3eac066 100644 --- a/drivers/gpu/drm/msm/dp/dp_power.c +++ b/drivers/gpu/drm/msm/dp/dp_power.c @@ -147,49 +147,25 @@ static int dp_power_pinctrl_set(struct dp_power_private *power, bool active) static int dp_power_clk_init(struct dp_power_private *power, bool enable) { int rc = 0; - struct dss_module_power *core, *ctrl, *strm0, *strm1; struct device *dev; - - core = &power->parser->mp[DP_CORE_PM]; - ctrl = &power->parser->mp[DP_CTRL_PM]; - strm0 = &power->parser->mp[DP_STREAM0_PM]; - strm1 = &power->parser->mp[DP_STREAM1_PM]; + enum dp_pm_type module; dev = &power->pdev->dev; - if (!core || !ctrl) { - pr_err("invalid power_data\n"); - rc = -EINVAL; - goto exit; - } - if (enable) { - rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk); - if (rc) { - pr_err("failed to get %s clk. 
err=%d\n", - dp_parser_pm_name(DP_CORE_PM), rc); - goto exit; - } + for (module = DP_CORE_PM; module < DP_MAX_PM; module++) { + struct dss_module_power *pm = + &power->parser->mp[module]; - rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk); - if (rc) { - pr_err("failed to get %s clk. err=%d\n", - dp_parser_pm_name(DP_CTRL_PM), rc); - goto ctrl_get_error; - } - - rc = msm_dss_get_clk(dev, strm0->clk_config, strm0->num_clk); - if (rc) { - pr_err("failed to get %s clk. err=%d\n", - dp_parser_pm_name(DP_STREAM0_PM), rc); - goto strm0_get_error; - } + if (!pm->num_clk) + continue; - rc = msm_dss_get_clk(dev, strm1->clk_config, strm1->num_clk); - if (rc) { - pr_err("failed to get %s clk. err=%d\n", - dp_parser_pm_name(DP_STREAM1_PM), rc); - goto strm1_get_error; + rc = msm_dss_get_clk(dev, pm->clk_config, pm->num_clk); + if (rc) { + pr_err("failed to get %s clk. err=%d\n", + dp_parser_pm_name(module), rc); + goto exit; + } } power->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg"); @@ -228,20 +204,16 @@ static int dp_power_clk_init(struct dp_power_private *power, bool enable) if (power->pixel1_clk_rcg) devm_clk_put(dev, power->pixel1_clk_rcg); - msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk); - msm_dss_put_clk(core->clk_config, core->num_clk); - msm_dss_put_clk(strm0->clk_config, strm0->num_clk); - msm_dss_put_clk(strm1->clk_config, strm1->num_clk); - } + for (module = DP_CORE_PM; module < DP_MAX_PM; module++) { + struct dss_module_power *pm = + &power->parser->mp[module]; - return rc; + if (!pm->num_clk) + continue; -strm1_get_error: - msm_dss_put_clk(strm0->clk_config, strm0->num_clk); -strm0_get_error: - msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk); -ctrl_get_error: - msm_dss_put_clk(core->clk_config, core->num_clk); + msm_dss_put_clk(pm->clk_config, pm->num_clk); + } + } exit: return rc; } @@ -300,35 +272,24 @@ static int dp_power_clk_enable(struct dp_power *dp_power, mp = &power->parser->mp[pm_type]; - if ((pm_type != DP_CORE_PM) && (pm_type != 
DP_CTRL_PM) && - (pm_type != DP_STREAM0_PM) && - (pm_type != DP_STREAM1_PM)) { + if (pm_type >= DP_MAX_PM) { pr_err("unsupported power module: %s\n", dp_parser_pm_name(pm_type)); return -EINVAL; } if (enable) { - if ((pm_type == DP_CORE_PM) - && (power->core_clks_on)) { + if (pm_type == DP_CORE_PM && power->core_clks_on) { pr_debug("core clks already enabled\n"); return 0; } - if ((pm_type == DP_CTRL_PM) - && (power->link_clks_on)) { - pr_debug("links clks already enabled\n"); - return 0; - } - - if ((pm_type == DP_STREAM0_PM) - && (power->strm0_clks_on)) { + if ((pm_type == DP_STREAM0_PM) && (power->strm0_clks_on)) { pr_debug("strm0 clks already enabled\n"); return 0; } - if ((pm_type == DP_STREAM1_PM) - && (power->strm1_clks_on)) { + if ((pm_type == DP_STREAM1_PM) && (power->strm1_clks_on)) { pr_debug("strm1 clks already enabled\n"); return 0; } @@ -345,6 +306,11 @@ static int dp_power_clk_enable(struct dp_power *dp_power, power->core_clks_on = true; } } + + if (pm_type == DP_LINK_PM && power->link_clks_on) { + pr_debug("links clks already enabled\n"); + return 0; + } } rc = dp_power_clk_set_rate(power, pm_type, enable); @@ -357,12 +323,12 @@ static int dp_power_clk_enable(struct dp_power *dp_power, if (pm_type == DP_CORE_PM) power->core_clks_on = enable; - else if (pm_type == DP_CTRL_PM) - power->link_clks_on = enable; else if (pm_type == DP_STREAM0_PM) power->strm0_clks_on = enable; else if (pm_type == DP_STREAM1_PM) power->strm1_clks_on = enable; + else if (pm_type == DP_LINK_PM) + power->link_clks_on = enable; pr_debug("%s clocks for %s\n", enable ? 
"enable" : "disable", @@ -470,8 +436,10 @@ static int dp_power_config_gpios(struct dp_power_private *power, bool flip, dp_power_set_gpio(power, flip); } else { for (i = 0; i < mp->num_gpio; i++) { - gpio_set_value(config[i].gpio, 0); - gpio_free(config[i].gpio); + if (gpio_is_valid(config[i].gpio)) { + gpio_set_value(config[i].gpio, 0); + gpio_free(config[i].gpio); + } } } @@ -637,15 +605,10 @@ static int dp_power_deinit(struct dp_power *dp_power) power = container_of(dp_power, struct dp_power_private, dp_power); dp_power_clk_enable(dp_power, DP_CORE_PM, false); - /* - * If the display power on event was not successful, for example if - * there was a link training failure, then the link clocks could - * possibly still be on. In this scenario, we need to turn off the - * link clocks as soon as the cable is disconnected so that the clock - * state is cleaned up before subsequent connection events. - */ + if (power->link_clks_on) - dp_power_clk_enable(dp_power, DP_CTRL_PM, false); + dp_power_clk_enable(dp_power, DP_LINK_PM, false); + rc = sde_power_resource_enable(power->phandle, power->dp_core_client, false); if (rc) { diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h index 519912b9b69e3e6990274af826ef6c72b1f62243..6640b8533da08845d6b88202a626d63e5ce62851 100644 --- a/drivers/gpu/drm/msm/dp/dp_reg.h +++ b/drivers/gpu/drm/msm/dp/dp_reg.h @@ -272,6 +272,9 @@ #define DP_PHY_AUX_INTERRUPT_MASK (0x00000054) #define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000058) #define DP_PHY_AUX_INTERRUPT_STATUS (0x000000D8) +#define DP_PHY_AUX_INTERRUPT_MASK_V200 (0x00000048) +#define DP_PHY_AUX_INTERRUPT_CLEAR_V200 (0x0000004C) +#define DP_PHY_AUX_INTERRUPT_STATUS_V200 (0x000000BC) #define DP_PHY_SPARE0 (0x00AC) @@ -289,10 +292,14 @@ #define MMSS_DP_LINK_CFG_RCGR (0x013C) #define MMSS_DP_PIXEL_M (0x0174) #define MMSS_DP_PIXEL_N (0x0178) +#define MMSS_DP_PIXEL_M_V200 (0x0130) +#define MMSS_DP_PIXEL_N_V200 (0x0134) #define MMSS_DP_PIXEL_M_V420 (0x01B4) #define 
MMSS_DP_PIXEL_N_V420 (0x01B8) #define MMSS_DP_PIXEL1_M (0x018C) #define MMSS_DP_PIXEL1_N (0x0190) +#define MMSS_DP_PIXEL1_M_V200 (0x0148) +#define MMSS_DP_PIXEL1_N_V200 (0x014C) #define MMSS_DP_PIXEL1_M_V420 (0x01CC) #define MMSS_DP_PIXEL1_N_V420 (0x01D0) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h index 81e7c0ecc5d5a7474924255ebcd330350dcf5e6a..80532366ba0b1ce37e0081e809ac1845a4b23f65 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h @@ -124,7 +124,8 @@ void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy); /* DSI controller common ops */ u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl); -void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, + u32 size); void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints); void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 8174ef1aa09f50f3e4a9064b79348e3db5f2158c..1085b4eb859c917dd6ac22e7b08811e91b33608f 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -248,6 +248,8 @@ static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl, dsi_ctrl->cell_index); sde_dbg_reg_register_base(dbg_name, dsi_ctrl->hw.base, msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl")); + sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0, + msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"), 0); error_remove_dir: debugfs_remove(dir); error: @@ -277,6 +279,8 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, int rc = 0; struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state; + SDE_EVT32(dsi_ctrl->cell_index, op); + switch (op) { case DSI_CTRL_OP_POWER_STATE_CHANGE: if (state->power_state == op_state) { @@ -790,7 
+794,7 @@ static int dsi_ctrl_validate_panel_info(struct dsi_ctrl *dsi_ctrl, } /* Function returns number of bits per pxl */ -static int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format) +int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format) { u32 bpp = 0; @@ -1832,11 +1836,14 @@ static struct platform_driver dsi_ctrl_driver = { #if defined(CONFIG_DEBUG_FS) -void dsi_ctrl_debug_dump(void) +void dsi_ctrl_debug_dump(u32 *entries, u32 size) { struct list_head *pos, *tmp; struct dsi_ctrl *ctrl = NULL; + if (!entries || !size) + return; + mutex_lock(&dsi_ctrl_list_lock); list_for_each_safe(pos, tmp, &dsi_ctrl_list) { struct dsi_ctrl_list_item *n; @@ -1844,7 +1851,7 @@ void dsi_ctrl_debug_dump(void) n = list_entry(pos, struct dsi_ctrl_list_item, list); ctrl = n->ctrl; pr_err("dsi ctrl:%d\n", ctrl->cell_index); - ctrl->hw.ops.debug_bus(&ctrl->hw); + ctrl->hw.ops.debug_bus(&ctrl->hw, entries, size); } mutex_unlock(&dsi_ctrl_list_lock); } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index 906721fd5b4199bf2ece41516c729ac73beedb63..3318ef6f36a99012cb523b6360f69cc64c8e4f14 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -778,4 +778,8 @@ int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl); */ int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool en); +/** + * dsi_ctrl_pixel_format_to_bpp() - returns number of bits per pxl + */ +int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format); #endif /* _DSI_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h index 625878d712edc3b6f85a238649bff3963a623e41..84d1302d7e45c5f92a5d4b6286676a19c5c213e3 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h @@ -463,8 +463,10 @@ struct dsi_ctrl_hw_ops { /** * debug_bus() - get dsi debug bus 
status. * @ctrl: Pointer to the controller host hardware. + * @entries: Array of dsi debug bus control values. + * @size: Size of dsi debug bus control array. */ - void (*debug_bus)(struct dsi_ctrl_hw *ctrl); + void (*debug_bus)(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size); /** * soft_reset() - perform a soft reset on DSI controller diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c index 4dd7f3d77c023000a8bb0cd5fcb639a515a6d4b1..2823402202c3ac4b352d69ff706099b05fad5575 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c @@ -485,18 +485,20 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl, pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index); } -void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl) +void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size) { - u32 reg = 0; - - DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, 0x181); - - /* make sure that debug test point is enabled */ - wmb(); - reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS); + u32 reg = 0, i = 0; - pr_err("[DSI_%d] debug bus status:0x%x\n", ctrl->index, reg); + for (i = 0; i < size; i++) { + DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, entries[i]); + /* make sure that debug test point is enabled */ + wmb(); + reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS); + pr_err("[DSI_%d] debug bus ctrl: 0x%x status:0x%x\n", + ctrl->index, entries[i], reg); + } } + /** * cmd_engine_setup() - setup dsi host controller for command mode * @ctrl: Pointer to the controller host hardware. 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 2ef0470fef40114d5802f4b934ea003f2fb8a731..7a9b6ba3d6a0dd2f1cb9e0a3e7485b2943481243 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -761,6 +761,7 @@ int dsi_display_check_status(struct drm_connector *connector, void *display, rc = -EINVAL; goto release_panel_lock; } + SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY); if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio)) status_mode = ESD_MODE_PANEL_TE; @@ -799,6 +800,7 @@ int dsi_display_check_status(struct drm_connector *connector, void *display, release_panel_lock: dsi_panel_release_panel_lock(panel); + SDE_EVT32(SDE_EVTLOG_FUNC_EXIT); return rc; } @@ -1923,7 +1925,8 @@ static void dsi_config_host_engine_state_for_cont_splash enum dsi_engine_state host_state = DSI_CTRL_ENGINE_ON; /* Sequence does not matter for split dsi usecases */ - for (i = 0; i < display->ctrl_count; i++) { + for (i = 0; (i < display->ctrl_count) && + (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) { ctrl = &display->ctrl[i]; if (!ctrl->ctrl) continue; @@ -3619,6 +3622,7 @@ static int dsi_display_dfps_update(struct dsi_display *display, /* For split DSI, update the clock master first */ pr_debug("configuring seamless dynamic fps\n\n"); + SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY); m_ctrl = &display->ctrl[display->clk_master_idx]; rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing); @@ -3653,6 +3657,7 @@ static int dsi_display_dfps_update(struct dsi_display *display, panel_mode->dsi_mode_flags = 0; error: + SDE_EVT32(SDE_EVTLOG_FUNC_EXIT); return rc; } @@ -4944,7 +4949,12 @@ static int dsi_display_ext_get_info(struct drm_connector *connector, info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index; info->is_connected = connector->status != connector_status_disconnected; - info->is_primary = true; + + if (!strcmp(display->display_type, "primary")) + info->is_primary = true; + else + 
info->is_primary = false; + info->capabilities |= (MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_HOT_PLUG); @@ -5904,7 +5914,7 @@ static int dsi_display_cb_error_handler(void *data, { struct dsi_display *display = data; - if (!display) + if (!display || !(display->err_workq)) return -EINVAL; switch (event_idx) { @@ -5991,6 +6001,7 @@ int dsi_display_prepare(struct dsi_display *display) return -EINVAL; } + SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY); mutex_lock(&display->display_lock); mode = display->panel->cur_mode; @@ -6127,6 +6138,7 @@ int dsi_display_prepare(struct dsi_display *display) (void)dsi_panel_post_unprepare(display->panel); error: mutex_unlock(&display->display_lock); + SDE_EVT32(SDE_EVTLOG_FUNC_EXIT); return rc; } @@ -6394,6 +6406,7 @@ int dsi_display_enable(struct dsi_display *display) pr_err("no valid mode set for the display"); return -EINVAL; } + SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY); /* Engine states and panel states are populated during splash * resource init and hence we return early @@ -6479,6 +6492,7 @@ int dsi_display_enable(struct dsi_display *display) (void)dsi_panel_disable(display->panel); error: mutex_unlock(&display->display_lock); + SDE_EVT32(SDE_EVTLOG_FUNC_EXIT); return rc; } @@ -6541,6 +6555,7 @@ int dsi_display_disable(struct dsi_display *display) return -EINVAL; } + SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY); mutex_lock(&display->display_lock); rc = dsi_display_wake_up(display); @@ -6569,6 +6584,7 @@ int dsi_display_disable(struct dsi_display *display) display->name, rc); mutex_unlock(&display->display_lock); + SDE_EVT32(SDE_EVTLOG_FUNC_EXIT); return rc; } @@ -6598,6 +6614,7 @@ int dsi_display_unprepare(struct dsi_display *display) return -EINVAL; } + SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY); mutex_lock(&display->display_lock); rc = dsi_display_wake_up(display); @@ -6621,9 +6638,6 @@ int dsi_display_unprepare(struct dsi_display *display) pr_err("[%s] failed to disable Link clocks, rc=%d\n", display->name, rc); - /* Free up DSI ERROR event 
callback */ - dsi_display_unregister_error_handler(display); - rc = dsi_display_ctrl_deinit(display); if (rc) pr_err("[%s] failed to deinit controller, rc=%d\n", @@ -6645,12 +6659,16 @@ int dsi_display_unprepare(struct dsi_display *display) /* destrory dsi isr set up */ dsi_display_ctrl_isr_configure(display, false); + /* Free up DSI ERROR event callback */ + dsi_display_unregister_error_handler(display); + rc = dsi_panel_post_unprepare(display->panel); if (rc) pr_err("[%s] panel post-unprepare failed, rc=%d\n", display->name, rc); mutex_unlock(&display->display_lock); + SDE_EVT32(SDE_EVTLOG_FUNC_EXIT); return rc; } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index d831defa0d2919f8e7a7f8f35d28d0b4b76ad6e9..2ad6d72179d6148b8f9bf1bef4224f8864849335 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -437,6 +437,8 @@ int dsi_conn_set_info_blob(struct drm_connector *connector, { struct dsi_display *dsi_display = display; struct dsi_panel *panel; + enum dsi_pixel_format fmt; + u32 bpp; if (!info || !dsi_display) return -EINVAL; @@ -557,6 +559,11 @@ int dsi_conn_set_info_blob(struct drm_connector *connector, mode_info->roi_caps.merge_rois); } + fmt = dsi_display->config.common_config.dst_format; + bpp = dsi_ctrl_pixel_format_to_bpp(fmt); + + sde_kms_info_add_keyint(info, "bit_depth", bpp); + end: return 0; } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c index cd7ade6f8ae977cd2b3174c27263e86957b94e34..c2eda63c60cb88b274a94256cb6f8c76d45d3d89 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c @@ -57,7 +57,7 @@ static u32 dsi_dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type */ static char dsi_dsc_rc_range_min_qp_1_1[][15] = { - {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13}, 
+ {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12}, {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 17}, {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21}, }; @@ -78,7 +78,7 @@ static char dsi_dsc_rc_range_min_qp_1_1_scr1[][15] = { */ static char dsi_dsc_rc_range_max_qp_1_1[][15] = { {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15}, - {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19}, + {4, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19}, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 19, 20, 21, 21, 23}, }; @@ -89,7 +89,7 @@ static char dsi_dsc_rc_range_max_qp_1_1[][15] = { static char dsi_dsc_rc_range_max_qp_1_1_scr1[][15] = { {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13}, {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17}, - {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21}, + {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 23}, }; /* @@ -1771,6 +1771,9 @@ static int dsi_panel_parse_misc_features(struct dsi_panel *panel) panel->sync_broadcast_en = utils->read_bool(utils->data, "qcom,cmd-sync-wait-broadcast"); + + panel->lp11_init = utils->read_bool(utils->data, + "qcom,mdss-dsi-lp11-init"); return 0; } @@ -2062,7 +2065,6 @@ int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc) int hrd_delay; int pre_num_extra_mux_bits, num_extra_mux_bits; int slice_bits; - int target_bpp_x16; int data; int final_value, final_scale; int ratio_index, mod_offset; @@ -2104,7 +2106,7 @@ int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc) } dsc->range_bpg_offset = dsi_dsc_rc_range_bpg_offset; - if (bpp == 8) + if (bpp <= 10) dsc->initial_offset = 6144; else dsc->initial_offset = 2048; /* bpp = 12 */ @@ -2114,22 +2116,21 @@ int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc) else mux_words_size = 48; /* bpc == 8/10 */ + dsc->line_buf_depth = bpc + 1; + if (bpc == 8) { - dsc->line_buf_depth = 9; dsc->input_10_bits = 0; dsc->min_qp_flatness = 3; dsc->max_qp_flatness = 12; dsc->quant_incr_limit0 = 
11; dsc->quant_incr_limit1 = 11; } else if (bpc == 10) { /* 10bpc */ - dsc->line_buf_depth = 11; dsc->input_10_bits = 1; dsc->min_qp_flatness = 7; dsc->max_qp_flatness = 16; dsc->quant_incr_limit0 = 15; dsc->quant_incr_limit1 = 15; } else { /* 12 bpc */ - dsc->line_buf_depth = 9; dsc->input_10_bits = 0; dsc->min_qp_flatness = 11; dsc->max_qp_flatness = 20; @@ -2152,7 +2153,7 @@ int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc) break; } - dsc->det_thresh_flatness = 7 + 2*(bpc - 8); + dsc->det_thresh_flatness = 2 << (bpc - 8); dsc->initial_xmit_delay = dsc->rc_model_size / (2 * bpp); @@ -2191,14 +2192,7 @@ int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc) + num_extra_mux_bits); dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total); - /* bpp * 16 + 0.5 */ - data = bpp * 16; - data *= 2; - data++; - data /= 2; - target_bpp_x16 = data; - - data = (dsc->initial_xmit_delay * target_bpp_x16) / 16; + data = dsc->initial_xmit_delay * bpp; final_value = dsc->rc_model_size - data + num_extra_mux_bits; final_scale = 8 * dsc->rc_model_size / @@ -3755,14 +3749,19 @@ int dsi_panel_disable(struct dsi_panel *panel) if (!atomic_read(&panel->esd_recovery_pending)) { rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF); if (rc) { - pr_err("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n", + /* + * Sending panel off commands may fail when DSI + * controller is in a bad state. These failures can be + * ignored since controller will go for full reset on + * subsequent display enable anyway. 
+ */ + pr_warn_ratelimited("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n", panel->name, rc); - goto error; + rc = 0; } } panel->panel_initialized = false; -error: mutex_unlock(&panel->panel_lock); return rc; } @@ -3785,14 +3784,6 @@ int dsi_panel_unprepare(struct dsi_panel *panel) goto error; } - if (panel->lp11_init) { - rc = dsi_panel_power_off(panel); - if (rc) { - pr_err("[%s] panel power_Off failed, rc=%d\n", - panel->name, rc); - goto error; - } - } error: mutex_unlock(&panel->panel_lock); return rc; @@ -3809,13 +3800,11 @@ int dsi_panel_post_unprepare(struct dsi_panel *panel) mutex_lock(&panel->panel_lock); - if (!panel->lp11_init) { - rc = dsi_panel_power_off(panel); - if (rc) { - pr_err("[%s] panel power_Off failed, rc=%d\n", - panel->name, rc); - goto error; - } + rc = dsi_panel_power_off(panel); + if (rc) { + pr_err("[%s] panel power_Off failed, rc=%d\n", + panel->name, rc); + goto error; } error: mutex_unlock(&panel->panel_lock); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c index 899e4b91a9a89c104d00e8bad9b9742ec84766bc..b3e375e1940a724aed463b3820bbc03b4e4163ba 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c @@ -578,6 +578,8 @@ int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy) snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_phy", dsi_phy->index); sde_dbg_reg_register_base(dbg_name, dsi_phy->hw.base, msm_iomap_size(dsi_phy->pdev, "dsi_phy")); + sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0, + msm_iomap_size(dsi_phy->pdev, "dsi_phy"), 0); return 0; } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c index 3da0fc3f0a268b74057ab7342244cccf415cdbc7..45f757703764e420a25f69ebb11294d3623be56c 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c @@ -113,7 +113,7 @@ void 
dsi_phy_hw_v2_0_update_timing_params( else timing->lane[i][4] = desc->hs_rqst.reg_value; - timing->lane[i][5] = 0x3; + timing->lane[i][5] = 0x2; timing->lane[i][6] = 0x4; timing->lane[i][7] = 0xA0; pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0], diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c index 4392c60eb0f356d82f8a4628cb05bdf8f7036f01..c0e9d441542d8fa132e51c55472fe0c8d933c692 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c @@ -92,7 +92,7 @@ void dsi_phy_hw_v3_0_update_timing_params( timing->lane_v3[6] = desc->hs_prepare.reg_value; timing->lane_v3[7] = desc->hs_trail.reg_value; timing->lane_v3[8] = desc->hs_rqst.reg_value; - timing->lane_v3[9] = 0x03; + timing->lane_v3[9] = 0x02; timing->lane_v3[10] = 0x04; timing->lane_v3[11] = 0x00; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 60d55d7e66d43afc99298443548ecfa6d66735c0..066f3e46e1f982cf9f64523136b132b387278d62 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -50,6 +50,7 @@ int msm_drm_register_client(struct notifier_block *nb) return blocking_notifier_chain_register(&msm_drm_notifier_list, nb); } +EXPORT_SYMBOL(msm_drm_register_client); /** * msm_drm_unregister_client - unregister a client notifier @@ -63,6 +64,7 @@ int msm_drm_unregister_client(struct notifier_block *nb) return blocking_notifier_chain_unregister(&msm_drm_notifier_list, nb); } +EXPORT_SYMBOL(msm_drm_unregister_client); /** * msm_drm_notifier_call_chain - notify clients of drm_events @@ -239,11 +241,14 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", encoder->base.id, encoder->name); - blank = MSM_DRM_BLANK_POWERDOWN; - notifier_data.data = ␣ - notifier_data.id = crtc_idx; - 
msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK, - ¬ifier_data); + if (connector->state->crtc && + connector->state->crtc->state->active_changed) { + blank = MSM_DRM_BLANK_POWERDOWN; + notifier_data.data = ␣ + notifier_data.id = crtc_idx; + msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK, + ¬ifier_data); + } /* * Each encoder has at most one connector (since we always steal * it away), so we won't call disable hooks twice. @@ -259,8 +264,12 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs->dpms(encoder, DRM_MODE_DPMS_OFF); drm_bridge_post_disable(encoder->bridge); - msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK, - ¬ifier_data); + if (connector->state->crtc && + connector->state->crtc->state->active_changed) { + DRM_DEBUG_ATOMIC("Notify blank\n"); + msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK, + ¬ifier_data); + } } for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { @@ -406,6 +415,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct msm_kms *kms = priv->kms; int bridge_enable_count = 0; int i, blank; + bool splash = false; SDE_ATRACE_BEGIN("msm_enable"); for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, @@ -465,7 +475,11 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", encoder->base.id, encoder->name); - if (connector->state->crtc->state->active_changed) { + if (kms && kms->funcs && kms->funcs->check_for_splash) + splash = kms->funcs->check_for_splash(kms); + + if (splash || (connector->state->crtc && + connector->state->crtc->state->active_changed)) { blank = MSM_DRM_BLANK_UNBLANK; notifier_data.data = ␣ notifier_data.id = @@ -522,7 +536,9 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, encoder->base.id, encoder->name); drm_bridge_enable(encoder->bridge); - if (connector->state->crtc->state->active_changed) { + + if (splash || 
(connector->state->crtc && + connector->state->crtc->state->active_changed)) { DRM_DEBUG_ATOMIC("Notify unblank\n"); msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK, ¬ifier_data); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 9839cbe6368c6ca806bde5fab23833b1f021fae7..2c02d0985185bf532fae22a1330e1917a9ad84b6 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -78,6 +78,9 @@ static struct page **get_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + if (obj->import_attach) + return msm_obj->pages; + if (!msm_obj->pages) { struct drm_device *dev = obj->dev; struct page **p; @@ -644,8 +647,14 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj) ret = PTR_ERR(pages); goto fail; } - msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, + + if (obj->import_attach) + msm_obj->vaddr = + dma_buf_vmap(obj->import_attach->dmabuf); + else + msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (msm_obj->vaddr == NULL) { ret = -ENOMEM; goto fail; @@ -742,7 +751,11 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj) if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) return; - vunmap(msm_obj->vaddr); + if (obj->import_attach) + dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); + else + vunmap(msm_obj->vaddr); + msm_obj->vaddr = NULL; } @@ -1134,8 +1147,6 @@ int msm_gem_delayed_import(struct drm_gem_object *obj) struct dma_buf_attachment *attach; struct sg_table *sgt; struct msm_gem_object *msm_obj; - uint32_t size; - int npages; int ret = 0; if (!obj) { @@ -1171,18 +1182,8 @@ int msm_gem_delayed_import(struct drm_gem_object *obj) ret); goto fail_import; } - - size = PAGE_ALIGN(attach->dmabuf->size); - npages = size >> PAGE_SHIFT; - msm_obj->sgt = sgt; - ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, - NULL, npages); - if (ret) { - DRM_ERROR("fail drm_prime_sg_to_page_addr_arrays, err=%d\n", 
- ret); - goto fail_import; - } + msm_obj->pages = NULL; fail_import: return ret; @@ -1194,7 +1195,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct msm_gem_object *msm_obj; struct drm_gem_object *obj = NULL; uint32_t size; - int ret, npages; + int ret; unsigned long flags = 0; /* if we don't have IOMMU, don't bother pretending we can import: */ @@ -1212,32 +1213,16 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, drm_gem_private_object_init(dev, obj, size); - npages = size >> PAGE_SHIFT; - msm_obj = to_msm_bo(obj); mutex_lock(&msm_obj->lock); msm_obj->sgt = sgt; - msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); - if (!msm_obj->pages) { - mutex_unlock(&msm_obj->lock); - ret = -ENOMEM; - goto fail; - } - + msm_obj->pages = NULL; /* * If sg table is NULL, user should call msm_gem_delayed_import to add * back the sg table to the drm gem object */ - if (sgt) { - ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, - NULL, npages); - if (ret) { - mutex_unlock(&msm_obj->lock); - goto fail; - } - } else { + if (!sgt) msm_obj->flags |= MSM_BO_EXTBUF; - } /* * For all uncached buffers, there is no need to perform cache diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index aed38457faf8cf4d9aa735c057adac134ddf7de0..cce3de136cef3dc62f6c8a4491b63c7406a28bfd 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -87,8 +87,6 @@ static struct sde_crtc_custom_events custom_events[] = { * Default value is set to 1 sec. */ #define CRTC_TIME_PERIOD_CALC_FPS_US 1000000 -#define MAX_PERIODICITY 5000000 -#define MAX_FRAME_COUNT 300 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc) { @@ -165,16 +163,6 @@ static void sde_crtc_calc_fps(struct sde_crtc *sde_crtc) sde_crtc->fps_info.last_sampled_time_us = current_time_us; sde_crtc->fps_info.frame_count = 0; } - - /** - * Array indexing is based on sliding window algorithm. 
- * sde_crtc->time_buf has a maximum capacity of MAX_FRAME_COUNT - * time slots. As the count increases to MAX_FRAME_COUNT + 1, the - * counter loops around and comes back to the first index to store - * the next ktime. - */ - sde_crtc->time_buf[sde_crtc->next_time_index++] = ktime_get(); - sde_crtc->next_time_index %= MAX_FRAME_COUNT; } /** @@ -697,131 +685,6 @@ static int _sde_debugfs_fps_status(struct inode *inode, struct file *file) inode->i_private); } -static ssize_t set_fps_periodicity(struct device *device, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct drm_crtc *crtc; - struct sde_crtc *sde_crtc; - int res; - - if (!device || !buf) { - SDE_ERROR("invalid input param(s)\n"); - return -EAGAIN; - } - - crtc = dev_get_drvdata(device); - if (!crtc) - return -EINVAL; - - sde_crtc = to_sde_crtc(crtc); - - res = kstrtou32(buf, 10, &sde_crtc->fps_info.fps_periodicity); - if (res < 0) - return res; - - if (sde_crtc->fps_info.fps_periodicity < 0 || - (sde_crtc->fps_info.fps_periodicity)*1000 > - MAX_PERIODICITY) - sde_crtc->fps_info.fps_periodicity = - CRTC_TIME_PERIOD_CALC_FPS_US; - else - sde_crtc->fps_info.fps_periodicity *= 1000; - - return count; -} - -static ssize_t fps_periodicity_show(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct drm_crtc *crtc; - struct sde_crtc *sde_crtc; - - if (!device || !buf) { - SDE_ERROR("invalid input param(s)\n"); - return -EAGAIN; - } - - crtc = dev_get_drvdata(device); - if (!crtc) - return -EINVAL; - - sde_crtc = to_sde_crtc(crtc); - - return scnprintf(buf, PAGE_SIZE, "%d\n", - (sde_crtc->fps_info.fps_periodicity)/1000); -} - -static ssize_t measured_fps_show(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct drm_crtc *crtc; - struct sde_crtc *sde_crtc; - unsigned int fps_int, fps_decimal; - u64 fps = 0, frame_count = 1; - ktime_t current_time; - int i = 0, time_index; - u64 diff_us; - - if (!device || !buf) { - SDE_ERROR("invalid input 
param(s)\n"); - return -EAGAIN; - } - - crtc = dev_get_drvdata(device); - if (!crtc) - return -EINVAL; - - sde_crtc = to_sde_crtc(crtc); - - /** - * Whenever the time_index counter comes to zero upon decrementing, - * it is set to the last index since it is the next index that we - * should check for calculating the buftime. - */ - time_index = (sde_crtc->next_time_index - 1) < 0 ? - MAX_FRAME_COUNT - 1 : (sde_crtc->next_time_index - 1); - - current_time = ktime_get(); - - if (sde_crtc->fps_info.fps_periodicity <= MAX_PERIODICITY) { - for (; i < MAX_FRAME_COUNT; i++) { - u64 ptime = (u64)ktime_to_us(current_time); - u64 buftime = (u64) - ktime_to_us(sde_crtc->time_buf[time_index]); - if (ptime > buftime) { - diff_us = (u64)ktime_us_delta(current_time, - sde_crtc->time_buf[time_index]); - if (diff_us >= (u64) - sde_crtc->fps_info.fps_periodicity) { - fps = (frame_count) * 1000000 * 10; - do_div(fps, diff_us); - sde_crtc->fps_info.measured_fps = - (unsigned int)fps; - break; - } - } - - time_index = (time_index - 1) < 0 ? 
- (MAX_FRAME_COUNT - 1) : (time_index - 1); - frame_count++; - } - } - - if (i == MAX_FRAME_COUNT) { - diff_us = (u64)ktime_us_delta(current_time, - sde_crtc->time_buf[time_index]); - if (diff_us >= sde_crtc->fps_info.fps_periodicity) { - fps = (frame_count) * 1000000 * 10; - do_div(fps, diff_us); - sde_crtc->fps_info.measured_fps = (unsigned int)fps; - } - } - - fps_int = (unsigned int) sde_crtc->fps_info.measured_fps; - fps_decimal = do_div(fps_int, 10); - return scnprintf(buf, PAGE_SIZE, "%d.%d\n", fps_int, fps_decimal); -} - static ssize_t vsync_event_show(struct device *device, struct device_attribute *attr, char *buf) { @@ -840,13 +703,8 @@ static ssize_t vsync_event_show(struct device *device, } static DEVICE_ATTR_RO(vsync_event); -static DEVICE_ATTR(measured_fps, 0444, measured_fps_show, NULL); -static DEVICE_ATTR(fps_periodicity_ms, 0644, fps_periodicity_show, - set_fps_periodicity); static struct attribute *sde_crtc_dev_attrs[] = { &dev_attr_vsync_event.attr, - &dev_attr_measured_fps.attr, - &dev_attr_fps_periodicity_ms.attr, NULL }; @@ -945,7 +803,6 @@ static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer, if (fg_alpha != 0xff) { bg_alpha = fg_alpha; blend_op |= SDE_BLEND_FG_MOD_ALPHA | - SDE_BLEND_FG_INV_MOD_ALPHA | SDE_BLEND_BG_MOD_ALPHA | SDE_BLEND_BG_INV_MOD_ALPHA; } else { @@ -3267,6 +3124,7 @@ static void _sde_crtc_setup_mixers(struct drm_crtc *crtc) } mutex_unlock(&sde_crtc->crtc_lock); + _sde_crtc_check_dest_scaler_data(crtc, crtc->state); } static void _sde_crtc_setup_is_ppsplit(struct drm_crtc_state *state) @@ -4379,6 +4237,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc) struct sde_crtc_irq_info *node = NULL; struct drm_event event; u32 power_on; + bool in_cont_splash = false; int ret, i; if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) { @@ -4461,8 +4320,18 @@ static void sde_crtc_disable(struct drm_crtc *crtc) } spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); + drm_for_each_encoder(encoder, 
crtc->dev) { + if (encoder->crtc != crtc) + continue; + + if (sde_encoder_in_cont_splash(encoder)) { + in_cont_splash = true; + break; + } + } + /* avoid clk/bw downvote if cont-splash is enabled */ - if (!sde_kms->splash_data.cont_splash_en) + if (!in_cont_splash) sde_core_perf_crtc_update(crtc, 0, true); drm_for_each_encoder(encoder, crtc->dev) { @@ -6400,14 +6269,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) sde_crtc->enabled = false; - /* Below parameters are for fps calculation for sysfs node */ - sde_crtc->fps_info.fps_periodicity = CRTC_TIME_PERIOD_CALC_FPS_US; - sde_crtc->fps_info.frame_count = 0; - sde_crtc->time_buf = kmalloc_array(MAX_FRAME_COUNT, - sizeof(sde_crtc->time_buf), GFP_KERNEL); - memset(sde_crtc->time_buf, 0, sizeof(*(sde_crtc->time_buf))); - sde_crtc->next_time_index = 0; - INIT_LIST_HEAD(&sde_crtc->frame_event_list); INIT_LIST_HEAD(&sde_crtc->user_event_list); for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) { @@ -6509,6 +6370,7 @@ static int _sde_crtc_event_enable(struct sde_kms *kms, unsigned long flags; bool found = false; int ret, i = 0; + bool add_event = false; crtc = to_sde_crtc(crtc_drm); spin_lock_irqsave(&crtc->spin_lock, flags); @@ -6558,11 +6420,24 @@ static int _sde_crtc_event_enable(struct sde_kms *kms, } INIT_LIST_HEAD(&node->irq.list); + + mutex_lock(&crtc->crtc_lock); ret = node->func(crtc_drm, true, &node->irq); + if (!ret) { + spin_lock_irqsave(&crtc->spin_lock, flags); + list_add_tail(&node->list, &crtc->user_event_list); + add_event = true; + spin_unlock_irqrestore(&crtc->spin_lock, flags); + } + mutex_unlock(&crtc->crtc_lock); + sde_power_resource_enable(&priv->phandle, kms->core_client, false); } + if (add_event) + return 0; + if (!ret) { spin_lock_irqsave(&crtc->spin_lock, flags); list_add_tail(&node->list, &crtc->user_event_list); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 
9c1fa88cacfbe3acd02aef6e8c1112edab3b782b..2d897169c358a063f1d5aef0bd1245d7304a6b34 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -148,7 +148,6 @@ struct sde_crtc_fps_info { u32 frame_count; ktime_t last_sampled_time_us; u32 measured_fps; - u32 fps_periodicity; }; /* @@ -242,7 +241,6 @@ struct sde_crtc { u64 play_count; ktime_t vblank_cb_time; ktime_t vblank_last_cb_time; - ktime_t *time_buf; struct sde_crtc_fps_info fps_info; struct device *sysfs_dev; struct kernfs_node *vsync_event_sf; @@ -289,7 +287,6 @@ struct sde_crtc { struct list_head rp_head; u32 plane_mask_old; - u32 next_time_index; /* blob for histogram data */ struct drm_property_blob *hist_blob; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index cd02797ad4a67d9042674b472ee096e742e0320b..d345d55999c971c589f9119f5730fcaea5d298f5 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -308,14 +308,17 @@ static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc) if (!cpu_mask) return; - req = &sde_kms->pm_qos_cpu_req; - req->type = PM_QOS_REQ_AFFINE_CORES; - cpumask_empty(&req->cpus_affine); - for_each_possible_cpu(cpu) { - if ((1 << cpu) & cpu_mask) - cpumask_set_cpu(cpu, &req->cpus_affine); + if (atomic_inc_return(&sde_kms->pm_qos_counts) == 1) { + req = &sde_kms->pm_qos_cpu_req; + req->type = PM_QOS_REQ_AFFINE_CORES; + cpumask_empty(&req->cpus_affine); + for_each_possible_cpu(cpu) { + if ((1 << cpu) & cpu_mask) + cpumask_set_cpu(cpu, &req->cpus_affine); + } + pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, + cpu_dma_latency); } - pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency); SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency); } @@ -340,7 +343,9 @@ static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc) if (!sde_kms || !sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask) return; - 
pm_qos_remove_request(&sde_kms->pm_qos_cpu_req); + atomic_add_unless(&sde_kms->pm_qos_counts, -1, 0); + if (atomic_read(&sde_kms->pm_qos_counts) == 0) + pm_qos_remove_request(&sde_kms->pm_qos_cpu_req); } static struct drm_connector_state *_sde_encoder_get_conn_state( @@ -440,6 +445,14 @@ int sde_encoder_in_clone_mode(struct drm_encoder *drm_enc) sde_enc->cur_master->in_clone_mode; } +int sde_encoder_in_cont_splash(struct drm_encoder *drm_enc) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + + return sde_enc && sde_enc->cur_master && + sde_enc->cur_master->cont_splash_enabled; +} + static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc, bool enable) { @@ -2892,7 +2905,7 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc) if (sde_enc->cur_master->hw_ctl && sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1 && - !sde_kms->splash_data.cont_splash_en) + !sde_enc->cur_master->cont_splash_enabled) sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1( sde_enc->cur_master->hw_ctl, &sde_enc->cur_master->intf_cfg_v1); @@ -3146,7 +3159,7 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) for (i = 0; i < sde_enc->num_phys_encs; i++) { if (sde_enc->phys_encs[i]) { - sde_enc->phys_encs[i]->cont_splash_settings = false; + sde_enc->phys_encs[i]->cont_splash_enabled = false; sde_enc->phys_encs[i]->cont_splash_single_flush = 0; sde_enc->phys_encs[i]->connector = NULL; } @@ -3289,6 +3302,12 @@ static void sde_encoder_frame_done_callback( struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); unsigned int i; + if (!drm_enc || !sde_enc->cur_master) { + SDE_ERROR("invalid param: drm_enc %x, cur_master %x\n", + drm_enc, drm_enc ? 
sde_enc->cur_master : 0); + return; + } + sde_enc->crtc_frame_event_cb_data.connector = sde_enc->cur_master->connector; @@ -3622,17 +3641,20 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) u32 pending_kickoff_cnt; struct msm_drm_private *priv = NULL; struct sde_kms *sde_kms = NULL; + bool is_vid_mode = false; if (!sde_enc) { SDE_ERROR("invalid encoder\n"); return; } + is_vid_mode = sde_enc->disp_info.capabilities & + MSM_DISPLAY_CAP_VID_MODE; + /* don't perform flush/start operations for slave encoders */ for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE; - bool wait_for_dma = false; if (!phys || phys->enable_state == SDE_ENC_DISABLED) continue; @@ -3641,18 +3663,9 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) if (!ctl) continue; - if (phys->ops.wait_dma_trigger) - wait_for_dma = phys->ops.wait_dma_trigger( - phys); - - if (phys->hw_ctl->ops.reg_dma_flush) - phys->hw_ctl->ops.reg_dma_flush(phys->hw_ctl, - wait_for_dma); - if (phys->connector) topology = sde_connector_get_topology_name( phys->connector); - /* * don't wait on ppsplit slaves or skipped encoders because * they dont receive irqs @@ -3663,18 +3676,22 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) set_bit(i, sde_enc->frame_busy_mask); if (!phys->ops.needs_single_flush || - !phys->ops.needs_single_flush(phys)) + !phys->ops.needs_single_flush(phys)) { + if (ctl->ops.reg_dma_flush) + ctl->ops.reg_dma_flush(ctl, is_vid_mode); _sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0); - else if (ctl->ops.get_pending_flush) + } else if (ctl->ops.get_pending_flush) { ctl->ops.get_pending_flush(ctl, &pending_flush); + } } /* for split flush, combine pending flush masks and send to master */ if (pending_flush.pending_flush_mask && sde_enc->cur_master) { - _sde_encoder_trigger_flush( - &sde_enc->base, - sde_enc->cur_master, - 
&pending_flush); + ctl = sde_enc->cur_master->hw_ctl; + if (ctl->ops.reg_dma_flush) + ctl->ops.reg_dma_flush(ctl, is_vid_mode); + _sde_encoder_trigger_flush(&sde_enc->base, sde_enc->cur_master, + &pending_flush); } /* update pending_kickoff_cnt AFTER flush but before trigger start */ @@ -4350,8 +4367,8 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, } } - if (_sde_encoder_is_dsc_enabled(drm_enc) && - !sde_kms->splash_data.cont_splash_en) { + if (_sde_encoder_is_dsc_enabled(drm_enc) && sde_enc->cur_master && + !sde_enc->cur_master->cont_splash_enabled) { rc = _sde_encoder_dsc_setup(sde_enc, params); if (rc) { SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc); @@ -5160,10 +5177,13 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder) /** * sde_encoder_update_caps_for_cont_splash - update encoder settings during * device bootup when cont_splash is enabled - * @drm_enc: Pointer to drm encoder structure + * @drm_enc: Pointer to drm encoder structure + * @splash_display: Pointer to sde_splash_display corresponding to this encoder + * @enable: boolean indicates enable or displae state of splash * @Return: true if successful in updating the encoder structure */ -int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder) +int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder, + struct sde_splash_display *splash_display, bool enable) { struct sde_encoder_virt *sde_enc; struct msm_drm_private *priv; @@ -5201,6 +5221,21 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder) SDE_DEBUG_ENC(sde_enc, "num of connectors: %d\n", priv->num_connectors); + SDE_DEBUG_ENC(sde_enc, "enable: %d\n", enable); + if (!enable) { + for (i = 0; i < sde_enc->num_phys_encs; i++) { + phys_enc = sde_enc->phys_encs[i]; + if (phys_enc) + phys_enc->cont_splash_enabled = false; + } + return ret; + } + + if (!splash_display) { + SDE_ERROR_ENC(sde_enc, "invalid splash data\n"); + return -EINVAL; + } + for 
(i = 0; i < priv->num_connectors; i++) { SDE_DEBUG_ENC(sde_enc, "connector id: %d\n", priv->connectors[i]->base.id); @@ -5341,9 +5376,9 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder) /* update connector for master and slave phys encoders */ phys->connector = conn; + phys->cont_splash_enabled = true; phys->cont_splash_single_flush = - sde_kms->splash_data.single_flush_en; - phys->cont_splash_settings = true; + splash_display->single_flush_en; phys->hw_pp = sde_enc->hw_pp[i]; if (phys->ops.cont_splash_mode_set) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index 6a610a582fba16d9804f8419566e8b3fe3118e80..c6829bc99f012ab71e05c7e7a01d244b656652b2 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -236,9 +236,12 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc); * sde_encoder_update_caps_for_cont_splash - update encoder settings during * device bootup when cont_splash is enabled * @drm_enc: Pointer to drm encoder structure + * @splash_display: Pointer to sde_splash_display corresponding to this encoder + * @enable: boolean indicates enable or displae state of splash * @Return: true if successful in updating the encoder structure */ -int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder); +int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder, + struct sde_splash_display *splash_display, bool enable); /** * sde_encoder_display_failure_notification - update sde encoder state for @@ -285,4 +288,11 @@ int sde_encoder_in_clone_mode(struct drm_encoder *enc); */ void sde_encoder_control_idle_pc(struct drm_encoder *enc, bool enable); +/** + * sde_encoder_in_cont_splash - checks if display is in continuous splash + * @drm_enc: Pointer to drm encoder structure + * @Return: true if display in continuous splash + */ +int sde_encoder_in_cont_splash(struct drm_encoder *enc); + #endif /* __SDE_ENCODER_H__ */ diff 
--git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index dc5fdd1b1d30a52a6f515fe9151f88b4ded10355..0881b099d523932f679dcd1e9a2a3d1187559728 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -285,7 +285,7 @@ struct sde_encoder_irq { * @irq: IRQ tracking structures * @has_intf_te: Interface TE configuration support * @cont_splash_single_flush Variable to check if single flush is enabled. - * @cont_splash_settings Variable to store continuous splash settings. + * @cont_splash_enabled: Variable to store continuous splash settings. * @in_clone_mode Indicates if encoder is in clone mode ref@CWB * @vfp_cached: cached vertical front porch to be used for * programming ROT and MDP fetch start @@ -324,7 +324,7 @@ struct sde_encoder_phys { struct sde_encoder_irq irq[INTR_IDX_MAX]; bool has_intf_te; u32 cont_splash_single_flush; - bool cont_splash_settings; + bool cont_splash_enabled; bool in_clone_mode; int vfp_cached; }; @@ -690,7 +690,7 @@ static inline bool sde_encoder_phys_needs_single_flush( if (!phys_enc) return false; - return phys_enc->cont_splash_settings ? + return phys_enc->cont_splash_enabled ? 
phys_enc->cont_splash_single_flush : (_sde_encoder_phys_is_ppsplit(phys_enc) || _sde_encoder_phys_is_dual_ctl(phys_enc)); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index fe5a2f8aae97106bda0cb5db7c985f713e57d79d..6afe9be6d5dbe49bc817bb3ba7ebdf23e181d701 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -50,6 +50,8 @@ */ #define SDE_ENC_CTL_START_THRESHOLD_US 500 +#define SDE_ENC_MAX_POLL_TIMEOUT_US 2000 + static inline int _sde_encoder_phys_cmd_get_idle_timeout( struct sde_encoder_phys_cmd *cmd_enc) { @@ -420,6 +422,10 @@ static void sde_encoder_phys_cmd_cont_splash_mode_set( struct sde_encoder_phys *phys_enc, struct drm_display_mode *adj_mode) { + struct sde_hw_intf *hw_intf; + struct sde_hw_pingpong *hw_pp; + struct sde_encoder_phys_cmd *cmd_enc; + if (!phys_enc || !adj_mode) { SDE_ERROR("invalid args\n"); return; @@ -435,6 +441,21 @@ static void sde_encoder_phys_cmd_cont_splash_mode_set( return; } + if (sde_encoder_phys_cmd_is_master(phys_enc)) { + cmd_enc = to_sde_encoder_phys_cmd(phys_enc); + hw_pp = phys_enc->hw_pp; + hw_intf = phys_enc->hw_intf; + + if (phys_enc->has_intf_te && hw_intf && + hw_intf->ops.get_autorefresh) { + hw_intf->ops.get_autorefresh(hw_intf, + &cmd_enc->autorefresh.cfg); + } else if (hw_pp && hw_pp->ops.get_autorefresh) { + hw_pp->ops.get_autorefresh(hw_pp, + &cmd_enc->autorefresh.cfg); + } + } + _sde_encoder_phys_cmd_setup_irq_hw_idx(phys_enc); } @@ -1110,7 +1131,7 @@ static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc) SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0); if (phys_enc->enable_state == SDE_ENC_ENABLED) { - if (!phys_enc->sde_kms->splash_data.cont_splash_en) + if (!phys_enc->cont_splash_enabled) SDE_ERROR("already enabled\n"); return; } @@ -1506,7 +1527,7 @@ static void sde_encoder_phys_cmd_prepare_commit( { struct sde_encoder_phys_cmd *cmd_enc = 
to_sde_encoder_phys_cmd(phys_enc); - unsigned long lock_flags; + int trial = 0; if (!phys_enc) return; @@ -1520,35 +1541,31 @@ static void sde_encoder_phys_cmd_prepare_commit( if (!sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc)) return; - /** - * Autorefresh must be disabled carefully: - * - Autorefresh must be disabled between pp_done and te - * signal prior to sdm845 targets. All targets after sdm845 - * supports autorefresh disable without turning off the - * hardware TE and pp_done wait. - * - * - Wait for TX to Complete - * Wait for PPDone confirms the last frame transfer is complete. - * - * - Leave Autorefresh Disabled - * - Assume disable of Autorefresh since it is now safe - * - Can now safely Disable Encoder, do debug printing, etc. - * without worrying that Autorefresh will kickoff + /* + * If autorefresh is enabled, disable it and make sure it is safe to + * proceed with current frame commit/push. Sequence fallowed is, + * 1. Disable TE + * 2. Disable autorefresh config + * 4. Poll for frame transfer ongoing to be false + * 5. 
Enable TE back */ - - spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + sde_encoder_phys_cmd_connect_te(phys_enc, false); _sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0); - /* check for outstanding TX */ - if (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc)) - atomic_add_unless(&phys_enc->pending_kickoff_cnt, 1, 1); - spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + do { + udelay(SDE_ENC_MAX_POLL_TIMEOUT_US); + if ((trial * SDE_ENC_MAX_POLL_TIMEOUT_US) + > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) { + SDE_ERROR_CMDENC(cmd_enc, + "disable autorefresh failed\n"); + break; + } - /* wait for ppdone if necessary due to catching ongoing TX */ - if (_sde_encoder_phys_cmd_wait_for_idle(phys_enc)) - SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n", - phys_enc->hw_pp->idx - PINGPONG_0); + trial++; + } while (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc)); + + sde_encoder_phys_cmd_connect_te(phys_enc, true); SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n"); } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 6b07602d64ab36444c73320e6e9b2a41637109b3..1575cbcae87775809264c0597c712c93b72b350c 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -332,7 +332,7 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc, rot_fetch_lines, vfp_fetch_lines, rot_fetch_start_vsync_counter); - if (!phys_enc->sde_kms->splash_data.cont_splash_en) { + if (!phys_enc->cont_splash_enabled) { SDE_EVT32(DRMID(phys_enc->parent), f.enable, f.fetch_start); phys_enc->hw_ctl->ops.update_bitmask_intf( @@ -476,7 +476,7 @@ static void sde_encoder_phys_vid_setup_timing_engine( vid_enc->timing_params = timing_params; - if (phys_enc->sde_kms->splash_data.cont_splash_en) { + if (phys_enc->cont_splash_enabled) { SDE_DEBUG_VIDENC(vid_enc, "skipping intf programming since cont splash is enabled\n"); goto exit; @@ -818,7 +818,7 @@ 
static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) /* reset state variables until after first update */ vid_enc->rot_fetch_valid = false; - if (!phys_enc->sde_kms->splash_data.cont_splash_en) + if (!phys_enc->cont_splash_enabled) sde_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx); @@ -840,8 +840,7 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) * skip flushing intf during cont. splash handoff since bootloader * has already enabled the hardware and is single buffered. */ - - if (phys_enc->sde_kms->splash_data.cont_splash_en) { + if (phys_enc->cont_splash_enabled) { SDE_DEBUG_VIDENC(vid_enc, "skipping intf flush bit set as cont. splash is enabled\n"); goto skip_flush; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index 343a53cb9d6ac0991ff34a742ab2db62296d3514..653aabe94a649bff94711d3d3829108702a878d2 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -1120,18 +1120,14 @@ static void sde_encoder_phys_wb_irq_ctrl( if (enable) { sde_encoder_helper_register_irq(phys, INTR_IDX_WB_DONE); - if (phys->in_clone_mode) { - for (index = 0; index < CRTC_DUAL_MIXERS; index++) - sde_encoder_helper_register_irq(phys, - cwb_irq_tbl[index + pp]); - } + for (index = 0; index < CRTC_DUAL_MIXERS; index++) + sde_encoder_helper_register_irq(phys, + cwb_irq_tbl[index + pp]); } else { sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE); - if (phys->in_clone_mode) { - for (index = 0; index < CRTC_DUAL_MIXERS; index++) - sde_encoder_helper_unregister_irq(phys, - cwb_irq_tbl[index + pp]); - } + for (index = 0; index < CRTC_DUAL_MIXERS; index++) + sde_encoder_helper_unregister_irq(phys, + cwb_irq_tbl[index + pp]); } } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 8593bbe10d0717db48ba74c1d8cae4426572958b..65c8cf76da3982b2108294c61e5061a5c29280af 
100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -333,6 +333,7 @@ enum { MIXER_PAIR_MASK, MIXER_BLOCKS, MIXER_DISP, + MIXER_CWB, MIXER_PROP_MAX, }; @@ -571,6 +572,8 @@ static struct sde_prop_type mixer_prop[] = { {MIXER_BLOCKS, "qcom,sde-mixer-blocks", false, PROP_TYPE_NODE}, {MIXER_DISP, "qcom,sde-mixer-display-pref", false, PROP_TYPE_STRING_ARRAY}, + {MIXER_CWB, "qcom,sde-mixer-cwb-pref", false, + PROP_TYPE_STRING_ARRAY}, }; static struct sde_prop_type mixer_blocks_prop[] = { @@ -1626,6 +1629,7 @@ static int sde_mixer_parse_dt(struct device_node *np, for (i = 0, mixer_count = 0, pp_idx = 0, dspp_idx = 0, ds_idx = 0; i < off_count; i++) { const char *disp_pref = NULL; + const char *cwb_pref = NULL; mixer_base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i); if (!mixer_base) @@ -1673,6 +1677,11 @@ static int sde_mixer_parse_dt(struct device_node *np, if (disp_pref && !strcmp(disp_pref, "primary")) set_bit(SDE_DISP_PRIMARY_PREF, &mixer->features); + of_property_read_string_index(np, + mixer_prop[MIXER_CWB].prop_name, i, &cwb_pref); + if (cwb_pref && !strcmp(cwb_pref, "cwb")) + set_bit(SDE_DISP_CWB_PREF, &mixer->features); + mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0 : PINGPONG_MAX; mixer->dspp = dspp_count > 0 ? 
dspp_idx + DSPP_0 diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index bf446df4f73fba4b043b0c5fe8bf81a43165065f..ab8d688b78e8f37da181e4ad8195cf1a9bea9e6c 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -203,6 +203,7 @@ enum { * @SDE_MIXER_SOURCESPLIT Layer mixer supports source-split configuration * @SDE_MIXER_GC Gamma correction block * @SDE_DIM_LAYER Layer mixer supports dim layer + * @SDE_DISP_CWB_PREF Layer mixer preferred for CWB * @SDE_DISP_PRIMARY_PREF Layer mixer preferred for primary display * @SDE_MIXER_MAX maximum value */ @@ -212,6 +213,7 @@ enum { SDE_MIXER_GC, SDE_DIM_LAYER, SDE_DISP_PRIMARY_PREF, + SDE_DISP_CWB_PREF, SDE_MIXER_MAX }; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index 2e46d6cb52132561c79abf63aacb005aeabc2239..66c62038275444b02eb2b910d1271d1046624b20 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -57,6 +57,9 @@ #define CTL_FLUSH_MASK_ROT BIT(27) #define CTL_FLUSH_MASK_CTL BIT(17) +#define CTL_NUM_EXT 4 +#define CTL_SSPP_MAX_RECTS 2 + #define SDE_REG_RESET_TIMEOUT_US 2000 #define UPDATE_MASK(m, idx, en) \ @@ -142,6 +145,40 @@ static const u32 cdm_flush_tbl[CDM_MAX] = {SDE_NONE, 0}; static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2, 3, 4, 5}; +/** + * struct ctl_sspp_stage_reg_map: Describes bit layout for a sspp stage cfg + * @ext: Index to indicate LAYER_x_EXT id for given sspp + * @start: Start position of blend stage bits for given sspp + * @bits: Number of bits from @start assigned for given sspp + * @sec_bit_mask: Bitmask to add to LAYER_x_EXT1 for missing bit of sspp + */ +struct ctl_sspp_stage_reg_map { + u32 ext; + u32 start; + u32 bits; + u32 sec_bit_mask; +}; + +/* list of ctl_sspp_stage_reg_map for all the sppp */ +static const struct ctl_sspp_stage_reg_map +sspp_reg_cfg_tbl[SSPP_MAX][CTL_SSPP_MAX_RECTS] = 
{ + /* SSPP_NONE */{ {0, 0, 0, 0}, {0, 0, 0, 0} }, + /* SSPP_VIG0 */{ {0, 0, 3, BIT(0)}, {3, 0, 4, 0} }, + /* SSPP_VIG1 */{ {0, 3, 3, BIT(2)}, {3, 4, 4, 0} }, + /* SSPP_VIG2 */{ {0, 6, 3, BIT(4)}, {3, 8, 4, 0} }, + /* SSPP_VIG3 */{ {0, 26, 3, BIT(6)}, {3, 12, 4, 0} }, + /* SSPP_RGB0 */{ {0, 9, 3, BIT(8)}, {0, 0, 0, 0} }, + /* SSPP_RGB1 */{ {0, 12, 3, BIT(10)}, {0, 0, 0, 0} }, + /* SSPP_RGB2 */{ {0, 15, 3, BIT(12)}, {0, 0, 0, 0} }, + /* SSPP_RGB3 */{ {0, 29, 3, BIT(14)}, {0, 0, 0, 0} }, + /* SSPP_DMA0 */{ {0, 18, 3, BIT(16)}, {2, 8, 4, 0} }, + /* SSPP_DMA1 */{ {0, 21, 3, BIT(18)}, {2, 12, 4, 0} }, + /* SSPP_DMA2 */{ {2, 0, 4, 0}, {2, 16, 4, 0} }, + /* SSPP_DMA3 */{ {2, 4, 4, 0}, {2, 20, 4, 0} }, + /* SSPP_CURSOR0 */{ {1, 20, 4, 0}, {0, 0, 0, 0} }, + /* SSPP_CURSOR1 */{ {0, 26, 4, 0}, {0, 0, 0, 0} } +}; + /** * Individual flush bit in CTL_FLUSH */ @@ -880,6 +917,52 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx, SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3); } +static u32 sde_hw_ctl_get_staged_sspp(struct sde_hw_ctl *ctx, enum sde_lm lm, + struct sde_sspp_index_info *info, u32 info_max_cnt) +{ + int i, j; + u32 count = 0; + u32 mask = 0; + bool staged; + u32 mixercfg[CTL_NUM_EXT]; + struct sde_hw_blk_reg_map *c; + const struct ctl_sspp_stage_reg_map *sspp_cfg; + + if (!ctx || (lm >= LM_MAX) || !info) + return count; + + c = &ctx->hw; + mixercfg[0] = SDE_REG_READ(c, CTL_LAYER(lm)); + mixercfg[1] = SDE_REG_READ(c, CTL_LAYER_EXT(lm)); + mixercfg[2] = SDE_REG_READ(c, CTL_LAYER_EXT2(lm)); + mixercfg[3] = SDE_REG_READ(c, CTL_LAYER_EXT3(lm)); + + for (i = SSPP_VIG0; i < SSPP_MAX; i++) { + for (j = 0; j < CTL_SSPP_MAX_RECTS; j++) { + if (count >= info_max_cnt) + goto end; + + sspp_cfg = &sspp_reg_cfg_tbl[i][j]; + if (!sspp_cfg->bits || sspp_cfg->ext >= CTL_NUM_EXT) + continue; + + mask = ((0x1 << sspp_cfg->bits) - 1) << sspp_cfg->start; + staged = mixercfg[sspp_cfg->ext] & mask; + if (!staged) + staged = mixercfg[1] & sspp_cfg->sec_bit_mask; + + if 
(staged) { + info[count].sspp = i; + info[count].is_virtual = j; + count++; + } + } + } + +end: + return count; +} + static int sde_hw_ctl_intf_cfg_v1(struct sde_hw_ctl *ctx, struct sde_hw_intf_cfg_v1 *cfg) { @@ -1196,6 +1279,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, ops->wait_reset_status = sde_hw_ctl_wait_reset_status; ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages; ops->setup_blendstage = sde_hw_ctl_setup_blendstage; + ops->get_staged_sspp = sde_hw_ctl_get_staged_sspp; ops->update_bitmask_sspp = sde_hw_ctl_update_bitmask_sspp; ops->update_bitmask_mixer = sde_hw_ctl_update_bitmask_mixer; ops->update_bitmask_dspp = sde_hw_ctl_update_bitmask_dspp; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h index 63a098e613aa43bde3a5b34fde5130c555e38974..240277f3fdc33189c088511eb701e45361ba197e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h @@ -448,6 +448,22 @@ struct sde_hw_ctl_ops { void (*setup_blendstage)(struct sde_hw_ctl *ctx, enum sde_lm lm, struct sde_hw_stage_cfg *cfg); + /** + * Get all the sspp staged on a layer mixer + * @ctx : ctl path ctx pointer + * @lm : layer mixer enumeration + * @info : array address to populate connected sspp index info + * @info_max_cnt : maximum sspp info elements based on array size + * @Return: count of sspps info elements populated + */ + u32 (*get_staged_sspp)(struct sde_hw_ctl *ctx, enum sde_lm lm, + struct sde_sspp_index_info *info, u32 info_max_cnt); + + /** + * Setup the stream buffer config like rotation mode + * @ctx : ctl path ctx pointer + * Returns: 0 on success or -error + */ int (*setup_sbuf_cfg)(struct sde_hw_ctl *ctx, struct sde_ctl_sbuf_cfg *cfg); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 9ab6e6b07c2a5200fe41185735a6d771ef931c53..2036ef8b106dfb2725c04fe60f8ee8e524e5b252 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ 
b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -175,7 +175,7 @@ static void sde_hw_intf_avr_ctrl(struct sde_hw_intf *ctx, if (avr_params->avr_mode) { avr_ctrl = BIT(0); avr_mode = (avr_params->avr_mode == AVR_ONE_SHOT_MODE) ? - (BIT(1) | BIT(8)) : 0x0; + (BIT(0) | BIT(8)) : 0x0; } SDE_REG_WRITE(c, INTF_AVR_CONTROL, avr_ctrl); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index f64c43f67d8e829237071d0ba935473ecdb6a902..22eb6ae9b67d53d13f7cb0614f8b2f813ae93bc9 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -43,6 +43,9 @@ #define SDE_MAX_DE_CURVES 3 #endif +#define MAX_DSI_DISPLAYS 2 +#define MAX_DATA_PATH_PER_DSIPLAY 2 + enum sde_format_flags { SDE_FORMAT_FLAG_YUV_BIT, SDE_FORMAT_FLAG_DX_BIT, @@ -549,65 +552,75 @@ struct sde_hw_dim_layer { }; /** - * struct sde_splash_lm_hw - Struct contains LM block properties - * @lm_id: stores the current LM ID - * @ctl_id: stores the current CTL ID associated with the LM. 
- * @lm_reg_value:Store the LM block register value + * struct sde_splash_mem - Struct contains splah memory info + * @splash_buf_size: Indicates the size of the memory region + * @splash_buf_base: Address of specific splash memory region + * @ramdump_size: Size of ramdump buffer region + * @ramdump_base: Address of ramdump region reserved by bootloader + * @ref_cnt: Tracks the map count to help in sharing splash memory */ -struct sde_splash_lm_hw { - u8 lm_id; - u8 ctl_id; - u32 lm_reg_value; +struct sde_splash_mem { + u32 splash_buf_size; + unsigned long splash_buf_base; + u32 ramdump_size; + unsigned long ramdump_base; + u32 ref_cnt; }; /** - * struct ctl_top - Struct contains CTL block properties - * @intf_sel: stores the intf selected in the CTL block - * @lm: Pointer to store the list of LMs in the CTL block - * @ctl_lm_cnt: stores the active number of MDSS "LM" blocks in the CTL block + * struct sde_sspp_index_info - Struct containing sspp identifier info + * @sspp: Enum value indicates sspp id + * @is_virtual: Boolean to identify if virtual or base */ -struct ctl_top { - u8 intf_sel; - struct sde_splash_lm_hw lm[LM_MAX - LM_0]; - u8 ctl_lm_cnt; +struct sde_sspp_index_info { + enum sde_sspp sspp; + bool is_virtual; }; /** - * struct sde_splash_data - Struct contains details of continuous splash - * memory region and initial pipeline configuration. - * @resource_handoff_pending: boolean to notify boot up resource handoff - * is pending. 
- * @splash_base: Base address of continuous splash region reserved - * by bootloader - * @splash_size: Size of continuous splash region - * @ramdump_base: Base address of ramdump display region reserved - * by bootloader - * @ramdump_size: Size of ramdump buffer region - * @top: struct ctl_top objects - * @ctl_ids: stores the valid MDSS ctl block ids for the current mode - * @lm_ids: stores the valid MDSS layer mixer block ids for the current mode - * @dsc_ids: stores the valid MDSS DSC block ids for the current mode - * @ctl_top_cnt:stores the active number of MDSS "top" blks of the current mode - * @lm_cnt: stores the active number of MDSS "LM" blks for the current mode - * @dsc_cnt: stores the active number of MDSS "dsc" blks for the current mode - * @cont_splash_en: Stores the cont_splash status (enabled/disabled) - * @single_flush_en: Stores if the single flush is enabled. + * struct sde_splash_data - Struct contains details of resources and hw blocks + * used in continuous splash on a specific display. 
+ * @cont_splash_enabled: Stores the cont_splash status (enabled/disabled) + * @single_flush_en: Stores if the single flush is enabled + * @encoder: Pointer to the drm encoder object used for this display + * @splash: Pointer to struct sde_splash_mem used for this display + * @ctl_ids: Stores the valid MDSS ctl block ids for the current mode + * @lm_ids: Stores the valid MDSS layer mixer block ids for the current mode + * @dsc_ids: Stores the valid MDSS DSC block ids for the current mode + * @pipes: Array of sspp info detected on this display + * @ctl_cnt: Stores the active number of MDSS "top" blks of the current mode + * @lm_cnt: Stores the active number of MDSS "LM" blks for the current mode + * @dsc_cnt: Stores the active number of MDSS "dsc" blks for the current mode + * @pipe_cnt: Stores the active number of "sspp" blks connected */ -struct sde_splash_data { - bool resource_handoff_pending; - unsigned long splash_base; - u32 splash_size; - unsigned long ramdump_base; - u32 ramdump_size; - struct ctl_top top[CTL_MAX - CTL_0]; - u8 ctl_ids[CTL_MAX - CTL_0]; - u8 lm_ids[LM_MAX - LM_0]; - u8 dsc_ids[DSC_MAX - DSC_0]; - u8 ctl_top_cnt; +struct sde_splash_display { + bool cont_splash_enabled; + bool single_flush_en; + struct drm_encoder *encoder; + struct sde_splash_mem *splash; + u8 ctl_ids[MAX_DATA_PATH_PER_DSIPLAY]; + u8 lm_ids[MAX_DATA_PATH_PER_DSIPLAY]; + u8 dsc_ids[MAX_DATA_PATH_PER_DSIPLAY]; + struct sde_sspp_index_info pipes[MAX_DATA_PATH_PER_DSIPLAY]; + u8 ctl_cnt; u8 lm_cnt; u8 dsc_cnt; - bool cont_splash_en; - bool single_flush_en; + u8 pipe_cnt; +}; + +/** + * struct sde_splash_data - Struct contains details of continuous splash + * for all the displays connected by probe time + * @num_splash_regions: Indicates number of splash memory regions from dtsi + * @num_splash_displays: Indicates count of active displays in continuous splash + * @splash_mem: Array of all struct sde_splash_mem listed from dtsi + * @splash_display: Array of all struct 
sde_splash_display + */ +struct sde_splash_data { + u32 num_splash_regions; + u32 num_splash_displays; + struct sde_splash_mem splash_mem[MAX_DSI_DISPLAYS]; + struct sde_splash_display splash_display[MAX_DSI_DISPLAYS]; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index 04bfee10c00b5abe90f39fd2b605e30a8d767253..1aac39f99b9e0ad2ee5c0945e17fad77c63cc630 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -724,6 +724,19 @@ static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx, } } +u32 sde_hw_sspp_get_source_addr(struct sde_hw_pipe *ctx, bool is_virtual) +{ + u32 idx; + u32 offset = 0; + + if (!ctx || _sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx)) + return 0; + + offset = is_virtual ? (SSPP_SRC1_ADDR + idx) : (SSPP_SRC0_ADDR + idx); + + return SDE_REG_READ(&ctx->hw, offset); +} + static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx, struct sde_csc_cfg *data) { @@ -1094,6 +1107,7 @@ static void _setup_layer_ops(struct sde_hw_pipe *c, c->ops.setup_format = sde_hw_sspp_setup_format; c->ops.setup_rects = sde_hw_sspp_setup_rects; c->ops.setup_sourceaddress = sde_hw_sspp_setup_sourceaddress; + c->ops.get_sourceaddress = sde_hw_sspp_get_source_addr; c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill; c->ops.setup_pe = sde_hw_sspp_setup_pe_config; c->ops.setup_secure_address = sde_hw_sspp_setup_secure; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h index 2fd89f7969f4f725d108d2abf91b13ac04a86e74..f97eca2fdecc91b1d57e958665637d4d2d3fe2b2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h @@ -346,6 +346,12 @@ struct sde_hw_sspp_ops { struct sde_hw_pipe_cfg *cfg, enum sde_sspp_multirect_index index); + /* get_sourceaddress - get pipe current source addresses of a plane + * @ctx: Pointer to pipe context + * @is_virtual: If true get address programmed for R1 in multirect + */ + 
u32 (*get_sourceaddress)(struct sde_hw_pipe *ctx, bool is_virtual); + /** * setup_csc - setup color space conversion * @ctx: Pointer to pipe context diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c index d8240eeed17fa912fc991678422cba74af31554c..7584942ac59189b6998e9c376fe2f4a0f58b8a74 100644 --- a/drivers/gpu/drm/msm/sde/sde_irq.c +++ b/drivers/gpu/drm/msm/sde/sde_irq.c @@ -108,7 +108,7 @@ void sde_irq_preinstall(struct msm_kms *kms) } /* disable irq until power event enables it */ - if (!sde_kms->splash_data.cont_splash_en && !sde_kms->irq_enabled) + if (!sde_kms->splash_data.num_splash_displays && !sde_kms->irq_enabled) irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN); } diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 8667a6e5694afd29419d9cdde11d4e8b7ea536e3..2139a8c24c74d04f851182b9bed177e7b9f8e85b 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -492,8 +492,7 @@ static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid) SDE_ERROR("Error:scm_call2, vmid %lld, ret%d\n", desc.args[3], ret); SDE_EVT32(mem_protect_sd_ctrl_id, - desc.args[0], desc.args[3], num_sids, - sec_sid[0], sec_sid[1], ret); + desc.args[0], desc.args[3], num_sids, ret); kfree(sec_sid); return ret; @@ -810,16 +809,23 @@ static int sde_kms_prepare_secure_transition(struct msm_kms *kms, static int _sde_kms_release_splash_buffer(unsigned int mem_addr, unsigned int splash_buffer_size, + unsigned int ramdump_base, unsigned int ramdump_buffer_size) { unsigned long pfn_start, pfn_end, pfn_idx; int ret = 0; - if (!mem_addr || !splash_buffer_size) + if (!mem_addr || !splash_buffer_size) { SDE_ERROR("invalid params\n"); + return -EINVAL; + } - mem_addr += ramdump_buffer_size; - splash_buffer_size -= ramdump_buffer_size; + /* leave ramdump memory only if base address matches */ + if (ramdump_base == mem_addr && + ramdump_buffer_size <= splash_buffer_size) { + mem_addr += 
ramdump_buffer_size; + splash_buffer_size -= ramdump_buffer_size; + } pfn_start = mem_addr >> PAGE_SHIFT; pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT; @@ -836,38 +842,73 @@ static int _sde_kms_release_splash_buffer(unsigned int mem_addr, } -static int _sde_kms_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, - struct sde_splash_data *data) +static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms, + struct sde_splash_mem *splash) { + struct msm_mmu *mmu = NULL; int ret = 0; - if (!mmu || !data) + if (!sde_kms->aspace[0]) { + SDE_ERROR("aspace not found for sde kms node\n"); + return -EINVAL; + } + + mmu = sde_kms->aspace[0]->mmu; + if (!mmu) { + SDE_ERROR("mmu not found for aspace\n"); + return -EINVAL; + } + + if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) { + SDE_ERROR("invalid input params for map\n"); return -EINVAL; + } - ret = mmu->funcs->one_to_one_map(mmu, data->splash_base, - data->splash_base, data->splash_size, + if (!splash->ref_cnt) { + ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base, + splash->splash_buf_base, + splash->splash_buf_size, IOMMU_READ | IOMMU_NOEXEC); - if (ret) - SDE_ERROR("Splash smmu map failed: %d\n", ret); + if (ret) + SDE_ERROR("splash memory smmu map failed:%d\n", ret); + } + + splash->ref_cnt++; + SDE_DEBUG("one2one mapping done for base:%x size:%x ref_cnt:%d\n", + splash->splash_buf_base, + splash->splash_buf_size, + splash->ref_cnt); return ret; } -static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms) +static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms) { - struct sde_splash_data *data; - struct msm_mmu *mmu; - int rc = 0; + int i = 0; + int ret = 0; if (!sde_kms) return -EINVAL; - data = &sde_kms->splash_data; - if (!data) { - SDE_ERROR("Invalid splash data\n"); - return -EINVAL; + for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) { + ret = _sde_kms_splash_mem_get(sde_kms, + sde_kms->splash_data.splash_display[i].splash); + if (ret) + 
return ret; } + return ret; +} + +static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms, + struct sde_splash_mem *splash) +{ + struct msm_mmu *mmu = NULL; + int rc = 0; + + if (!sde_kms) + return -EINVAL; + if (!sde_kms->aspace[0]) { SDE_ERROR("aspace not found for sde kms node\n"); return -EINVAL; @@ -879,13 +920,45 @@ static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms) return -EINVAL; } - if (mmu->funcs && mmu->funcs->one_to_one_unmap) - mmu->funcs->one_to_one_unmap(mmu, data->splash_base, - data->splash_size); + if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_unmap) + return -EINVAL; + + splash->ref_cnt--; + + SDE_DEBUG("splash base:%x refcnt:%d\n", + splash->splash_buf_base, splash->ref_cnt); + + if (!splash->ref_cnt) { + mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base, + splash->splash_buf_size); + rc = _sde_kms_release_splash_buffer(splash->splash_buf_base, + splash->splash_buf_size, splash->ramdump_base, + splash->ramdump_size); + splash->splash_buf_base = 0; + splash->splash_buf_size = 0; + } return rc; } +static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms) +{ + int i = 0; + int ret = 0; + + if (!sde_kms) + return -EINVAL; + + for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) { + ret = _sde_kms_splash_mem_put(sde_kms, + sde_kms->splash_data.splash_display[i].splash); + if (ret) + return ret; + } + + return ret; +} + static void sde_kms_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { @@ -970,59 +1043,56 @@ static void sde_kms_commit(struct msm_kms *kms, } static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms, - struct drm_atomic_state *old_state) + struct drm_crtc *crtc) { - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - bool primary_crtc_active = false; struct msm_drm_private *priv; - int i, rc = 0; + struct sde_splash_display *splash_display; + int i; + + if (!sde_kms || !crtc) + return; priv = sde_kms->dev->dev_private; - if 
(!sde_kms->splash_data.resource_handoff_pending) + SDE_EVT32(crtc->base.id, crtc->state->active, + sde_kms->splash_data.num_splash_displays); + if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays) return; - SDE_EVT32(SDE_EVTLOG_FUNC_CASE1); - for_each_crtc_in_state(old_state, crtc, crtc_state, i) { - if (crtc->state->active) - primary_crtc_active = true; - SDE_EVT32(crtc->base.id, crtc->state->active); + for (i = 0; i < MAX_DSI_DISPLAYS; i++) { + splash_display = &sde_kms->splash_data.splash_display[i]; + if (splash_display->encoder && + crtc == splash_display->encoder->crtc) + break; } - if (!primary_crtc_active) { - SDE_EVT32(SDE_EVTLOG_FUNC_CASE2); + if (i >= MAX_DSI_DISPLAYS) return; - } - sde_kms->splash_data.resource_handoff_pending = false; + _sde_kms_splash_mem_put(sde_kms, splash_display->splash); - if (sde_kms->splash_data.cont_splash_en) { - SDE_DEBUG("disabling cont_splash feature\n"); - sde_kms->splash_data.cont_splash_en = false; + if (splash_display->cont_splash_enabled) { + sde_encoder_update_caps_for_cont_splash(splash_display->encoder, + splash_display, false); + splash_display->cont_splash_enabled = false; + sde_kms->splash_data.num_splash_displays--; + SDE_DEBUG("cont_splash handoff done for dpy:%d remaining:%d\n", + i, sde_kms->splash_data.num_splash_displays); + memset(splash_display, 0x0, sizeof(struct sde_splash_display)); - for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) - sde_power_data_bus_set_quota(&priv->phandle, - sde_kms->core_client, - SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i, - SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA, - SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA); - - sde_power_resource_enable(&priv->phandle, sde_kms->core_client, - false); } - if (sde_kms->splash_data.splash_base) { - _sde_kms_splash_smmu_unmap(sde_kms); + /* remove the votes if all displays are done with splash */ + if (!sde_kms->splash_data.num_splash_displays) { + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) + 
sde_power_data_bus_set_quota(&priv->phandle, + sde_kms->core_client, + SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i, + SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA, + SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA); - rc = _sde_kms_release_splash_buffer( - sde_kms->splash_data.splash_base, - sde_kms->splash_data.splash_size, - sde_kms->splash_data.ramdump_size); - if (rc) - pr_err("failed to release splash memory\n"); - sde_kms->splash_data.splash_base = 0; - sde_kms->splash_data.splash_size = 0; + sde_power_resource_enable(&priv->phandle, + sde_kms->core_client, false); } } @@ -1075,7 +1145,8 @@ static void sde_kms_complete_commit(struct msm_kms *kms, sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false); - _sde_kms_release_splash_resource(sde_kms, old_state); + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) + _sde_kms_release_splash_resource(sde_kms, crtc); SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT); SDE_ATRACE_END("sde_kms_complete_commit"); @@ -1665,8 +1736,10 @@ void sde_kms_timeline_status(struct drm_device *dev) *Probably locked from last close dumping status anyway */ SDE_ERROR("dumping conn_timeline without mode_config lock\n"); + drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(conn, &conn_iter) sde_conn_timeline_status(conn); + drm_connector_list_iter_end(&conn_iter); return; } @@ -1741,10 +1814,8 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms, &priv->phandle, sde_kms->power_event); _sde_kms_release_displays(sde_kms); - (void)_sde_kms_release_splash_buffer( - sde_kms->splash_data.splash_base, - sde_kms->splash_data.splash_size, - sde_kms->splash_data.ramdump_size); + + _sde_kms_unmap_all_splash_regions(sde_kms); /* safe to call these more than once during shutdown */ _sde_debugfs_destroy(sde_kms); @@ -2326,6 +2397,52 @@ static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file) } +static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms, + struct sde_splash_display *splash_display, 
+ struct drm_crtc *crtc) +{ + struct msm_drm_private *priv; + struct drm_plane *plane; + struct sde_splash_mem *splash; + enum sde_sspp plane_id; + bool is_virtual; + int i, j; + + if (!sde_kms || !splash_display || !crtc) { + SDE_ERROR("invalid input args\n"); + return -EINVAL; + } + + priv = sde_kms->dev->dev_private; + for (i = 0; i < priv->num_planes; i++) { + plane = priv->planes[i]; + plane_id = sde_plane_pipe(plane); + is_virtual = is_sde_plane_virtual(plane); + splash = splash_display->splash; + + for (j = 0; j < splash_display->pipe_cnt; j++) { + if ((plane_id != splash_display->pipes[j].sspp) || + (splash_display->pipes[j].is_virtual + != is_virtual)) + continue; + + if (splash && sde_plane_validate_src_addr(plane, + splash->splash_buf_base, + splash->splash_buf_size)) { + SDE_ERROR("invalid adr on pipe:%d crtc:%d\n", + plane_id, crtc->base.id); + } + + plane->crtc = crtc; + plane->state->crtc = crtc; + SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n", + crtc->base.id, plane_id, is_virtual); + } + } + + return 0; +} + static int sde_kms_cont_splash_config(struct msm_kms *kms) { void *display; @@ -2341,6 +2458,7 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms) struct drm_connector_list_iter conn_iter; struct drm_connector *connector = NULL; struct sde_connector *sde_conn = NULL; + struct sde_splash_display *splash_display; if (!kms) { SDE_ERROR("invalid kms\n"); @@ -2354,16 +2472,32 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms) return -EINVAL; } - if (!sde_kms->splash_data.cont_splash_en) { + if (!sde_kms->splash_data.num_splash_regions || + !sde_kms->splash_data.num_splash_displays) { DRM_INFO("cont_splash feature not enabled\n"); return rc; } - /* Currently, we only support one dsi display configuration */ + if (sde_kms->dsi_display_count != + sde_kms->splash_data.num_splash_displays) { + SDE_ERROR("mismatch - displays:%d vs splash-displays:%d\n", + sde_kms->dsi_display_count, + sde_kms->splash_data.num_splash_displays); + 
return rc; + } + /* dsi */ for (i = 0; i < sde_kms->dsi_display_count; ++i) { display = sde_kms->dsi_displays[i]; dsi_display = (struct dsi_display *)display; + splash_display = &sde_kms->splash_data.splash_display[i]; + + if (!splash_display->cont_splash_enabled) { + SDE_DEBUG("display->name = %s splash not enabled\n", + dsi_display->name); + continue; + } + SDE_DEBUG("display->name = %s\n", dsi_display->name); if (dsi_display->bridge->base.encoder) { @@ -2380,79 +2514,88 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms) SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n", ((info.is_connected) ? "true" : "false"), ((info.is_primary) ? "true" : "false")); - break; - } - if (!encoder) { - SDE_ERROR("encoder not initialized\n"); - return -EINVAL; - } - - priv = sde_kms->dev->dev_private; - encoder->crtc = priv->crtcs[0]; - crtc = encoder->crtc; - SDE_DEBUG("crtc id = %d\n", crtc->base.id); + if (!encoder) { + SDE_ERROR("encoder not initialized\n"); + return -EINVAL; + } + priv = sde_kms->dev->dev_private; + encoder->crtc = priv->crtcs[i]; + crtc = encoder->crtc; + splash_display->encoder = encoder; + + SDE_DEBUG("for dsi-display:%d crtc id = %d enc id =%d\n", + i, crtc->base.id, encoder->base.id); + + mutex_lock(&dev->mode_config.mutex); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + /** + * SDE_KMS doesn't attach more than one encoder to + * a DSI connector. So it is safe to check only with + * the first encoder entry. Revisit this logic if we + * ever have to support continuous splash for + * external displays in MST configuration. + */ + if (connector->encoder_ids[0] == encoder->base.id) + break; + } + drm_connector_list_iter_end(&conn_iter); - mutex_lock(&dev->mode_config.mutex); - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - /** - * SDE_KMS doesn't attach more than one encoder to - * a DSI connector. 
So it is safe to check only with - * the first encoder entry. Revisit this logic if we - * ever have to support continuous splash for - * external displays in MST configuration. - */ - if (connector->encoder_ids[0] == encoder->base.id) - break; - } - drm_connector_list_iter_end(&conn_iter); + if (!connector) { + SDE_ERROR("connector not initialized\n"); + mutex_unlock(&dev->mode_config.mutex); + return -EINVAL; + } - if (!connector) { - SDE_ERROR("connector not initialized\n"); + if (connector->funcs->fill_modes) { + connector->funcs->fill_modes(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + } else { + SDE_ERROR("fill_modes api not defined\n"); + mutex_unlock(&dev->mode_config.mutex); + return -EINVAL; + } mutex_unlock(&dev->mode_config.mutex); - return -EINVAL; - } - if (connector->funcs->fill_modes) { - connector->funcs->fill_modes(connector, - dev->mode_config.max_width, - dev->mode_config.max_height); - } else { - SDE_ERROR("fill_modes api not defined\n"); - mutex_unlock(&dev->mode_config.mutex); - return -EINVAL; - } - mutex_unlock(&dev->mode_config.mutex); + crtc->state->encoder_mask = (1 << drm_encoder_index(encoder)); - crtc->state->encoder_mask = (1 << drm_encoder_index(encoder)); + /* currently consider modes[0] as the preferred mode */ + drm_mode = list_first_entry(&connector->modes, + struct drm_display_mode, head); + SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n", + drm_mode->name, drm_mode->base.id, + drm_mode->type, drm_mode->flags); - /* currently consider modes[0] as the preferred mode */ - drm_mode = list_first_entry(&connector->modes, - struct drm_display_mode, head); - SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n", - drm_mode->name, drm_mode->base.id, - drm_mode->type, drm_mode->flags); + /* Update CRTC drm structure */ + crtc->state->active = true; + rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode); + if (rc) { + SDE_ERROR("Failed: set mode for crtc. 
rc = %d\n", rc); + return rc; + } + drm_mode_copy(&crtc->state->adjusted_mode, drm_mode); + drm_mode_copy(&crtc->mode, drm_mode); - /* Update CRTC drm structure */ - crtc->state->active = true; - rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode); - if (rc) { - SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc); - return rc; - } - drm_mode_copy(&crtc->state->adjusted_mode, drm_mode); - drm_mode_copy(&crtc->mode, drm_mode); + /* Update encoder structure */ + sde_encoder_update_caps_for_cont_splash(encoder, + splash_display, true); - /* Update encoder structure */ - sde_encoder_update_caps_for_cont_splash(encoder); + sde_crtc_update_cont_splash_mixer_settings(crtc); - sde_crtc_update_cont_splash_mixer_settings(crtc); + sde_conn = to_sde_connector(connector); + if (sde_conn && sde_conn->ops.cont_splash_config) + sde_conn->ops.cont_splash_config(sde_conn->display); - sde_conn = to_sde_connector(connector); - if (sde_conn && sde_conn->ops.cont_splash_config) - sde_conn->ops.cont_splash_config(sde_conn->display); + rc = _sde_kms_update_planes_for_cont_splash(sde_kms, + splash_display, crtc); + if (rc) { + SDE_ERROR("Failed: updating plane status rc=%d\n", rc); + return rc; + } + } return rc; } @@ -2467,7 +2610,7 @@ static bool sde_kms_check_for_splash(struct msm_kms *kms) } sde_kms = to_sde_kms(kms); - return sde_kms->splash_data.cont_splash_en; + return sde_kms->splash_data.num_splash_displays; } static int sde_kms_pm_suspend(struct device *dev) @@ -2779,9 +2922,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) /* Mapping splash memory block */ if ((i == MSM_SMMU_DOMAIN_UNSECURE) && - sde_kms->splash_data.splash_base) { - ret = _sde_kms_splash_smmu_map(sde_kms->dev, mmu, - &sde_kms->splash_data); + sde_kms->splash_data.num_splash_regions) { + ret = _sde_kms_map_all_splash_regions(sde_kms); if (ret) { SDE_ERROR("failed to map ret:%d\n", ret); goto fail; @@ -2802,8 +2944,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) return 0; early_map_fail: 
- mmu->funcs->one_to_one_unmap(mmu, sde_kms->splash_data.splash_base, - sde_kms->splash_data.splash_size); + _sde_kms_unmap_all_splash_regions(sde_kms); fail: mmu->funcs->destroy(mmu); _sde_kms_mmu_destroy(sde_kms); @@ -2896,56 +3037,87 @@ static int sde_kms_pd_disable(struct generic_pm_domain *genpd) static int _sde_kms_get_splash_data(struct sde_splash_data *data) { + int i = 0; int ret = 0; struct device_node *parent, *node, *node1; struct resource r, r1; + const char *node_name = "cont_splash_region"; + struct sde_splash_mem *mem; + bool share_splash_mem = false; + int num_displays, num_regions; + struct sde_splash_display *splash_display; if (!data) return -EINVAL; + memset(data, 0, sizeof(*data)); + parent = of_find_node_by_path("/reserved-memory"); if (!parent) { SDE_ERROR("failed to find reserved-memory node\n"); return -EINVAL; } - node = of_find_node_by_name(parent, "cont_splash_region"); + node = of_find_node_by_name(parent, node_name); if (!node) { - SDE_ERROR("failed to find splash memory reservation\n"); - return -EINVAL; - } - - if (of_address_to_resource(node, 0, &r)) { - SDE_ERROR("failed to find data for splash memory\n"); + SDE_ERROR("failed to find node %s\n", node_name); return -EINVAL; } - data->splash_base = (unsigned long)r.start; - data->splash_size = (r.end - r.start) + 1; - node1 = of_find_node_by_name(parent, "disp_rdump_region"); if (!node1) SDE_DEBUG("failed to find disp ramdump memory reservation\n"); - if (!node1 || of_address_to_resource(node1, 0, &r1)) { - SDE_DEBUG("failed to find data for disp ramdump memory\n"); - data->ramdump_base = 0; - data->ramdump_size = 0; - } else { - data->ramdump_base = (unsigned long)r1.start; - data->ramdump_size = (r1.end - r1.start) + 1; + /** + * Support sharing a single splash memory for all the built in displays + * and also independent splash region per displays. 
Incase of + * independent splash region for each connected display, dtsi node of + * cont_splash_region should be collection of all memory regions + * Ex: + */ + num_displays = dsi_display_get_num_of_displays(); + num_regions = of_property_count_u64_elems(node, "reg") / 2; + + data->num_splash_displays = num_displays; + + pr_info("splash mem num_regions:%d\n", num_regions); + if (num_displays > num_regions) { + share_splash_mem = true; + pr_info(":%d displays share same splash buf\n", num_displays); } - if ((data->ramdump_base && data->ramdump_base != data->splash_base) || - (data->ramdump_size > data->splash_size)) { - SDE_ERROR("ramdump/splash buffer addr/size mismatched\n"); - data->ramdump_base = 0; - data->ramdump_size = 0; + for (i = 0; i < num_displays; i++) { + splash_display = &data->splash_display[i]; + if (!i || !share_splash_mem) { + if (of_address_to_resource(node, i, &r)) { + SDE_ERROR("invalid data for:%s\n", node_name); + return -EINVAL; + } + + mem = &data->splash_mem[i]; + if (!node1 || of_address_to_resource(node1, i, &r1)) { + SDE_DEBUG("failed to find ramdump memory\n"); + mem->ramdump_base = 0; + mem->ramdump_size = 0; + } else { + mem->ramdump_base = (unsigned long)r1.start; + mem->ramdump_size = (r1.end - r1.start) + 1; + } + + mem->splash_buf_base = (unsigned long)r.start; + mem->splash_buf_size = (r.end - r.start) + 1; + mem->ref_cnt = 0; + splash_display->splash = mem; + data->num_splash_regions++; + } else { + data->splash_display[i].splash = &data->splash_mem[0]; + } + + pr_info("splash mem for disp:%d add:%lx size:%x\n", (i + 1), + splash_display->splash->splash_buf_base, + splash_display->splash->splash_buf_size); } - pr_info("cont spla base adds:%lx size:%x rdump adds=:%lx size:%x\n", - data->splash_base, data->splash_size, - data->ramdump_base, data->ramdump_size); return ret; } @@ -3081,8 +3253,6 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } - sde_kms->splash_data.resource_handoff_pending = true; - /* 
initialize power domain if defined */ if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) { sde_kms->genpd.name = dev->unique; @@ -3147,7 +3317,7 @@ static int sde_kms_hw_init(struct msm_kms *kms) * Attempt continuous splash handoff only if reserved * splash memory is found. */ - if (sde_kms->splash_data.splash_base) + if (sde_kms->splash_data.num_splash_regions) sde_rm_cont_splash_res_init(priv, &sde_kms->rm, &sde_kms->splash_data, sde_kms->catalog); @@ -3202,6 +3372,7 @@ static int sde_kms_hw_init(struct msm_kms *kms) mutex_init(&sde_kms->secure_transition_lock); atomic_set(&sde_kms->detach_sec_cb, 0); atomic_set(&sde_kms->detach_all_cb, 0); + atomic_set(&sde_kms->pm_qos_counts, 0); /* * Support format modifiers for compression etc. @@ -3217,7 +3388,7 @@ static int sde_kms_hw_init(struct msm_kms *kms) SDE_POWER_EVENT_PRE_DISABLE, sde_kms_handle_power_event, sde_kms, "kms"); - if (sde_kms->splash_data.cont_splash_en) { + if (sde_kms->splash_data.num_splash_displays) { SDE_DEBUG("Skipping MDP Resources disable\n"); } else { for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index a4129931cfa73c5c9ee42bd300c893fb973eafb6..5fa6e64760f4608ad4997fba64d6f8e1deb19f70 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -225,6 +225,7 @@ struct sde_kms { struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX]; struct sde_power_client *core_client; struct pm_qos_request pm_qos_cpu_req; + atomic_t pm_qos_counts; struct sde_power_event *power_event; diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 496893ea5be4c5b6d37fb6c73370c8120b956900..437ea3d7824548e4d0115140cedce270535f53c7 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -3399,6 +3399,28 @@ static void _sde_plane_sspp_atomic_check_mode_changed(struct sde_plane *psde, } } +int 
sde_plane_validate_src_addr(struct drm_plane *plane, + unsigned long base_addr, u32 size) +{ + int ret = -EINVAL; + u32 addr; + struct sde_plane *psde = to_sde_plane(plane); + + if (!psde || !base_addr || !size) { + SDE_ERROR_PLANE(psde, "invalid arguments\n"); + return ret; + } + + if (psde->pipe_hw && psde->pipe_hw->ops.get_sourceaddress) { + addr = psde->pipe_hw->ops.get_sourceaddress(psde->pipe_hw, + is_sde_plane_virtual(plane)); + if ((addr >= base_addr) && (addr < (base_addr + size))) + ret = 0; + } + + return ret; +} + static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, struct sde_plane_state *pstate, const struct sde_format *fmt, diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h index 29ff89199c5f3385aa906eb0a1be686a6cae8f82..33f890398363781b1b241a1f3012bdadd1828795 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.h +++ b/drivers/gpu/drm/msm/sde/sde_plane.h @@ -305,6 +305,17 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane); */ void sde_plane_clear_multirect(const struct drm_plane_state *drm_state); +/** + * sde_plane_validate_src_addr - validate if current sspp addr of given + * plane is within the input address range + * @drm_plane: Pointer to DRM plane object + * @base_addr: Start address of the input address range + * @size: Size of the input address range + * @Return: Non-zero if source pipe current address is not in input range + */ +int sde_plane_validate_src_addr(struct drm_plane *plane, + unsigned long base_addr, u32 size); + /** * sde_plane_wait_input_fence - wait for input fence object * @plane: Pointer to DRM plane object diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c index 82891477c403181ea5c6ce8bdbc10be887ddc0ab..28b5966d43d991d146d4390a77045ef690fa78f0 100644 --- a/drivers/gpu/drm/msm/sde/sde_rm.c +++ b/drivers/gpu/drm/msm/sde/sde_rm.c @@ -34,6 +34,7 @@ #define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR)) 
#define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP)) #define RM_RQ_DS(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DS)) +#define RM_RQ_CWB(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_CWB)) #define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \ (t).num_comp_enc == (r).num_enc && \ (t).num_intf == (r).num_intf) @@ -610,16 +611,17 @@ static bool _sde_rm_check_lm_and_get_connected_blks( const struct sde_pingpong_cfg *pp_cfg; struct sde_rm_hw_iter iter; bool is_valid_dspp, is_valid_ds, ret; - u32 display_pref; + u32 display_pref, cwb_pref; *dspp = NULL; *ds = NULL; *pp = NULL; display_pref = lm_cfg->features & BIT(SDE_DISP_PRIMARY_PREF); + cwb_pref = lm_cfg->features & BIT(SDE_DISP_CWB_PREF); - SDE_DEBUG("check lm %d: dspp %d ds %d pp %d display_pref: %d\n", + SDE_DEBUG("check lm %d: dspp %d ds %d pp %d disp_pref: %d cwb_pref%d\n", lm_cfg->id, lm_cfg->dspp, lm_cfg->ds, - lm_cfg->pingpong, display_pref); + lm_cfg->pingpong, display_pref, cwb_pref); /* Check if this layer mixer is a peer of the proposed primary LM */ if (primary_lm) { @@ -661,6 +663,16 @@ static bool _sde_rm_check_lm_and_get_connected_blks( lm_cfg->ds); return ret; } + + /** + * If CWB is enabled and LM is not CWB supported + * then return false. + */ + if (RM_RQ_CWB(reqs) && !cwb_pref) { + SDE_DEBUG("fail: cwb supported lm not allocated\n"); + return false; + } + } else if (!(reqs->hw_res.is_primary && display_pref)) { SDE_DEBUG( "display preference is not met. is_primary: %d display_pref: %d\n", @@ -910,7 +922,8 @@ static int _sde_rm_reserve_ctls( if (top->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit) continue; - } else if (!(reqs->hw_res.is_primary && primary_pref)) { + } else if (!(reqs->hw_res.is_primary && primary_pref) && + !_ctl_ids) { SDE_DEBUG( "display pref not met. 
is_primary: %d primary_pref: %d\n", reqs->hw_res.is_primary, primary_pref); @@ -1158,370 +1171,67 @@ static int _sde_rm_make_next_rsvp( return ret; } -static void _sde_rm_clear_irq_status(struct sde_hw_intr *hw_intr, - int irq_idx_pp_done, int irq_idx_autorefresh) -{ - u32 intr_value = 0; - - if ((irq_idx_pp_done >= 0) && (hw_intr->ops.get_intr_status_nomask)) { - intr_value = hw_intr->ops.get_intr_status_nomask(hw_intr, - irq_idx_pp_done, false); - hw_intr->ops.clear_intr_status_force_mask(hw_intr, - irq_idx_pp_done, intr_value); - } - - if ((irq_idx_autorefresh >= 0) && - (hw_intr->ops.get_intr_status_nomask)) { - intr_value = hw_intr->ops.get_intr_status_nomask(hw_intr, - irq_idx_autorefresh, false); - hw_intr->ops.clear_intr_status_force_mask(hw_intr, - irq_idx_autorefresh, intr_value); - } -} - -static u32 _sde_rm_poll_intr_status_for_cont_splash(struct sde_hw_intr *intr, - int irq_idx_pp_done, int irq_idx_autorefresh, u32 const msec) -{ - int i; - u32 status = 0; - u32 const delay_us = 500; - u32 const timeout_us = msec * 1000; - /* Make sure the status is checked atleast once */ - int loop = max((u32)1, (u32)(timeout_us / delay_us)); - - if (!intr) - return 0; - - for (i = 0; i < loop; i++) { - status = intr->ops.get_intr_status_nomask - (intr, irq_idx_pp_done, false); - - if (status & BIT(irq_idx_pp_done)) { - SDE_DEBUG("pp_done received i=%d, status=0x%x\n", - i, status); - SDE_EVT32(status, i, irq_idx_pp_done); - - if (status & BIT(irq_idx_autorefresh)) - _sde_rm_clear_irq_status(intr, - irq_idx_pp_done, irq_idx_autorefresh); - else - return 0; - } - usleep_range(delay_us, delay_us + 10); - } - - SDE_EVT32(status, irq_idx_pp_done, SDE_EVTLOG_ERROR); - SDE_DEBUG("polling timed out. 
status = 0x%x\n", status); - return -ETIMEDOUT; -} - -static inline bool _sde_rm_autorefresh_validate(struct sde_hw_pingpong *pp, - struct sde_hw_intf *intf, - bool hw_intf_te) -{ - - if ((hw_intf_te && !intf) || - (!hw_intf_te && !pp)) { - SDE_ERROR("autorefresh wrong params!\n"); - return true; - } - - if (hw_intf_te) { - if (!intf->ops.get_autorefresh || - !intf->ops.setup_autorefresh || - !intf->ops.connect_external_te || - !intf->ops.get_vsync_info) { - SDE_ERROR("intf autorefresh apis not supported\n"); - return true; - } - } else { - if (!pp->ops.get_autorefresh || - !pp->ops.setup_autorefresh || - !pp->ops.connect_external_te || - !pp->ops.get_vsync_info) { - SDE_ERROR("pp autorefresh apis not supported\n"); - return true; - } - } - - return false; -} - -static inline void _sde_rm_autorefresh_get_cfg( - struct sde_hw_pingpong *pp, - struct sde_hw_intf *intf, - struct sde_hw_autorefresh *cfg, - bool hw_intf_te) -{ - if (hw_intf_te) - intf->ops.get_autorefresh(intf, cfg); - else - pp->ops.get_autorefresh(pp, cfg); -} - -static inline void _sde_rm_autorefresh_connect_external_te( - struct sde_hw_pingpong *pp, - struct sde_hw_intf *intf, - bool hw_intf_te, - bool enable) -{ - if (hw_intf_te) - intf->ops.connect_external_te(intf, enable); - else - pp->ops.connect_external_te(pp, enable); -} - -static inline void _sde_rm_autorefresh_setup(struct sde_hw_pingpong *pp, - struct sde_hw_intf *intf, - struct sde_hw_autorefresh *cfg, - bool hw_intf_te) -{ - if (hw_intf_te) - intf->ops.setup_autorefresh(intf, cfg); - else - pp->ops.setup_autorefresh(pp, cfg); -} - -static inline void _sde_rm_autorefresh_get_vsync_info( - struct sde_hw_pingpong *pp, - struct sde_hw_intf *intf, - struct sde_hw_pp_vsync_info *info, - bool hw_intf_te) -{ - if (hw_intf_te) - intf->ops.get_vsync_info(intf, info); - else - pp->ops.get_vsync_info(pp, info); -} - -static int _sde_rm_autorefresh_disable(struct sde_hw_pingpong *pp, - struct sde_hw_intf *intf, - struct sde_hw_intr *hw_intr, - bool 
hw_intf_te) -{ - u32 const timeout_ms = 35; /* Max two vsyncs delay */ - int rc = 0, i, loop = 3; - struct sde_hw_pp_vsync_info info; - int irq_idx_pp_done = -1, irq_idx_autorefresh = -1; - struct sde_hw_autorefresh cfg = {0}; - int dbg_idx; - int te_irq_idx; - - if (_sde_rm_autorefresh_validate(pp, intf, hw_intf_te)) - return 0; - - dbg_idx = hw_intf_te ? intf->idx - INTF_0 : pp->idx - PINGPONG_0; - te_irq_idx = hw_intf_te ? intf->idx : pp->idx; - - /* read default autorefresh configuration */ - _sde_rm_autorefresh_get_cfg(pp, intf, &cfg, hw_intf_te); - - if (!cfg.enable) { - SDE_DEBUG("autorefresh already disabled idx:%d\n", - dbg_idx); - SDE_EVT32(dbg_idx, SDE_EVTLOG_FUNC_CASE1); - return 0; - } - - /* disable external TE first */ - _sde_rm_autorefresh_connect_external_te(pp, intf, hw_intf_te, false); - - /* get all IRQ indexes */ - if (hw_intr->ops.irq_idx_lookup) { - irq_idx_pp_done = hw_intr->ops.irq_idx_lookup( - SDE_IRQ_TYPE_PING_PONG_COMP, te_irq_idx); - irq_idx_autorefresh = hw_intr->ops.irq_idx_lookup( - SDE_IRQ_TYPE_PING_PONG_AUTO_REF, te_irq_idx); - SDE_DEBUG("pp_done irq_idx = %d autorefresh irq_idx:%d\n", - irq_idx_pp_done, irq_idx_autorefresh); - } - - /* disable autorefresh */ - cfg.enable = false; - _sde_rm_autorefresh_setup(pp, intf, &cfg, hw_intf_te); - - SDE_EVT32(dbg_idx, irq_idx_pp_done, irq_idx_autorefresh); - _sde_rm_clear_irq_status(hw_intr, irq_idx_pp_done, irq_idx_autorefresh); - - /* - * Check the line count again if - * the line count is equal to the active - * height to make sure their is no - * additional frame updates - */ - for (i = 0; i < loop; i++) { - info.wr_ptr_line_count = 0; - info.rd_ptr_init_val = 0; - _sde_rm_autorefresh_get_vsync_info(pp, intf, &info, hw_intf_te); - - SDE_EVT32(dbg_idx, info.wr_ptr_line_count, - info.rd_ptr_init_val, SDE_EVTLOG_FUNC_CASE1); - - /* wait for read ptr intr */ - rc = _sde_rm_poll_intr_status_for_cont_splash(hw_intr, - irq_idx_pp_done, irq_idx_autorefresh, timeout_ms); - - 
info.wr_ptr_line_count = 0; - info.rd_ptr_init_val = 0; - - _sde_rm_autorefresh_get_vsync_info(pp, intf, &info, hw_intf_te); - - SDE_DEBUG("i=%d, line count=%d\n", i, info.wr_ptr_line_count); - SDE_EVT32(dbg_idx, info.wr_ptr_line_count, - info.rd_ptr_init_val, SDE_EVTLOG_FUNC_CASE2); - - /* log line count and return */ - if (!rc) - break; - /* - * Wait for few milli seconds for line count - * to increase if any frame transfer is - * pending. - */ - usleep_range(3000, 4000); - } - - _sde_rm_autorefresh_connect_external_te(pp, intf, hw_intf_te, true); - - return rc; -} - /** - * sde_rm_get_pp_dsc_for_cont_splash - retrieve the current dsc enabled blocks - * and disable autorefresh if enabled. + * _sde_rm_get_hw_blk_for_cont_splash - retrieve the LM blocks on given CTL + * and populate the connected HW blk ids in sde_splash_display * @rm: Pointer to resource manager structure - * @sde_kms: Pointer to sde kms structure - * @max_dsc_cnt: number of DSC blocks supported in the hw - * @dsc_ids: pointer to store the active DSC block IDs - * return: number of active DSC blocks + * @ctl: Pointer to CTL hardware block + * @splash_display: Pointer to struct sde_splash_display + * return: number of active LM blocks for this CTL block */ -static int _sde_rm_get_pp_dsc_for_cont_splash(struct sde_rm *rm, - struct sde_kms *sde_kms, - int max_dsc_cnt, u8 *dsc_ids) +static int _sde_rm_get_hw_blk_for_cont_splash(struct sde_rm *rm, + struct sde_hw_ctl *ctl, + struct sde_splash_display *splash_display) { - int index = 0; - int value, dsc_cnt = 0; - struct sde_rm_hw_iter iter_pp, intf_iter; - bool hw_intf_te_supported; - struct sde_hw_intr *hw_intr = NULL; + u32 lm_reg; + struct sde_rm_hw_iter iter_lm, iter_pp; + struct sde_hw_pingpong *pp; - if (!rm || !sde_kms || !dsc_ids) { + if (!rm || !ctl || !splash_display) { SDE_ERROR("invalid input parameters\n"); return 0; } - hw_intf_te_supported = sde_hw_intf_te_supported(sde_kms->catalog); - hw_intr = sde_kms->hw_intr; - if (!hw_intr) { - 
SDE_ERROR("hw_intr handler not initialized\n"); - return 0; - } - - SDE_DEBUG("max_dsc_cnt = %d\n", max_dsc_cnt); + sde_rm_init_hw_iter(&iter_lm, 0, SDE_HW_BLK_LM); sde_rm_init_hw_iter(&iter_pp, 0, SDE_HW_BLK_PINGPONG); - while (_sde_rm_get_hw_locked(rm, &iter_pp)) { - struct sde_hw_pingpong *pp = - to_sde_hw_pingpong(iter_pp.blk->hw); - - if (!pp->ops.get_dsc_status) { - SDE_ERROR("get_dsc_status ops not initialized\n"); - return 0; - } - - value = pp->ops.get_dsc_status(pp); - SDE_DEBUG("DSC[%d]=0x%x, dsc_cnt = %d\n", - index, value, dsc_cnt); - if (value) { - dsc_ids[dsc_cnt] = index + DSC_0; - dsc_cnt++; - } - index++; - - if (!hw_intf_te_supported) - _sde_rm_autorefresh_disable(pp, NULL, hw_intr, - hw_intf_te_supported); - } - - sde_rm_init_hw_iter(&intf_iter, 0, SDE_HW_BLK_INTF); - while (_sde_rm_get_hw_locked(rm, &intf_iter)) { - struct sde_hw_intf *intf = - to_sde_hw_intf(intf_iter.blk->hw); - - if (hw_intf_te_supported) - _sde_rm_autorefresh_disable(NULL, intf, hw_intr, - hw_intf_te_supported); - } - - return dsc_cnt; -} + while (_sde_rm_get_hw_locked(rm, &iter_lm)) { + _sde_rm_get_hw_locked(rm, &iter_pp); -/** - * _sde_rm_get_ctl_lm_for_cont_splash - retrieve the current LM blocks - * @ctl: Pointer to CTL hardware block - * @max_lm_cnt: number of LM blocks supported in the hw - * @lm_cnt: number of LM blocks already active - * @lm_ids: pointer to store the active LM block IDs - * @top: pointer to the current "ctl_top" structure - * @index: ctl_top index - * return: number of active LM blocks for this CTL block - */ -static int _sde_rm_get_ctl_lm_for_cont_splash(struct sde_hw_ctl *ctl, - int max_lm_cnt, u8 lm_cnt, - u8 *lm_ids, struct ctl_top *top, - int index) -{ - int j; - struct sde_splash_lm_hw *lm; + if (splash_display->lm_cnt >= MAX_DATA_PATH_PER_DSIPLAY) + break; - if (!ctl || !top || !lm_ids) { - SDE_ERROR("invalid input parameters\n"); - return 0; - } + lm_reg = ctl->ops.read_ctl_layers(ctl, iter_lm.blk->id); + if (!lm_reg) + continue; - lm = 
top->lm; - for (j = 0; j < max_lm_cnt; j++) { - lm[top->ctl_lm_cnt].lm_reg_value = - ctl->ops.read_ctl_layers(ctl, j + LM_0); - SDE_DEBUG("ctl[%d]_top --> lm[%d]=0x%x, j=%d\n", - index, top->ctl_lm_cnt, - lm[top->ctl_lm_cnt].lm_reg_value, j); - SDE_DEBUG("lm_cnt = %d\n", lm_cnt); - if (lm[top->ctl_lm_cnt].lm_reg_value) { - lm[top->ctl_lm_cnt].ctl_id = index; - lm_ids[lm_cnt++] = j + LM_0; - lm[top->ctl_lm_cnt].lm_id = j + LM_0; - SDE_DEBUG("ctl_id=%d, lm[%d].lm_id = %d\n", - lm[top->ctl_lm_cnt].ctl_id, - top->ctl_lm_cnt, - lm[top->ctl_lm_cnt].lm_id); - top->ctl_lm_cnt++; + splash_display->lm_ids[splash_display->lm_cnt++] = + iter_lm.blk->id; + SDE_DEBUG("lm_cnt=%d lm_reg[%d]=0x%x\n", splash_display->lm_cnt, + iter_lm.blk->id - LM_0, lm_reg); + + if (ctl->ops.get_staged_sspp && + ctl->ops.get_staged_sspp(ctl, iter_lm.blk->id, + &splash_display->pipes[ + splash_display->pipe_cnt], 1)) { + splash_display->pipe_cnt++; + } else { + SDE_ERROR("no pipe detected on LM-%d\n", + iter_lm.blk->id - LM_0); + return 0; } - } - return top->ctl_lm_cnt; -} -/** - * _sde_rm_get_ctl_top_for_cont_splash - retrieve the current LM blocks - * @ctl: Pointer to CTL hardware block - * @top: pointer to the current "ctl_top" structure thats needs update - * @index: ctl_top index - */ -static void _sde_rm_get_ctl_top_for_cont_splash(struct sde_hw_ctl *ctl, - struct ctl_top *top) -{ - if (!ctl || !top) { - SDE_ERROR("invalid input parameters\n"); - return; - } - - if (!ctl->ops.get_ctl_intf) { - SDE_ERROR("get_ctl_intf not initialized\n"); - return; + pp = to_sde_hw_pingpong(iter_pp.blk->hw); + if (pp && pp->ops.get_dsc_status && + pp->ops.get_dsc_status(pp)) { + splash_display->dsc_ids[splash_display->dsc_cnt++] = + iter_pp.blk->id; + SDE_DEBUG("lm/pp[%d] path, using dsc[%d]\n", + iter_lm.blk->id - LM_0, + iter_pp.blk->id - DSC_0); + } } - top->intf_sel = ctl->ops.get_ctl_intf(ctl); - - SDE_DEBUG("id=%d intf_sel=%d\n", ctl->idx, top->intf_sel); + return splash_display->lm_cnt; } int 
sde_rm_cont_splash_res_init(struct msm_drm_private *priv, @@ -1533,6 +1243,8 @@ int sde_rm_cont_splash_res_init(struct msm_drm_private *priv, int index = 0, ctl_top_cnt; struct sde_kms *sde_kms = NULL; struct sde_hw_mdp *hw_mdp; + struct sde_splash_display *splash_display; + u8 intf_sel; if (!priv || !rm || !cat || !splash_data) { SDE_ERROR("invalid input parameters\n"); @@ -1552,53 +1264,44 @@ int sde_rm_cont_splash_res_init(struct msm_drm_private *priv, } sde_kms = to_sde_kms(priv->kms); - if (ctl_top_cnt > ARRAY_SIZE(splash_data->top)) { - SDE_ERROR("Mismatch in ctl_top array size\n"); - return -EINVAL; - } + hw_mdp = sde_rm_get_mdp(rm); sde_rm_init_hw_iter(&iter_c, 0, SDE_HW_BLK_CTL); while (_sde_rm_get_hw_locked(rm, &iter_c)) { struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter_c.blk->hw); - _sde_rm_get_ctl_top_for_cont_splash(ctl, - &splash_data->top[index]); - if (splash_data->top[index].intf_sel) { - splash_data->lm_cnt += - _sde_rm_get_ctl_lm_for_cont_splash - (ctl, - cat->mixer_count, - splash_data->lm_cnt, - splash_data->lm_ids, - &splash_data->top[index], index); - splash_data->ctl_ids[splash_data->ctl_top_cnt] - = index + CTL_0; - splash_data->ctl_top_cnt++; - splash_data->cont_splash_en = true; + if (!ctl->ops.get_ctl_intf) { + SDE_ERROR("get_ctl_intf not initialized\n"); + return -EINVAL; } - index++; - } - /* Skip DSC blk reads if cont_splash is disabled */ - if (!splash_data->cont_splash_en) - return 0; + intf_sel = ctl->ops.get_ctl_intf(ctl); + if (intf_sel) { + splash_display = &splash_data->splash_display[index]; + SDE_DEBUG("finding resources for display=%d ctl=%d\n", + index, iter_c.blk->id - CTL_0); + + _sde_rm_get_hw_blk_for_cont_splash(rm, + ctl, splash_display); + splash_display->cont_splash_enabled = true; + splash_display->ctl_ids[splash_display->ctl_cnt++] = + iter_c.blk->id; + + if (hw_mdp && hw_mdp->ops.get_split_flush_status) { + splash_display->single_flush_en = + hw_mdp->ops.get_split_flush_status( + hw_mdp); + } - splash_data->dsc_cnt = 
- _sde_rm_get_pp_dsc_for_cont_splash(rm, - sde_kms, - cat->dsc_count, - splash_data->dsc_ids); + if (!splash_display->single_flush_en || + (iter_c.blk->id != CTL_0)) + index++; - hw_mdp = sde_rm_get_mdp(rm); - if (hw_mdp && hw_mdp->ops.get_split_flush_status) { - splash_data->single_flush_en = - hw_mdp->ops.get_split_flush_status(hw_mdp); + if (index >= ARRAY_SIZE(splash_data->splash_display)) + break; + } } - SDE_DEBUG("splash_data: ctl_top_cnt=%d, lm_cnt=%d, dsc_cnt=%d sf=%d\n", - splash_data->ctl_top_cnt, splash_data->lm_cnt, - splash_data->dsc_cnt, splash_data->single_flush_en); - return 0; } @@ -1614,6 +1317,7 @@ static int _sde_rm_make_next_rsvp_for_cont_splash( struct sde_rm_topology_def topology; struct msm_drm_private *priv; struct sde_kms *sde_kms; + struct sde_splash_display *splash_display = NULL; int i; if (!enc->dev || !enc->dev->dev_private) { @@ -1627,11 +1331,22 @@ static int _sde_rm_make_next_rsvp_for_cont_splash( } sde_kms = to_sde_kms(priv->kms); - for (i = 0; i < sde_kms->splash_data.lm_cnt; i++) + for (i = 0; i < ARRAY_SIZE(sde_kms->splash_data.splash_display); i++) { + if (enc == sde_kms->splash_data.splash_display[i].encoder) + splash_display = + &sde_kms->splash_data.splash_display[i]; + } + + if (!splash_display) { + SDE_ERROR("invalid splash data for enc:%d\n", enc->base.id); + return -EINVAL; + } + + for (i = 0; i < splash_display->lm_cnt; i++) SDE_DEBUG("splash_data.lm_ids[%d] = %d\n", - i, sde_kms->splash_data.lm_ids[i]); + i, splash_display->lm_ids[i]); - if (sde_kms->splash_data.lm_cnt != + if (splash_display->lm_cnt != reqs->topology->num_lm) SDE_DEBUG("Configured splash screen LMs != needed LM cnt\n"); @@ -1648,11 +1363,11 @@ static int _sde_rm_make_next_rsvp_for_cont_splash( * - Only then allow to grab from mixers with DSPP capability */ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, - sde_kms->splash_data.lm_ids); + splash_display->lm_ids); if (ret && !RM_RQ_DSPP(reqs)) { reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP); ret = 
_sde_rm_reserve_lms(rm, rsvp, reqs, - sde_kms->splash_data.lm_ids); + splash_display->lm_ids); } if (ret) { @@ -1665,17 +1380,17 @@ static int _sde_rm_make_next_rsvp_for_cont_splash( * - Check mixers without Split Display * - Only then allow to grab from CTLs with split display capability */ - for (i = 0; i < sde_kms->splash_data.ctl_top_cnt; i++) + for (i = 0; i < splash_display->ctl_cnt; i++) SDE_DEBUG("splash_data.ctl_ids[%d] = %d\n", - i, sde_kms->splash_data.ctl_ids[i]); + i, splash_display->ctl_ids[i]); _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology, - sde_kms->splash_data.ctl_ids); + splash_display->ctl_ids); if (ret && !reqs->topology->needs_split_display) { memcpy(&topology, reqs->topology, sizeof(topology)); topology.needs_split_display = true; _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology, - sde_kms->splash_data.ctl_ids); + splash_display->ctl_ids); } if (ret) { SDE_ERROR("unable to find appropriate CTL\n"); @@ -1687,12 +1402,12 @@ static int _sde_rm_make_next_rsvp_for_cont_splash( if (ret) return ret; - for (i = 0; i < sde_kms->splash_data.dsc_cnt; i++) + for (i = 0; i < splash_display->dsc_cnt; i++) SDE_DEBUG("splash_data.dsc_ids[%d] = %d\n", - i, sde_kms->splash_data.dsc_ids[i]); + i, splash_display->dsc_ids[i]); ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology, - sde_kms->splash_data.dsc_ids); + splash_display->dsc_ids); if (ret) return ret; @@ -1737,6 +1452,13 @@ static int _sde_rm_populate_requirements( conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI) reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS); + /** + * Set the requirement for LM which has CWB support if CWB is + * found enabled. 
+ */ + if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc)) + reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_CWB); + SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl, reqs->hw_res.display_num_of_h_tiles); SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n", @@ -1927,6 +1649,21 @@ static int _sde_rm_commit_rsvp( return ret; } +static bool sde_rm_is_display_in_cont_splash(struct sde_kms *sde_kms, + struct drm_encoder *enc) +{ + int i; + struct sde_splash_display *splash_dpy; + + for (i = 0; i < MAX_DSI_DISPLAYS; i++) { + splash_dpy = &sde_kms->splash_data.splash_display[i]; + if (splash_dpy->encoder == enc) + return splash_dpy->cont_splash_enabled; + } + + return false; +} + int sde_rm_reserve( struct sde_rm *rm, struct drm_encoder *enc, @@ -1957,7 +1694,7 @@ int sde_rm_reserve( sde_kms = to_sde_kms(priv->kms); /* Check if this is just a page-flip */ - if (!sde_kms->splash_data.cont_splash_en && + if (!sde_rm_is_display_in_cont_splash(sde_kms, enc) && !drm_atomic_crtc_needs_modeset(crtc_state)) return 0; @@ -2009,8 +1746,8 @@ int sde_rm_reserve( } /* Check the proposed reservation, store it in hw's "next" field */ - if (sde_kms->splash_data.cont_splash_en) { - SDE_DEBUG("cont_splash feature enabled\n"); + if (sde_rm_is_display_in_cont_splash(sde_kms, enc)) { + SDE_DEBUG("cont_splash enabled on enc-%d\n", enc->base.id); ret = _sde_rm_make_next_rsvp_for_cont_splash (rm, enc, crtc_state, conn_state, rsvp_nxt, &reqs); } else { diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h index dff00543cbe8930218a8500fbadc45b94df168ca..4c4e34131e1f6c84fbf6faac3eb9dec31ad8453f 100644 --- a/drivers/gpu/drm/msm/sde/sde_rm.h +++ b/drivers/gpu/drm/msm/sde/sde_rm.h @@ -57,12 +57,14 @@ enum sde_rm_topology_name { * reservation list during the AtomicTest phase. 
* @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities * @SDE_RM_TOPCTL_DS : Require layer mixers with DS capabilities + * @SDE_RM_TOPCTL_CWB : Require layer mixers with CWB capabilities */ enum sde_rm_topology_control { SDE_RM_TOPCTL_RESERVE_LOCK, SDE_RM_TOPCTL_RESERVE_CLEAR, SDE_RM_TOPCTL_DSPP, SDE_RM_TOPCTL_DS, + SDE_RM_TOPCTL_CWB, }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c index 31bd54ae2e597b95b6babac2c3237c68de6ce905..7b77aecf8987c590538cd438f13fa2004f05c220 100644 --- a/drivers/gpu/drm/msm/sde/sde_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_wb.c @@ -392,7 +392,7 @@ int sde_wb_connector_set_info_blob(struct drm_connector *connector, wb_dev->wb_cfg->sblk->maxlinewidth); sde_kms_info_start(info, "features"); - if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC)) + if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & BIT(SDE_WB_UBWC))) sde_kms_info_append(info, "wb_ubwc"); sde_kms_info_stop(info); diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c index c1e4019a37933a1e4a3f856b5c6f3d395eb25c16..c4718e11ea6aceff67b0d845d2bd29dfbe9961d3 100644 --- a/drivers/gpu/drm/msm/sde_dbg.c +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -174,6 +174,11 @@ struct sde_dbg_vbif_debug_bus { struct vbif_debug_bus_entry *entries; }; +struct sde_dbg_dsi_debug_bus { + u32 *entries; + u32 size; +}; + /** * struct sde_dbg_regbuf - wraps buffer and tracking params for register dumps * @buf: pointer to allocated memory for storing register dumps in hw recovery @@ -229,6 +234,7 @@ static struct sde_dbg_base { struct sde_dbg_sde_debug_bus dbgbus_sde; struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt; + struct sde_dbg_dsi_debug_bus dbgbus_dsi; bool dump_all; bool dsi_dbg_bus; u32 debugfs_ctrl; @@ -3331,6 +3337,42 @@ static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = { {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */ }; +static u32 dsi_dbg_bus_sdm845[] = { + 0x0001, 0x1001, 0x0001, 0x0011, + 0x1021, 
0x0021, 0x0031, 0x0041, + 0x0051, 0x0061, 0x3061, 0x0061, + 0x2061, 0x2061, 0x1061, 0x1061, + 0x1061, 0x0071, 0x0071, 0x0071, + 0x0081, 0x0081, 0x00A1, 0x00A1, + 0x10A1, 0x20A1, 0x30A1, 0x10A1, + 0x10A1, 0x30A1, 0x20A1, 0x00B1, + 0x00C1, 0x00C1, 0x10C1, 0x20C1, + 0x30C1, 0x00D1, 0x00D1, 0x20D1, + 0x30D1, 0x00E1, 0x00E1, 0x00E1, + 0x00F1, 0x00F1, 0x0101, 0x0101, + 0x1101, 0x2101, 0x3101, 0x0111, + 0x0141, 0x1141, 0x0141, 0x1141, + 0x1141, 0x0151, 0x0151, 0x1151, + 0x2151, 0x3151, 0x0161, 0x0161, + 0x1161, 0x0171, 0x0171, 0x0181, + 0x0181, 0x0191, 0x0191, 0x01A1, + 0x01A1, 0x01B1, 0x01B1, 0x11B1, + 0x21B1, 0x01C1, 0x01C1, 0x11C1, + 0x21C1, 0x31C1, 0x01D1, 0x01D1, + 0x01D1, 0x01D1, 0x11D1, 0x21D1, + 0x21D1, 0x01E1, 0x01E1, 0x01F1, + 0x01F1, 0x0201, 0x0201, 0x0211, + 0x0221, 0x0231, 0x0241, 0x0251, + 0x0281, 0x0291, 0x0281, 0x0291, + 0x02A1, 0x02B1, 0x02C1, 0x0321, + 0x0321, 0x1321, 0x2321, 0x3321, + 0x0331, 0x0331, 0x1331, 0x0341, + 0x0341, 0x1341, 0x2341, 0x3341, + 0x0351, 0x0361, 0x0361, 0x1361, + 0x2361, 0x0371, 0x0381, 0x0391, + 0x03C1, 0x03D1, 0x03E1, 0x03F1, +}; + /** * _sde_dbg_enable_power - use callback to turn power on for hw register access * @enable: whether to turn power on or off @@ -3898,7 +3940,8 @@ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[], _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt); if (sde_dbg_base.dsi_dbg_bus || dump_all) - dsi_ctrl_debug_dump(); + dsi_ctrl_debug_dump(sde_dbg_base.dbgbus_dsi.entries, + sde_dbg_base.dbgbus_dsi.size); if (do_panic && sde_dbg_base.panic_on_err) panic(name); @@ -4990,6 +5033,8 @@ void sde_dbg_init_dbg_buses(u32 hwversion) dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998; dbg->dbgbus_vbif_rt.cmn.entries_size = ARRAY_SIZE(vbif_dbg_bus_msm8998); + dbg->dbgbus_dsi.entries = NULL; + dbg->dbgbus_dsi.size = 0; } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) { dbg->dbgbus_sde.entries = dbg_bus_sde_sdm845; dbg->dbgbus_sde.cmn.entries_size = @@ -5000,6 +5045,8 @@ void 
sde_dbg_init_dbg_buses(u32 hwversion) dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998; dbg->dbgbus_vbif_rt.cmn.entries_size = ARRAY_SIZE(vbif_dbg_bus_msm8998); + dbg->dbgbus_dsi.entries = dsi_dbg_bus_sdm845; + dbg->dbgbus_dsi.size = ARRAY_SIZE(dsi_dbg_bus_sdm845); } else if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion)) { dbg->dbgbus_sde.entries = dbg_bus_sde_sm8150; dbg->dbgbus_sde.cmn.entries_size = @@ -5009,6 +5056,8 @@ void sde_dbg_init_dbg_buses(u32 hwversion) dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998; dbg->dbgbus_vbif_rt.cmn.entries_size = ARRAY_SIZE(vbif_dbg_bus_msm8998); + dbg->dbgbus_dsi.entries = NULL; + dbg->dbgbus_dsi.size = 0; } else { pr_err("unsupported chipset id %X\n", hwversion); } diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h index 00e486bec18c28370bd08b937b81c1e7f83f974b..7c6d1789ae7f9b4d372b475c43d1eb814803480c 100644 --- a/drivers/gpu/drm/msm/sde_dbg.h +++ b/drivers/gpu/drm/msm/sde_dbg.h @@ -345,8 +345,10 @@ void sde_rsc_debug_dump(u32 mux_sel); /** * dsi_ctrl_debug_dump - dump dsi debug dump status + * @entries: array of debug bus control values + * @size: size of the debug bus control array */ -void dsi_ctrl_debug_dump(void); +void dsi_ctrl_debug_dump(u32 *entries, u32 size); #else static inline struct sde_dbg_evtlog *sde_evtlog_init(void) @@ -438,7 +440,7 @@ static inline void sde_rsc_debug_dump(u32 mux_sel) { } -static inline void dsi_ctrl_debug_dump(void) +static inline void dsi_ctrl_debug_dump(u32 entries, u32 size) { } diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c index d72196bff19ff772405b5fecb0e28d9b5b7d3826..db706ad6455464eca332097c17c46d84ce0f6a8c 100644 --- a/drivers/gpu/drm/msm/sde_edid_parser.c +++ b/drivers/gpu/drm/msm/sde_edid_parser.c @@ -179,8 +179,9 @@ static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset, * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block * collection present and no DTD data 
present. */ + if ((dbc_offset == 0) || (dbc_offset == 4)) { - SDE_ERROR("EDID: no DTD or non-DTD data present\n"); + SDE_EDID_DEBUG("EDID: no DTD or non-DTD data present\n"); return NULL; } diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h index 6d028b76cc86cf9eea4239330d2f1b8cd2dab0ed..a27a4b4172857b3cc4f65304d8683a275bf0394f 100644 --- a/drivers/gpu/drm/msm/sde_hdcp.h +++ b/drivers/gpu/drm/msm/sde_hdcp.h @@ -71,6 +71,7 @@ struct sde_hdcp_ops { int (*reauthenticate)(void *input); int (*authenticate)(void *hdcp_ctrl); bool (*feature_supported)(void *input); + void (*force_encryption)(void *input, bool enable); void (*off)(void *hdcp_ctrl); }; diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c index 8badd7a865ee00a91eb071f16bd8c580c3a86c1c..9a1f77178ce989045e515afc32101c6236b2d6e0 100644 --- a/drivers/gpu/drm/msm/sde_hdcp_2x.c +++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c @@ -55,10 +55,14 @@ struct sde_hdcp_2x_ctrl { struct hdcp2_app_data app_data; u32 timeout_left; + u32 wait_timeout_ms; u32 total_message_length; bool no_stored_km; bool feature_supported; + bool force_encryption; bool authenticated; + bool resend_lc_init; + bool resend_stream_manage; void *client_data; void *hdcp2_ctx; struct hdcp_transport_ops *client_ops; @@ -71,7 +75,7 @@ struct sde_hdcp_2x_ctrl { enum sde_hdcp_2x_device_type device_type; struct task_struct *thread; - struct completion topo_wait; + struct completion response_completion; struct kthread_worker worker; struct kthread_work wk_init; @@ -80,6 +84,7 @@ struct sde_hdcp_2x_ctrl { struct kthread_work wk_timeout; struct kthread_work wk_clean; struct kthread_work wk_stream; + struct kthread_work wk_wait; }; static const char *sde_hdcp_2x_message_name(int msg_id) @@ -217,7 +222,10 @@ static int sde_hdcp_2x_get_next_message(struct sde_hdcp_2x_ctrl *hdcp, case LC_INIT: return LC_SEND_L_PRIME; case LC_SEND_L_PRIME: - return SKE_SEND_EKS; + if (hdcp->resend_lc_init) + return LC_INIT; + else + 
return SKE_SEND_EKS; case SKE_SEND_EKS: if (!hdcp->repeater_flag) return SKE_SEND_TYPE_ID; @@ -234,40 +242,72 @@ static int sde_hdcp_2x_get_next_message(struct sde_hdcp_2x_ctrl *hdcp, case REP_SEND_RECV_ID_LIST: return REP_SEND_ACK; case REP_STREAM_MANAGE: - return REP_STREAM_READY; + if (hdcp->resend_stream_manage) + return REP_STREAM_MANAGE; + else + return REP_STREAM_READY; default: pr_err("Uknown message ID (%d)", hdcp->last_msg); return -EINVAL; } } +static void sde_hdcp_2x_wait_for_response(struct sde_hdcp_2x_ctrl *hdcp) +{ + switch (hdcp->last_msg) { + case AKE_SEND_H_PRIME: + if (hdcp->no_stored_km) + hdcp->wait_timeout_ms = HZ; + else + hdcp->wait_timeout_ms = HZ / 4; + break; + case AKE_SEND_PAIRING_INFO: + hdcp->wait_timeout_ms = HZ / 4; + break; + case REP_SEND_RECV_ID_LIST: + if (!hdcp->authenticated) + hdcp->wait_timeout_ms = HZ * 3; + else + hdcp->wait_timeout_ms = 0; + break; + default: + hdcp->wait_timeout_ms = 0; + } + + if (hdcp->wait_timeout_ms) + HDCP_2X_EXECUTE(wait); +} + static void sde_hdcp_2x_wakeup_client(struct sde_hdcp_2x_ctrl *hdcp, struct hdcp_transport_wakeup_data *data) { int rc = 0; - if (hdcp && hdcp->client_ops && hdcp->client_ops->wakeup && - data && (data->cmd != HDCP_TRANSPORT_CMD_INVALID)) { - data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE; - - if (data->cmd == HDCP_TRANSPORT_CMD_SEND_MESSAGE || - data->cmd == HDCP_TRANSPORT_CMD_RECV_MESSAGE || - data->cmd == HDCP_TRANSPORT_CMD_LINK_POLL) { - hdcp->last_msg = - sde_hdcp_2x_get_next_message(hdcp, data); - if (hdcp->last_msg <= INVALID_MESSAGE) { - hdcp->last_msg = INVALID_MESSAGE; - return; - } + if (!hdcp || !hdcp->client_ops || !hdcp->client_ops->wakeup || + !data || (data->cmd == HDCP_TRANSPORT_CMD_INVALID)) + return; + + data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE; - data->message_data = &hdcp_msg_lookup[hdcp->last_msg]; + if (data->cmd == HDCP_TRANSPORT_CMD_SEND_MESSAGE || + data->cmd == HDCP_TRANSPORT_CMD_RECV_MESSAGE || + data->cmd == 
HDCP_TRANSPORT_CMD_LINK_POLL) { + hdcp->last_msg = + sde_hdcp_2x_get_next_message(hdcp, data); + if (hdcp->last_msg <= INVALID_MESSAGE) { + hdcp->last_msg = INVALID_MESSAGE; + return; } - rc = hdcp->client_ops->wakeup(data); - if (rc) - pr_err("error sending %s to client\n", - hdcp_transport_cmd_to_str(data->cmd)); + data->message_data = &hdcp_msg_lookup[hdcp->last_msg]; } + + rc = hdcp->client_ops->wakeup(data); + if (rc) + pr_err("error sending %s to client\n", + hdcp_transport_cmd_to_str(data->cmd)); + + sde_hdcp_2x_wait_for_response(hdcp); } static inline void sde_hdcp_2x_send_message(struct sde_hdcp_2x_ctrl *hdcp) @@ -300,6 +340,19 @@ static bool sde_hdcp_2x_client_feature_supported(void *data) return hdcp2_feature_supported(hdcp->hdcp2_ctx); } +static void sde_hdcp_2x_force_encryption(void *data, bool enable) +{ + struct sde_hdcp_2x_ctrl *hdcp = data; + + if (!hdcp) { + pr_err("invalid input\n"); + return; + } + + hdcp->force_encryption = enable; + pr_info("force_encryption=%d\n", hdcp->force_encryption); +} + static int sde_hdcp_2x_check_valid_state(struct sde_hdcp_2x_ctrl *hdcp) { int rc = 0; @@ -410,6 +463,9 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp) HDCP2_CMD_EN_ENCRYPTION, &hdcp->app_data)) { hdcp->authenticated = true; + if (hdcp->force_encryption) + hdcp2_force_encryption(hdcp->hdcp2_ctx, 1); + cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS; sde_hdcp_2x_wakeup_client(hdcp, &cdata); } @@ -432,7 +488,8 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp) } break; case REP_SEND_ACK: - pr_debug("Repeater authentication successful\n"); + pr_debug("Repeater authentication successful. 
update_stream=%d\n", + hdcp->update_stream); if (hdcp->update_stream) { HDCP_2X_EXECUTE(stream); @@ -572,7 +629,8 @@ static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp) rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_PROCESS_MSG, &hdcp->app_data); if (rc) { - pr_err("failed to process message from sink (%d)\n", rc); + pr_err("failed to process sink's response to %s (%d)\n", + sde_hdcp_2x_message_name(msg[0]), rc); rc = -EINVAL; goto exit; } @@ -584,7 +642,12 @@ static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp) goto exit; } - if (msg[0] == REP_STREAM_READY) { + out_msg = (u32)hdcp->app_data.response.data[0]; + + pr_debug("message received from TZ: %s\n", + sde_hdcp_2x_message_name(out_msg)); + + if (msg[0] == REP_STREAM_READY && out_msg != REP_STREAM_MANAGE) { if (!hdcp->authenticated) { rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_EN_ENCRYPTION, @@ -592,6 +655,10 @@ static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp) if (!rc) { hdcp->authenticated = true; + if (hdcp->force_encryption) + hdcp2_force_encryption( + hdcp->hdcp2_ctx, 1); + cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS; sde_hdcp_2x_wakeup_client(hdcp, &cdata); } else { @@ -605,10 +672,17 @@ static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp) goto exit; } - out_msg = (u32)hdcp->app_data.response.data[0]; + hdcp->resend_lc_init = false; + if (msg[0] == LC_SEND_L_PRIME && out_msg == LC_INIT) { + pr_debug("resend %s\n", sde_hdcp_2x_message_name(out_msg)); + hdcp->resend_lc_init = true; + } - pr_debug("message received from TZ: %s\n", - sde_hdcp_2x_message_name(out_msg)); + hdcp->resend_stream_manage = false; + if (msg[0] == REP_STREAM_READY && out_msg == REP_STREAM_MANAGE) { + pr_debug("resend %s\n", sde_hdcp_2x_message_name(out_msg)); + hdcp->resend_stream_manage = true; + } if (out_msg == AKE_NO_STORED_KM) hdcp->no_stored_km = 1; @@ -643,6 +717,36 @@ static void sde_hdcp_2x_msg_recvd_work(struct kthread_work *work) sde_hdcp_2x_msg_recvd(hdcp); } 
+static void sde_hdcp_2x_wait_for_response_work(struct kthread_work *work) +{ + u32 timeout; + struct sde_hdcp_2x_ctrl *hdcp = container_of(work, + struct sde_hdcp_2x_ctrl, wk_wait); + + if (!hdcp) { + pr_err("invalid input\n"); + return; + } + + if (atomic_read(&hdcp->hdcp_off)) { + pr_debug("invalid state: hdcp off\n"); + return; + } + + reinit_completion(&hdcp->response_completion); + timeout = wait_for_completion_timeout(&hdcp->response_completion, + hdcp->wait_timeout_ms); + if (!timeout) { + pr_err("completion expired, last message = %s\n", + sde_hdcp_2x_message_name(hdcp->last_msg)); + + if (!atomic_read(&hdcp->hdcp_off)) + HDCP_2X_EXECUTE(clean); + } + + hdcp->wait_timeout_ms = 0; +} + static int sde_hdcp_2x_wakeup(struct sde_hdcp_2x_wakeup_data *data) { struct sde_hdcp_2x_ctrl *hdcp; @@ -664,11 +768,14 @@ static int sde_hdcp_2x_wakeup(struct sde_hdcp_2x_wakeup_data *data) pr_debug("%s\n", sde_hdcp_2x_cmd_to_str(hdcp->wakeup_cmd)); rc = sde_hdcp_2x_check_valid_state(hdcp); - if (rc) + if (rc) { + pr_err("invalid state for command=%s\n", + sde_hdcp_2x_cmd_to_str(hdcp->wakeup_cmd)); goto exit; + } - if (!completion_done(&hdcp->topo_wait)) - complete_all(&hdcp->topo_wait); + if (!completion_done(&hdcp->response_completion)) + complete_all(&hdcp->response_completion); switch (hdcp->wakeup_cmd) { case HDCP_2X_CMD_START: @@ -683,7 +790,6 @@ static int sde_hdcp_2x_wakeup(struct sde_hdcp_2x_wakeup_data *data) break; case HDCP_2X_CMD_STOP: atomic_set(&hdcp->hdcp_off, 1); - HDCP_2X_EXECUTE(clean); break; case HDCP_2X_CMD_MSG_SEND_SUCCESS: @@ -740,6 +846,7 @@ int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data) /* populate ops to be called by client */ data->ops->feature_supported = sde_hdcp_2x_client_feature_supported; data->ops->wakeup = sde_hdcp_2x_wakeup; + data->ops->force_encryption = sde_hdcp_2x_force_encryption; hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL); if (!hdcp) { @@ -765,8 +872,9 @@ int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data 
*data) kthread_init_work(&hdcp->wk_timeout, sde_hdcp_2x_timeout_work); kthread_init_work(&hdcp->wk_clean, sde_hdcp_2x_cleanup_work); kthread_init_work(&hdcp->wk_stream, sde_hdcp_2x_query_stream_work); + kthread_init_work(&hdcp->wk_wait, sde_hdcp_2x_wait_for_response_work); - init_completion(&hdcp->topo_wait); + init_completion(&hdcp->response_completion); *data->hdcp_data = hdcp; @@ -780,6 +888,8 @@ int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data) goto error; } + hdcp->force_encryption = false; + return 0; error: kzfree(hdcp); diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h index e1ae8ca1066250c157d3cda2b0066252ba65075c..68a1653f3e303f5c6f91fd27413aa0f937cf4ba6 100644 --- a/drivers/gpu/drm/msm/sde_hdcp_2x.h +++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h @@ -181,7 +181,8 @@ static inline const char *hdcp_transport_cmd_to_str( struct sde_hdcp_2x_ops { int (*wakeup)(struct sde_hdcp_2x_wakeup_data *data); - bool (*feature_supported)(void *phdcpcontext); + bool (*feature_supported)(void *data); + void (*force_encryption)(void *data, bool enable); }; struct hdcp_transport_ops { diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c index 56d5a4a876f4371834ee4b6e8a64e1e24e410c9c..d933dbf82154231f10cc47260edb7c6ec1295f62 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.c +++ b/drivers/gpu/drm/msm/sde_power_handle.c @@ -765,6 +765,12 @@ int sde_power_resource_init(struct platform_device *pdev, } } + if (of_find_property(pdev->dev.of_node, "qcom,dss-cx-ipeak", NULL)) + phandle->dss_cx_ipeak = cx_ipeak_register(pdev->dev.of_node, + "qcom,dss-cx-ipeak"); + else + pr_debug("cx ipeak client parse failed\n"); + INIT_LIST_HEAD(&phandle->power_client_clist); INIT_LIST_HEAD(&phandle->event_list); @@ -829,6 +835,9 @@ void sde_power_resource_deinit(struct platform_device *pdev, } mutex_unlock(&phandle->phandle_lock); + if (phandle->dss_cx_ipeak) + cx_ipeak_unregister(phandle->dss_cx_ipeak); + for (i = 
0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) sde_power_data_bus_unregister(&phandle->data_bus_handle[i]); @@ -1069,11 +1078,47 @@ int sde_power_resource_is_enabled(struct sde_power_handle *phandle) return phandle->current_usecase_ndx != VOTE_INDEX_DISABLE; } +int sde_cx_ipeak_vote(struct sde_power_handle *phandle, struct dss_clk *clock, + u64 requested_clk_rate, u64 prev_clk_rate, bool enable_vote) +{ + int ret = 0; + u64 curr_core_clk_rate, max_core_clk_rate, prev_core_clk_rate; + + if (phandle->dss_cx_ipeak) { + pr_debug("%pS->%s: Invalid input\n", + __builtin_return_address(0), __func__); + return -EINVAL; + } + + if (strcmp("core_clk", clock->clk_name)) { + pr_debug("Not a core clk , cx_ipeak vote not needed\n"); + return -EINVAL; + } + + curr_core_clk_rate = clock->rate; + max_core_clk_rate = clock->max_rate; + prev_core_clk_rate = prev_clk_rate; + + if (enable_vote && requested_clk_rate == max_core_clk_rate && + curr_core_clk_rate != requested_clk_rate) + ret = cx_ipeak_update(phandle->dss_cx_ipeak, true); + else if (!enable_vote && requested_clk_rate != max_core_clk_rate && + prev_core_clk_rate == max_core_clk_rate) + ret = cx_ipeak_update(phandle->dss_cx_ipeak, false); + + if (ret) + SDE_EVT32(ret, enable_vote, requested_clk_rate, + curr_core_clk_rate, prev_core_clk_rate); + + return ret; +} + int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name, u64 rate) { int i, rc = -EINVAL; struct dss_module_power *mp; + u64 prev_clk_rate, requested_clk_rate; if (!phandle) { pr_err("invalid input power handle\n"); @@ -1087,8 +1132,15 @@ int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name, (rate > mp->clk_config[i].max_rate)) rate = mp->clk_config[i].max_rate; + prev_clk_rate = mp->clk_config[i].rate; + requested_clk_rate = rate; + sde_cx_ipeak_vote(phandle, &mp->clk_config[i], + requested_clk_rate, prev_clk_rate, true); mp->clk_config[i].rate = rate; rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (!rc) + 
sde_cx_ipeak_vote(phandle, &mp->clk_config[i], + requested_clk_rate, prev_clk_rate, false); break; } } diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h index 2c8665f9bb2b109d31adbb0a7c2caa4a80d80cce..0daf751776cfd52f4bf7b2b1f161fd25853e3a08 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.h +++ b/drivers/gpu/drm/msm/sde_power_handle.h @@ -26,6 +26,7 @@ #define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA 1800000000 #include +#include /* event will be triggered before power handler disable */ #define SDE_POWER_EVENT_PRE_DISABLE 0x1 @@ -165,6 +166,7 @@ struct sde_power_event { * @event_list: current power handle event list * @rsc_client: sde rsc client pointer * @rsc_client_init: boolean to control rsc client create + * @dss_cx_ipeak: client pointer for cx ipeak driver */ struct sde_power_handle { struct dss_module_power mp; @@ -178,6 +180,7 @@ struct sde_power_handle { struct list_head event_list; struct sde_rsc_client *rsc_client; bool rsc_client_init; + struct cx_ipeak_client *dss_cx_ipeak; }; /** diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 5b9d549aa791f5d02d1abef5e1c96e1aaf4bf2c6..e7926da59214fd6d919cde21f22e97a08f565883 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev) nouveau_display(dev)->init = nv04_display_init; nouveau_display(dev)->fini = nv04_display_fini; + /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */ + dev->driver->driver_features &= ~DRIVER_ATOMIC; + nouveau_hw_save_vga_fonts(dev, 1); nv04_crtc_create(dev, 0); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index debbbf0fd4bdda619732c67952c772f9957c4166..408b955e5c39a6b41043c18fb37ae8dc9de42c04 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -267,6 +267,7 @@ 
nouveau_backlight_init(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; INIT_LIST_HEAD(&drm->bl_connectors); @@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev) return 0; } - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && connector->connector_type != DRM_MODE_CONNECTOR_eDP) continue; @@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev) break; } } - + drm_connector_list_iter_end(&conn_iter); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index c902a851eb5129b45bf1b21810a31c3da4f1d220..430830d63a33dcd7aad0ecf17ed40b8cd5051162 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_connector *nv_connector = NULL; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int type, ret = 0; bool dummy; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { nv_connector = nouveau_connector(connector); - if (nv_connector->index == index) + if (nv_connector->index == index) { + drm_connector_list_iter_end(&conn_iter); return connector; + } } + drm_connector_list_iter_end(&conn_iter); nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); if (!nv_connector) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 
a4d1a059bd3d4f948c36c0a942150c68199ec974..dc7454e7f19aa0ec9f22e279015a0966eedbd531 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -33,6 +33,7 @@ #include #include #include "nouveau_crtc.h" +#include "nouveau_encoder.h" struct nvkm_i2c_port; @@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } +static inline bool +nouveau_connector_is_mst(struct drm_connector *connector) +{ + const struct nouveau_encoder *nv_encoder; + const struct drm_encoder *encoder; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return false; + + nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY); + if (!nv_encoder) + return false; + + encoder = &nv_encoder->base.base; + return encoder->encoder_type == DRM_MODE_ENCODER_DPMST; +} + +#define nouveau_for_each_non_mst_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + for_each_if(!nouveau_connector_is_mst(connector)) + static inline struct nouveau_connector * nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) { struct drm_device *dev = nv_crtc->base.dev; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct nouveau_connector *nv_connector = NULL; struct drm_crtc *crtc = to_drm_crtc(nv_crtc); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder && connector->encoder->crtc == crtc) - return nouveau_connector(connector); + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + if (connector->encoder && connector->encoder->crtc == crtc) { + nv_connector = nouveau_connector(connector); + break; + } } + drm_connector_list_iter_end(&conn_iter); - return NULL; + return nv_connector; } struct drm_connector * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c 
index 2e7785f49e6d54c1c0941c69de2ff0a869cfd3f0..caf53503c0f7a5cea1265c2b333212f76e3b9014 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -405,6 +405,7 @@ nouveau_display_init(struct drm_device *dev) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int ret; ret = disp->init(dev); @@ -412,10 +413,12 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_get(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); /* enable flip completion events */ nvif_notify_get(&drm->flip); @@ -428,6 +431,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -440,10 +444,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) nvif_notify_put(&drm->flip); /* disable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_put(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); drm_kms_helper_poll_disable(dev); disp->fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 595630d1fb9e239b68e6ddc71d1269cafca5ae7b..362a34cb435db7ad2b8dd5810c9dd25a0d5b744e 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -79,6 +79,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, " int nouveau_modeset = -1; module_param_named(modeset, nouveau_modeset, int, 0400); +MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); +static int nouveau_atomic = 0; +module_param_named(atomic, nouveau_atomic, int, 0400); + MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); static int nouveau_runtime_pm = -1; module_param_named(runpm, nouveau_runtime_pm, int, 0400); @@ -383,6 +387,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev, pci_set_master(pdev); + if (nouveau_atomic) + driver_pci.driver_features |= DRIVER_ATOMIC; + ret = drm_get_pci_dev(pdev, pent, &driver_pci); if (ret) { nvkm_device_del(&device); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index a29474528e8506ed7b8c4ff4c5770adc63df30e2..926ec51ba5be19820c3ad0a703e84cfe16cd9236 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -4150,7 +4150,7 @@ nv50_disp_atomic_commit(struct drm_device *dev, nv50_disp_atomic_commit_tail(state); drm_for_each_crtc(crtc, dev) { - if (crtc->state->enable) { + if (crtc->state->active) { if (!drm->have_disp_power_ref) { drm->have_disp_power_ref = true; return 0; @@ -4398,10 +4398,6 @@ nv50_display_destroy(struct drm_device *dev) kfree(disp); } -MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); -static int nouveau_atomic = 0; -module_param_named(atomic, nouveau_atomic, int, 0400); - int nv50_display_create(struct drm_device *dev) { @@ -4426,8 +4422,6 @@ nv50_display_create(struct drm_device *dev) disp->disp = &nouveau_display(dev)->disp; dev->mode_config.funcs = &nv50_disp_func; dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; - if (nouveau_atomic) - dev->driver->driver_features |= DRIVER_ATOMIC; /* small shared memory area we use for 
notifiers and semaphores */ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index a7e55c422501cf9c4fc98bc7e7798d2b2213cfcf..0b632dc0cf7d4c6b40aee1226ca3e4f01f2485a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -155,10 +155,10 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) (target << 28)); nvkm_wr32(device, 0x002274, (runl << 20) | nr); - if (wait_event_timeout(fifo->runlist[runl].wait, - !(nvkm_rd32(device, 0x002284 + (runl * 0x08)) - & 0x00100000), - msecs_to_jiffies(2000)) == 0) + if (nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000)) + break; + ) < 0) nvkm_error(subdev, "runlist %d update timeout\n", runl); unlock: mutex_unlock(&subdev->mutex); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 424cd1b665759bfb0033cdf8aefff77d863cab06..337d3a1c2a4099b54652b7c5b836f4826b1c1ee8 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -853,7 +853,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) return ret; } -static int radeon_lvds_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); @@ -1013,7 +1013,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector) return ret; } -static int radeon_vga_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1157,7 +1157,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) return 1; } 
-static int radeon_tv_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) @@ -1499,7 +1499,7 @@ static void radeon_dvi_force(struct drm_connector *connector) radeon_connector->use_digital = true; } -static int radeon_dvi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1801,7 +1801,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) return ret; } -static int radeon_dp_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 77c56264c05bb2875cc662fb8d189bedf1abef41..17590cb2b80d952b4479dc14b00b26c6db7ae379 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -352,6 +352,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->x_scaling[0] = VC4_SCALING_TPZ; if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + } else { + vc4_state->x_scaling[1] = VC4_SCALING_NONE; + vc4_state->y_scaling[1] = VC4_SCALING_NONE; } vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h index 2fdc84bef54f3a00dca9b08293d97c9d253cc7f0..41854d22743fa2e4058b3ed59bde7a99d40091f6 100644 --- a/drivers/gpu/msm/a6xx_reg.h +++ b/drivers/gpu/msm/a6xx_reg.h @@ -36,6 +36,7 @@ #define A6XX_INT_UCHE_TRAP_INTR 25 #define A6XX_INT_DEBBUS_INTR_0 26 #define A6XX_INT_DEBBUS_INTR_1 27 +#define A6XX_INT_TSB_WRITE_ERROR 28 #define A6XX_INT_ISDB_CPU_IRQ 30 #define 
A6XX_INT_ISDB_UNDER_DEBUG 31 diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index f3813cb7f8c69dc38ecf8336df02f4a4cea115f8..3a2bc36c7515738eb198c9a750844c7745776628 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -448,7 +448,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .minor = 8, .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION | - ADRENO_PREEMPTION, + ADRENO_IOCOHERENT | ADRENO_PREEMPTION, .sqefw_name = "a630_sqe.fw", .zap_name = "a608_zap", .gpudev = &adreno_a6xx_gpudev, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index e3cd9803dc88e9ba55702ff7d9f6261449e755cc..fd22fa6d2e276834085a5b73d2c8b3cb21fe976d 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1338,6 +1338,10 @@ static int adreno_probe(struct platform_device *pdev) device->mmu.va_padding = adreno_dev->gpucore->va_padding; } + if (adreno_dev->gpucore->cx_ipeak_gpu_freq) + device->pwrctrl.cx_ipeak_gpu_freq = + adreno_dev->gpucore->cx_ipeak_gpu_freq; + status = kgsl_device_platform_probe(device); if (status) { device->pdev = NULL; @@ -3570,7 +3574,7 @@ static void adreno_power_stats(struct kgsl_device *device, struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct adreno_busy_data *busy = &adreno_dev->busy_data; - uint64_t adj = 0; + int64_t adj = 0; memset(stats, 0, sizeof(*stats)); @@ -3583,6 +3587,9 @@ static void adreno_power_stats(struct kgsl_device *device, if (gpudev->read_throttling_counters) { adj = gpudev->read_throttling_counters(adreno_dev); + if (adj < 0 && -adj > gpu_busy) + adj = -gpu_busy; + gpu_busy += adj; } @@ -3843,7 +3850,6 @@ static const struct kgsl_functable adreno_functable = { .device_private_create = adreno_device_private_create, .device_private_destroy = adreno_device_private_destroy, /* Optional functions */ - .snapshot_gmu = adreno_snapshot_gmu, .drawctxt_create = 
adreno_drawctxt_create, .drawctxt_detach = adreno_drawctxt_detach, .drawctxt_destroy = adreno_drawctxt_destroy, diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 0131912811808c1cd8a64bf7e2b4ce079e35ab35..22bcc2b87c6fa61376b335f3523688b0d84a66a1 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -390,6 +390,7 @@ struct adreno_device_private { * @gpmu_tsens: ID for the temporature sensor used by the GPMU * @max_power: Max possible power draw of a core, units elephant tail hairs * @va_padding: Size to pad allocations to, zero if not required + * @cx_ipeak_gpu_freq : Default Cx Ipeak GPU frequency */ struct adreno_gpu_core { enum adreno_gpurev gpurev; @@ -421,6 +422,7 @@ struct adreno_gpu_core { unsigned int gpmu_tsens; unsigned int max_power; uint64_t va_padding; + unsigned int cx_ipeak_gpu_freq; }; @@ -951,8 +953,6 @@ struct adreno_gpudev { /* GPU specific function hooks */ void (*irq_trace)(struct adreno_device *, unsigned int status); void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); - void (*snapshot_debugbus)(struct adreno_device *adreno_dev, - struct kgsl_snapshot *snapshot); void (*platform_setup)(struct adreno_device *); void (*init)(struct adreno_device *); void (*remove)(struct adreno_device *); @@ -967,7 +967,7 @@ struct adreno_gpudev { void (*pwrlevel_change_settings)(struct adreno_device *, unsigned int prelevel, unsigned int postlevel, bool post); - uint64_t (*read_throttling_counters)(struct adreno_device *); + int64_t (*read_throttling_counters)(struct adreno_device *); void (*count_throttles)(struct adreno_device *, uint64_t adj); int (*enable_pwr_counters)(struct adreno_device *, unsigned int counter); @@ -1124,9 +1124,6 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot, struct kgsl_context *context); -void adreno_snapshot_gmu(struct kgsl_device *device, - struct kgsl_snapshot *snapshot); - int adreno_reset(struct kgsl_device *device, int fault); void 
adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev, diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 0b89c6aee2f5d4c033d51786f7f820a1cf96c818..44a7c39f7e1311fecd9e7f462308e62460063a4d 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1688,9 +1688,10 @@ static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev, /* number of cycles when clock is throttle by less than 50% (CRC) */ #define CRC_LESS50PCT 3 -static uint64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev) +static int64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev) { - int i, adj; + int i; + int64_t adj; uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS]; struct adreno_busy_data *busy = &adreno_dev->busy_data; diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 8c6ca7c9e0b20f8e3d8a3c5caa5e2bfd408d43f3..22c7d2b6021e51014c2bbe8121487e6cb6f13de9 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -675,14 +675,27 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on) regs = a6xx_hwcg_registers[i].regs; - /* Disable SP clock before programming HWCG registers */ - gmu_core_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); + /* + * Disable SP clock before programming HWCG registers. + * A608 GPU is not having the GX power domain. Hence + * skip GMU_GX registers for A608. + */ + + if (!adreno_is_a608(adreno_dev)) + gmu_core_regrmw(device, + A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); for (j = 0; j < a6xx_hwcg_registers[i].count; j++) kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0); - /* Enable SP clock */ - gmu_core_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1); + /* + * Enable SP clock after programming HWCG registers. + * A608 GPU is not having the GX power domain. Hence + * skip GMU_GX registers for A608. 
+ */ + if (!adreno_is_a608(adreno_dev)) + gmu_core_regrmw(device, + A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1); /* enable top level HWCG */ kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, @@ -810,18 +823,20 @@ static void a6xx_start(struct adreno_device *adreno_dev) kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); } else if (adreno_is_a608(adreno_dev)) { - kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x800060); + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060); kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16); } else { kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0); kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); } - /* For a608 Mem pool size is reduced to 1/4 */ - if (adreno_is_a608(adreno_dev)) - kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 32); - else + if (adreno_is_a608(adreno_dev)) { + /* For a608 Mem pool size is reduced to 48 */ + kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48); + kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47); + } else { kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128); + } /* Setting the primFifo thresholds values */ if (adreno_is_a640(adreno_dev)) @@ -1399,6 +1414,37 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev) return 0; } +static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev) +{ + int i; + int64_t adj = 0; + uint32_t counts[ADRENO_GPMU_THROTTLE_COUNTERS]; + struct adreno_busy_data *busy = &adreno_dev->busy_data; + + for (i = 0; i < ARRAY_SIZE(counts); i++) { + if (!adreno_dev->gpmu_throttle_counters[i]) + counts[i] = 0; + else + counts[i] = counter_delta(KGSL_DEVICE(adreno_dev), + adreno_dev->gpmu_throttle_counters[i], + &busy->throttle_cycles[i]); + } + + /* + * The adjustment is the number of cycles lost to throttling, which + * is calculated as a weighted average of the cycles throttled + * at 10%, 50%, and 90%. 
The adjustment is negative because in A6XX, + * the busy count includes the throttled cycles. Therefore, we want + * to remove them to prevent appearing to be busier than + * we actually are. + */ + adj = -((counts[0] * 1) + (counts[1] * 5) + (counts[2] * 9)) / 10; + + trace_kgsl_clock_throttling(0, counts[1], counts[2], + counts[0], adj); + return adj; +} + static void a6xx_count_throttles(struct adreno_device *adreno_dev, uint64_t adj) { @@ -1533,6 +1579,9 @@ static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit) case A6XX_INT_UCHE_TRAP_INTR: KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n"); break; + case A6XX_INT_TSB_WRITE_ERROR: + KGSL_DRV_CRIT_RATELIMIT(device, "TSB: Write error interrupt\n"); + break; default: KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit); } @@ -1704,7 +1753,8 @@ static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit) (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \ (1 << A6XX_INT_RBBM_HANG_DETECT) | \ (1 << A6XX_INT_UCHE_OOB_ACCESS) | \ - (1 << A6XX_INT_UCHE_TRAP_INTR)) + (1 << A6XX_INT_UCHE_TRAP_INTR) | \ + (1 << A6XX_INT_TSB_WRITE_ERROR)) static struct adreno_irq_funcs a6xx_irq_funcs[32] = { ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */ @@ -1737,7 +1787,7 @@ static struct adreno_irq_funcs a6xx_irq_funcs[32] = { ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */ ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */ ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */ - ADRENO_IRQ_CALLBACK(NULL), /* 28 - UNUSED */ + ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 28 - TSBWRITEERROR */ ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */ ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */ ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */ @@ -1748,15 +1798,6 @@ static struct adreno_irq a6xx_irq = { .mask = A6XX_INT_MASK, }; -static struct adreno_snapshot_sizes a6xx_snap_sizes = { - .cp_pfp = 0x33, - .roq = 0x400, -}; - -static struct adreno_snapshot_data a6xx_snapshot_data = { - 
.sect_sizes = &a6xx_snap_sizes, -}; - static struct adreno_coresight_register a6xx_coresight_regs[] = { { A6XX_DBGC_CFG_DBGBUS_SEL_A }, { A6XX_DBGC_CFG_DBGBUS_SEL_B }, @@ -2619,7 +2660,10 @@ static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev, if (counter == 0) return -EINVAL; - if (!gmu_core_isenabled(device)) + /* We can use GPU without GMU and allow it to count GPU busy cycles */ + if (!gmu_core_isenabled(device) && + !kgsl_is_register_offset(device, + A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK)) return -ENODEV; kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000); @@ -2856,6 +2900,18 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = { .offset_0 = ADRENO_REG_REGISTER_MAX, }; +static void a6xx_perfcounter_init(struct adreno_device *adreno_dev) +{ + /* + * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A608. + * Mark them as broken so that they can't be used. + */ + if (adreno_is_a608(adreno_dev)) { + a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN; + a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN; + } +} + static int a6xx_perfcounter_update(struct adreno_device *adreno_dev, struct adreno_perfcount_register *reg, bool update_reg) { @@ -2923,9 +2979,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .reg_offsets = &a6xx_reg_offsets, .start = a6xx_start, .snapshot = a6xx_snapshot, - .snapshot_debugbus = a6xx_snapshot_debugbus, .irq = &a6xx_irq, - .snapshot_data = &a6xx_snapshot_data, .irq_trace = trace_kgsl_a5xx_irq_status, .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS, .platform_setup = a6xx_platform_setup, @@ -2935,6 +2989,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .regulator_disable = a6xx_sptprac_disable, .perfcounters = &a6xx_perfcounters, .enable_pwr_counters = a6xx_enable_pwr_counters, + .read_throttling_counters = a6xx_read_throttling_counters, .count_throttles = a6xx_count_throttles, .microcode_read = a6xx_microcode_read, .enable_64bit = a6xx_enable_64bit, @@ -2955,6 +3010,7 @@ 
struct adreno_gpudev adreno_a6xx_gpudev = { .preemption_context_destroy = a6xx_preemption_context_destroy, .sptprac_is_on = a6xx_sptprac_is_on, .ccu_invalidate = a6xx_ccu_invalidate, + .perfcounter_init = a6xx_perfcounter_init, .perfcounter_update = a6xx_perfcounter_update, .coresight = {&a6xx_coresight, &a6xx_coresight_cx}, }; diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h index e45b2cb1e2aa3127ce49e0ec7f62f107cf6a9acd..55e573edfa484c6bb5dd1f0446740ffd8fae7cb8 100644 --- a/drivers/gpu/msm/adreno_a6xx.h +++ b/drivers/gpu/msm/adreno_a6xx.h @@ -191,8 +191,6 @@ void a6xx_preemption_context_destroy(struct kgsl_context *context); void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot); -void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev, - struct kgsl_snapshot *snapshot); void a6xx_crashdump_init(struct adreno_device *adreno_dev); int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev); void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev); diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index 24a578d16d03441070d29600d78a3d9a396fe10d..42df65e0be1d198927127ad0ef2cf0848f01f2a7 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -505,8 +505,10 @@ static int load_gmu_fw(struct kgsl_device *device) gmu_core_regwrite(device, tcm_addr + j, fwptr[j]); } else { + uint32_t offset = blk->addr - (uint32_t)md->gmuaddr; + /* Copy the memory directly */ - memcpy(md->hostptr, fw, blk->size); + memcpy(md->hostptr + offset, fw, blk->size); } fw += blk->size; @@ -1259,6 +1261,37 @@ static uint32_t lm_limit(struct adreno_device *adreno_dev) return adreno_dev->lm_limit; } +static int a640_throttling_counters[ADRENO_GPMU_THROTTLE_COUNTERS] = { + 0x11, 0x15, 0x19 +}; + +static void _setup_throttling_counters(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct gmu_device *gmu = 
KGSL_GMU_DEVICE(device); + int i, ret; + + for (i = 0; i < ARRAY_SIZE(a640_throttling_counters); i++) { + adreno_dev->busy_data.throttle_cycles[i] = 0; + + if (!a640_throttling_counters[i]) + continue; + if (adreno_dev->gpmu_throttle_counters[i]) + continue; + + ret = adreno_perfcounter_get(adreno_dev, + KGSL_PERFCOUNTER_GROUP_GPMU_PWR, + a640_throttling_counters[i], + &adreno_dev->gpmu_throttle_counters[i], + NULL, + PERFCOUNTER_FLAG_KERNEL); + if (ret) + dev_err_once(&gmu->pdev->dev, + "Unable to get counter for LM: GPMU_PWR %d\n", + a640_throttling_counters[i]); + } +} + #define LIMITS_CONFIG(t, s, c, i, a) ( \ (t & 0xF) | \ ((s & 0xF) << 4) | \ @@ -1278,6 +1311,12 @@ void a6xx_gmu_enable_lm(struct kgsl_device *device) !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) return; + /* a640 only needs to set up throttling counters for DCVS */ + if (adreno_is_a640(adreno_dev)) { + _setup_throttling_counters(adreno_dev); + return; + } + gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD, GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev)); gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1); @@ -1392,7 +1431,6 @@ static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, struct gmu_mem_type_desc desc[] = { {gmu->hfi_mem, SNAPSHOT_GMU_HFIMEM}, {gmu->gmu_log, SNAPSHOT_GMU_LOG}, - {gmu->bw_mem, SNAPSHOT_GMU_BWMEM}, {gmu->dump_mem, SNAPSHOT_GMU_DUMPMEM} }; unsigned int val, i; diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index bbfac62152970f1dfc0387b8fcbc70a90985777c..8daeb894f0936e83b6b9ed79567c56bcb43af779 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -122,6 +122,8 @@ static void _a6xx_preemption_done(struct adreno_device *adreno_dev) return; } + adreno_dev->preempt.count++; + del_timer_sync(&adreno_dev->preempt.timer); adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status); @@ -297,8 +299,6 @@ void a6xx_preemption_trigger(struct 
adreno_device *adreno_dev) kgsl_sharedmem_writel(device, &next->preemption_desc, PREEMPT_RECORD(wptr), next->wptr); - preempt->count++; - spin_unlock_irqrestore(&next->preempt_lock, flags); /* And write it to the smmu info */ @@ -323,7 +323,8 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) * free when the GPU is already powered on, whereas an OOB requires an * unconditional handshake with the GMU. */ - gmu_core_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2); + if (gmu_core_isenabled(device)) + gmu_core_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2); /* * Fenced writes on this path will make sure the GPU is woken up @@ -400,12 +401,15 @@ void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit) return; } + adreno_dev->preempt.count++; + /* * We can now safely clear the preemption keepalive bit, allowing * power collapse to resume its regular activity. */ - gmu_core_regrmw(KGSL_DEVICE(adreno_dev), A6XX_GMU_AO_SPARE_CNTL, 0x2, - 0x0); + if (gmu_core_isenabled(KGSL_DEVICE(adreno_dev))) + gmu_core_regrmw(KGSL_DEVICE(adreno_dev), + A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0); del_timer(&adreno_dev->preempt.timer); diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c index 552e39307cf91a73b287d73cd5b88788a3ffc9d5..5dc4ecf380164fb30df9cf5e999b05d35df15db3 100644 --- a/drivers/gpu/msm/adreno_a6xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c @@ -1285,7 +1285,7 @@ static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device, } /* a6xx_snapshot_debugbus() - Capture debug bus data */ -void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev, +static void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot) { int i; @@ -1493,18 +1493,22 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct gmu_dev_ops *gmu_dev_ops = 
GMU_DEVICE_OPS(device); - struct adreno_snapshot_data *snap_data = gpudev->snapshot_data; bool sptprac_on, gx_on = true; unsigned int i, roq_size; - /* ROQ size is 0x800 DW on a640 and a680 */ - roq_size = adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev) ? - (snap_data->sect_sizes->roq * 2) : snap_data->sect_sizes->roq; - /* GMU TCM data dumped through AHB */ if (GMU_DEV_OP_VALID(gmu_dev_ops, snapshot)) gmu_dev_ops->snapshot(adreno_dev, snapshot); + /* + * Dump debugbus data here to capture it for both + * GMU and GPU snapshot. Debugbus data can be accessed + * even if the gx headswitch or sptprac is off. If gx + * headswitch is off, data for gx blocks will show as + * 0x5c00bd00. + */ + a6xx_snapshot_debugbus(adreno_dev, snapshot); + sptprac_on = gpudev->sptprac_is_on(adreno_dev); if (GMU_DEV_OP_VALID(gmu_dev_ops, gx_is_on)) @@ -1539,8 +1543,7 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, /* CP_SQE indexed registers */ kgsl_snapshot_indexed_registers(device, snapshot, - A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, - 0, snap_data->sect_sizes->cp_pfp); + A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, 0, 0x33); /* CP_DRAW_STATE */ kgsl_snapshot_indexed_registers(device, snapshot, @@ -1552,7 +1555,13 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA, 0, 0x6000); - /* CP ROQ */ + /* + * CP ROQ dump units is 4dwords. The number of units is stored + * in CP_ROQ_THRESHOLDS_2[31:16]. Read the value and convert to + * dword units. 
+ */ + kgsl_regread(device, A6XX_CP_ROQ_THRESHOLDS_2, &roq_size); + roq_size = roq_size >> 14; kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, adreno_snapshot_cp_roq, &roq_size); diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index c63e8b51f569f493c1b77a73be09673538362fc0..f105b9b9d33f8f34b97dea4332d888d5988ba817 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -116,8 +116,11 @@ static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev, rb->wptr = rb->_wptr; spin_unlock_irqrestore(&rb->preempt_lock, flags); - if (ret) - kgsl_device_snapshot(KGSL_DEVICE(adreno_dev), NULL, false); + if (ret) { + /* If WPTR update fails, set the fault and trigger recovery */ + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev)); + } } diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c index b5ac3109c3e8e4fbda54c1b4020effb545850b25..f604e765929d05d8c9a7b37ea2c337343b684d88 100644 --- a/drivers/gpu/msm/adreno_snapshot.c +++ b/drivers/gpu/msm/adreno_snapshot.c @@ -947,29 +947,6 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot, } -/* adreno_snapshot_gmu - Snapshot the Adreno GMU state - * @device - KGSL device to snapshot - * @snapshot - Pointer to the snapshot instance - * This is a hook function called by kgsl_snapshot to snapshot the - * Adreno specific information for the GMU snapshot. In turn, this function - * calls the GMU specific snapshot function to get core specific information. 
- */ -void adreno_snapshot_gmu(struct kgsl_device *device, - struct kgsl_snapshot *snapshot) -{ - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); - struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); - - /* Add GMU specific sections */ - if (GMU_DEV_OP_VALID(gmu_dev_ops, snapshot)) - gmu_dev_ops->snapshot(adreno_dev, snapshot); - - if (gpudev->snapshot_debugbus) - gpudev->snapshot_debugbus(adreno_dev, snapshot); - -} - /* * adreno_snapshot_cp_roq - Dump CP merciu data in snapshot * @device: Device being snapshotted diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 2ef9b45c3e8d2a21ba57e4be8f61a71319f4fdce..56930dcc8f83a0030cebbc32d2799230a2866718 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -75,10 +75,23 @@ MODULE_PARM_DESC(kgsl_mmu_type, "Type of MMU to be used for graphics"); DEFINE_MUTEX(kgsl_mmu_sync); EXPORT_SYMBOL(kgsl_mmu_sync); +/* List of dmabufs mapped */ +static LIST_HEAD(kgsl_dmabuf_list); +static DEFINE_SPINLOCK(kgsl_dmabuf_lock); + +struct dmabuf_list_entry { + struct page *firstpage; + struct list_head node; + struct list_head dmabuf_list; +}; + struct kgsl_dma_buf_meta { + struct kgsl_mem_entry *entry; struct dma_buf_attachment *attach; struct dma_buf *dmabuf; struct sg_table *table; + struct dmabuf_list_entry *dle; + struct list_head node; }; static inline struct kgsl_pagetable *_get_memdesc_pagetable( @@ -269,10 +282,65 @@ kgsl_mem_entry_create(void) return entry; } + +static void add_dmabuf_list(struct kgsl_dma_buf_meta *meta) +{ + struct dmabuf_list_entry *dle; + struct page *page; + + /* + * Get the first page. We will use it to identify the imported + * buffer, since the same buffer can be mapped as different + * mem entries. 
+ */ + page = sg_page(meta->table->sgl); + + spin_lock(&kgsl_dmabuf_lock); + + /* Go through the list to see if we imported this buffer before */ + list_for_each_entry(dle, &kgsl_dmabuf_list, node) { + if (dle->firstpage == page) { + /* Add the dmabuf meta to the list for this dle */ + meta->dle = dle; + list_add(&meta->node, &dle->dmabuf_list); + spin_unlock(&kgsl_dmabuf_lock); + return; + } + } + + /* This is a new buffer. Add a new entry for it */ + dle = kzalloc(sizeof(*dle), GFP_ATOMIC); + if (dle) { + dle->firstpage = page; + INIT_LIST_HEAD(&dle->dmabuf_list); + list_add(&dle->node, &kgsl_dmabuf_list); + meta->dle = dle; + list_add(&meta->node, &dle->dmabuf_list); + } + spin_unlock(&kgsl_dmabuf_lock); +} + +static void remove_dmabuf_list(struct kgsl_dma_buf_meta *meta) +{ + struct dmabuf_list_entry *dle = meta->dle; + + if (!dle) + return; + + spin_lock(&kgsl_dmabuf_lock); + list_del(&meta->node); + if (list_empty(&dle->dmabuf_list)) { + list_del(&dle->node); + kfree(dle); + } + spin_unlock(&kgsl_dmabuf_lock); +} + #ifdef CONFIG_DMA_SHARED_BUFFER static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta) { if (meta != NULL) { + remove_dmabuf_list(meta); dma_buf_unmap_attachment(meta->attach, meta->table, DMA_FROM_DEVICE); dma_buf_detach(meta->dmabuf, meta->attach); @@ -539,7 +607,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv, */ spin_lock(&proc_priv->ctxt_count_lock); if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) { - KGSL_DRV_ERR(device, + KGSL_DRV_ERR_RATELIMIT(device, "Per process context limit reached for pid %u", dev_priv->process_priv->pid); spin_unlock(&proc_priv->ctxt_count_lock); @@ -1415,6 +1483,45 @@ long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv, kgsl_context_put(context); break; } + case KGSL_PROP_SECURE_BUFFER_ALIGNMENT: + { + unsigned int align; + + if (param->sizebytes != sizeof(unsigned int)) { + result = -EINVAL; + break; + } + /* + * XPUv2 impose the constraint of 1MB 
memory alignment, + * on the other hand Hypervisor does not have such + * constraints. So driver should fulfill such + * requirements when allocating secure memory. + */ + align = MMU_FEATURE(&dev_priv->device->mmu, + KGSL_MMU_HYP_SECURE_ALLOC) ? PAGE_SIZE : SZ_1M; + + if (copy_to_user(param->value, &align, sizeof(align))) + result = -EFAULT; + + break; + } + case KGSL_PROP_SECURE_CTXT_SUPPORT: + { + unsigned int secure_ctxt; + + if (param->sizebytes != sizeof(unsigned int)) { + result = -EINVAL; + break; + } + + secure_ctxt = dev_priv->device->mmu.secured ? 1 : 0; + + if (copy_to_user(param->value, &secure_ctxt, + sizeof(secure_ctxt))) + result = -EFAULT; + + break; + } default: if (is_compat_task()) result = dev_priv->device->ftbl->getproperty_compat( @@ -2574,6 +2681,7 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device, meta->dmabuf = dmabuf; meta->attach = attach; + meta->entry = entry; entry->priv_data = meta; entry->memdesc.pagetable = pagetable; @@ -2610,6 +2718,12 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device, entry->memdesc.size += (uint64_t) s->length; } + if (!entry->memdesc.size) { + ret = -EINVAL; + goto out; + } + + add_dmabuf_list(meta); entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size); out: @@ -2617,7 +2731,6 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device, if (!IS_ERR_OR_NULL(attach)) dma_buf_detach(dmabuf, attach); - kfree(meta); } @@ -2630,21 +2743,16 @@ void kgsl_get_egl_counts(struct kgsl_mem_entry *entry, int *egl_surface_count, int *egl_image_count) { struct kgsl_dma_buf_meta *meta = entry->priv_data; - struct dma_buf *dmabuf = meta->dmabuf; - struct dma_buf_attachment *mem_entry_buf_attachment = meta->attach; - struct device *buf_attachment_dev = mem_entry_buf_attachment->dev; - struct dma_buf_attachment *attachment = NULL; - - mutex_lock(&dmabuf->lock); - list_for_each_entry(attachment, &dmabuf->attachments, node) { - struct kgsl_mem_entry *scan_mem_entry = NULL; + struct dmabuf_list_entry *dle = 
meta->dle; + struct kgsl_dma_buf_meta *scan_meta; + struct kgsl_mem_entry *scan_mem_entry; - if (attachment->dev != buf_attachment_dev) - continue; + if (!dle) + return; - scan_mem_entry = attachment->priv; - if (!scan_mem_entry) - continue; + spin_lock(&kgsl_dmabuf_lock); + list_for_each_entry(scan_meta, &dle->dmabuf_list, node) { + scan_mem_entry = scan_meta->entry; switch (kgsl_memdesc_get_memtype(&scan_mem_entry->memdesc)) { case KGSL_MEMTYPE_EGL_SURFACE: @@ -2655,7 +2763,7 @@ void kgsl_get_egl_counts(struct kgsl_mem_entry *entry, break; } } - mutex_unlock(&dmabuf->lock); + spin_unlock(&kgsl_dmabuf_lock); } #else void kgsl_get_egl_counts(struct kgsl_mem_entry *entry, diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index c4264df712ae193dfbe3a58be2a34af3d6c64e02..f81328deaa2c19c37ab7c86ffca005a6f2e84af1 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -148,8 +148,6 @@ struct kgsl_functable { unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid); void (*snapshot)(struct kgsl_device *device, struct kgsl_snapshot *snapshot, struct kgsl_context *context); - void (*snapshot_gmu)(struct kgsl_device *device, - struct kgsl_snapshot *snapshot); irqreturn_t (*irq_handler)(struct kgsl_device *device); int (*drain)(struct kgsl_device *device); struct kgsl_device_private * (*device_private_create)(void); diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 0ea075fb93ebd1097100d270c7d8cb8e3f6e0ca1..37b12a51ad0a5a252e60bdac7fcb01bc6def0cc7 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -359,7 +359,6 @@ static void gmu_kmem_close(struct gmu_device *gmu) struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL]; gmu->hfi_mem = NULL; - gmu->bw_mem = NULL; gmu->dump_mem = NULL; gmu->gmu_log = NULL; @@ -459,13 +458,6 @@ static int gmu_memory_probe(struct kgsl_device *device, goto err_ret; } - gmu->bw_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 
BWMEM_SIZE, - IOMMU_READ); - if (IS_ERR(gmu->bw_mem)) { - ret = PTR_ERR(gmu->bw_mem); - goto err_ret; - } - /* Allocates & maps GMU crash dump memory */ if (adreno_is_a630(adreno_dev)) { gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, @@ -512,10 +504,6 @@ static int gmu_dcvs_set(struct kgsl_device *device, .bw = INVALID_DCVS_IDX, }; - /* Do not set to XO and lower GPU clock vote from GMU */ - if ((gpu_pwrlevel != INVALID_DCVS_IDX) && - (gpu_pwrlevel >= gmu->num_gpupwrlevels - 1)) - return -EINVAL; /* If GMU has not been started, save it */ if (!test_bit(GMU_HFI_ON, &device->gmu_core.flags)) { @@ -524,6 +512,11 @@ static int gmu_dcvs_set(struct kgsl_device *device, return 0; } + /* Do not set to XO and lower GPU clock vote from GMU */ + if ((gpu_pwrlevel != INVALID_DCVS_IDX) && + (gpu_pwrlevel >= gmu->num_gpupwrlevels - 1)) + return -EINVAL; + if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1) req.freq = gmu->num_gpupwrlevels - gpu_pwrlevel - 1; @@ -794,33 +787,35 @@ static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote, } } -/* TODO: Remove this and use the actual bus API */ -#define GET_IB_VAL(i) ((i) & 0x3FFF) -#define GET_AB_VAL(i) (((i) >> 14) & 0x3FFF) - -static void build_rpmh_bw_buf(struct gmu_device *gmu) +static void build_bwtable_cmd_cache(struct gmu_device *gmu) { - struct hfi_bwbuf *bwbuf = gmu->bw_mem->hostptr; + struct hfi_bwtable_cmd *cmd = &gmu->hfi.bwtbl_cmd; struct rpmh_votes_t *votes = &gmu->rpmh_votes; - unsigned int i, val; - - /* TODO: wait for IB/AB query API ready */ - - /* Build from DDR votes in case IB/AB query API fail */ - for (i = 0; i < gmu->num_bwlevels; i++) { - /* FIXME: wait for HPG to specify which node has IB/AB - * node 0 for now - */ - /* Get IB val */ - val = GET_IB_VAL(votes->ddr_votes.cmd_data[i][0]); - /* If IB val not set, use AB val */ - if (val == 0) - val = GET_AB_VAL(votes->ddr_votes.cmd_data[i][0]); - - /* Set only vote data */ - bwbuf->arr[i] &= 0xFFFF; - bwbuf->arr[i] |= (val << 16); - } + unsigned 
int i, j; + + cmd->hdr = 0xFFFFFFFF; + cmd->bw_level_num = gmu->num_bwlevels; + cmd->cnoc_cmds_num = votes->cnoc_votes.cmds_per_bw_vote; + cmd->cnoc_wait_bitmask = votes->cnoc_votes.cmds_wait_bitmask; + cmd->ddr_cmds_num = votes->ddr_votes.cmds_per_bw_vote; + cmd->ddr_wait_bitmask = votes->ddr_votes.cmds_wait_bitmask; + + for (i = 0; i < cmd->ddr_cmds_num; i++) + cmd->ddr_cmd_addrs[i] = votes->ddr_votes.cmd_addrs[i]; + + for (i = 0; i < cmd->bw_level_num; i++) + for (j = 0; j < cmd->ddr_cmds_num; j++) + cmd->ddr_cmd_data[i][j] = + votes->ddr_votes.cmd_data[i][j]; + + for (i = 0; i < cmd->cnoc_cmds_num; i++) + cmd->cnoc_cmd_addrs[i] = + votes->cnoc_votes.cmd_addrs[i]; + + for (i = 0; i < MAX_CNOC_LEVELS; i++) + for (j = 0; j < cmd->cnoc_cmds_num; j++) + cmd->cnoc_cmd_data[i][j] = + votes->cnoc_votes.cmd_data[i][j]; } /* @@ -860,7 +855,7 @@ static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr) build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl); - build_rpmh_bw_buf(gmu); + build_bwtable_cmd_cache(gmu); out: kfree(usecases); @@ -1056,10 +1051,6 @@ static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node) static int gmu_gpu_bw_probe(struct kgsl_device *device, struct gmu_device *gmu) { struct msm_bus_scale_pdata *bus_scale_table; - struct msm_bus_paths *usecase; - struct msm_bus_vectors *vector; - struct hfi_bwbuf *bwbuf = gmu->bw_mem->hostptr; - int i; bus_scale_table = msm_bus_cl_get_pdata(device->pdev); if (bus_scale_table == NULL) { @@ -1074,25 +1065,6 @@ static int gmu_gpu_bw_probe(struct kgsl_device *device, struct gmu_device *gmu) return -ENODEV; } - /* 0-15: num levels; 16-31: arr offset in bytes */ - bwbuf->hdr[0] = (12 << 16) | (bus_scale_table->num_usecases & 0xFFFF); - /* 0-15: element size in bytes; 16-31: data size in bytes */ - bwbuf->hdr[1] = (2 << 16) | 4; - /* 0-15: bw val offset in bytes; 16-31: vote data offset in bytes */ - bwbuf->hdr[2] = (2 << 16) | 0; - - for (i = 0; i < 
bus_scale_table->num_usecases; i++) { - usecase = &bus_scale_table->usecase[i]; - vector = &usecase->vectors[0]; - /* Clear bw val */ - bwbuf->arr[i] &= 0xFFFF0000; - /* Set bw val if not first entry */ - if (i) - bwbuf->arr[i] |= - (DIV_ROUND_UP_ULL(vector->ib, 1048576) - & 0xFFFF); - } - return 0; } @@ -1335,9 +1307,10 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node) disable_irq(gmu->gmu_interrupt_num); disable_irq(hfi->hfi_interrupt_num); - tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)device); + tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long) gmu); INIT_LIST_HEAD(&hfi->msglist); spin_lock_init(&hfi->msglock); + spin_lock_init(&hfi->read_queue_lock); hfi->kgsldev = device; /* Retrieves GMU/GPU power level configurations*/ @@ -1390,10 +1363,11 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node) "AOP mailbox init failed: %d\n", ret); } - /* disable LM during boot time */ - clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag); - set_bit(GMU_ENABLED, &device->gmu_core.flags); + /* disable LM if the feature is not enabled */ + if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM)) + clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag); + set_bit(GMU_ENABLED, &device->gmu_core.flags); device->gmu_core.dev_ops = &adreno_a6xx_gmudev; return 0; diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h index f6ca3d2c45d0391d868c5602f4571621520e46f0..8619b9327165aa58b287697d6e78be1f1ef35020 100644 --- a/drivers/gpu/msm/kgsl_gmu.h +++ b/drivers/gpu/msm/kgsl_gmu.h @@ -132,7 +132,6 @@ struct kgsl_mailbox { * @gmu_interrupt_num: GMU interrupt number * @fw_image: GMU FW image * @hfi_mem: pointer to HFI shared memory - * @bw_mem: pointer to BW data indirect buffer memory * @dump_mem: pointer to GMU debug dump memory * @gmu_log: gmu event log memory * @hfi: HFI controller @@ -169,7 +168,6 @@ struct gmu_device { unsigned int gmu_interrupt_num; const struct firmware *fw_image; struct gmu_memdesc *hfi_mem; - 
struct gmu_memdesc *bw_mem; struct gmu_memdesc *dump_mem; struct gmu_memdesc *gmu_log; struct kgsl_hfi hfi; diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index b1516aefdf0c3c3c6f0914755a9050887df0556b..6a469aa6cfdeb1b9577a272f8566ffa73031c1ba 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -45,10 +45,13 @@ (((minor) & 0x7FFFFF) << 5) | \ ((branch) & 0x1F)) +static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx); + /* Size in below functions are in unit of dwords */ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, unsigned int *output, unsigned int max_size) { + struct kgsl_hfi *hfi = &gmu->hfi; struct gmu_memdesc *mem_addr = gmu->hfi_mem; struct hfi_queue_table *tbl = mem_addr->hostptr; struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx]; @@ -61,9 +64,12 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, if (hdr->status == HFI_QUEUE_STATUS_DISABLED) return -EINVAL; + spin_lock_bh(&hfi->read_queue_lock); + if (hdr->read_index == hdr->write_index) { hdr->rx_req = 1; - return -ENODATA; + result = -ENODATA; + goto done; } /* Clear the output data before populating */ @@ -77,7 +83,8 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, dev_err(&gmu->pdev->dev, "HFI message too big: hdr:0x%x rd idx=%d\n", msg_hdr, hdr->read_index); - return -EMSGSIZE; + result = -EMSGSIZE; + goto done; } read = hdr->read_index; @@ -101,6 +108,8 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, hdr->read_index = read; +done: + spin_unlock_bh(&hfi->read_queue_lock); return result; } @@ -231,8 +240,7 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, #define HDR_CMP_SEQNUM(out_hdr, in_hdr) \ (MSG_HDR_GET_SEQNUM(out_hdr) == MSG_HDR_GET_SEQNUM(in_hdr)) -static void receive_ack_cmd(struct kgsl_device *device, - struct gmu_device *gmu, void *rcvd) +static void receive_ack_cmd(struct gmu_device *gmu, void *rcvd) { uint32_t 
*ack = rcvd; uint32_t hdr = ack[0]; @@ -266,8 +274,8 @@ static void receive_ack_cmd(struct kgsl_device *device, "HFI ACK: Waiters: 0x%8.8X\n", waiters[j]); } - adreno_set_gpu_fault(ADRENO_DEVICE(device), ADRENO_GMU_FAULT); - adreno_dispatcher_schedule(device); + adreno_set_gpu_fault(ADRENO_DEVICE(hfi->kgsldev), ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(hfi->kgsldev); } #define MSG_HDR_SET_SEQNUM(hdr, num) \ @@ -300,15 +308,18 @@ static int hfi_send_cmd(struct gmu_device *gmu, uint32_t queue_idx, &ret_cmd->msg_complete, msecs_to_jiffies(HFI_RSP_TIMEOUT)); if (!rc) { - dev_err(&gmu->pdev->dev, - "Receiving GMU ack %d timed out\n", - MSG_HDR_GET_ID(*cmd)); - rc = -ETIMEDOUT; - goto done; - } - - /* If we got here we succeeded */ - rc = 0; + /* Check one more time to make sure there is no response */ + hfi_process_queue(gmu, HFI_MSG_IDX); + if (!completion_done(&ret_cmd->msg_complete)) { + dev_err(&gmu->pdev->dev, + "Timed out waiting on ack for 0x%8.8x (id %d, sequence %d)\n", + cmd[0], + MSG_HDR_GET_ID(*cmd), + MSG_HDR_GET_SEQNUM(*cmd)); + rc = -ETIMEDOUT; + } + } else + rc = 0; done: spin_lock_bh(&hfi->msglock); list_del(&ret_cmd->node); @@ -389,6 +400,7 @@ static int hfi_send_core_fw_start(struct gmu_device *gmu) static const char * const hfi_features[] = { [HFI_FEATURE_ECP] = "ECP", [HFI_FEATURE_ACD] = "ACD", + [HFI_FEATURE_LM] = "LM", }; static const char *feature_to_string(uint32_t feature) @@ -490,36 +502,11 @@ static int hfi_send_dcvstbl(struct gmu_device *gmu) static int hfi_send_bwtbl(struct gmu_device *gmu) { - struct hfi_bwtable_cmd cmd = { - .hdr = CMD_MSG_HDR(H2F_MSG_BW_VOTE_TBL, sizeof(cmd)), - .bw_level_num = gmu->num_bwlevels, - .cnoc_cmds_num = - gmu->rpmh_votes.cnoc_votes.cmds_per_bw_vote, - .cnoc_wait_bitmask = - gmu->rpmh_votes.cnoc_votes.cmds_wait_bitmask, - .ddr_cmds_num = gmu->rpmh_votes.ddr_votes.cmds_per_bw_vote, - .ddr_wait_bitmask = gmu->rpmh_votes.ddr_votes.cmds_wait_bitmask, - }; - int i, j; - - for (i = 0; i < cmd.ddr_cmds_num; 
i++) - cmd.ddr_cmd_addrs[i] = gmu->rpmh_votes.ddr_votes.cmd_addrs[i]; + struct hfi_bwtable_cmd *cmd = &gmu->hfi.bwtbl_cmd; - for (i = 0; i < cmd.bw_level_num; i++) - for (j = 0; j < cmd.ddr_cmds_num; j++) - cmd.ddr_cmd_data[i][j] = - gmu->rpmh_votes.ddr_votes.cmd_data[i][j]; + cmd->hdr = CMD_MSG_HDR(H2F_MSG_BW_VOTE_TBL, sizeof(*cmd)); - for (i = 0; i < cmd.cnoc_cmds_num; i++) - cmd.cnoc_cmd_addrs[i] = - gmu->rpmh_votes.cnoc_votes.cmd_addrs[i]; - - for (i = 0; i < MAX_CNOC_LEVELS; i++) - for (j = 0; j < cmd.cnoc_cmds_num; j++) - cmd.cnoc_cmd_data[i][j] = - gmu->rpmh_votes.cnoc_votes.cmd_data[i][j]; - - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_IDX, cmd); } static int hfi_send_test(struct gmu_device *gmu) @@ -549,12 +536,11 @@ static void receive_debug_req(struct gmu_device *gmu, void *rcvd) cmd->type, cmd->timestamp, cmd->data); } -static void hfi_v1_receiver(struct kgsl_device *device, - struct gmu_device *gmu, uint32_t *rcvd) +static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd) { /* V1 ACK Handler */ if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_V1_MSG_ACK) { - receive_ack_cmd(device, gmu, rcvd); + receive_ack_cmd(gmu, rcvd); return; } @@ -574,57 +560,47 @@ static void hfi_v1_receiver(struct kgsl_device *device, } } -void hfi_receiver(unsigned long data) +static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx) { - struct kgsl_device *device; - struct gmu_device *gmu; uint32_t rcvd[MAX_RCVD_SIZE]; - int read_queue[] = { - HFI_MSG_IDX, - HFI_DBG_IDX, - }; - int q; - if (!data) - return; + while (hfi_queue_read(gmu, queue_idx, rcvd, sizeof(rcvd)) > 0) { + /* Special case if we're v1 */ + if (HFI_VER_MAJOR(&gmu->hfi) < 2) { + hfi_v1_receiver(gmu, rcvd); + continue; + } + + /* V2 ACK Handler */ + if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) { + receive_ack_cmd(gmu, rcvd); + continue; + } - device = (struct kgsl_device *)data; - gmu = KGSL_GMU_DEVICE(device); - - /* While we are here, check 
all of the queues for messages */ - for (q = 0; q < ARRAY_SIZE(read_queue); q++) { - while (hfi_queue_read(gmu, read_queue[q], - rcvd, sizeof(rcvd)) > 0) { - /* Special case if we're v1 */ - if (HFI_VER_MAJOR(&gmu->hfi) < 2) { - hfi_v1_receiver(device, gmu, rcvd); - continue; - } - - /* V2 ACK Handler */ - if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) { - receive_ack_cmd(device, gmu, rcvd); - continue; - } - - /* V2 Request Handler */ - switch (MSG_HDR_GET_ID(rcvd[0])) { - case F2H_MSG_ERR: /* No Reply */ - receive_err_req(gmu, rcvd); - break; - case F2H_MSG_DEBUG: /* No Reply */ - receive_debug_req(gmu, rcvd); - break; - default: /* No Reply */ - dev_err(&gmu->pdev->dev, - "HFI request %d not supported\n", - MSG_HDR_GET_ID(rcvd[0])); - break; - } - }; + /* V2 Request Handler */ + switch (MSG_HDR_GET_ID(rcvd[0])) { + case F2H_MSG_ERR: /* No Reply */ + receive_err_req(gmu, rcvd); + break; + case F2H_MSG_DEBUG: /* No Reply */ + receive_debug_req(gmu, rcvd); + break; + default: /* No Reply */ + dev_err(&gmu->pdev->dev, + "HFI request %d not supported\n", + MSG_HDR_GET_ID(rcvd[0])); + break; + } } } +void hfi_receiver(unsigned long data) +{ + /* Process all read (firmware to host) queues */ + hfi_process_queue((struct gmu_device *) data, HFI_MSG_IDX); + hfi_process_queue((struct gmu_device *) data, HFI_DBG_IDX); +} + #define GMU_VER_MAJOR(ver) (((ver) >> 28) & 0xF) #define GMU_VER_MINOR(ver) (((ver) >> 16) & 0xFFF) #define GMU_VERSION(major, minor) \ @@ -672,6 +648,9 @@ static int hfi_verify_fw_version(struct kgsl_device *device, return 0; } +/* Levels greater than or equal to LM_DCVS_LEVEL are subject to throttling */ +#define LM_DCVS_LEVEL 4 + int hfi_start(struct kgsl_device *device, struct gmu_device *gmu, uint32_t boot_state) { @@ -733,6 +712,16 @@ int hfi_start(struct kgsl_device *device, if (result) return result; + if (test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) { + /* We want all bits starting at LM_DCVS_LEVEL to be 1 */ + int lm_data = -1 << 
(LM_DCVS_LEVEL - 1); + + result = hfi_send_feature_ctrl(gmu, + HFI_FEATURE_LM, 1, lm_data); + if (result) + return result; + } + result = hfi_send_core_fw_start(gmu); if (result) return result; diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h index 197e0d8aecb1619a57ef2881419bc89de5ce3c0c..0d6a3ef9641f76a2c55c69c5b3d57ee37e0fe8cb 100644 --- a/drivers/gpu/msm/kgsl_hfi.h +++ b/drivers/gpu/msm/kgsl_hfi.h @@ -247,11 +247,6 @@ struct hfi_bwtable_cmd { uint32_t ddr_cmd_data[MAX_GX_LEVELS][MAX_BW_CMDS]; }; -struct hfi_bwbuf { - uint32_t hdr[3]; - uint32_t arr[NUM_BW_LEVELS]; -}; - struct opp_gx_desc { uint32_t vote; uint32_t acd; @@ -610,6 +605,7 @@ struct pending_cmd { * @kgsldev: Point to the kgsl device * @hfi_interrupt_num: number of GMU asserted HFI interrupt * @msglock: spinlock to protect access to outstanding command message list + * @read_queue_lock: spinlock to protect against concurrent reading of queues * @cmdq_mutex: mutex to protect command queue access from multiple senders * @msglist: outstanding command message list. Each message in the list * is waiting for ACK from GMU @@ -617,16 +613,19 @@ struct pending_cmd { * @version: HFI version number provided * @seqnum: atomic counter that is incremented for each message sent. 
The * value of the counter is used as sequence number for HFI message + * @bwtbl_cmd: HFI BW table buffer */ struct kgsl_hfi { struct kgsl_device *kgsldev; int hfi_interrupt_num; spinlock_t msglock; + spinlock_t read_queue_lock; struct mutex cmdq_mutex; struct list_head msglist; struct tasklet_struct tasklet; uint32_t version; atomic_t seqnum; + struct hfi_bwtable_cmd bwtbl_cmd; }; struct gmu_device; diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 5566d3fffefc53a9b12dc2f6f16721592ef1d011..c7126fb810113d875218ee2122e7114ab38896a1 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -838,7 +838,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, * search and delays the trace unnecessarily. */ trace_kgsl_mmu_pagefault(ctx->kgsldev, addr, - ptname, write ? "write" : "read"); + ptname, + context != NULL ? context->proc_priv->comm : "unknown", + write ? "write" : "read"); if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE, &adreno_dev->ft_pf_policy)) @@ -846,7 +848,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, if (!no_page_fault_log && __ratelimit(&_rs)) { KGSL_MEM_CRIT(ctx->kgsldev, - "GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname); + "GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr, + ptname, + context != NULL ? 
context->proc_priv->comm : "unknown"); KGSL_MEM_CRIT(ctx->kgsldev, "context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n", ctx->name, ptbase, contextidr, diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index a36127f2bbd0933745ed3abca8fc55173d183ad0..18153e3ff558c41f40636f494c247105119d6f4b 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -428,6 +428,28 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, !test_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags)) return; + if (pwr->gpu_cx_ipeak) { + unsigned int old_freq = pwr->pwrlevels[old_level].gpu_freq; + unsigned int new_freq = pwr->pwrlevels[new_level].gpu_freq; + unsigned int ipeak_freq = pwr->cx_ipeak_gpu_freq; + /* + * Set CX Ipeak vote for GPU if it tries to cross + * threshold frequency. + */ + if (old_freq < ipeak_freq && new_freq >= ipeak_freq) { + int ret = cx_ipeak_update(pwr->gpu_cx_ipeak, true); + /* + * Hardware damage is possible at peak current + * if mitigation not done to limit peak power. + */ + if (ret) { + KGSL_PWR_ERR(device, + "ipeak voting failed due to timeout %d\n", ret); + return; + } + } + } + kgsl_pwrscale_update_stats(device); /* @@ -492,6 +514,25 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, /* Timestamp the frequency change */ device->pwrscale.freq_change_time = ktime_to_ms(ktime_get()); + + if (pwr->gpu_cx_ipeak) { + unsigned int old_freq = pwr->pwrlevels[old_level].gpu_freq; + unsigned int new_freq = pwr->pwrlevels[new_level].gpu_freq; + unsigned int ipeak_freq = pwr->cx_ipeak_gpu_freq; + /* + * Reset CX Ipeak vote for GPU if it goes below + * threshold frequency. 
+ */ + if (old_freq >= ipeak_freq && new_freq < ipeak_freq) { + int ret = cx_ipeak_update(pwr->gpu_cx_ipeak, false); + + /* Failed to withdraw the voting from ipeak driver */ + if (ret) + KGSL_PWR_ERR(device, + "Failed to withdraw votes from ipeak %d\n", + ret); + } + } } EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change); @@ -2414,8 +2455,43 @@ int kgsl_pwrctrl_init(struct kgsl_device *device) of_property_read_string(pdev->dev.of_node, "qcom,tzone-name", &pwr->tzone_name); + /* + * Cx ipeak client support, default value of Cx Ipeak GPU freq + * is used if defined in GPU list and it is overridden by + * new frequency value if defined in dt. + */ + if (of_find_property(pdev->dev.of_node, "qcom,gpu-cx-ipeak", NULL)) { + if (!of_property_read_u32(pdev->dev.of_node, + "qcom,gpu-cx-ipeak-freq", &pwr->cx_ipeak_gpu_freq) + || pwr->cx_ipeak_gpu_freq) { + pwr->gpu_cx_ipeak = cx_ipeak_register(pdev->dev.of_node, + "qcom,gpu-cx-ipeak"); + } else { + KGSL_PWR_ERR(device, + "failed to get GPU-CX-Ipeak Frequency\n"); + result = -EINVAL; + goto error_cleanup_pwr_limit; + } + + if (IS_ERR(pwr->gpu_cx_ipeak)) { + result = PTR_ERR(pwr->gpu_cx_ipeak); + KGSL_PWR_ERR(device, + "Failed to register client with CX Ipeak %d\n", + result); + goto error_cleanup_pwr_limit; + } + } return result; +error_cleanup_pwr_limit: + pwr->power_flags = 0; + + if (!IS_ERR_OR_NULL(pwr->sysfs_pwr_limit)) { + list_del(&pwr->sysfs_pwr_limit->node); + kfree(pwr->sysfs_pwr_limit); + pwr->sysfs_pwr_limit = NULL; + } + kfree(pwr->bus_ib); error_cleanup_pcl: _close_pcl(pwr); error_cleanup_ocmem_pcl: @@ -2435,6 +2511,8 @@ void kgsl_pwrctrl_close(struct kgsl_device *device) KGSL_PWR_INFO(device, "close device %d\n", device->id); + cx_ipeak_unregister(pwr->gpu_cx_ipeak); + pwr->power_flags = 0; if (!IS_ERR_OR_NULL(pwr->sysfs_pwr_limit)) { diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index 8f700028351abc50d8299d661b237c9ed2109c1e..0a78aba0657eab083b57848552ed28666589262c 100644 --- 
a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -14,6 +14,7 @@ #define __KGSL_PWRCTRL_H #include +#include /***************************************************************************** * power flags @@ -172,6 +173,8 @@ struct kgsl_regulator { * isense_clk_indx - index of isense clock, 0 if no isense * isense_clk_on_level - isense clock rate is XO rate below this level. * tzone_name - pointer to thermal zone name of GPU temperature sensor + * gpu_cx_ipeak - pointer to CX Ipeak client used by GPU + * cx_ipeak_gpu_freq - Value of GPU CX Ipeak frequency */ struct kgsl_pwrctrl { @@ -229,6 +232,8 @@ struct kgsl_pwrctrl { unsigned int gpu_bimc_int_clk_freq; bool gpu_bimc_interface_enabled; const char *tzone_name; + struct cx_ipeak_client *gpu_cx_ipeak; + unsigned int cx_ipeak_gpu_freq; }; int kgsl_pwrctrl_init(struct kgsl_device *device); diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c index 0ed17d859080ad16f9f9e54038b8d51149dc4155..13769f8ec2d281ea7a7073a21e76d9d0aab098c5 100644 --- a/drivers/gpu/msm/kgsl_snapshot.c +++ b/drivers/gpu/msm/kgsl_snapshot.c @@ -660,10 +660,12 @@ void kgsl_device_snapshot(struct kgsl_device *device, device->snapshot_faultcount++; /* - * Overwrite a non-GMU fault snapshot if a GMU fault occurs. + * Overwrite a fault snapshot only if GMU is + * enabled and we managed to recover from it. 
*/ if (device->snapshot != NULL) { - if (!device->prioritize_unrecoverable || + if (!gmu_core_gpmu_isenabled(device) || + !device->prioritize_unrecoverable || !device->snapshot->recovered) return; @@ -704,22 +706,20 @@ void kgsl_device_snapshot(struct kgsl_device *device, snapshot->size += sizeof(*header); /* Build the Linux specific header */ - /* We either want to only dump GMU, or we want to dump GPU and GMU */ - if (gmu_fault) { - /* Dump only the GMU */ + if (gmu_fault) kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS, - snapshot, snapshot_os_no_ctxt, NULL); - - if (device->ftbl->snapshot_gmu) - device->ftbl->snapshot_gmu(device, snapshot); - } else { - /* Dump GPU and GMU */ + snapshot, snapshot_os_no_ctxt, NULL); + else kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS, - snapshot, snapshot_os, NULL); + snapshot, snapshot_os, NULL); - if (device->ftbl->snapshot) - device->ftbl->snapshot(device, snapshot, context); - } + /* + * Trigger both GPU and GMU snapshot. GPU specific code + * will take care of whether to dumps full state or only + * GMU state based on current GPU power state. + */ + if (device->ftbl->snapshot) + device->ftbl->snapshot(device, snapshot, context); /* * The timestamp is the seconds since boot so it is easier to match to diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index c7690a14abcda4778d470eb5db5ecbefd3220c5b..d301949b67658e7220342e9904caa82d706fbb66 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -821,14 +821,15 @@ TRACE_EVENT(kgsl_constraint, TRACE_EVENT(kgsl_mmu_pagefault, TP_PROTO(struct kgsl_device *device, unsigned long page, - unsigned int pt, const char *op), + unsigned int pt, const char *name, const char *op), - TP_ARGS(device, page, pt, op), + TP_ARGS(device, page, pt, name, op), TP_STRUCT__entry( __string(device_name, device->name) __field(unsigned long, page) __field(unsigned int, pt) + __string(name, name) __string(op, op) ), @@ -836,13 +837,14 @@ TRACE_EVENT(kgsl_mmu_pagefault, __assign_str(device_name, device->name); __entry->page = page; __entry->pt = pt; + __assign_str(name, name); __assign_str(op, op); ), TP_printk( - "d_name=%s page=0x%lx pt=%u op=%s", + "d_name=%s page=0x%lx pt=%u op=%s name=%s", __get_str(device_name), __entry->page, __entry->pt, - __get_str(op) + __get_str(op), __get_str(name) ) ); @@ -1198,7 +1200,7 @@ TRACE_EVENT(kgsl_clock_throttling, int crc_50pct, int crc_more50pct, int crc_less50pct, - int adj + int64_t adj ), TP_ARGS( idle_10pct, @@ -1212,7 +1214,7 @@ TRACE_EVENT(kgsl_clock_throttling, __field(int, crc_50pct) __field(int, crc_more50pct) __field(int, crc_less50pct) - __field(int, adj) + __field(int64_t, adj) ), TP_fast_assign( __entry->idle_10pct = idle_10pct; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index eca4c9d97110c2e5a1de2eed8457fd44133515ba..725a325ffd651b9dca13a0c7c3ac93067d7ac2cc 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -1028,6 +1028,13 @@ config HID_ALPS Say Y here if you have a Alps touchpads over i2c-hid or usbhid and want support for its special functionalities. +config HID_QVR + tristate "QVR support" + depends on HID + ---help--- + Say 'Y' or 'M' if you want to connect an external device to + stream sensor data for QVR support. 
+ endmenu endif # HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 235bd2a7b333a4da9650ea4dc6d094318c90f7b2..376d52ac913e0abed45d057a0b1669b9a9f1add6 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -122,3 +122,5 @@ obj-$(CONFIG_USB_KBD) += usbhid/ obj-$(CONFIG_I2C_HID) += i2c-hid/ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ + +obj-$(CONFIG_HID_QVR) += hid-qvr.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 672b0be41d44218040607f0086be2392a708d61b..a36b7132cdf7cb2b88d0ed06406bd28b35b92588 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2247,6 +2247,9 @@ static const struct hid_device_id hid_have_special_driver[] = { #if IS_ENABLED(CONFIG_HID_PRODIKEYS) { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, #endif +#if IS_ENABLED(CONFIG_HID_QVR) + { HID_USB_DEVICE(USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5) }, +#endif #if IS_ENABLED(CONFIG_HID_RETRODE) { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) }, #endif diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 9e478f03e8456f18ee2df447602ebf7bc42c2ea6..461a5f1b220cb122ee8a0e28f391a5949dccb91a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1172,4 +1172,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 +#define USB_VENDOR_ID_QVR5 0x045e +#define USB_DEVICE_ID_QVR5 0x0659 + #endif diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index febb21ee190e99134ae995887ceeeb86721e1440..584b10d3fc3d84d026c56e4ae4b74fb7b2525d69 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -2,7 +2,7 @@ * Plantronics USB HID Driver * * Copyright (c) 2014 JD Cole - * Copyright (c) 2015 Terry Junge + * Copyright (c) 2015-2018 Terry Junge */ /* @@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev, unsigned short mapped_key; unsigned long plt_type = (unsigned 
long)hid_get_drvdata(hdev); + /* special case for PTT products */ + if (field->application == HID_GD_JOYSTICK) + goto defaulted; + /* handle volume up/down mapping */ /* non-standard types or multi-HID interfaces - plt_type is PID */ if (!(plt_type & HID_USAGE_PAGE)) { diff --git a/drivers/hid/hid-qvr.c b/drivers/hid/hid-qvr.c new file mode 100644 index 0000000000000000000000000000000000000000..7e1eac068d5120a066491ed9715d88964be0ba4b --- /dev/null +++ b/drivers/hid/hid-qvr.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hid-ids.h" +#include "hid-qvr.h" + +static struct dma_buf *qvr_buf; +static void *vaddr; +static size_t vsize; +static uint64_t ts_base; +static uint64_t ts_offset; + +struct gpio_info { + unsigned int smem_bit; + struct qcom_smem_state *smem_state; +}; + + +static struct device *qvr_device; +static struct gpio_info gpio_info_out; + +static struct hid_driver qvr_external_sensor_driver; +static int fd; + +const static int msg_size = 368; +const static int hid_request_report_id = 2; +const static int hid_request_report_size = 64; + +struct qvr_buf_index { + int most_recent_index; + uint8_t padding[60]; +}; + +struct qvr_sensor_t { + uint64_t gts; + uint64_t ats; + uint64_t mts; + s32 gx; + s32 gy; + s32 gz; + s32 ax; + s32 ay; + s32 az; + s32 mx; + s32 my; + s32 mz; + uint8_t padding[4]; +}; + + +int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid) +{ + struct qvr_sensor_t *sensor_buf; + struct qvr_sensor_t *data; + static int buf_index; + struct external_imu_format imuData = { 0 }; + struct qvr_buf_index *index_buf; + + /* + * Actual message size is 369 bytes + * to make it 8 byte aligned we created a structure of size 368 bytes. 
+ * Ignoring the first byte 'report id' (which is always 1) + * + */ + memcpy((void *)&imuData, (void *)message + 1, msg_size); + + if (!ts_base) + ts_base = ktime_to_ns(ktime_get_boottime()); + if (!ts_offset) + ts_offset = imuData.gts0; + index_buf = (struct qvr_buf_index *) + ((uintptr_t)vaddr + (vsize / 2) + (8 * sizeof(*sensor_buf))); + sensor_buf = (struct qvr_sensor_t *)((uintptr_t)vaddr + (vsize / 2)); + + data = (struct qvr_sensor_t *)&(sensor_buf[buf_index]); + if (ts_offset > imuData.gts0) + data->ats = ts_base + ((ts_offset - imuData.gts0) * 100); + else + data->ats = ts_base + ((imuData.gts0 - ts_offset) * 100); + data->gts = data->ats; + data->mts = data->ats; + data->ax = -imuData.ax0; + data->ay = imuData.ay0; + data->az = -imuData.az0; + data->gx = -imuData.gx0; + data->gy = imuData.gy0; + data->gz = -imuData.gz0; + data->mx = -imuData.mx0; + data->my = imuData.my0; + data->mz = -imuData.mz0; + + index_buf->most_recent_index = buf_index; + buf_index = (buf_index == (8 - 1)) ? 
0 : buf_index + 1; + return 0; +} + +static int register_smp2p(struct device *dev, char *node_name, + struct gpio_info *gpio_info_ptr) +{ + struct device_node *node = dev->of_node; + + if (!gpio_info_ptr) + return -EINVAL; + if (node == NULL) { + pr_debug("%s: device node NULL\n", __func__); + dev->of_node = of_find_compatible_node(NULL, NULL, node_name); + node = dev->of_node; + } + if (!of_find_property(node, "qcom,smem-states", NULL)) + return -EINVAL; + gpio_info_ptr->smem_state = qcom_smem_state_get(dev, + "qvrexternal-smp2p-out", + &gpio_info_ptr->smem_bit); + pr_debug("%s: state: %pK, bit: %d\n", __func__, + gpio_info_ptr->smem_state, + gpio_info_ptr->smem_bit); + if (IS_ERR_OR_NULL(gpio_info_ptr->smem_state)) { + pr_debug("%s: Error smem_state\n", __func__); + return PTR_ERR(gpio_info_ptr->smem_state); + } + + return 0; + +} +static int kernel_map_gyro_buffer(int fd) +{ + int ret = 0; + + qvr_buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(qvr_buf)) { + ret = -ENOMEM; + pr_err("dma_buf_get failed for fd: %d\n", fd); + goto done; + } + ret = dma_buf_begin_cpu_access(qvr_buf, DMA_BIDIRECTIONAL); + if (ret) { + pr_err("%s: dma_buf_begin_cpu_access failed\n", __func__); + goto err_dma; + } + vsize = qvr_buf->size; + vaddr = dma_buf_kmap(qvr_buf, 0); + if (IS_ERR_OR_NULL(vaddr)) { + ret = -ENOMEM; + pr_err("dma_buf_kmap failed for fd: %d\n", fd); + goto err_end_access; + } + + return 0; + +err_end_access: + dma_buf_end_cpu_access(qvr_buf, DMA_BIDIRECTIONAL); +err_dma: + dma_buf_put(qvr_buf); + qvr_buf = NULL; +done: + return ret; + +} + + +static void kernel_unmap_gyro_buffer(void) +{ + if (IS_ERR_OR_NULL(vaddr)) + return; + dma_buf_kunmap(qvr_buf, 0, vaddr); + dma_buf_end_cpu_access(qvr_buf, DMA_BIDIRECTIONAL); + vaddr = NULL; + dma_buf_put(qvr_buf); + qvr_buf = NULL; +} + +static ssize_t fd_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return snprintf(buf, sizeof(buf), "%d\n", fd); +} + +static ssize_t fd_store(struct kobject *kobj, + 
struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int ret; + + ret = kstrtoint(buf, 10, &fd); + if (ret < 0) + return ret; + if (fd == -1) + kernel_unmap_gyro_buffer(); + else + kernel_map_gyro_buffer(fd); + ts_base = 0; + ts_offset = 0; + + return count; +} + +static ssize_t ts_base_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return snprintf(buf, 16, "%lld\n", ts_base); +} + +static ssize_t ts_base_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return 0; +} + +static ssize_t ts_offset_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return snprintf(buf, 16, "%lld\n", ts_offset * 100); +} + +static ssize_t ts_offset_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return 0; +} + +static struct kobj_attribute fd_attribute = __ATTR(fd, 0664, + fd_show, + fd_store); +static struct kobj_attribute ts_base_attribute = __ATTR(ts_base, 0664, + ts_base_show, + ts_base_store); +static struct kobj_attribute ts_offset_attribute = __ATTR(ts_offset, 0664, + ts_offset_show, + ts_offset_store); + +static struct attribute *attrs[] = { + &fd_attribute.attr, + &ts_base_attribute.attr, + &ts_offset_attribute.attr, + NULL, +}; + +static struct attribute_group attr_group = { + .attrs = attrs, +}; + +static struct kobject *qvr_external_sensor_kobj; + +static int qvr_external_sensor_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int ret; + char *node_name = "qcom,smp2p-interrupt-qvrexternal-5-out"; + __u8 *hid_buf; + + ret = register_smp2p(&hdev->dev, node_name, &gpio_info_out); + if (ret) { + pr_err("%s: register_smp2p failed", __func__); + goto err_free; + } + ret = hid_open_report(hdev); + if (ret) { + pr_err("%s: hid_open_report failed", __func__); + goto err_free; + } + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + pr_err("%s: hid_hw_start failed", __func__); + goto err_free; + 
} + if (hdev->vendor == USB_VENDOR_ID_QVR5) { + hid_buf = kzalloc(255, GFP_ATOMIC); + if (hid_buf == NULL) + return -ENOMEM; + hid_buf[0] = hid_request_report_id; + hid_buf[1] = 7; + ret = hid_hw_raw_request(hdev, hid_buf[0], hid_buf, + hid_request_report_size, + HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); + kfree(hid_buf); + } + + qvr_device = &hdev->dev; + + return 0; + +err_free: + return ret; + +} + +static int qvr_external_sensor_raw_event(struct hid_device *hid, + struct hid_report *report, + u8 *data, int size) +{ + static int val; + int ret = -1; + + if ((hid->vendor == USB_VENDOR_ID_QVR5) && (vaddr != NULL)) { + ret = qvr_send_package_wrap(data/*hid_value*/, size, hid); + if (ret == 0) { + val = 1 ^ val; + qcom_smem_state_update_bits(gpio_info_out.smem_state, + BIT(gpio_info_out.smem_bit), val); + ret = -1; + } + } + return ret; +} + +static void qvr_external_sensor_device_remove(struct hid_device *hdev) +{ + hid_hw_stop(hdev); +} + +static struct hid_device_id qvr_external_sensor_table[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5) }, + { } +}; +MODULE_DEVICE_TABLE(hid, qvr_external_sensor_table); + +static struct hid_driver qvr_external_sensor_driver = { + .name = "qvr_external_sensor", + .id_table = qvr_external_sensor_table, + .probe = qvr_external_sensor_probe, + .raw_event = qvr_external_sensor_raw_event, + .remove = qvr_external_sensor_device_remove, +}; + +module_hid_driver(qvr_external_sensor_driver); + +static int __init qvr_external_sensor_init(void) +{ + int ret = 0; + + qvr_external_sensor_kobj = + kobject_create_and_add("qvr_external_sensor", kernel_kobj); + if (!qvr_external_sensor_kobj) { + pr_err("%s: kobject_create_and_add() fail\n", __func__); + return -ENOMEM; + } + ret = sysfs_create_group(qvr_external_sensor_kobj, &attr_group); + if (ret) { + pr_err("%s: can't register sysfs\n", __func__); + return -ENOMEM; + } + + return ret; +} + +static void __exit qvr_external_sensor_exit(void) +{ + 
kobject_put(qvr_external_sensor_kobj); +} + +module_init(qvr_external_sensor_init); +module_exit(qvr_external_sensor_exit); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/hid/hid-qvr.h b/drivers/hid/hid-qvr.h new file mode 100644 index 0000000000000000000000000000000000000000..c1a3a93e9b0458f6e5a27bfc88f388f653b1efe7 --- /dev/null +++ b/drivers/hid/hid-qvr.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef HID_QVR_H_FILE +#define HID_QVR_H_FILE + +#define USB_VENDOR_ID_QVR5 0x045e +#define USB_DEVICE_ID_QVR5 0x0659 +#define QVR_EXTERNAL_SENSOR_REPORT_ID 0x1 + +struct external_imu_format { + s16 temp0; + s16 temp1; + s16 temp2; + s16 temp3; + u64 gts0; + u64 gts1; + u64 gts2; + u64 gts3; + s16 gx0; + s16 gx1; + s16 gx2; + s16 gx3; + s16 gx4; + s16 gx5; + s16 gx6; + s16 gx7; + s16 gx8; + s16 gx9; + s16 gx10; + s16 gx11; + s16 gx12; + s16 gx13; + s16 gx14; + s16 gx15; + s16 gx16; + s16 gx17; + s16 gx18; + s16 gx19; + s16 gx20; + s16 gx21; + s16 gx22; + s16 gx23; + s16 gx24; + s16 gx25; + s16 gx26; + s16 gx27; + s16 gx28; + s16 gx29; + s16 gx30; + s16 gx31; + s16 gy0; + s16 gy1; + s16 gy2; + s16 gy3; + s16 gy4; + s16 gy5; + s16 gy6; + s16 gy7; + s16 gy8; + s16 gy9; + s16 gy10; + s16 gy11; + s16 gy12; + s16 gy13; + s16 gy14; + s16 gy15; + s16 gy16; + s16 gy17; + s16 gy18; + s16 gy19; + s16 gy20; + s16 gy21; + s16 gy22; + s16 gy23; + s16 gy24; + s16 gy25; + s16 gy26; + s16 gy27; + s16 gy28; + s16 gy29; + s16 gy30; + s16 gy31; + s16 gz0; + s16 gz1; + 
s16 gz2; + s16 gz3; + s16 gz4; + s16 gz5; + s16 gz6; + s16 gz7; + s16 gz8; + s16 gz9; + s16 gz10; + s16 gz11; + s16 gz12; + s16 gz13; + s16 gz14; + s16 gz15; + s16 gz16; + s16 gz17; + s16 gz18; + s16 gz19; + s16 gz20; + s16 gz21; + s16 gz22; + s16 gz23; + s16 gz24; + s16 gz25; + s16 gz26; + s16 gz27; + s16 gz28; + s16 gz29; + s16 gz30; + s16 gz31; + u64 ats0; + u64 ats1; + u64 ats2; + u64 ats3; + s32 ax0; + s32 ax1; + s32 ax2; + s32 ax3; + s32 ay0; + s32 ay1; + s32 ay2; + s32 ay3; + s32 az0; + s32 az1; + s32 az2; + s32 az3; + u64 mts0; + u64 mts1; + u64 mts2; + u64 mts3; + s16 mx0; + s16 mx1; + s16 mx2; + s16 mx3; + s16 my0; + s16 my1; + s16 my2; + s16 my3; + s16 mz0; + s16 mz1; + s16 mz2; + s16 mz3; //368 bytes +}; + +int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid); +void qvr_clear_def_parmeter(void); +void qvr_init(struct hid_device *hdev); +int qvr_input_init(void); +void qvr_input_remove(void); + +#endif diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d9282755638934331acf79f0fafe960b1122757a..136a34dc31b8e23fbea60fdda7968c81e65adfab 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -1036,6 +1036,14 @@ static int i2c_hid_probe(struct i2c_client *client, pm_runtime_enable(&client->dev); device_enable_async_suspend(&client->dev); + /* Make sure there is something at this address */ + ret = i2c_smbus_read_byte(client); + if (ret < 0) { + dev_dbg(&client->dev, "nothing at this address: %d\n", ret); + ret = -ENXIO; + goto err_pm; + } + ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) goto err_pm; diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index f489a5cfcb48cf5216a532091482ac669786e61d..90af938be5159ce57889004975a99d0623540b97 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -170,6 +170,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, 
HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5, HID_QUIRK_HIDINPUT_FORCE }, { 0, 0 } }; diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c index 943e5c4ea855dd09065a1e967d2d2c3a7c0e954f..80cf70e0d73a6b2dd65132164ac486fe2d097108 100644 --- a/drivers/hwtracing/coresight/coresight-ost.c +++ b/drivers/hwtracing/coresight/coresight-ost.c @@ -176,6 +176,9 @@ static inline int __stm_trace(uint32_t flags, uint8_t entity_id, uint32_t ch; void __iomem *ch_addr; + if (!(drvdata && drvdata->master_enable)) + return 0; + /* allocate channel and get the channel address */ ch = stm_channel_alloc(); if (unlikely(ch >= drvdata->numsp)) { @@ -226,9 +229,9 @@ int stm_trace(uint32_t flags, uint8_t entity_id, uint8_t proto_id, struct stm_drvdata *drvdata = stmdrvdata; /* we don't support sizes more than 24bits (0 to 23) */ - if (!(drvdata && drvdata->enable && drvdata->master_enable && - test_bit(entity_id, drvdata->entities) && size && - (size < 0x1000000))) + if (!(drvdata && drvdata->enable && + test_bit(entity_id, drvdata->entities) && + size && (size < 0x1000000))) return 0; return __stm_trace(flags, entity_id, proto_id, data, size); diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 9b0525c272380037338c7d4bb40597928e469170..2b537932f3857d1d88eb7f29db7d87a8f0fcdf92 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -367,6 +367,9 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data, if (!(drvdata && local_read(&drvdata->mode))) return -EACCES; + if (!drvdata->master_enable) + return -EPERM; + if (channel >= drvdata->numsp) return -EINVAL; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c 
b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 42e263347e4f4bd62a61bdbbaa4d5ce7fef8de0c..1820e094280e6aa554dbbf21375c72ba2f0f1f7f 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -937,7 +937,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) tmc_etr_enable_hw(drvdata); drvdata->enable = true; - drvdata->sticky_enable = true; out: spin_unlock_irqrestore(&drvdata->spinlock, flags); @@ -1018,6 +1017,7 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev) spin_unlock_irqrestore(&drvdata->spinlock, flags); tmc_etr_bam_disable(drvdata); usb_qdss_close(drvdata->usbch); + drvdata->mode = CS_MODE_DISABLED; goto out; } else { tmc_etr_disable_hw(drvdata); diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 8ed08a9c129f7ae498cc2a09f1e325ce7be6dc71..8fef3c073b4fe0c817372319a7ff662c45380603 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -141,7 +141,6 @@ static void __tmc_reg_dump(struct tmc_drvdata *drvdata) void tmc_enable_hw(struct tmc_drvdata *drvdata) { drvdata->enable = true; - drvdata->sticky_enable = true; writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); if (drvdata->force_reg_dump) __tmc_reg_dump(drvdata); @@ -157,7 +156,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata) { int ret = 0; - if (!drvdata->sticky_enable) + if (!drvdata->enable) return -EPERM; switch (drvdata->config_type) { @@ -590,8 +589,8 @@ static ssize_t block_size_store(struct device *dev, if (!drvdata->byte_cntr) return -EINVAL; - if (val && val < 16) { - pr_err("Assign minimum block size of 16 bytes\n"); + if (val && val < 4096) { + pr_err("Assign minimum block size of 4096 bytes\n"); return -EINVAL; } diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 
ef79c9efd8409adc432af692922a154c1cc81f8e..7bb1ed6936423b7653f5d22ac508445da5c863bd 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -206,8 +206,10 @@ static int coresight_enable_link(struct coresight_device *csdev, if (atomic_inc_return(&csdev->refcnt[refport]) == 1) { if (link_ops(csdev)->enable) { ret = link_ops(csdev)->enable(csdev, inport, outport); - if (ret) + if (ret) { + atomic_dec(&csdev->refcnt[refport]); return ret; + } } } @@ -296,42 +298,66 @@ static bool coresight_disable_source(struct coresight_device *csdev) return !csdev->enable; } -void coresight_disable_path(struct list_head *path) +static void coresigh_disable_list_node(struct list_head *path, + struct coresight_node *nd) { u32 type; - struct coresight_node *nd; struct coresight_device *csdev, *parent, *child; - list_for_each_entry(nd, path, link) { - csdev = nd->csdev; - type = csdev->type; + csdev = nd->csdev; + type = csdev->type; - /* - * ETF devices are tricky... They can be a link or a sink, - * depending on how they are configured. If an ETF has been - * "activated" it will be configured as a sink, otherwise - * go ahead with the link configuration. - */ - if (type == CORESIGHT_DEV_TYPE_LINKSINK) - type = (csdev == coresight_get_sink(path)) ? - CORESIGHT_DEV_TYPE_SINK : - CORESIGHT_DEV_TYPE_LINK; + /* + * ETF devices are tricky... They can be a link or a sink, + * depending on how they are configured. If an ETF has been + * "activated" it will be configured as a sink, otherwise + * go ahead with the link configuration. + */ + if (type == CORESIGHT_DEV_TYPE_LINKSINK) + type = (csdev == coresight_get_sink(path)) ? 
+ CORESIGHT_DEV_TYPE_SINK : + CORESIGHT_DEV_TYPE_LINK; + + switch (type) { + case CORESIGHT_DEV_TYPE_SINK: + coresight_disable_sink(csdev); + break; + case CORESIGHT_DEV_TYPE_SOURCE: + /* sources are disabled from either sysFS or Perf */ + break; + case CORESIGHT_DEV_TYPE_LINK: + parent = list_prev_entry(nd, link)->csdev; + child = list_next_entry(nd, link)->csdev; + coresight_disable_link(csdev, parent, child); + break; + default: + break; + } +} - switch (type) { - case CORESIGHT_DEV_TYPE_SINK: - coresight_disable_sink(csdev); - break; - case CORESIGHT_DEV_TYPE_SOURCE: - /* sources are disabled from either sysFS or Perf */ - break; - case CORESIGHT_DEV_TYPE_LINK: - parent = list_prev_entry(nd, link)->csdev; - child = list_next_entry(nd, link)->csdev; - coresight_disable_link(csdev, parent, child); - break; - default: - break; - } +/** + * During enabling path, if it is failed, then only those enabled + * devices need to be disabled. This function is to disable devices + * which is enabled before the failed device. 
+ * + * @path the head of the list + * @nd the failed device node + */ +static void coresight_disable_previous_devs(struct list_head *path, + struct coresight_node *nd) +{ + + list_for_each_entry_continue(nd, path, link) { + coresigh_disable_list_node(path, nd); + } +} + +void coresight_disable_path(struct list_head *path) +{ + struct coresight_node *nd; + + list_for_each_entry(nd, path, link) { + coresigh_disable_list_node(path, nd); } } @@ -382,7 +408,7 @@ int coresight_enable_path(struct list_head *path, u32 mode) out: return ret; err: - coresight_disable_path(path); + coresight_disable_previous_devs(path, nd); goto out; } @@ -786,6 +812,7 @@ static ssize_t enable_source_store(struct device *dev, if (ret) return ret; } else { + atomic_set(csdev->refcnt, 1); coresight_disable(csdev); } @@ -1032,6 +1059,7 @@ static ssize_t reset_source_sink_store(struct bus_type *bus, csdev = coresight_get_source(cspath->path); if (!csdev) continue; + atomic_set(csdev->refcnt, 1); __coresight_disable(csdev); } diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 24be5a8454acfdf45583ca8a95510cd948617c0c..f17e56b52c757a03a1d3a8808a73f4995bb6265d 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -227,8 +227,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) bitmap_release_region(&master->chan_map[0], output->channel, ilog2(output->nr_chans)); - output->nr_chans = 0; master->nr_free += output->nr_chans; + output->nr_chans = 0; } /* diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index f96830ffd9f1c1456965810fad723ab365a7f263..75c6b98585ba2e545ac0b3239e237796b68881b7 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, goto err_desc; } + reinit_completion(&dma->cmd_complete); txdesc->callback = i2c_imx_dma_callback; txdesc->callback_param = i2c_imx; if 
(dma_submit_error(dmaengine_submit(txdesc))) { @@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, * The first byte must be transmitted by the CPU. */ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); - reinit_completion(&i2c_imx->dma->cmd_complete); time_left = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); @@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, if (result) return result; - reinit_completion(&i2c_imx->dma->cmd_complete); time_left = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 56e46581b84bdb03eeb07ddaa8d83cec1aa76341..6f2fe63e8f5aa80ade4daa76bdfe0b48516b2256 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -808,8 +808,11 @@ EXPORT_SYMBOL_GPL(i2c_new_device); */ void i2c_unregister_device(struct i2c_client *client) { - if (client->dev.of_node) + if (client->dev.of_node) { of_node_clear_flag(client->dev.of_node, OF_POPULATED); + of_node_put(client->dev.of_node); + } + if (ACPI_COMPANION(&client->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); device_unregister(&client->dev); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f0b06b14e782b5b926b5ba7876d827551ce4cfa9..16249b0953fff61e14f0c786decb4de6927a5e74 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = { }; #define ICPU(model, cpu) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu } static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem), @@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void) return -ENODEV; } + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + 
pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return -ENODEV; diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index d8efdc191c27f0cda1a5458d523b402811c87805..55252079faf65812ca5663a98be193543636150f 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1558,7 +1558,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, mad_reg_req->oui, 3)) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; - BUG_ON(!*method); + if (!*method) + goto error3; goto check_in_use; } } @@ -1568,10 +1569,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, vclass]->oui[i])) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; - BUG_ON(*method); /* Allocate method table for this OUI */ - if ((ret = allocate_method_table(method))) - goto error3; + if (!*method) { + ret = allocate_method_table(method); + if (ret) + goto error3; + } memcpy((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3); goto check_in_use; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index e47baf0950e3dbe2039b5e0cf9d79e585839af53..a22b992cde38c2bdf586bcea56b2edb351b8bf89 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -218,7 +218,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) return NULL; mutex_lock(&mut); - mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); + mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (mc->id < 0) goto error; @@ -1404,6 +1404,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, goto err3; } + mutex_lock(&mut); + idr_replace(&multicast_idr, mc, mc->id); + mutex_unlock(&mut); + mutex_unlock(&file->mut); ucma_put_ctx(ctx); return 0; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 
186dce6bba8f3a212ccd87acb4b2c0f43ed37e4b..f836ed1dd300eb53a1e025bd29e293685482bea4 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1981,15 +1981,64 @@ static int modify_qp(struct ib_uverbs_file *file, goto release_qp; } - if ((cmd->base.attr_mask & IB_QP_AV) && - !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { - ret = -EINVAL; - goto release_qp; + if ((cmd->base.attr_mask & IB_QP_AV)) { + if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { + ret = -EINVAL; + goto release_qp; + } + + if (cmd->base.attr_mask & IB_QP_STATE && + cmd->base.qp_state == IB_QPS_RTR) { + /* We are in INIT->RTR TRANSITION (if we are not, + * this transition will be rejected in subsequent checks). + * In the INIT->RTR transition, we cannot have IB_QP_PORT set, + * but the IB_QP_STATE flag is required. + * + * Since kernel 3.14 (commit dbf727de7440), the uverbs driver, + * when IB_QP_AV is set, has required inclusion of a valid + * port number in the primary AV. (AVs are created and handled + * differently for infiniband and ethernet (RoCE) ports). + * + * Check the port number included in the primary AV against + * the port number in the qp struct, which was set (and saved) + * in the RST->INIT transition. + */ + if (cmd->base.dest.port_num != qp->real_qp->port) { + ret = -EINVAL; + goto release_qp; + } + } else { + /* We are in SQD->SQD. (If we are not, this transition will + * be rejected later in the verbs layer checks). + * Check for both IB_QP_PORT and IB_QP_AV, these can be set + * together in the SQD->SQD transition. + * + * If only IP_QP_AV was set, add in IB_QP_PORT as well (the + * verbs layer driver does not track primary port changes + * resulting from path migration. Thus, in SQD, if the primary + * AV is modified, the primary port should also be modified). + * + * Note that in this transition, the IB_QP_STATE flag + * is not allowed. 
+ */ + if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == (IB_QP_AV | IB_QP_PORT)) && + cmd->base.port_num != cmd->base.dest.port_num) { + ret = -EINVAL; + goto release_qp; + } + if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == IB_QP_AV) { + cmd->base.attr_mask |= IB_QP_PORT; + cmd->base.port_num = cmd->base.dest.port_num; + } + } } if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || - !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) { + !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) || + cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) { ret = -EINVAL; goto release_qp; } @@ -3376,6 +3425,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_uobj; } + if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { + err = -EINVAL; + goto err_put; + } + flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs * sizeof(union ib_flow_spec), GFP_KERNEL); if (!flow_attr) { diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 9032f77cc38d259e08f8ccaff34bd7808d7ff785..feb80dbb59487124778ced1734a53bda1ee448cb 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2115,10 +2115,16 @@ static void __ib_drain_sq(struct ib_qp *qp) struct ib_cq *cq = qp->send_cq; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_drain_cqe sdrain; - struct ib_send_wr swr = {}, *bad_swr; + struct ib_send_wr *bad_swr; + struct ib_rdma_wr swr = { + .wr = { + .next = NULL, + { .wr_cqe = &sdrain.cqe, }, + .opcode = IB_WR_RDMA_WRITE, + }, + }; int ret; - swr.wr_cqe = &sdrain.cqe; sdrain.cqe.done = ib_drain_qp_done; init_completion(&sdrain.done); @@ -2128,7 +2134,7 @@ static void __ib_drain_sq(struct ib_qp *qp) return; } - ret = ib_post_send(qp, &swr, &bad_swr); + ret = ib_post_send(qp, &swr.wr, &bad_swr); if (ret) { WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); return; diff --git 
a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 7b1d7e58671ed5674ec49a13d2e050e3c3116b24..2e7982042fe59e94efbb8870ed2bc414a3748374 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -787,13 +787,17 @@ static int srpt_post_recv(struct srpt_device *sdev, */ static int srpt_zerolength_write(struct srpt_rdma_ch *ch) { - struct ib_send_wr wr, *bad_wr; + struct ib_send_wr *bad_wr; + struct ib_rdma_wr wr = { + .wr = { + .next = NULL, + { .wr_cqe = &ch->zw_cqe, }, + .opcode = IB_WR_RDMA_WRITE, + .send_flags = IB_SEND_SIGNALED, + } + }; - memset(&wr, 0, sizeof(wr)); - wr.opcode = IB_WR_RDMA_WRITE; - wr.wr_cqe = &ch->zw_cqe; - wr.send_flags = IB_SEND_SIGNALED; - return ib_post_send(ch->qp, &wr, &bad_wr); + return ib_post_send(ch->qp, &wr.wr, &bad_wr); } static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc) diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c index f60e835af89cc4d797a99f2b524a69d3b6220a20..78ea30de6689a3c33045cd504fe5f175b94b413c 100644 --- a/drivers/input/misc/qti-haptics.c +++ b/drivers/input/misc/qti-haptics.c @@ -10,8 +10,10 @@ * GNU General Public License for more details. 
*/ +#include #include #include +#include #include #include #include @@ -24,8 +26,9 @@ #include #include #include -#include +#include #include +#include enum actutor_type { ACT_LRA, @@ -70,6 +73,7 @@ enum haptics_custom_effect_param { #define HAP_SC_DET_MAX_COUNT 5 #define HAP_SC_DET_TIME_US 1000000 #define FF_EFFECT_COUNT_MAX 32 +#define HAP_DISABLE_DELAY_USEC 1000 /* haptics module register definitions */ #define REG_HAP_STATUS1 0x0A @@ -216,6 +220,8 @@ struct qti_hap_chip { struct qti_hap_effect constant; struct regulator *vdd_supply; struct hrtimer stop_timer; + struct hrtimer hap_disable_timer; + struct dentry *hap_debugfs; spinlock_t bus_lock; ktime_t last_sc_time; int play_irq; @@ -524,9 +530,24 @@ static int qti_haptics_config_play_rate_us(struct qti_hap_chip *chip, return rc; } -static int qti_haptics_config_brake(struct qti_hap_chip *chip, u8 *brake) +static int qti_haptics_brake_enable(struct qti_hap_chip *chip, bool en) { u8 addr, mask, val; + int rc; + + addr = REG_HAP_EN_CTL2; + mask = HAP_BRAKE_EN_BIT; + val = en ? HAP_BRAKE_EN_BIT : 0; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "write BRAKE_EN failed, rc=%d\n", rc); + + return rc; +} + +static int qti_haptics_config_brake(struct qti_hap_chip *chip, u8 *brake) +{ + u8 addr, val; int i, rc; addr = REG_HAP_BRAKE; @@ -543,11 +564,7 @@ static int qti_haptics_config_brake(struct qti_hap_chip *chip, u8 *brake) * Set BRAKE_EN regardless of the brake pattern, this helps to stop * playing immediately once the valid values in WF_Sx are played. 
*/ - addr = REG_HAP_EN_CTL2; - val = mask = HAP_BRAKE_EN_BIT; - rc = qti_haptics_masked_write(chip, addr, mask, val); - if (rc < 0) - dev_err(chip->dev, "set EN_CTL2 failed, rc=%d\n", rc); + rc = qti_haptics_brake_enable(chip, true); return rc; } @@ -567,16 +584,46 @@ static int qti_haptics_lra_auto_res_enable(struct qti_hap_chip *chip, bool en) return rc; } +#define HAP_CLEAR_PLAYING_RATE_US 15 +static int qti_haptics_clear_settings(struct qti_hap_chip *chip) +{ + int rc; + u8 pattern[HAP_WAVEFORM_BUFFER_MAX] = {1, 0, 0, 0, 0, 0, 0, 0}; + + rc = qti_haptics_brake_enable(chip, false); + if (rc < 0) + return rc; + + rc = qti_haptics_lra_auto_res_enable(chip, false); + if (rc < 0) + return rc; + + rc = qti_haptics_config_play_rate_us(chip, HAP_CLEAR_PLAYING_RATE_US); + if (rc < 0) + return rc; + + rc = qti_haptics_write(chip, REG_HAP_WF_S1, pattern, + HAP_WAVEFORM_BUFFER_MAX); + if (rc < 0) + return rc; + + rc = qti_haptics_play(chip, true); + if (rc < 0) + return rc; + + rc = qti_haptics_play(chip, false); + if (rc < 0) + return rc; + + return 0; +} + static int qti_haptics_load_constant_waveform(struct qti_hap_chip *chip) { struct qti_hap_play_info *play = &chip->play; struct qti_hap_config *config = &chip->config; int rc = 0; - rc = qti_haptics_module_en(chip, true); - if (rc < 0) - return rc; - rc = qti_haptics_config_play_rate_us(chip, config->play_rate_us); if (rc < 0) return rc; @@ -604,6 +651,7 @@ static int qti_haptics_load_constant_waveform(struct qti_hap_chip *chip) return rc; play->playing_pattern = false; + play->effect = NULL; } else { rc = qti_haptics_config_vmax(chip, config->vmax_mv); if (rc < 0) @@ -644,11 +692,6 @@ static int qti_haptics_load_predefined_effect(struct qti_hap_chip *chip, play->effect = &chip->predefined[effect_idx]; play->playing_pos = 0; - - rc = qti_haptics_module_en(chip, true); - if (rc < 0) - return rc; - rc = qti_haptics_config_vmax(chip, play->vmax_mv); if (rc < 0) return rc; @@ -790,15 +833,15 @@ static int 
qti_haptics_upload_effect(struct input_dev *dev, struct qti_hap_play_info *play = &chip->play; int rc = 0, tmp, i; s16 level, data[CUSTOM_DATA_LEN]; - - if (chip->vdd_supply && !chip->vdd_enabled) { - rc = regulator_enable(chip->vdd_supply); - if (rc < 0) { - dev_err(chip->dev, "Enable VDD supply failed, rc=%d\n", - rc); - return rc; - } - chip->vdd_enabled = true; + ktime_t rem; + s64 time_us; + + if (hrtimer_active(&chip->hap_disable_timer)) { + rem = hrtimer_get_remaining(&chip->hap_disable_timer); + time_us = ktime_to_us(rem); + dev_dbg(chip->dev, "waiting for playing clear sequence: %ld us\n", + time_us); + usleep_range(time_us, time_us + 100); } switch (effect->type) { @@ -814,27 +857,22 @@ static int qti_haptics_upload_effect(struct input_dev *dev, if (rc < 0) { dev_err(chip->dev, "Play constant waveform failed, rc=%d\n", rc); - goto disable_vdd; + return rc; } break; case FF_PERIODIC: - if (chip->effects_count == 0) { - rc = -EINVAL; - goto disable_vdd; - } + if (chip->effects_count == 0) + return -EINVAL; if (effect->u.periodic.waveform != FF_CUSTOM) { dev_err(chip->dev, "Only accept custom waveforms\n"); - rc = -EINVAL; - goto disable_vdd; + return -EINVAL; } if (copy_from_user(data, effect->u.periodic.custom_data, - sizeof(s16) * CUSTOM_DATA_LEN)) { - rc = -EFAULT; - goto disable_vdd; - } + sizeof(s16) * CUSTOM_DATA_LEN)) + return -EFAULT; for (i = 0; i < chip->effects_count; i++) if (chip->predefined[i].id == @@ -844,8 +882,7 @@ static int qti_haptics_upload_effect(struct input_dev *dev, if (i == chip->effects_count) { dev_err(chip->dev, "predefined effect %d is NOT supported\n", data[0]); - rc = -EINVAL; - goto disable_vdd; + return -EINVAL; } level = effect->u.periodic.magnitude; @@ -858,7 +895,7 @@ static int qti_haptics_upload_effect(struct input_dev *dev, if (rc < 0) { dev_err(chip->dev, "Play predefined effect %d failed, rc=%d\n", chip->predefined[i].id, rc); - goto disable_vdd; + return rc; } get_play_length(play, &play->length_us); @@ -873,30 
+910,27 @@ static int qti_haptics_upload_effect(struct input_dev *dev, * send stop playing command after it's done. */ if (copy_to_user(effect->u.periodic.custom_data, data, - sizeof(s16) * CUSTOM_DATA_LEN)) { - rc = -EFAULT; - goto disable_vdd; - } + sizeof(s16) * CUSTOM_DATA_LEN)) + return -EFAULT; break; default: dev_err(chip->dev, "Unsupported effect type: %d\n", effect->type); - break; + return -EINVAL; } - return 0; -disable_vdd: - if (chip->vdd_supply && chip->vdd_enabled) { - rc = regulator_disable(chip->vdd_supply); + if (chip->vdd_supply && !chip->vdd_enabled) { + rc = regulator_enable(chip->vdd_supply); if (rc < 0) { - dev_err(chip->dev, "Disable VDD supply failed, rc=%d\n", + dev_err(chip->dev, "Enable VDD supply failed, rc=%d\n", rc); return rc; } - chip->vdd_enabled = false; + chip->vdd_enabled = true; } - return rc; + + return 0; } static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) @@ -909,6 +943,10 @@ static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) dev_dbg(chip->dev, "playback, val = %d\n", val); if (!!val) { + rc = qti_haptics_module_en(chip, true); + if (rc < 0) + return rc; + rc = qti_haptics_play(chip, true); if (rc < 0) return rc; @@ -939,10 +977,6 @@ static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) if (rc < 0) return rc; - rc = qti_haptics_module_en(chip, false); - if (rc < 0) - return rc; - if (chip->play_irq_en) { disable_irq_nosync(chip->play_irq); chip->play_irq_en = false; @@ -955,7 +989,7 @@ static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) static int qti_haptics_erase(struct input_dev *dev, int effect_id) { struct qti_hap_chip *chip = input_get_drvdata(dev); - int rc = 0; + int delay_us, rc = 0; if (chip->vdd_supply && chip->vdd_enabled) { rc = regulator_disable(chip->vdd_supply); @@ -967,6 +1001,22 @@ static int qti_haptics_erase(struct input_dev *dev, int effect_id) chip->vdd_enabled = false; } + rc = 
qti_haptics_clear_settings(chip); + if (rc < 0) { + dev_err(chip->dev, "clear setting failed, rc=%d\n", rc); + return rc; + } + + if (chip->play.effect) + delay_us = chip->play.effect->play_rate_us; + else + delay_us = chip->config.play_rate_us; + + delay_us += HAP_DISABLE_DELAY_USEC; + hrtimer_start(&chip->hap_disable_timer, + ktime_set(0, delay_us * NSEC_PER_USEC), + HRTIMER_MODE_REL); + return rc; } @@ -1093,18 +1143,45 @@ static enum hrtimer_restart qti_hap_stop_timer(struct hrtimer *timer) chip->play.length_us = 0; rc = qti_haptics_play(chip, false); - if (rc < 0) { + if (rc < 0) dev_err(chip->dev, "Stop playing failed, rc=%d\n", rc); - goto err_out; - } + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart qti_hap_disable_timer(struct hrtimer *timer) +{ + struct qti_hap_chip *chip = container_of(timer, struct qti_hap_chip, + hap_disable_timer); + int rc; rc = qti_haptics_module_en(chip, false); if (rc < 0) - dev_err(chip->dev, "Disable module failed, rc=%d\n", rc); -err_out: + dev_err(chip->dev, "Disable haptics module failed\n", rc); + return HRTIMER_NORESTART; } +static void verify_brake_setting(struct qti_hap_effect *effect) +{ + int i = effect->brake_pattern_length - 1; + u8 val = 0; + + for (; i >= 0; i--) { + if (effect->brake[i] != 0) + break; + + effect->brake_pattern_length--; + } + + for (i = 0; i < effect->brake_pattern_length; i++) { + effect->brake[i] &= HAP_BRAKE_PATTERN_MASK; + val |= effect->brake[i] << (i * HAP_BRAKE_PATTERN_SHIFT); + } + + effect->brake_en = (val != 0); +} + static int qti_haptics_parse_dt(struct qti_hap_chip *chip) { struct qti_hap_config *config = &chip->config; @@ -1113,7 +1190,6 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) struct qti_hap_effect *effect; const char *str; int rc = 0, tmp, i = 0, j, m; - u8 val; rc = of_property_read_u32(node, "reg", &tmp); if (rc < 0) { @@ -1353,17 +1429,7 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) } effect->brake_pattern_length = tmp; - for (j 
= tmp - 1; j >= 0; j--) { - if (effect->brake[j] != 0) - break; - effect->brake_pattern_length--; - } - - for (val = 0, j = 0; j < effect->brake_pattern_length; j++) - val |= (effect->brake[j] & HAP_BRAKE_PATTERN_MASK) - << j * HAP_BRAKE_PATTERN_SHIFT; - - effect->brake_en = (val != 0); + verify_brake_setting(effect); } for (j = 0; j < i; j++) { @@ -1391,6 +1457,390 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) return 0; } +#ifdef CONFIG_DEBUG_FS +static int play_rate_dbgfs_read(void *data, u64 *val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + *val = effect->play_rate_us; + + return 0; +} + +static int play_rate_dbgfs_write(void *data, u64 val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + if (val > HAP_PLAY_RATE_US_MAX) + val = HAP_PLAY_RATE_US_MAX; + + effect->play_rate_us = val; + + return 0; +} + +static int vmax_dbgfs_read(void *data, u64 *val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + *val = effect->vmax_mv; + + return 0; +} + +static int vmax_dbgfs_write(void *data, u64 val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + if (val > HAP_VMAX_MV_MAX) + val = HAP_VMAX_MV_MAX; + + effect->vmax_mv = val; + + return 0; +} + +static int wf_repeat_n_dbgfs_read(void *data, u64 *val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + *val = wf_repeat[effect->wf_repeat_n]; + + return 0; +} + +static int wf_repeat_n_dbgfs_write(void *data, u64 val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + int i; + + for (i = 0; i < ARRAY_SIZE(wf_repeat); i++) + if (val == wf_repeat[i]) + break; + + if (i == ARRAY_SIZE(wf_repeat)) + pr_err("wf_repeat value %lu is invalid\n", val); + else + effect->wf_repeat_n = i; + + return 0; +} + +static int wf_s_repeat_n_dbgfs_read(void *data, u64 *val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + *val = wf_s_repeat[effect->wf_s_repeat_n]; + + return 0; 
+} + +static int wf_s_repeat_n_dbgfs_write(void *data, u64 val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + int i; + + for (i = 0; i < ARRAY_SIZE(wf_s_repeat); i++) + if (val == wf_s_repeat[i]) + break; + + if (i == ARRAY_SIZE(wf_s_repeat)) + pr_err("wf_s_repeat value %lu is invalid\n", val); + else + effect->wf_s_repeat_n = i; + + return 0; +} + + +static int auto_res_dbgfs_read(void *data, u64 *val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + *val = !effect->lra_auto_res_disable; + + return 0; +} + +static int auto_res_dbgfs_write(void *data, u64 val) +{ + struct qti_hap_effect *effect = (struct qti_hap_effect *)data; + + effect->lra_auto_res_disable = !val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(play_rate_debugfs_ops, play_rate_dbgfs_read, + play_rate_dbgfs_write, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(vmax_debugfs_ops, vmax_dbgfs_read, + vmax_dbgfs_write, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(wf_repeat_n_debugfs_ops, wf_repeat_n_dbgfs_read, + wf_repeat_n_dbgfs_write, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(wf_s_repeat_n_debugfs_ops, wf_s_repeat_n_dbgfs_read, + wf_s_repeat_n_dbgfs_write, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(auto_res_debugfs_ops, auto_res_dbgfs_read, + auto_res_dbgfs_write, "%llu\n"); + +#define CHAR_PER_PATTERN 8 +static ssize_t brake_pattern_dbgfs_read(struct file *filep, + char __user *buf, size_t count, loff_t *ppos) +{ + struct qti_hap_effect *effect = + (struct qti_hap_effect *)filep->private_data; + char *kbuf, *tmp; + int rc, length, i, len; + + kbuf = kcalloc(CHAR_PER_PATTERN, HAP_BRAKE_PATTERN_MAX, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + tmp = kbuf; + for (length = 0, i = 0; i < HAP_BRAKE_PATTERN_MAX; i++) { + len = snprintf(tmp, CHAR_PER_PATTERN, "0x%x ", + effect->brake[i]); + tmp += len; + length += len; + } + + kbuf[length++] = '\n'; + kbuf[length++] = '\0'; + + rc = simple_read_from_buffer(buf, count, ppos, kbuf, length); + + kfree(kbuf); + return rc; +} + +static ssize_t 
brake_pattern_dbgfs_write(struct file *filep, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct qti_hap_effect *effect = + (struct qti_hap_effect *)filep->private_data; + char *kbuf, *token; + int rc = 0, i = 0, j; + u32 val; + + kbuf = kmalloc(count + 1, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + rc = copy_from_user(kbuf, buf, count); + if (rc > 0) { + rc = -EFAULT; + goto err; + } + + kbuf[count] = '\0'; + *ppos += count; + + while ((token = strsep(&kbuf, " ")) != NULL) { + rc = kstrtouint(token, 0, &val); + if (rc < 0) { + rc = -EINVAL; + goto err; + } + + effect->brake[i++] = val & HAP_BRAKE_PATTERN_MASK; + + if (i >= HAP_BRAKE_PATTERN_MAX) + break; + } + + for (j = i; j < HAP_BRAKE_PATTERN_MAX; j++) + effect->brake[j] = 0; + + effect->brake_pattern_length = i; + verify_brake_setting(effect); + + rc = count; +err: + kfree(kbuf); + return rc; +} + +static const struct file_operations brake_pattern_dbgfs_ops = { + .read = brake_pattern_dbgfs_read, + .write = brake_pattern_dbgfs_write, + .owner = THIS_MODULE, + .open = simple_open, +}; + +static ssize_t pattern_dbgfs_read(struct file *filep, + char __user *buf, size_t count, loff_t *ppos) +{ + struct qti_hap_effect *effect = + (struct qti_hap_effect *)filep->private_data; + char *kbuf, *tmp; + int rc, length, i, len; + + kbuf = kcalloc(CHAR_PER_PATTERN, effect->pattern_length, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + tmp = kbuf; + for (length = 0, i = 0; i < effect->pattern_length; i++) { + len = snprintf(tmp, CHAR_PER_PATTERN, "0x%x ", + effect->pattern[i]); + tmp += len; + length += len; + } + + kbuf[length++] = '\n'; + kbuf[length++] = '\0'; + + rc = simple_read_from_buffer(buf, count, ppos, kbuf, length); + + kfree(kbuf); + return rc; +} + +static ssize_t pattern_dbgfs_write(struct file *filep, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct qti_hap_effect *effect = + (struct qti_hap_effect *)filep->private_data; + char *kbuf, *token; + int rc = 0, i = 0, j; + u32 
val; + + kbuf = kmalloc(count + 1, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + rc = copy_from_user(kbuf, buf, count); + if (rc > 0) { + rc = -EFAULT; + goto err; + } + + kbuf[count] = '\0'; + *ppos += count; + + while ((token = strsep(&kbuf, " ")) != NULL) { + rc = kstrtouint(token, 0, &val); + if (rc < 0) { + rc = -EINVAL; + goto err; + } + + effect->pattern[i++] = val & 0xff; + + if (i >= effect->pattern_length) + break; + } + + for (j = i; j < effect->pattern_length; j++) + effect->pattern[j] = 0; + + rc = count; +err: + kfree(kbuf); + return rc; +} + +static const struct file_operations pattern_dbgfs_ops = { + .read = pattern_dbgfs_read, + .write = pattern_dbgfs_write, + .owner = THIS_MODULE, + .open = simple_open, +}; + +static int create_effect_debug_files(struct qti_hap_effect *effect, + struct dentry *dir) +{ + struct dentry *file; + + file = debugfs_create_file("play_rate_us", 0644, dir, + effect, &play_rate_debugfs_ops); + if (!file) { + pr_err("create play-rate debugfs node failed\n"); + return -ENOMEM; + } + + file = debugfs_create_file("vmax_mv", 0644, dir, + effect, &vmax_debugfs_ops); + if (!file) { + pr_err("create vmax debugfs node failed\n"); + return -ENOMEM; + } + + file = debugfs_create_file("wf_repeat_n", 0644, dir, + effect, &wf_repeat_n_debugfs_ops); + if (!file) { + pr_err("create wf-repeat debugfs node failed\n"); + return -ENOMEM; + } + + file = debugfs_create_file("wf_s_repeat_n", 0644, dir, + effect, &wf_s_repeat_n_debugfs_ops); + if (!file) { + pr_err("create wf-s-repeat debugfs node failed\n"); + return -ENOMEM; + } + + file = debugfs_create_file("lra_auto_res_en", 0644, dir, + effect, &auto_res_debugfs_ops); + if (!file) { + pr_err("create lra-auto-res-en debugfs node failed\n"); + return -ENOMEM; + } + + file = debugfs_create_file("brake", 0644, dir, + effect, &brake_pattern_dbgfs_ops); + if (!file) { + pr_err("create brake debugfs node failed\n"); + return -ENOMEM; + } + + file = debugfs_create_file("pattern", 0644, dir, + 
effect, &pattern_dbgfs_ops); + if (!file) { + pr_err("create pattern debugfs node failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int qti_haptics_add_debugfs(struct qti_hap_chip *chip) +{ + struct dentry *hap_dir, *effect_dir; + char str[12] = {0}; + int i, rc = 0; + + hap_dir = debugfs_create_dir("haptics", NULL); + if (!hap_dir) { + pr_err("create haptics debugfs directory failed\n"); + return -ENOMEM; + } + + for (i = 0; i < chip->effects_count; i++) { + snprintf(str, ARRAY_SIZE(str), "effect%d", i); + effect_dir = debugfs_create_dir(str, hap_dir); + if (!effect_dir) { + pr_err("create %s debugfs directory failed\n", str); + rc = -ENOMEM; + goto cleanup; + } + + rc = create_effect_debug_files(&chip->predefined[i], + effect_dir); + if (rc < 0) { + rc = -ENOMEM; + goto cleanup; + } + } + + chip->hap_debugfs = hap_dir; + return 0; + +cleanup: + debugfs_remove_recursive(hap_dir); + return rc; +} +#endif + static int qti_haptics_probe(struct platform_device *pdev) { struct qti_hap_chip *chip; @@ -1449,7 +1899,9 @@ static int qti_haptics_probe(struct platform_device *pdev) hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); chip->stop_timer.function = qti_hap_stop_timer; - + hrtimer_init(&chip->hap_disable_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + chip->hap_disable_timer.function = qti_hap_disable_timer; input_dev->name = "qti-haptics"; input_set_drvdata(input_dev, chip); chip->input_dev = input_dev; @@ -1486,6 +1938,11 @@ static int qti_haptics_probe(struct platform_device *pdev) } dev_set_drvdata(chip->dev, chip); +#ifdef CONFIG_DEBUG_FS + rc = qti_haptics_add_debugfs(chip); + if (rc < 0) + dev_dbg(chip->dev, "create debugfs failed, rc=%d\n", rc); +#endif return 0; destroy_ff: @@ -1497,12 +1954,35 @@ static int qti_haptics_remove(struct platform_device *pdev) { struct qti_hap_chip *chip = dev_get_drvdata(&pdev->dev); +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(chip->hap_debugfs); +#endif input_ff_destroy(chip->input_dev); 
dev_set_drvdata(chip->dev, NULL); return 0; } +static void qti_haptics_shutdown(struct platform_device *pdev) +{ + struct qti_hap_chip *chip = dev_get_drvdata(&pdev->dev); + int rc; + + dev_dbg(chip->dev, "Shutdown!\n"); + + qti_haptics_module_en(chip, false); + + if (chip->vdd_supply && chip->vdd_enabled) { + rc = regulator_disable(chip->vdd_supply); + if (rc < 0) { + dev_err(chip->dev, "Disable VDD supply failed, rc=%d\n", + rc); + return; + } + chip->vdd_enabled = false; + } +} + static const struct of_device_id haptics_match_table[] = { { .compatible = "qcom,haptics" }, { .compatible = "qcom,pm660-haptics" }, @@ -1513,10 +1993,12 @@ static const struct of_device_id haptics_match_table[] = { static struct platform_driver qti_haptics_driver = { .driver = { .name = "qcom,haptics", + .owner = THIS_MODULE, .of_match_table = haptics_match_table, }, .probe = qti_haptics_probe, .remove = qti_haptics_remove, + .shutdown = qti_haptics_shutdown, }; module_platform_driver(qti_haptics_driver); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 7b5fa501bbcf6651886cb16a2bcdbede026f382e..696e540304fd91231126557892f5222261ee0db2 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1262,6 +1262,8 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0611", 0 }, { "ELAN0612", 0 }, { "ELAN0618", 0 }, + { "ELAN061D", 0 }, + { "ELAN0622", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index b353d494ad404888bd2884527fe771937cb1416f..136f6e7bf797767256e66c1c083cb80c55cd7a1b 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), }, }, + { + /* Lenovo LaVie Z */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, 
"Lenovo LaVie Z"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/hxchipset/himax_common.c b/drivers/input/touchscreen/hxchipset/himax_common.c index 980ceeb685bb035ee804196eb04e8196d4dc1150..fe81162b05f509d78002c874f3306940a6e92e43 100644 --- a/drivers/input/touchscreen/hxchipset/himax_common.c +++ b/drivers/input/touchscreen/hxchipset/himax_common.c @@ -187,7 +187,7 @@ static ssize_t himax_HSEN_write(struct file *file, const char __user *buff, size return -EINVAL; g_core_fp.fp_set_HSEN_enable(ts->HSEN_enable, ts->suspended); - I("%s: HSEN_enable = %d.\n", __func__, ts->HSEN_enable); + D("%s: HSEN_enable = %d.\n", __func__, ts->HSEN_enable); return len; } @@ -449,8 +449,9 @@ int himax_input_register(struct himax_ts_data *ts) input_mt_init_slots(ts->input_dev, ts->nFinger_support); #endif #endif - I("input_set_abs_params: mix_x %d, max_x %d, min_y %d, max_y %d\n", - ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max); + D("input_set_abs_params: mix_x %d, max_x %d, min_y %d, max_y %d\n", + ts->pdata->abs_x_min, ts->pdata->abs_x_max, + ts->pdata->abs_y_min, ts->pdata->abs_y_max); input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_x_fuzz, 0); input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, ts->pdata->abs_y_min, ts->pdata->abs_y_max, ts->pdata->abs_y_fuzz, 0); input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, ts->pdata->abs_pressure_min, ts->pdata->abs_pressure_max, ts->pdata->abs_pressure_fuzz, 0); @@ -467,7 +468,7 @@ int himax_input_register(struct himax_ts_data *ts) ret = INPUT_REGISTER_FAIL; input_device_fail: - I("%s, input device register fail!\n", __func__); + E("%s, input device register fail\n", __func__); return ret; } @@ -489,7 +490,11 @@ static void calcDataSize(uint8_t finger_num) ts_data->x_channel + ts_data->y_channel) / ts_data->raw_data_frame_size + (((uint32_t)ts_data->x_channel * ts_data->y_channel + ts_data->x_channel + 
ts_data->y_channel) % ts_data->raw_data_frame_size) ? 1 : 0; - I("%s: coord_data_size: %d, area_data_size:%d, raw_data_frame_size:%d, raw_data_nframes:%d", __func__, ts_data->coord_data_size, ts_data->area_data_size, ts_data->raw_data_frame_size, ts_data->raw_data_nframes); + + D("%s: coord_data_size:%d, area_data_size:%d\n", __func__, + ts_data->coord_data_size, ts_data->area_data_size); + D("%s: raw_data_frame_size:%d, raw_data_nframes:%d\n", __func__, + ts_data->raw_data_frame_size, ts_data->raw_data_nframes); } static void calculate_point_number(void) @@ -569,7 +574,7 @@ static int i_update_FW(void) static int himax_loadSensorConfig(struct himax_i2c_platform_data *pdata) { - I("%s: initialization complete\n", __func__); + D("%s: initialization complete\n", __func__); return NO_ERR; } @@ -870,8 +875,15 @@ int himax_report_data_init(void) hx_touch_data->rawdata_frame_size = (ic_data->HX_TX_NUM * ic_data->HX_RX_NUM + ic_data->HX_TX_NUM + ic_data->HX_RX_NUM) / hx_touch_data->rawdata_size + 1; - I("%s: rawdata_frame_size = %d ", __func__, hx_touch_data->rawdata_frame_size); - I("%s: ic_data->HX_MAX_PT:%d, hx_raw_cnt_max:%d, hx_raw_cnt_rmd:%d, g_hx_rawdata_size:%d, hx_touch_data->touch_info_size:%d\n", __func__, ic_data->HX_MAX_PT, hx_touch_data->raw_cnt_max, hx_touch_data->raw_cnt_rmd, hx_touch_data->rawdata_size, hx_touch_data->touch_info_size); + D("%s: rawdata_frame_size = %d", + __func__, hx_touch_data->rawdata_frame_size); + + D("%s: ic_data->HX_MAX_PT:%d, hx_raw_cnt_max:%d, hx_raw_cnt_rmd:%d\n", + __func__, ic_data->HX_MAX_PT, hx_touch_data->raw_cnt_max, + hx_touch_data->raw_cnt_rmd); + D("%s: g_hx_rawdata_size:%d, hx_touch_data->touch_info_size:%d\n", + __func__, hx_touch_data->rawdata_size, + hx_touch_data->touch_info_size); hx_touch_data->hx_coord_buf = kzalloc(sizeof(uint8_t) * (hx_touch_data->touch_info_size), GFP_KERNEL); if (hx_touch_data->hx_coord_buf == NULL) @@ -934,7 +946,6 @@ void himax_cable_detect_func(bool force_renew) connect_status = 
USB_detect_flag;/* upmu_is_chr_det(); */ ts = private_ts; - /* I("Touch: cable status=%d, cable_config=%p, usb_connected=%d\n", connect_status, ts->cable_config, ts->usb_connected); */ if (ts->cable_config) { if (((!!connect_status) != ts->usb_connected) || force_renew) { if (!!connect_status) { @@ -1193,7 +1204,7 @@ static int himax_err_ctrl(struct himax_ts_data *ts, uint8_t *buf, int ts_path, i if (HX_HW_RESET_ACTIVATE) { /* drop 1st interrupts after chip reset */ HX_HW_RESET_ACTIVATE = 0; - I("[HX_HW_RESET_ACTIVATE]:%s: Back from reset, ready to serve.\n", __func__); + D(":%s: Back from reset, ready to serve.\n", __func__); ts_status = HX_RST_OK; goto END_FUNCTION; } @@ -1277,7 +1288,6 @@ int himax_parse_report_points(struct himax_ts_data *ts, int ts_path, int ts_stat if (g_ts_dbg != 0) I("%s: start!\n", __func__); - ts->old_finger = ts->pre_finger_mask; ts->pre_finger_mask = 0; hx_touch_data->finger_num = hx_touch_data->hx_coord_buf[ts->coordInfoSize - 4] & 0x0F; @@ -1313,7 +1323,7 @@ int himax_parse_report_points(struct himax_ts_data *ts, int ts_path, int ts_stat if (!ts->first_pressed) { ts->first_pressed = 1; - I("S1@%d, %d\n", x, y); + D("S1@%d, %d\n", x, y); } ts->pre_finger_data[loop_i][0] = x; @@ -1328,7 +1338,8 @@ int himax_parse_report_points(struct himax_ts_data *ts, int ts_path, int ts_stat if (loop_i == 0 && ts->first_pressed == 1) { ts->first_pressed = 2; - I("E1@%d, %d\n", ts->pre_finger_data[0][0], ts->pre_finger_data[0][1]); + D("E1@%d, %d\n", ts->pre_finger_data[0][0], + ts->pre_finger_data[0][1]); } } } @@ -1808,7 +1819,7 @@ void himax_ts_work(struct himax_ts_data *ts) goto END_FUNCTION; GET_TOUCH_FAIL: - I("%s: Now reset the Touch chip.\n", __func__); + E("%s: Now reset the Touch chip.\n", __func__); #ifdef HX_RST_PIN_FUNC g_core_fp.fp_ic_reset(false, true); #endif @@ -1894,7 +1905,7 @@ int himax_fb_register(struct himax_ts_data *ts) { int ret = 0; - I(" %s in\n", __func__); + D(" %s in\n", __func__); ts->fb_notif.notifier_call = 
drm_notifier_callback; ret = msm_drm_register_client(&ts->fb_notif); if (ret) @@ -1908,7 +1919,7 @@ int himax_fb_register(struct himax_ts_data *ts) { int ret = 0; - I(" %s in\n", __func__); + D(" %s in\n", __func__); ts->fb_notif.notifier_call = fb_notifier_callback; ret = fb_register_client(&ts->fb_notif); @@ -1932,18 +1943,18 @@ int himax_chip_common_init(void) #if defined(HX_AUTO_UPDATE_FW) || defined(HX_ZERO_FLASH) bool auto_update_flag = false; #endif - int ret = 0, err = 0; + int ret = 0, err = -1; struct himax_ts_data *ts = private_ts; struct himax_i2c_platform_data *pdata; - I("PDATA START\n"); + D("PDATA START\n"); pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (pdata == NULL) { /* Allocate Platform data space */ err = -ENOMEM; goto err_dt_platform_data_fail; } - I("ic_data START\n"); + D("ic_data START\n"); ic_data = kzalloc(sizeof(*ic_data), GFP_KERNEL); if (ic_data == NULL) { /* Allocate IC data space */ err = -ENOMEM; @@ -1958,7 +1969,7 @@ int himax_chip_common_init(void) } if (himax_parse_dt(ts, pdata) < 0) { - I(" pdata is NULL for DT\n"); + E(" pdata is NULL for DT\n"); err = -ECANCELED; goto err_alloc_dt_pdata_failed; } @@ -2048,7 +2059,7 @@ int himax_chip_common_init(void) ts->nFinger_support = ic_data->HX_MAX_PT; /* calculate the i2c data size */ calcDataSize(ts->nFinger_support); - I("%s: calcDataSize complete\n", __func__); + D("%s: calcDataSize complete\n", __func__); #ifdef CONFIG_OF ts->pdata->abs_pressure_min = 0; ts->pdata->abs_pressure_max = 200; @@ -2067,7 +2078,7 @@ int himax_chip_common_init(void) #else ts->protocol_type = PROTOCOL_TYPE_B; #endif - I("%s: Use Protocol Type %c\n", __func__, + D("%s: Use Protocol Type %c\n", __func__, ts->protocol_type == PROTOCOL_TYPE_A ? 
'A' : 'B'); ret = himax_input_register(ts); @@ -2176,7 +2187,6 @@ remove_proc_entry(HIMAX_PROC_TOUCH_FOLDER, NULL); err_dt_ic_data_fail: kfree(pdata); err_dt_platform_data_fail: - kfree(ts); probe_fail_flag = 1; return err; } @@ -2245,7 +2255,7 @@ int himax_chip_common_suspend(struct himax_ts_data *ts) } ts->suspended = true; - I("%s: enter\n", __func__); + D("%s: enter\n", __func__); if (debug_data != NULL && debug_data->flash_dump_going == true) { I("[himax] %s: Flash dump is going, reject suspend\n", __func__); @@ -2285,16 +2295,16 @@ int himax_chip_common_suspend(struct himax_ts_data *ts) if (ts->pdata->powerOff3V3 && ts->pdata->power) ts->pdata->power(0); - I("%s: END\n", __func__); + D("%s: END\n", __func__); return 0; } int himax_chip_common_resume(struct himax_ts_data *ts) { - I("%s: enter\n", __func__); + D("%s: enter\n", __func__); if (ts->suspended == false) { - I("%s: It had entered resume, skip this step\n", __func__); + D("%s: It had entered resume, skip this step\n", __func__); return 0; } ts->suspended = false; @@ -2314,6 +2324,6 @@ int himax_chip_common_resume(struct himax_ts_data *ts) g_core_fp.fp_resume_ic_action(); himax_int_enable(1); - I("%s: END\n", __func__); + D("%s: END\n", __func__); return 0; } diff --git a/drivers/input/touchscreen/hxchipset/himax_common.h b/drivers/input/touchscreen/hxchipset/himax_common.h index 45bd140f94a94e8175b4314402e9d1fe5144b9fa..6d48fe782ce7e85e37f75109810a94ab634302bb 100644 --- a/drivers/input/touchscreen/hxchipset/himax_common.h +++ b/drivers/input/touchscreen/hxchipset/himax_common.h @@ -74,7 +74,7 @@ /*#define HX_USB_DETECT_GLOBAL*/ /*#define HX_USB_DETECT_CALLBACK*/ /*#define HX_PROTOCOL_A*/ /* for MTK special platform.If turning on,it will report to system by using specific format. 
*/ -/*#define HX_RESUME_HW_RESET*/ +#define HX_RESUME_HW_RESET #define HX_PROTOCOL_B_3PA /*#define HX_FIX_TOUCH_INFO*/ /* if open, you need to change the touch info in the fix_touch_info*/ /*#define HX_ZERO_FLASH*/ diff --git a/drivers/input/touchscreen/hxchipset/himax_debug.c b/drivers/input/touchscreen/hxchipset/himax_debug.c index de60c9e148cf5d086f4541533c17b5775b93bdae..9d7f5b99a4cb982c6af32585a5748af841af9b65 100644 --- a/drivers/input/touchscreen/hxchipset/himax_debug.c +++ b/drivers/input/touchscreen/hxchipset/himax_debug.c @@ -2913,7 +2913,7 @@ int himax_debug_init(void) struct himax_ts_data *ts = private_ts; int err = 0; - I("%s:Enter\n", __func__); + D("%s:Enter\n", __func__); if (ts == NULL) { E("%s: ts struct is NULL\n", __func__); diff --git a/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c b/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c index 307ea4842ade41461c94357e9606412827ad05b5..14604dc1eb95cc15abb2295f7ce3cff5bd7256e9 100644 --- a/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c +++ b/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c @@ -92,7 +92,8 @@ static bool hx83112_sense_off(void) *===================== */ g_core_fp.fp_register_read(pic_op->addr_cs_central_state, FOUR_BYTE_ADDR_SZ, tmp_data, 0); - I("%s: Check enter_save_mode data[0]=%X\n", __func__, tmp_data[0]); + D("%s: Check enter_save_mode data[0]=%X\n", + __func__, tmp_data[0]); if (tmp_data[0] == 0x0C) { /* @@ -167,7 +168,8 @@ static bool hx83112_chip_detect(void) I("%s:IC name = %s\n", __func__, private_ts->chip_name); - I("Himax IC package %x%x%x in\n", tmp_data[3], tmp_data[2], tmp_data[1]); + D("Himax IC package %x%x%x in\n", + tmp_data[3], tmp_data[2], tmp_data[1]); ret_data = true; break; } @@ -184,7 +186,7 @@ static void hx83112_chip_init(void) { private_ts->chip_cell_type = CHIP_IS_IN_CELL; - I("%s:IC cell type = %d\n", __func__, private_ts->chip_cell_type); + D("%s:IC cell type = %d\n", __func__, private_ts->chip_cell_type); IC_CHECKSUM = 
HX_TP_BIN_CHECKSUM_CRC; /* Himax: Set FW and CFG Flash Address */ FW_VER_MAJ_FLASH_ADDR = 49157; /* 0x00C005 */ @@ -211,7 +213,7 @@ static void hx83112_chip_init(void) #ifdef CONFIG_CHIP_DTCFG static int himax_hx83112_probe(struct platform_device *pdev) { - I("%s:Enter\n", __func__); + D("%s:Enter\n", __func__); g_core_fp.fp_chip_detect = hx83112_chip_detect; g_core_fp.fp_chip_init = hx83112_chip_init; return 0; @@ -246,7 +248,7 @@ static struct platform_driver himax_hx83112_driver = { static int __init himax_hx83112_init(void) { - I("%s\n", __func__); + D("%s\n", __func__); platform_driver_register(&himax_hx83112_driver); return 0; } @@ -259,7 +261,7 @@ static void __exit himax_hx83112_exit(void) #else static int himax_hx83112_probe(void) { - I("%s:Enter\n", __func__); + D("%s:Enter\n", __func__); g_core_fp.fp_chip_detect = hx83112_chip_detect; g_core_fp.fp_chip_init = hx83112_chip_init; @@ -278,7 +280,7 @@ static int __init himax_hx83112_init(void) { int ret = 0; - I("%s\n", __func__); + D("%s\n", __func__); ret = himax_hx83112_probe(); return 0; } diff --git a/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c b/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c index b720826e59e27033b9fc0d59c118dbe04ae5e00a..74bb6c4ae00a1d3694427fa99a0006fdc949fd9c 100644 --- a/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c +++ b/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c @@ -315,7 +315,7 @@ static void himax_mcu_sense_on(uint8_t FlashMode) uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; int retry = 0; - I("Enter %s\n", __func__); + D("Enter %s\n", __func__); g_core_fp.fp_interface_on(); g_core_fp.fp_register_write(pfw_op->addr_ctrl_fw_isr, sizeof(pfw_op->data_clear), pfw_op->data_clear, false); @@ -406,7 +406,8 @@ static bool himax_mcu_sense_off(void) *====================== */ g_core_fp.fp_register_read(pic_op->addr_cs_central_state, FOUR_BYTE_ADDR_SZ, tmp_data, 0); - I("%s: Check enter_save_mode data[0]=%X\n", __func__, tmp_data[0]); + D("%s: 
Check enter_save_mode data[0]=%X\n", + __func__, tmp_data[0]); if (tmp_data[0] == 0x0C) { /* @@ -472,7 +473,7 @@ static void himax_mcu_suspend_ic_action(void) static void himax_mcu_power_on_init(void) { - I("%s:\n", __func__); + D("%s:\n", __func__); g_core_fp.fp_touch_information(); /* RawOut select initial */ g_core_fp.fp_register_write(pfw_op->addr_raw_out_sel, sizeof(pfw_op->data_clear), pfw_op->data_clear, false); @@ -877,7 +878,7 @@ static void himax_mcu_read_FW_ver(void) if ((data[1] == 0x3A && data[0] == 0xA3) || (data_2[1] == 0x72 && data_2[0] == 0xC0)) { - I("reload OK!\n"); + D("reload OK!\n"); reload_status = 1; break; } else if (retry == 0) { @@ -895,12 +896,13 @@ static void himax_mcu_read_FW_ver(void) retry--; msleep(20); if (retry % 10 == 0) - I("reload fail ,delay 10ms retry=%d\n", retry); + E("reload fail ,delay 10ms retry=%d\n", retry); } - I("%s : data[0]=0x%2.2X,data[1]=0x%2.2X,data_2[0]=0x%2.2X,data_2[1]=0x%2.2X\n", __func__, data[0], data[1], data_2[0], data_2[1]); - I("reload_status=%d\n", reload_status); + D("%s:data[]={0x%2.2X, 0x%2.2X}, data_2[]={0x%2.2X, 0x%2.2X}\n", + __func__, data[0], data[1], data_2[0], data_2[1]); + D("reload_status=%d\n", reload_status); /* *===================================== * Read FW version : 0x1000_7004 but 05,06 are the real addr for FW Version @@ -910,19 +912,19 @@ static void himax_mcu_read_FW_ver(void) g_core_fp.fp_register_read(pfw_op->addr_fw_ver_addr, FOUR_BYTE_DATA_SZ, data, 0); ic_data->vendor_panel_ver = data[0]; ic_data->vendor_fw_ver = data[1] << 8 | data[2]; - I("PANEL_VER : %X\n", ic_data->vendor_panel_ver); - I("FW_VER : %X\n", ic_data->vendor_fw_ver); + D("PANEL_VER : %X\n", ic_data->vendor_panel_ver); + D("FW_VER : %X\n", ic_data->vendor_fw_ver); g_core_fp.fp_register_read(pfw_op->addr_fw_cfg_addr, FOUR_BYTE_DATA_SZ, data, 0); ic_data->vendor_config_ver = data[2] << 8 | data[3]; /* I("CFG_VER : %X\n",ic_data->vendor_config_ver); */ ic_data->vendor_touch_cfg_ver = data[2]; - I("TOUCH_VER : 
%X\n", ic_data->vendor_touch_cfg_ver); + D("TOUCH_VER : %X\n", ic_data->vendor_touch_cfg_ver); ic_data->vendor_display_cfg_ver = data[3]; - I("DISPLAY_VER : %X\n", ic_data->vendor_display_cfg_ver); + D("DISPLAY_VER : %X\n", ic_data->vendor_display_cfg_ver); g_core_fp.fp_register_read(pfw_op->addr_fw_vendor_addr, FOUR_BYTE_DATA_SZ, data, 0); ic_data->vendor_cid_maj_ver = data[2]; ic_data->vendor_cid_min_ver = data[3]; - I("CID_VER : %X\n", (ic_data->vendor_cid_maj_ver << 8 | ic_data->vendor_cid_min_ver)); + D("CID_VER : %X\n", (data[2] << 8 | data[3])); } static bool himax_mcu_read_event_stack(uint8_t *buf, uint8_t length) @@ -956,10 +958,10 @@ static void himax_mcu_return_event_stack(void) int retry = 20, i; uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; - I("%s:entering\n", __func__); + D("%s:entering\n", __func__); do { - I("now %d times\n!", retry); + D("now %d times\n!", retry); for (i = 0; i < FOUR_BYTE_DATA_SZ; i++) tmp_data[i] = psram_op->addr_rawdata_end[i]; @@ -970,7 +972,7 @@ static void himax_mcu_return_event_stack(void) msleep(20); } while ((tmp_data[1] != psram_op->addr_rawdata_end[1] && tmp_data[0] != psram_op->addr_rawdata_end[0]) && retry > 0); - I("%s: End of setting!\n", __func__); + D("%s: End of setting!\n", __func__); } static bool himax_mcu_calculateChecksum(bool change_iref) @@ -1041,7 +1043,7 @@ static void himax_mcu_irq_switch(int switch_on) static int himax_mcu_assign_sorting_mode(uint8_t *tmp_data) { - I("%s:Now tmp_data[3]=0x%02X,tmp_data[2]=0x%02X,tmp_data[1]=0x%02X,tmp_data[0]=0x%02X\n", + D("%s:Now tmp[3]=0x%02X, tmp[2]=0x%02X, tmp[1]=0x%02X, tmp[0]=0x%02X\n", __func__, tmp_data[3], tmp_data[2], tmp_data[1], tmp_data[0]); g_core_fp.fp_flash_write_burst(pfw_op->addr_sorting_mode_en, tmp_data); @@ -1050,9 +1052,8 @@ static int himax_mcu_assign_sorting_mode(uint8_t *tmp_data) static int himax_mcu_check_sorting_mode(uint8_t *tmp_data) { - g_core_fp.fp_register_read(pfw_op->addr_sorting_mode_en, FOUR_BYTE_DATA_SZ, tmp_data, 0); - I("%s: 
tmp_data[0]=%x,tmp_data[1]=%x\n", __func__, tmp_data[0], tmp_data[1]); + D("%s: tmp[0]=%x,tmp[1]=%x\n", __func__, tmp_data[0], tmp_data[1]); return NO_ERR; } @@ -1065,7 +1066,7 @@ static int himax_mcu_switch_mode(int mode) int result = -1; int retry = 200; - I("%s: Entering\n", __func__); + D("%s: Entering\n", __func__); if (mode == 0) { /* normal mode */ @@ -1114,14 +1115,15 @@ static int himax_mcu_switch_mode(int mode) g_core_fp.fp_sense_on(0x01); while (retry != 0) { - I("[%d] %s Read\n", retry, __func__); + D("[%d] %s Read\n", retry, __func__); /* tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x7F; tmp_addr[0] = 0x04; */ g_core_fp.fp_check_sorting_mode(tmp_data); msleep(100); - I("mode_read_cmd(0)=0x%2.2X,mode_read_cmd(1)=0x%2.2X\n", tmp_data[0], tmp_data[1]); + D("mode_read_cmd(0)=0x%2.2X,mode_read_cmd(1)=0x%2.2X\n", + tmp_data[0], tmp_data[1]); if (tmp_data[0] == mode_read_cmd && tmp_data[1] == mode_read_cmd) { - I("Read OK!\n"); + D("Read OK!\n"); result = 0; break; } @@ -1262,7 +1264,7 @@ static bool himax_mcu_block_erase(int start_addr, int length) } } - I("%s:END\n", __func__); + D("%s:END\n", __func__); return true; } @@ -1626,7 +1628,7 @@ static void himax_mcu_get_DSRAM_data(uint8_t *info_data, bool DSRAM_Flag) #ifdef CORE_DRIVER static bool himax_mcu_detect_ic(void) { - I("%s: use default incell detect.\n", __func__); + D("%s: use default incell detect.\n", __func__); return 0; } @@ -1634,14 +1636,14 @@ static bool himax_mcu_detect_ic(void) static void himax_mcu_init_ic(void) { - I("%s: use default incell init.\n", __func__); + D("%s: use default incell init.\n", __func__); } #ifdef HX_RST_PIN_FUNC static void himax_mcu_pin_reset(void) { - I("%s: Now reset the Touch chip.\n", __func__); + D("%s: Now reset the Touch chip.\n", __func__); himax_rst_gpio_set(private_ts->rst_gpio, 0); msleep(20); himax_rst_gpio_set(private_ts->rst_gpio, 1); @@ -1653,7 +1655,8 @@ static void himax_mcu_ic_reset(uint8_t loadconfig, uint8_t int_off) struct himax_ts_data 
*ts = private_ts; HX_HW_RESET_ACTIVATE = 1; - I("%s,status: loadconfig=%d,int_off=%d\n", __func__, loadconfig, int_off); + D("%s, status: loadconfig=%d, int_off=%d\n", + __func__, loadconfig, int_off); if (ts->rst_gpio >= 0) { if (int_off) @@ -1745,9 +1748,11 @@ static void himax_mcu_touch_information(void) ic_data->HX_XY_REVERSE = FIX_HX_XY_REVERSE; ic_data->HX_INT_IS_EDGE = FIX_HX_INT_IS_EDGE; #endif - I("%s:HX_RX_NUM =%d,HX_TX_NUM =%d,HX_MAX_PT=%d\n", __func__, ic_data->HX_RX_NUM, ic_data->HX_TX_NUM, ic_data->HX_MAX_PT); - I("%s:HX_XY_REVERSE =%d,HX_Y_RES =%d,HX_X_RES=%d\n", __func__, ic_data->HX_XY_REVERSE, ic_data->HX_Y_RES, ic_data->HX_X_RES); - I("%s:HX_INT_IS_EDGE =%d\n", __func__, ic_data->HX_INT_IS_EDGE); + D("%s:HX_RX_NUM =%d,HX_TX_NUM =%d,HX_MAX_PT=%d\n", __func__, + ic_data->HX_RX_NUM, ic_data->HX_TX_NUM, ic_data->HX_MAX_PT); + D("%s:HX_XY_REVERSE =%d,HX_Y_RES =%d,HX_X_RES=%d\n", __func__, + ic_data->HX_XY_REVERSE, ic_data->HX_Y_RES, ic_data->HX_X_RES); + D("%s:HX_INT_IS_EDGE =%d\n", __func__, ic_data->HX_INT_IS_EDGE); } static void himax_mcu_reload_config(void) @@ -2464,7 +2469,7 @@ static void himax_mcu_fp_init(void) void himax_mcu_in_cmd_struct_init(void) { - I("%s: Entering!\n", __func__); + D("%s: Entering!\n", __func__); g_core_cmd_op = kzalloc(sizeof(struct himax_core_command_operation), GFP_KERNEL); if (!g_core_cmd_op) return; @@ -2581,7 +2586,7 @@ void himax_in_parse_assign_cmd(uint32_t addr, uint8_t *cmd, int len) void himax_mcu_in_cmd_init(void) { - I("%s: Entering!\n", __func__); + D("%s: Entering!\n", __func__); #ifdef CORE_IC himax_in_parse_assign_cmd(ic_adr_ahb_addr_byte_0, pic_op->addr_ahb_addr_byte_0, sizeof(pic_op->addr_ahb_addr_byte_0)); himax_in_parse_assign_cmd(ic_adr_ahb_rdata_byte_0, pic_op->addr_ahb_rdata_byte_0, sizeof(pic_op->addr_ahb_rdata_byte_0)); diff --git a/drivers/input/touchscreen/hxchipset/himax_platform.c b/drivers/input/touchscreen/hxchipset/himax_platform.c index 
8672e95d240d013b20d6b90be973cd57616f4683..a32b7ccfd26b7d67cb3ea41d263c786e4df20e41 100644 --- a/drivers/input/touchscreen/hxchipset/himax_platform.c +++ b/drivers/input/touchscreen/hxchipset/himax_platform.c @@ -58,7 +58,7 @@ void himax_vk_parser(struct device_node *dt, node = of_parse_phandle(dt, "virtualkey", 0); if (node == NULL) { - I(" DT-No vk info in DT"); + D(" DT-No vk info in DT"); return; } @@ -115,8 +115,9 @@ int himax_parse_dt(struct himax_ts_data *ts, struct himax_i2c_platform_data *pda if (of_property_read_u32_array(dt, "himax,panel-coords", coords, coords_size) == 0) { pdata->abs_x_min = coords[0], pdata->abs_x_max = coords[1]; pdata->abs_y_min = coords[2], pdata->abs_y_max = coords[3]; - I(" DT-%s:panel-coords = %d, %d, %d, %d\n", __func__, pdata->abs_x_min, - pdata->abs_x_max, pdata->abs_y_min, pdata->abs_y_max); + D(" DT-%s:panel-coords = %d, %d, %d, %d\n", __func__, + pdata->abs_x_min, pdata->abs_x_max, + pdata->abs_y_min, pdata->abs_y_max); } prop = of_find_property(dt, "himax,display-coords", NULL); @@ -137,24 +138,25 @@ int himax_parse_dt(struct himax_ts_data *ts, struct himax_i2c_platform_data *pda pdata->screenWidth = coords[1]; pdata->screenHeight = coords[3]; - I(" DT-%s:display-coords = (%d, %d)", __func__, pdata->screenWidth, - pdata->screenHeight); + D(" DT-%s:display-coords = (%d, %d)", __func__, pdata->screenWidth, + pdata->screenHeight); pdata->gpio_irq = of_get_named_gpio(dt, "himax,irq-gpio", 0); if (!gpio_is_valid(pdata->gpio_irq)) - I(" DT:gpio_irq value is not valid\n"); + E(" DT:gpio_irq value is not valid\n"); pdata->gpio_reset = of_get_named_gpio(dt, "himax,rst-gpio", 0); if (!gpio_is_valid(pdata->gpio_reset)) - I(" DT:gpio_rst value is not valid\n"); + E(" DT:gpio_rst value is not valid\n"); pdata->gpio_3v3_en = of_get_named_gpio(dt, "himax,3v3-gpio", 0); if (!gpio_is_valid(pdata->gpio_3v3_en)) - I(" DT:gpio_3v3_en value is not valid\n"); + D(" DT:gpio_3v3_en value is not valid\n"); - I(" DT:gpio_irq=%d, gpio_rst=%d, 
gpio_3v3_en=%d", pdata->gpio_irq, pdata->gpio_reset, pdata->gpio_3v3_en); + D(" DT:gpio_irq=%d, gpio_rst=%d, gpio_3v3_en=%d", + pdata->gpio_irq, pdata->gpio_reset, pdata->gpio_3v3_en); if (of_property_read_u32(dt, "himax,report_type", &data) == 0) { pdata->protocol_type = data; @@ -188,7 +190,7 @@ int himax_bus_read(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRe }; if (length > HX_REPORT_SZ * 2) { - E("%s: data length too large %d!\n", __func__, length); + I("%s: data length too large %d\n", __func__, length); buf = kmalloc(length, GFP_KERNEL); if (!buf) { E("%s: failed realloc buf %d\n", __func__, length); @@ -240,7 +242,7 @@ int himax_bus_write(uint8_t command, uint8_t *data, uint32_t length, uint8_t toR }; if (length + 1 > HX_REPORT_SZ * 2) { - E("%s: data length too large %d!\n", __func__, length + 1); + I("%s: data length too large %d\n", __func__, length + 1); buf = kmalloc(length + 1, GFP_KERNEL); if (!buf) { E("%s: failed realloc buf %d\n", __func__, length + 1); @@ -297,7 +299,7 @@ int himax_bus_master_write(uint8_t *data, uint32_t length, uint8_t toRetry) }; if (length > HX_REPORT_SZ * 2) { - E("%s: data length too large %d!\n", __func__, length); + I("%s: data length too large %d\n", __func__, length); buf = kmalloc(length, GFP_KERNEL); if (!buf) { E("%s: failed realloc buf %d\n", __func__, length); @@ -347,7 +349,7 @@ void himax_int_enable(int enable) private_ts->irq_enabled = 0; } - I("irq_enable_count = %d", irq_enable_count); + D("irq_enable_count = %d", irq_enable_count); } #ifdef HX_RST_PIN_FUNC @@ -613,10 +615,10 @@ int himax_int_register_trigger(void) struct i2c_client *client = private_ts->client; if (ic_data->HX_INT_IS_EDGE) { - I("%s edge triiger falling\n ", __func__); + D("%s edge triiger falling\n ", __func__); ret = request_threaded_irq(client->irq, NULL, himax_ts_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, client->name, ts); } else { - I("%s level trigger low\n ", __func__); + D("%s level trigger low\n ", __func__); ret = 
request_threaded_irq(client->irq, NULL, himax_ts_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, ts); } @@ -647,7 +649,8 @@ int himax_ts_register_interrupt(void) if (ret == 0) { ts->irq_enabled = 1; irq_enable_count = 1; - I("%s: irq enabled at qpio: %d\n", __func__, client->irq); + D("%s: irq enabled at qpio: %d\n", + __func__, client->irq); #ifdef HX_SMART_WAKEUP irq_set_irq_wake(client->irq, 1); #endif @@ -675,7 +678,9 @@ static int himax_common_suspend(struct device *dev) { struct himax_ts_data *ts = dev_get_drvdata(dev); - I("%s: enter\n", __func__); + D("%s: enter\n", __func__); + if (!ts->initialized) + return -ECANCELED; himax_chip_common_suspend(ts); return 0; } @@ -684,7 +689,16 @@ static int himax_common_resume(struct device *dev) { struct himax_ts_data *ts = dev_get_drvdata(dev); - I("%s: enter\n", __func__); + D("%s: enter\n", __func__); + if (!ts->initialized) { + /* + * wait until device resume for TDDI + * TDDI: Touch and display Driver IC + */ + if (himax_chip_common_init()) + return -ECANCELED; + ts->initialized = true; + } himax_chip_common_resume(ts); return 0; } @@ -698,26 +712,23 @@ int drm_notifier_callback(struct notifier_block *self, struct msm_drm_notifier *evdata = data; int *blank; struct himax_ts_data *ts = - container_of(self, struct himax_ts_data, fb_notif); + container_of(self, struct himax_ts_data, fb_notif); if (!evdata || (evdata->id != 0)) return 0; - I("DRM %s\n", __func__); + D("DRM %s\n", __func__); if (evdata->data && event == MSM_DRM_EVENT_BLANK && ts && ts->client) { blank = evdata->data; switch (*blank) { case MSM_DRM_BLANK_UNBLANK: - if (!ts->initialized) { - if (himax_chip_common_init()) - return -ECANCELED; - ts->initialized = true; - } himax_common_resume(&ts->client->dev); break; case MSM_DRM_BLANK_POWERDOWN: + if (!ts->initialized) + return -ECANCELED; himax_common_suspend(&ts->client->dev); break; } @@ -734,24 +745,16 @@ int fb_notifier_callback(struct notifier_block *self, struct fb_event *evdata = data; int 
*blank; struct himax_ts_data *ts = - container_of(self, struct himax_ts_data, fb_notif); - - if (!evdata || (evdata->id != 0)) - return 0; + container_of(self, struct himax_ts_data, fb_notif); - I("FB %s\n", __func__); + D("FB %s\n", __func__); if (evdata && evdata->data && event == FB_EVENT_BLANK && ts && - ts->client) { + ts->client) { blank = evdata->data; switch (*blank) { case FB_BLANK_UNBLANK: - if (!ts->initialized) { - if (himax_chip_common_init()) - return 0; - ts->initialized = true; - } himax_common_resume(&ts->client->dev); break; case FB_BLANK_POWERDOWN: @@ -772,7 +775,7 @@ int himax_chip_common_probe(struct i2c_client *client, const struct i2c_device_i int ret = 0; struct himax_ts_data *ts; - I("%s:Enter\n", __func__); + D("%s:Enter\n", __func__); /* Check I2C functionality */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { @@ -867,7 +870,7 @@ static struct i2c_driver himax_common_driver = { static int __init himax_common_init(void) { - I("Himax common touch panel driver init\n"); + D("Himax common touch panel driver init\n"); i2c_add_driver(&himax_common_driver); return 0; diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c index 1f45adddec76b28387f1aaf17820c51455d7b8e7..1ddd0d40f71f9e00d428f1d4d1141ce38714b6a9 100644 --- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c +++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c @@ -147,6 +147,8 @@ static int synaptics_rmi4_suspend(struct device *dev); static int synaptics_rmi4_resume(struct device *dev); +static int synaptics_rmi4_defer_probe(struct platform_device *pdev); + static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); @@ -4226,7 +4228,6 @@ EXPORT_SYMBOL(synaptics_rmi4_new_function); static int synaptics_rmi4_probe(struct platform_device *pdev) { int retval; - unsigned char attr_count; struct 
synaptics_rmi4_data *rmi4_data; const struct synaptics_dsx_hw_interface *hw_if; const struct synaptics_dsx_board_data *bdata; @@ -4277,6 +4278,31 @@ static int synaptics_rmi4_probe(struct platform_device *pdev) vir_button_map = bdata->vir_button_map; + rmi4_data->initialized = false; +#ifdef CONFIG_FB + rmi4_data->fb_notifier.notifier_call = + synaptics_rmi4_dsi_panel_notifier_cb; + retval = msm_drm_register_client(&rmi4_data->fb_notifier); + if (retval < 0) { + dev_err(&pdev->dev, + "%s: Failed to register fb notifier client\n", + __func__); + } +#endif + return retval; +} + +static int synaptics_rmi4_defer_probe(struct platform_device *pdev) +{ + int retval; + struct synaptics_rmi4_data *rmi4_data; + const struct synaptics_dsx_hw_interface *hw_if; + const struct synaptics_dsx_board_data *bdata; + unsigned char attr_count; + + rmi4_data = platform_get_drvdata(pdev); + hw_if = rmi4_data->hw_if; + bdata = hw_if->board_data; retval = synaptics_rmi4_get_reg(rmi4_data, true); if (retval < 0) { dev_err(&pdev->dev, @@ -4335,18 +4361,6 @@ static int synaptics_rmi4_probe(struct platform_device *pdev) goto err_set_input_dev; } -#ifdef CONFIG_FB - rmi4_data->fb_notifier.notifier_call = synaptics_rmi4_dsi_panel_notifier_cb; - retval = msm_drm_register_client(&rmi4_data->fb_notifier); - if (retval < 0) { - - - dev_err(&pdev->dev, - "%s: Failed to register fb notifier client\n", - __func__); - } -#endif - #ifdef USE_EARLYSUSPEND rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend; @@ -4591,9 +4605,17 @@ static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self, if (event == MSM_DRM_EVENT_BLANK) { transition = *(int *)evdata->data; if (transition == MSM_DRM_BLANK_POWERDOWN) { + if (!rmi4_data->initialized) + return -ECANCELED; synaptics_rmi4_suspend(&rmi4_data->pdev->dev); rmi4_data->fb_ready = false; } else if (transition == MSM_DRM_BLANK_UNBLANK) { + if 
(!rmi4_data->initialized) { + if (synaptics_rmi4_defer_probe( + rmi4_data->pdev)) + return -ECANCELED; + rmi4_data->initialized = true; + } synaptics_rmi4_resume(&rmi4_data->pdev->dev); rmi4_data->fb_ready = true; } diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h index 34170708a96b61ef3a5b9fb7d2710d26d6f6638a..423db092f9767e68c76844d8b2ed3e110355f97a 100644 --- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h +++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h @@ -353,6 +353,7 @@ struct synaptics_rmi4_device_info { * @report_touch: pointer to touch reporting function */ struct synaptics_rmi4_data { + bool initialized; struct platform_device *pdev; struct input_dev *input_dev; struct input_dev *stylus_dev; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e8f09add1c354e9fe0419e2151be3e47a0c49ccd..ea26ce6a66845bd5afd0c78db008a09f948da191 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -255,6 +255,7 @@ struct arm_smmu_device { #define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5) #define ARM_SMMU_OPT_STATIC_CB (1 << 6) #define ARM_SMMU_OPT_DISABLE_ATOS (1 << 7) +#define ARM_SMMU_OPT_MIN_IOVA_ALIGN (1 << 8) u32 options; enum arm_smmu_arch_version version; enum arm_smmu_implementation model; @@ -394,6 +395,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" }, { ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"}, { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" }, + { ARM_SMMU_OPT_MIN_IOVA_ALIGN, "qcom,min-iova-align" }, { 0, NULL}, }; @@ -976,7 +978,7 @@ static void arm_smmu_domain_power_off(struct iommu_domain *domain, } /* Wait for any pending TLB invalidations to complete */ -static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, +static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, void __iomem *sync, void __iomem *status) { 
unsigned int spin_cnt, delay; @@ -985,7 +987,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) - return; + return 0; cpu_relax(); } udelay(delay); @@ -993,6 +995,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, trace_tlbsync_timeout(smmu->dev, 0); dev_err_ratelimited(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n"); + return -EINVAL; } static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) @@ -1001,8 +1004,10 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) unsigned long flags; spin_lock_irqsave(&smmu->global_sync_lock, flags); - __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, - base + ARM_SMMU_GR0_sTLBGSTATUS); + if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, + base + ARM_SMMU_GR0_sTLBGSTATUS)) + dev_err_ratelimited(smmu->dev, + "TLB global sync failed!\n"); spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } @@ -1014,8 +1019,12 @@ static void arm_smmu_tlb_sync_context(void *cookie) unsigned long flags; spin_lock_irqsave(&smmu_domain->sync_lock, flags); - __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, - base + ARM_SMMU_CB_TLBSTATUS); + if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, + base + ARM_SMMU_CB_TLBSTATUS)) + dev_err_ratelimited(smmu->dev, + "TLB sync on cb%d failed for device %s\n", + smmu_domain->cfg.cbndx, + dev_name(smmu_domain->dev)); spin_unlock_irqrestore(&smmu_domain->sync_lock, flags); } @@ -4972,6 +4981,12 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu, u32 halt, fsr, sctlr_orig, sctlr, status; void __iomem *base, *cb_base; + if (of_property_read_bool(tbu->dev->of_node, + "qcom,opt-out-tbu-halting")) { + dev_notice(tbu->dev, "TBU opted-out for halting!\n"); + return -EBUSY; + } + spin_lock_irqsave(&tbu->halt_lock, flags); if 
(tbu->halt_count) { tbu->halt_count++; @@ -5331,8 +5346,9 @@ static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain, * Prefetch only works properly if the start and end of all * buffers in the page table are aligned to ARM_SMMU_MIN_IOVA_ALIGN. */ - if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) & - QSMMUV500_ACTLR_DEEP_PREFETCH_MASK) + if (((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) & + QSMMUV500_ACTLR_DEEP_PREFETCH_MASK) && + (smmu->options & ARM_SMMU_OPT_MIN_IOVA_ALIGN)) smmu_domain->qsmmuv500_errata1_min_iova_align = true; } diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index 119f4ef0d42175b01ea8247728e303b8697fc1b7..b7f943f96068d116b2ee86217013f6cdc44e3655 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c @@ -21,6 +21,7 @@ #include #include #include +#include #define MSI_IRQS_PER_MSIR 32 #define MSI_MSIR_OFFSET 4 @@ -94,6 +95,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) if (msi_affinity_flag) msg->data |= cpumask_first(data->common->affinity); + + iommu_dma_map_msi_msg(data->irq, msg); } static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 9bc32578a766e1581d315dc23337214d75fd38d3..c0dd17a821709d8959a06e2ca3674b4e733da806 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -142,10 +142,9 @@ static void clean_wctx(struct pblk_w_ctx *w_ctx) { int flags; -try: flags = READ_ONCE(w_ctx->flags); - if (!(flags & PBLK_SUBMITTED_ENTRY)) - goto try; + WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY), + "pblk: overwriting unsubmitted data\n"); /* Release flags on context. 
Protect from writes and reads */ smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY); diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c index 47bc75cb0fdd7ecd00f575456ad9873add2d8f88..a297ad4c61a22515e57668b4c1d5d9819b422220 100644 --- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c @@ -59,6 +59,8 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c index 94bd6f99395e069e8633430c72778da5f562206b..98d92f32f9666edf1c49558c91893acaf496b396 100644 --- a/drivers/mailbox/qcom-rpmh-mailbox.c +++ b/drivers/mailbox/qcom-rpmh-mailbox.c @@ -30,6 +30,7 @@ #include #include +#include #include #include @@ -489,6 +490,7 @@ static irqreturn_t tcs_irq_handler(int irq, void *p) } else { /* Clear the enable bit for the commands */ write_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0, 0); + write_tcs_reg(base, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0, 0); } no_resp: @@ -807,6 +809,7 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg, static void __tcs_buffer_invalidate(void __iomem *base, int m) { write_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0, 0); + write_tcs_reg(base, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0, 0); } static int tcs_mbox_invalidate(struct mbox_chan *chan) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index e4a0eced8950a4a42b60bccef5a1918ed29a6aaf..888c9020ca067a9334904ff73bb3c50170fb6879 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -286,6 +286,24 @@ config DM_CRYPT If unsure, say N. 
+config DM_DEFAULT_KEY + tristate "Default-key crypt target support" + depends on BLK_DEV_DM + depends on PFK + ---help--- + This (currently Android-specific) device-mapper target allows you to + create a device that assigns a default encryption key to bios that + don't already have one. This can sit between inline cryptographic + acceleration hardware and filesystems that use it. This ensures that + where the filesystem doesn't explicitly specify a key, such as for + filesystem metadata, a default key will be used instead, leaving no + sectors unencrypted. + + To compile this code as a module, choose M here: the module will be + called dm-default-key. + + If unsure, say N. + config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM @@ -557,10 +575,11 @@ config DM_ZONED config DM_ANDROID_VERITY bool "Android verity target support" + depends on BLK_DEV_DM=y depends on DM_VERITY=y depends on X509_CERTIFICATE_PARSER depends on SYSTEM_TRUSTED_KEYRING - depends on PUBLIC_KEY_ALGO_RSA + depends on CRYPTO_RSA depends on KEYS depends on ASYMMETRIC_KEY_TYPE depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE @@ -576,8 +595,8 @@ config DM_ANDROID_VERITY config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED bool "Verity will validate blocks at most once" - depends on DM_VERITY - ---help--- + depends on DM_VERITY + ---help--- Default enables at_most_once option for dm-verity Verify data blocks only the first time they are read from the diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 26c2f8f28ec0011e6e74b3851d4b7b347cd019f4..bfd027659aafe63ba22adaeede557c0e600ac873 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_BUFIO) += dm-bufio.o obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o +obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o obj-$(CONFIG_DM_MULTIPATH) += 
dm-multipath.o dm-round-robin.o diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c index 0dd69244f77cce1cf38565fa1390b43ef20f4ef2..20e05936551fd593793c758589aeeabb0d303910 100644 --- a/drivers/md/dm-android-verity.c +++ b/drivers/md/dm-android-verity.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -122,75 +123,6 @@ static inline bool is_unlocked(void) return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked)); } -static int table_extract_mpi_array(struct public_key_signature *pks, - const void *data, size_t len) -{ - MPI mpi = mpi_read_raw_data(data, len); - - if (!mpi) { - DMERR("Error while allocating mpi array"); - return -ENOMEM; - } - - pks->mpi[0] = mpi; - pks->nr_mpi = 1; - return 0; -} - -static struct public_key_signature *table_make_digest( - enum hash_algo hash, - const void *table, - unsigned long table_len) -{ - struct public_key_signature *pks = NULL; - struct crypto_shash *tfm; - struct shash_desc *desc; - size_t digest_size, desc_size; - int ret; - - /* Allocate the hashing algorithm we're going to need and find out how - * big the hash operational data will be. - */ - tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); - digest_size = crypto_shash_digestsize(tfm); - - /* We allocate the hash operational data storage on the end of out - * context data and the digest output buffer on the end of that. 
- */ - ret = -ENOMEM; - pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL); - if (!pks) - goto error; - - pks->pkey_hash_algo = hash; - pks->digest = (u8 *)pks + sizeof(*pks) + desc_size; - pks->digest_size = digest_size; - - desc = (struct shash_desc *)(pks + 1); - desc->tfm = tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - - ret = crypto_shash_init(desc); - if (ret < 0) - goto error; - - ret = crypto_shash_finup(desc, table, table_len, pks->digest); - if (ret < 0) - goto error; - - crypto_free_shash(tfm); - return pks; - -error: - kfree(pks); - crypto_free_shash(tfm); - return ERR_PTR(ret); -} - static int read_block_dev(struct bio_read *payload, struct block_device *bdev, sector_t offset, int length) { @@ -205,8 +137,9 @@ static int read_block_dev(struct bio_read *payload, struct block_device *bdev, return -ENOMEM; } - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = offset; + bio_set_op_attrs(bio, REQ_OP_READ, 0); payload->page_io = kzalloc(sizeof(struct page *) * payload->number_of_pages, GFP_KERNEL); @@ -230,7 +163,7 @@ static int read_block_dev(struct bio_read *payload, struct block_device *bdev, } } - if (!submit_bio_wait(READ, bio)) + if (!submit_bio_wait(bio)) /* success */ goto free_bio; DMERR("bio read failed"); @@ -567,28 +500,85 @@ static int verity_mode(void) return DM_VERITY_MODE_EIO; } +static void handle_error(void) +{ + int mode = verity_mode(); + if (mode == DM_VERITY_MODE_RESTART) { + DMERR("triggering restart"); + kernel_restart("dm-verity device corrupted"); + } else { + DMERR("Mounting verity root failed"); + } +} + +static struct public_key_signature *table_make_digest( + enum hash_algo hash, + const void *table, + unsigned long table_len) +{ + struct public_key_signature *pks = NULL; + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t digest_size, desc_size; + int ret; + + /* Allocate the hashing algorithm we're going to need and find out how + * big the hash operational data will be. 
+ */ + tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + digest_size = crypto_shash_digestsize(tfm); + + /* We allocate the hash operational data storage on the end of out + * context data and the digest output buffer on the end of that. + */ + ret = -ENOMEM; + pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL); + if (!pks) + goto error; + + pks->pkey_algo = "rsa"; + pks->hash_algo = hash_algo_name[hash]; + pks->digest = (u8 *)pks + sizeof(*pks) + desc_size; + pks->digest_size = digest_size; + + desc = (struct shash_desc *)(pks + 1); + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + ret = crypto_shash_finup(desc, table, table_len, pks->digest); + if (ret < 0) + goto error; + + crypto_free_shash(tfm); + return pks; + +error: + kfree(pks); + crypto_free_shash(tfm); + return ERR_PTR(ret); +} + + static int verify_verity_signature(char *key_id, struct android_metadata *metadata) { - key_ref_t key_ref; - struct key *key; struct public_key_signature *pks = NULL; int retval = -EINVAL; - key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1), - &key_type_asymmetric, key_id); - - if (IS_ERR(key_ref)) { - DMERR("keyring: key not found"); - return -ENOKEY; - } - - key = key_ref_to_ptr(key_ref); + if (!key_id) + goto error; pks = table_make_digest(HASH_ALGO_SHA256, (const void *)metadata->verity_table, le32_to_cpu(metadata->header->table_length)); - if (IS_ERR(pks)) { DMERR("hashing failed"); retval = PTR_ERR(pks); @@ -596,33 +586,20 @@ static int verify_verity_signature(char *key_id, goto error; } - retval = table_extract_mpi_array(pks, &metadata->header->signature[0], - RSANUMBYTES); - if (retval < 0) { - DMERR("Error extracting mpi %d", retval); + pks->s = kmemdup(&metadata->header->signature[0], RSANUMBYTES, GFP_KERNEL); + if (!pks->s) { + DMERR("Error allocating 
memory for signature"); goto error; } + pks->s_size = RSANUMBYTES; - retval = verify_signature(key, pks); - mpi_free(pks->rsa.s); + retval = verify_signature_one(pks, NULL, key_id); + kfree(pks->s); error: kfree(pks); - key_put(key); - return retval; } -static void handle_error(void) -{ - int mode = verity_mode(); - if (mode == DM_VERITY_MODE_RESTART) { - DMERR("triggering restart"); - kernel_restart("dm-verity device corrupted"); - } else { - DMERR("Mounting verity root failed"); - } -} - static inline bool test_mult_overflow(sector_t a, u32 b) { sector_t r = (sector_t)~0ULL; @@ -696,8 +673,8 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) dev_t uninitialized_var(dev); struct android_metadata *metadata = NULL; int err = 0, i, mode; - char *key_id, *table_ptr, dummy, *target_device, - *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; + char *key_id = NULL, *table_ptr, dummy, *target_device; + char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; /* One for specifying number of opt args and one for mode */ sector_t data_sectors; u32 data_block_size; @@ -716,16 +693,16 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) handle_error(); return -EINVAL; } - } else if (argc == 2) - key_id = argv[1]; - else { + target_device = argv[0]; + } else if (argc == 2) { + key_id = argv[0]; + target_device = argv[1]; + } else { DMERR("Incorrect number of arguments"); handle_error(); return -EINVAL; } - target_device = argv[0]; - dev = name_to_dev_t(target_device); if (!dev) { DMERR("no dev found for %s", target_device); @@ -879,12 +856,11 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) } err = verity_ctr(ti, no_of_args, verity_table_args); - - if (err) - DMERR("android-verity failed to mount as verity target"); - else { + if (err) { + DMERR("android-verity failed to create a verity target"); + } else { target_added = true; - 
DMINFO("android-verity mounted as verity target"); + DMINFO("android-verity created as verity target"); } free_metadata: diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h index ed67d567f3eeb8c2757a4230c563931e3a290fab..ef406c136fcd1eccd877a51fa78cf2dd6fb71ed5 100644 --- a/drivers/md/dm-android-verity.h +++ b/drivers/md/dm-android-verity.h @@ -120,8 +120,9 @@ extern int dm_linear_prepare_ioctl(struct dm_target *ti, extern int dm_linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data); extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv); -extern long dm_linear_dax_direct_access(struct dm_target *ti, sector_t sector, - void **kaddr, pfn_t *pfn, long size); +extern long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, + long nr_pages, void **kaddr, + pfn_t *pfn); extern size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); #endif /* DM_ANDROID_VERITY_H */ diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c new file mode 100644 index 0000000000000000000000000000000000000000..23e91d8af0bd90006919327199457860ba7d0a7f --- /dev/null +++ b/drivers/md/dm-default-key.c @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include + +#define DM_MSG_PREFIX "default-key" + +struct default_key_c { + struct dm_dev *dev; + sector_t start; + struct blk_encryption_key key; +}; + +static void default_key_dtr(struct dm_target *ti) +{ + struct default_key_c *dkc = ti->private; + + if (dkc->dev) + dm_put_device(ti, dkc->dev); + kzfree(dkc); +} + +/* + * Construct a default-key mapping: + */ +static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct default_key_c *dkc; + size_t key_size; + unsigned long long tmp; + char dummy; + int err; + + if (argc != 4) { + ti->error = "Invalid argument count"; + return -EINVAL; + } + + dkc = kzalloc(sizeof(*dkc), GFP_KERNEL); + if (!dkc) { + ti->error = "Out of memory"; + return -ENOMEM; + } + ti->private = dkc; + + if (strcmp(argv[0], "AES-256-XTS") != 0) { + ti->error = "Unsupported encryption mode"; + err = -EINVAL; + goto bad; + } + + key_size = strlen(argv[1]); + if (key_size != 2 * BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS) { + ti->error = "Unsupported key size"; + err = -EINVAL; + goto bad; + } + key_size /= 2; + + if (hex2bin(dkc->key.raw, argv[1], key_size) != 0) { + ti->error = "Malformed key string"; + err = -EINVAL; + goto bad; + } + + err = dm_get_device(ti, argv[2], dm_table_get_mode(ti->table), + &dkc->dev); + if (err) { + ti->error = "Device lookup failed"; + goto bad; + } + + if (sscanf(argv[3], "%llu%c", &tmp, &dummy) != 1) { + ti->error = "Invalid start sector"; + err = -EINVAL; + goto bad; + } + dkc->start = tmp; + + if (!blk_queue_inlinecrypt(bdev_get_queue(dkc->dev->bdev))) { + ti->error = "Device does not support inline encryption"; + err = -EINVAL; + goto bad; + } + + /* Pass flush requests through to the underlying device. */ + ti->num_flush_bios = 1; + + /* + * We pass discard requests through to the underlying device, although + * the discarded blocks will be zeroed, which leaks information about + * unused blocks. 
It's also impossible for dm-default-key to know not + * to decrypt discarded blocks, so they will not be read back as zeroes + * and we must set discard_zeroes_data_unsupported. + */ + ti->num_discard_bios = 1; + + /* + * It's unclear whether WRITE_SAME would work with inline encryption; it + * would depend on whether the hardware duplicates the data before or + * after encryption. But since the internal storage in some devices + * (MSM8998-based) doesn't claim to support WRITE_SAME anyway, we don't + * currently have a way to test it. Leave it disabled it for now. + */ + /*ti->num_write_same_bios = 1;*/ + + return 0; + +bad: + default_key_dtr(ti); + return err; +} + +static int default_key_map(struct dm_target *ti, struct bio *bio) +{ + const struct default_key_c *dkc = ti->private; + + bio_set_dev(bio, dkc->dev->bdev); + if (bio_sectors(bio)) { + bio->bi_iter.bi_sector = dkc->start + + dm_target_offset(ti, bio->bi_iter.bi_sector); + } + + if (!bio->bi_crypt_key && !bio->bi_crypt_skip) + bio->bi_crypt_key = &dkc->key; + + return DM_MAPIO_REMAPPED; +} + +static void default_key_status(struct dm_target *ti, status_type_t type, + unsigned int status_flags, char *result, + unsigned int maxlen) +{ + const struct default_key_c *dkc = ti->private; + unsigned int sz = 0; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + + /* encryption mode */ + DMEMIT("AES-256-XTS"); + + /* reserved for key; dm-crypt shows it, but we don't for now */ + DMEMIT(" -"); + + /* name of underlying device, and the start sector in it */ + DMEMIT(" %s %llu", dkc->dev->name, + (unsigned long long)dkc->start); + break; + } +} + +static int default_key_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode) +{ + struct default_key_c *dkc = ti->private; + struct dm_dev *dev = dkc->dev; + + *bdev = dev->bdev; + + /* + * Only pass ioctls through if the device sizes match exactly. 
+ */ + if (dkc->start || + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) + return 1; + return 0; +} + +static int default_key_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data) +{ + struct default_key_c *dkc = ti->private; + + return fn(ti, dkc->dev, dkc->start, ti->len, data); +} + +static struct target_type default_key_target = { + .name = "default-key", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = default_key_ctr, + .dtr = default_key_dtr, + .map = default_key_map, + .status = default_key_status, + .prepare_ioctl = default_key_prepare_ioctl, + .iterate_devices = default_key_iterate_devices, +}; + +static int __init dm_default_key_init(void) +{ + return dm_register_target(&default_key_target); +} + +static void __exit dm_default_key_exit(void) +{ + dm_unregister_target(&default_key_target); +} + +module_init(dm_default_key_init); +module_exit(dm_default_key_exit); + +MODULE_AUTHOR("Paul Lawrence "); +MODULE_AUTHOR("Paul Crowley "); +MODULE_AUTHOR("Eric Biggers "); +MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 23e8bde4c500a8e98a650471858642159962e016..52a695f065e1c988ef77ef62766afe6c2b934e3e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1686,6 +1686,16 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); } +static int queue_supports_inline_encryption(struct dm_target *ti, + struct dm_dev *dev, + sector_t start, sector_t len, + void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && blk_queue_inlinecrypt(q); +} + static bool dm_table_all_devices_attribute(struct dm_table *t, iterate_devices_callout_fn func) { @@ -1836,6 +1846,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else 
queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption)) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q); + else + queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q); + dm_table_verify_integrity(t); /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 861e764ec169bef84ff8a2fe54a57409f8eed8b6..24f0f7c0d5becd3c2c37331212900480fa86c926 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6498,6 +6498,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) char b[BDEVNAME_SIZE]; struct md_rdev *rdev; + if (!mddev->pers) + return -ENODEV; + rdev = find_rdev(mddev, dev); if (!rdev) return -ENXIO; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 029ecba607272cdabc315e83ddf21780b356b26a..78d8307637045f3b797d7f6954732268a82b9c82 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2462,6 +2462,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors); unfreeze_array(conf); + } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { + md_error(mddev, rdev); } else { r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; } diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c index bfe831c10b1c46ccabf4efd41a49a8550b04ab70..b95a631f23f9ab3f34d7f323997afbfe9b30d542 100644 --- a/drivers/media/common/siano/smsendian.c +++ b/drivers/media/common/siano/smsendian.c @@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer) switch (msg->x_msg_header.msg_type) { case MSG_SMS_DATA_DOWNLOAD_REQ: { - msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]); + msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0])); break; } @@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer) sizeof(struct sms_msg_hdr))/4; for (i = 0; i < msg_words; i++) - msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); + msg->msg_data[i] = le32_to_cpu((__force 
__le32)msg->msg_data[i]); break; } @@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer) { struct sms_version_res *ver = (struct sms_version_res *) msg; - ver->chip_model = le16_to_cpu(ver->chip_model); + ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model); break; } @@ -81,7 +81,7 @@ void smsendian_handle_rx_message(void *buffer) sizeof(struct sms_msg_hdr))/4; for (i = 0; i < msg_words; i++) - msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); break; } @@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg) #ifdef __BIG_ENDIAN struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; - phdr->msg_type = le16_to_cpu(phdr->msg_type); - phdr->msg_length = le16_to_cpu(phdr->msg_length); - phdr->msg_flags = le16_to_cpu(phdr->msg_flags); + phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type); + phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length); + phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags); #endif /* __BIG_ENDIAN */ } EXPORT_SYMBOL_GPL(smsendian_handle_message_header); diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 700f433261d01b7700756cde33c2ca8b05af3e59..e4d7f2febf00ce0f5fb24227bc0228d325d49955 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -1001,7 +1001,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, if (rval) goto out; - for (i = 0; i < 1000; i++) { + for (i = 1000; i > 0; i--) { rval = smiapp_read( sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s); @@ -1012,11 +1012,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY) break; - if (--i == 0) { - rval = -ETIMEDOUT; - goto out; - } - + } + if (!i) { + rval = -ETIMEDOUT; + goto out; } for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) { diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 
98de74862d472531ecea493e47a6241947f31418..62b2c5d9bdfb704a6cb40d61268d9b35a170536a 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -56,7 +56,7 @@ static int media_device_close(struct file *filp) static long media_device_get_info(struct media_device *dev, void *arg) { - struct media_device_info *info = (struct media_device_info *)arg; + struct media_device_info *info = arg; memset(info, 0, sizeof(*info)); @@ -96,7 +96,7 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id) static long media_device_enum_entities(struct media_device *mdev, void *arg) { - struct media_entity_desc *entd = (struct media_entity_desc *)arg; + struct media_entity_desc *entd = arg; struct media_entity *ent; ent = find_entity(mdev, entd->id); @@ -149,7 +149,7 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad, static long media_device_enum_links(struct media_device *mdev, void *arg) { - struct media_links_enum *links = (struct media_links_enum *)arg; + struct media_links_enum *links = arg; struct media_entity *entity; entity = find_entity(mdev, links->entity); @@ -197,7 +197,7 @@ static long media_device_enum_links(struct media_device *mdev, void *arg) static long media_device_setup_link(struct media_device *mdev, void *arg) { - struct media_link_desc *linkd = (struct media_link_desc *)arg; + struct media_link_desc *linkd = arg; struct media_link *link = NULL; struct media_entity *source; struct media_entity *sink; @@ -225,7 +225,7 @@ static long media_device_setup_link(struct media_device *mdev, void *arg) static long media_device_get_topology(struct media_device *mdev, void *arg) { - struct media_v2_topology *topo = (struct media_v2_topology *)arg; + struct media_v2_topology *topo = arg; struct media_entity *entity; struct media_interface *intf; struct media_pad *pad; diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c index 
ef4906406ebf9aa67f527c9ac81707a1a786c373..a50461861133f7aebfe73c6b18a7f0dc3a1b201a 100644 --- a/drivers/media/pci/saa7164/saa7164-fw.c +++ b/drivers/media/pci/saa7164/saa7164-fw.c @@ -426,7 +426,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev) __func__, fw->size); if (fw->size != fwlength) { - printk(KERN_ERR "xc5000: firmware incorrect size\n"); + printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n", + fw->size, fwlength); ret = -ENOMEM; goto out; } diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c index c3fafa97b2d0c1dd30037ff77d72f0baeb1dc221..0ea8dd44026c34e6772b35c391aa03d15aefeffd 100644 --- a/drivers/media/pci/tw686x/tw686x-video.c +++ b/drivers/media/pci/tw686x/tw686x-video.c @@ -1228,7 +1228,8 @@ int tw686x_video_init(struct tw686x_dev *dev) vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vc->vidq.min_buffers_needed = 2; vc->vidq.lock = &vc->vb_mutex; - vc->vidq.gfp_flags = GFP_DMA32; + vc->vidq.gfp_flags = dev->dma_mode != TW686X_DMA_MODE_MEMCPY ? 
+ GFP_DMA32 : 0; vc->vidq.dev = &dev->pci_dev->dev; err = vb2_queue_init(&vc->vidq); diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c index 06151bbd3440f3f15a302083d610c6096f0295f6..20d3ecf13d3e5d861a9eabe555333fcfd269cc06 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_context.c +++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c @@ -163,7 +163,6 @@ int cam_context_handle_crm_apply_req(struct cam_context *ctx, return -EINVAL; } - mutex_lock(&ctx->ctx_mutex); if (ctx->state_machine[ctx->state].crm_ops.apply_req) { rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx, apply); @@ -172,7 +171,6 @@ int cam_context_handle_crm_apply_req(struct cam_context *ctx, ctx->dev_hdl, ctx->state); rc = -EPROTO; } - mutex_unlock(&ctx->ctx_mutex); return rc; } diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c index 89aad8cb58c3f80b76f9d74cf0b365d0cc856e1c..6c2383ed811005558a3de010a7afd631ca592746 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c +++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c @@ -275,7 +275,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, uint64_t packet_addr; struct cam_packet *packet; size_t len = 0; - int32_t i = 0; + int32_t i = 0, j = 0; if (!ctx || !cmd) { CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd); @@ -355,6 +355,15 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, req->status = 1; req->req_priv = cfg.priv; + for (i = 0; i < req->num_out_map_entries; i++) { + rc = cam_sync_get_obj_ref(req->out_map_entries[i].sync_id); + if (rc) { + CAM_ERR(CAM_CTXT, "Can't get ref for sync %d", + req->out_map_entries[i].sync_id); + goto put_ref; + } + } + if (req->num_in_map_entries > 0) { spin_lock(&ctx->lock); list_add_tail(&req->list, &ctx->pending_req_list); @@ -365,17 +374,17 @@ 
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, "[%s][%d] : Moving req[%llu] from free_list to pending_list", ctx->dev_name, ctx->ctx_id, req->request_id); - for (i = 0; i < req->num_in_map_entries; i++) { + for (j = 0; j < req->num_in_map_entries; j++) { cam_context_getref(ctx); rc = cam_sync_register_callback( cam_context_sync_callback, (void *)req, - req->in_map_entries[i].sync_id); + req->in_map_entries[j].sync_id); if (rc) { CAM_ERR(CAM_CTXT, "[%s][%d] Failed register fence cb: %d ret = %d", ctx->dev_name, ctx->ctx_id, - req->in_map_entries[i].sync_id, rc); + req->in_map_entries[j].sync_id, rc); spin_lock(&ctx->lock); list_del_init(&req->list); spin_unlock(&ctx->lock); @@ -388,16 +397,23 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, cam_context_putref(ctx); - goto free_req; + goto put_ref; } CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d", - req->in_map_entries[i].sync_id, rc); + req->in_map_entries[j].sync_id, rc); } goto end; } return rc; +put_ref: + for (--i; i >= 0; i--) { + rc = cam_sync_put_obj_ref(req->out_map_entries[i].sync_id); + if (rc) + CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d", + req->out_map_entries[i].sync_id); + } free_req: spin_lock(&ctx->lock); list_add_tail(&req->list, &ctx->free_req_list); diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c index 235c754c30f425beca90009e08a9d8d4fc06adb9..f0dc8b500b490f03881d902c0907d906a48ca88f 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_node.c +++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c @@ -286,20 +286,30 @@ static int __cam_node_handle_release_dev(struct cam_node *node, return -EINVAL; } - rc = cam_context_handle_release_dev(ctx, release); - if (rc) - CAM_ERR(CAM_CORE, "context release failed node %s", node->name); + if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) { + rc = cam_context_handle_release_dev(ctx, release); + if (rc) + 
CAM_ERR(CAM_CORE, "context release failed for node %s", + node->name); + } else { + CAM_WARN(CAM_CORE, + "node %s context id %u state %d invalid to release hdl", + node->name, ctx->ctx_id, ctx->state); + goto destroy_dev_hdl; + } + cam_context_putref(ctx); + +destroy_dev_hdl: rc = cam_destroy_device_hdl(release->dev_handle); if (rc) - CAM_ERR(CAM_CORE, "destroy device handle is failed node %s", + CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s", node->name); CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d", node->name, ctx->ctx_id, atomic_read(&(ctx->refcount.refcount.refs))); - cam_context_putref(ctx); return rc; } diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h index ceb3c02f1d15980e68d1d16dac63bb19d8e5f2a6..b4ef971fb253896d980df1e2f4dc990ab2518791 100644 --- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h +++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h @@ -381,7 +381,7 @@ static struct cam_camnoc_specific .value = 0x0, }, .ubwc_ctl = { - .enable = true, + .enable = false, .access_type = CAM_REG_TYPE_READ_WRITE, .masked_value = 0, .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ @@ -431,11 +431,11 @@ static struct cam_camnoc_specific .value = 0x0, }, .ubwc_ctl = { - .enable = true, + .enable = false, .access_type = CAM_REG_TYPE_READ_WRITE, .masked_value = 0, .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ - .value = 1, + .value = 0x5, }, }, { diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c index 4b5f22eda7627096bc8cb25978a7a5fd41a3b728..8f2c76943ebce82e4f2c466e7b96fca1f47ad3c2 100644 --- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c +++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c @@ -354,9 +354,11 @@ irqreturn_t cam_a5_irq(int irq_num, void 
*data) CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5"); } + spin_lock(&a5_dev->hw_lock); if (core_info->irq_cb.icp_hw_mgr_cb) core_info->irq_cb.icp_hw_mgr_cb(irq_status, core_info->irq_cb.data); + spin_unlock(&a5_dev->hw_lock); return IRQ_HANDLED; } @@ -369,6 +371,7 @@ int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type, struct cam_a5_device_core_info *core_info = NULL; struct cam_a5_device_hw_info *hw_info = NULL; struct a5_soc_info *a5_soc = NULL; + unsigned long flags; int rc = 0; if (!device_priv) { @@ -414,8 +417,10 @@ int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type, return -EINVAL; } + spin_lock_irqsave(&a5_dev->hw_lock, flags); core_info->irq_cb.icp_hw_mgr_cb = irq_cb->icp_hw_mgr_cb; core_info->irq_cb.data = irq_cb->data; + spin_unlock_irqrestore(&a5_dev->hw_lock, flags); break; } diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c index 6639d33f31d14f42f950b449a5f0c0cdc4b951a8..f4d63ca0244282706219938be97bde5f935cbed1 100644 --- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c +++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c @@ -50,6 +50,7 @@ #include "cam_soc_util.h" #include "cam_trace.h" #include "cam_cpas_api.h" +#include "cam_common_util.h" #define ICP_WORKQ_TASK_CMD_TYPE 1 #define ICP_WORKQ_TASK_MSG_TYPE 2 @@ -2001,6 +2002,11 @@ int32_t cam_icp_hw_mgr_cb(uint32_t irq_status, void *data) struct crm_workq_task *task; struct hfi_msg_work_data *task_data; + if (!data) { + CAM_ERR(CAM_ICP, "irq cb data is NULL"); + return rc; + } + spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags); task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work); if (!task) { @@ -3100,7 +3106,7 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data, task_data->type = ICP_WORKQ_TASK_MSG_TYPE; task->process_cb = cam_icp_mgr_process_cmd; size_in_words = 
(*(uint32_t *)task_data->data) >> 2; - CAM_INFO(CAM_ICP, "size_in_words %u", size_in_words); + CAM_DBG(CAM_ICP, "size_in_words %u", size_in_words); rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0); if (rc) @@ -3389,6 +3395,11 @@ static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr, io_cfg_ptr[i].resource_type); } + if (prepare_args->num_in_map_entries > 1) + prepare_args->num_in_map_entries = + cam_common_util_remove_duplicate_arr( + sync_in_obj, prepare_args->num_in_map_entries); + if (prepare_args->num_in_map_entries > 1) { rc = cam_sync_merge(&sync_in_obj[0], prepare_args->num_in_map_entries, &merged_sync_in_obj); diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c index 334928fd8bb1a60c6af06fdf73253ebbb5454e62..63d4bc7d7c22789579340734e5a7558b943f0758 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c +++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c @@ -687,24 +687,16 @@ static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp, * This is for the first update. The initial setting will * cause the reg_upd in the first frame. 
*/ - if (!list_empty(&ctx->pending_req_list)) { - req = list_first_entry(&ctx->pending_req_list, + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); list_del_init(&req->list); req_isp = (struct cam_isp_ctx_req *) req->req_priv; - if (req_isp->num_fence_map_out == req_isp->num_acked) { + if (req_isp->num_fence_map_out == req_isp->num_acked) list_add_tail(&req->list, &ctx->free_req_list); - } else { - /* need to handle the buf done */ - list_add_tail(&req->list, &ctx->active_req_list); - ctx_isp->active_req_cnt++; - CAM_DBG(CAM_REQ, - "move request %lld to active list(cnt = %d)", - req->request_id, - ctx_isp->active_req_cnt); - ctx_isp->substate_activated = - CAM_ISP_CTX_ACTIVATED_EPOCH; - } + else + CAM_ERR(CAM_ISP, + "receive rup in unexpected state"); } if (req != NULL) { __cam_isp_ctx_update_state_monitor_array(ctx_isp, @@ -728,7 +720,7 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, * If no wait req in epoch, this is an error case. * The recovery is to go back to sof state */ - CAM_ERR(CAM_ISP, "No pending request"); + CAM_ERR(CAM_ISP, "No wait request"); ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; /* Send SOF event as empty frame*/ @@ -830,8 +822,10 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp, req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request, list); - __cam_isp_ctx_update_state_monitor_array(ctx_isp, - CAM_ISP_STATE_CHANGE_TRIGGER_SOF, ctx->req_list->request_id); + if (req) + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_SOF, + ctx->req_list->request_id); CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated); @@ -873,7 +867,7 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( * transition to BUBBLE state again. */ - if (list_empty(&ctx->pending_req_list)) { + if (list_empty(&ctx->wait_req_list)) { /* * If no pending req in epoch, this is an error case. 
* Just go back to the bubble state. @@ -886,14 +880,16 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( goto end; } - req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); req_isp = (struct cam_isp_ctx_req *)req->req_priv; + list_del_init(&req->list); if (req_isp->bubble_report && ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) { struct cam_req_mgr_error_notify notify; + list_add(&req->list, &ctx->pending_req_list); notify.link_hdl = ctx->link_hdl; notify.dev_hdl = ctx->dev_hdl; notify.req_id = req->request_id; @@ -907,7 +903,6 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( * If we can not report bubble, then treat it as if no bubble * report. Just move the req to active list. */ - list_del_init(&req->list); list_add_tail(&req->list, &ctx->active_req_list); ctx_isp->active_req_cnt++; CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)", @@ -933,8 +928,9 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( end: req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request, list); - __cam_isp_ctx_update_state_monitor_array(ctx_isp, - CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id); + if (req) + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id); return 0; } @@ -1162,7 +1158,7 @@ static int __cam_isp_ctx_apply_req_in_activated_state( { int rc = 0; struct cam_ctx_request *req; - struct cam_ctx_request *active_req; + struct cam_ctx_request *active_req = NULL; struct cam_isp_ctx_req *req_isp; struct cam_isp_ctx_req *active_req_isp; struct cam_isp_context *ctx_isp = NULL; @@ -1182,8 +1178,10 @@ static int __cam_isp_ctx_apply_req_in_activated_state( * */ ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + spin_lock_bh(&ctx->lock); req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list); + spin_unlock_bh(&ctx->lock); /* * Check whehter the request id is matching the tip, 
if not, this means @@ -1206,19 +1204,25 @@ static int __cam_isp_ctx_apply_req_in_activated_state( "Reject apply request (id %lld) due to congestion(cnt = %d)", req->request_id, ctx_isp->active_req_cnt); - if (!list_empty(&ctx->active_req_list)) { + + spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->active_req_list)) active_req = list_first_entry(&ctx->active_req_list, struct cam_ctx_request, list); - active_req_isp = - (struct cam_isp_ctx_req *) active_req->req_priv; - __cam_isp_ctx_handle_buf_done_fail_log(active_req_isp); - } else { + else CAM_ERR_RATE_LIMIT(CAM_ISP, "WARNING: should not happen (cnt = %d) but active_list empty", ctx_isp->active_req_cnt); + spin_unlock_bh(&ctx->lock); + + if (active_req) { + active_req_isp = + (struct cam_isp_ctx_req *) active_req->req_priv; + __cam_isp_ctx_handle_buf_done_fail_log(active_req_isp); } - rc = -EFAULT; - goto end; + + rc = -EFAULT; + goto end; } req_isp->bubble_report = apply->report_if_bubble; @@ -1752,11 +1756,11 @@ static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state( ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH; /* notify reqmgr with sof signal*/ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) { - if (list_empty(&ctx->pending_req_list)) { - CAM_ERR(CAM_ISP, "Reg upd ack with no pending request"); + if (list_empty(&ctx->wait_req_list)) { + CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request"); goto error; } - req = list_first_entry(&ctx->pending_req_list, + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); list_del_init(&req->list); @@ -2000,7 +2004,7 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx, static int __cam_isp_ctx_config_dev_in_top_state( struct cam_context *ctx, struct cam_config_dev_cmd *cmd) { - int rc = 0; + int rc = 0, i; struct cam_ctx_request *req = NULL; struct cam_isp_ctx_req *req_isp; uint64_t packet_addr; @@ -2076,6 +2080,15 @@ static int __cam_isp_ctx_config_dev_in_top_state( 
req_isp->num_fence_map_in = cfg.num_in_map_entries; req_isp->num_acked = 0; + for (i = 0; i < req_isp->num_fence_map_out; i++) { + rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id); + if (rc) { + CAM_ERR(CAM_ISP, "Can't get ref for fence %d", + req_isp->fence_map_out[i].sync_id); + goto put_ref; + } + } + CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d", req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in); @@ -2117,7 +2130,7 @@ static int __cam_isp_ctx_config_dev_in_top_state( } } if (rc) - goto free_req; + goto put_ref; CAM_DBG(CAM_REQ, "Preprocessing Config req_id %lld successful on ctx %u", @@ -2125,6 +2138,13 @@ static int __cam_isp_ctx_config_dev_in_top_state( return rc; +put_ref: + for (--i; i >= 0; i--) { + rc = cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id); + if (rc) + CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d", + req_isp->fence_map_out[i].sync_id); + } free_req: spin_lock_bh(&ctx->lock); list_add_tail(&req->list, &ctx->free_req_list); @@ -2401,9 +2421,13 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx, } CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id); + list_del_init(&req->list); + if (req_isp->num_fence_map_out) { - list_del_init(&req->list); list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + } else { + list_add_tail(&req->list, &ctx->wait_req_list); } end: return rc; diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c index 548e849dd45c8e7aca5b8def4c1dfe2e6eb222b7..c5e1c52b907fb7cb76e9c94e286bde2bdc1d3e9f 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c @@ -42,6 +42,9 @@ #define CAM_IFE_CSID_QTIMER_MUL_FACTOR 10000 #define 
CAM_IFE_CSID_QTIMER_DIV_FACTOR 192 +/* Max number of sof irq's triggered in case of SOF freeze */ +#define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 12 + static int cam_ife_csid_is_ipp_ppp_format_supported( uint32_t in_format) { @@ -1410,15 +1413,43 @@ static int cam_ife_csid_disable_csi2( if (csid_hw->csi2_cfg_cnt) return 0; - /*Disable the CSI2 rx inerrupts */ + /* Disable the CSI2 rx inerrupts */ cam_io_w_mb(0, soc_info->reg_map[0].mem_base + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + /* Reset the Rx CFG registers */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr); + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr); + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; return 0; } +static void cam_ife_csid_halt_csi2( + struct cam_ife_csid_hw *csid_hw) +{ + const struct cam_ife_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + CAM_INFO(CAM_ISP, "CSID: %d cnt: %d Halt csi2 rx", + csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt); + + /* Disable the CSI2 rx inerrupts */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + + /* Reset the Rx CFG registers */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr); + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr); +} + static int cam_ife_csid_init_config_pxl_path( struct cam_ife_csid_hw *csid_hw, struct cam_isp_resource_node *res) @@ -2501,6 +2532,10 @@ static int cam_ife_csid_start(void *hw_priv, void *start_args, goto end; } + /* Reset sof irq debug fields */ + csid_hw->sof_irq_triggered = false; + csid_hw->irq_debug_cnt = 0; + CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d", csid_hw->hw_intf->hw_idx, res->res_type, res->res_id); @@ -2653,10 +2688,13 @@ static int cam_ife_csid_sof_irq_debug( } } - 
if (sof_irq_enable) + if (sof_irq_enable) { csid_hw->csid_debug |= CSID_DEBUG_ENABLE_SOF_IRQ; - else + csid_hw->sof_irq_triggered = true; + } else { csid_hw->csid_debug &= ~CSID_DEBUG_ENABLE_SOF_IRQ; + csid_hw->sof_irq_triggered = false; + } CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s", (sof_irq_enable == true) ? "enabled" : "disabled"); @@ -2709,6 +2747,8 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0; uint32_t irq_status_rdi[4] = {0, 0, 0, 0}; uint32_t val, irq_status_ppp = 0; + bool fatal_err_detected = false; + uint32_t sof_irq_debug_en = 0; csid_hw = (struct cam_ife_csid_hw *)data; @@ -2777,22 +2817,27 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow", csid_hw->hw_intf->hw_idx); + fatal_err_detected = true; } if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow", csid_hw->hw_intf->hw_idx); + fatal_err_detected = true; } if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow", csid_hw->hw_intf->hw_idx); + fatal_err_detected = true; } if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow", csid_hw->hw_intf->hw_idx); + fatal_err_detected = true; } if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) { - CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW", + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW", csid_hw->hw_intf->hw_idx); + fatal_err_detected = true; } if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION", @@ -2827,24 +2872,27 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) csid_hw->hw_intf->hw_idx); } + if (fatal_err_detected) + cam_ife_csid_halt_csi2(csid_hw); + if (csid_hw->csid_debug & 
CSID_DEBUG_ENABLE_EOT_IRQ) { if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL0_EOT_CAPTURED", csid_hw->hw_intf->hw_idx); } if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL1_EOT_CAPTURED", csid_hw->hw_intf->hw_idx); } if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL2_EOT_CAPTURED", csid_hw->hw_intf->hw_idx); } if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL3_EOT_CAPTURED", csid_hw->hw_intf->hw_idx); } @@ -2852,22 +2900,22 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) { if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL0_SOT_CAPTURED", csid_hw->hw_intf->hw_idx); } if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL1_SOT_CAPTURED", csid_hw->hw_intf->hw_idx); } if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL2_SOT_CAPTURED", csid_hw->hw_intf->hw_idx); } if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) { - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PHY_DL3_SOT_CAPTURED", csid_hw->hw_intf->hw_idx); } @@ -2875,44 +2923,48 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) && (irq_status_rx & CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) { - CAM_ERR(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED", csid_hw->hw_intf->hw_idx); val = 
cam_io_r_mb(soc_info->reg_map[0].mem_base + csi2_reg->csid_csi2_rx_captured_long_pkt_0_addr); - CAM_ERR(CAM_ISP, "CSID:%d long packet VC :%d DT:%d WC:%d", + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d long packet VC :%d DT:%d WC:%d", csid_hw->hw_intf->hw_idx, (val >> 22), ((val >> 16) & 0x3F), (val & 0xFFFF)); val = cam_io_r_mb(soc_info->reg_map[0].mem_base + csi2_reg->csid_csi2_rx_captured_long_pkt_1_addr); - CAM_ERR(CAM_ISP, "CSID:%d long packet ECC :%d", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d long packet ECC :%d", csid_hw->hw_intf->hw_idx, val); val = cam_io_r_mb(soc_info->reg_map[0].mem_base + csi2_reg->csid_csi2_rx_captured_long_pkt_ftr_addr); - CAM_ERR(CAM_ISP, "CSID:%d long pkt cal CRC:%d expected CRC:%d", + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d long pkt cal CRC:%d expected CRC:%d", csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF)); } if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) && (irq_status_rx & CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) { - CAM_ERR(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED", csid_hw->hw_intf->hw_idx); val = cam_io_r_mb(soc_info->reg_map[0].mem_base + csi2_reg->csid_csi2_rx_captured_short_pkt_0_addr); - CAM_ERR(CAM_ISP, "CSID:%d short pkt VC :%d DT:%d LC:%d", + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d short pkt VC :%d DT:%d LC:%d", csid_hw->hw_intf->hw_idx, (val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF)); val = cam_io_r_mb(soc_info->reg_map[0].mem_base + csi2_reg->csid_csi2_rx_captured_short_pkt_1_addr); - CAM_ERR(CAM_ISP, "CSID:%d short packet ECC :%d", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d short packet ECC :%d", csid_hw->hw_intf->hw_idx, val); } if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) && (irq_status_rx & CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) { - CAM_ERR(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED", csid_hw->hw_intf->hw_idx); val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 
csi2_reg->csid_csi2_rx_captured_cphy_pkt_hdr_addr); - CAM_ERR(CAM_ISP, "CSID:%d cphy packet VC :%d DT:%d WC:%d", + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d cphy packet VC :%d DT:%d WC:%d", csid_hw->hw_intf->hw_idx, (val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF)); } @@ -2927,20 +2979,23 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) } if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) && - (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) - CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received", + (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received", csid_hw->hw_intf->hw_idx); + if (csid_hw->sof_irq_triggered) + csid_hw->irq_debug_cnt++; + } if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) && (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)) - CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received", csid_hw->hw_intf->hw_idx); if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP fifo over flow", csid_hw->hw_intf->hw_idx); - /*Stop IPP path immediately */ + /* Stop IPP path immediately */ cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY, soc_info->reg_map[0].mem_base + csid_reg->ipp_reg->csid_pxl_ctrl_addr); @@ -2957,20 +3012,23 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) } if ((irq_status_ppp & CSID_PATH_INFO_INPUT_SOF) && - (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) - CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d PPP SOF received", + (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP SOF received", csid_hw->hw_intf->hw_idx); + if (csid_hw->sof_irq_triggered) + csid_hw->irq_debug_cnt++; + } if ((irq_status_ppp & CSID_PATH_INFO_INPUT_EOF) && (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)) - CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received", + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received", csid_hw->hw_intf->hw_idx); if (irq_status_ppp & 
CSID_PATH_ERROR_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d PPP fifo over flow", csid_hw->hw_intf->hw_idx); - /*Stop PPP path immediately */ + /* Stop PPP path immediately */ cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY, soc_info->reg_map[0].mem_base + csid_reg->ppp_reg->csid_pxl_ctrl_addr); @@ -2985,26 +3043,34 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data) } if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) && - (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) - CAM_ERR_RATE_LIMIT(CAM_ISP, + (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID RDI:%d SOF received", i); + if (csid_hw->sof_irq_triggered) + csid_hw->irq_debug_cnt++; + } if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF) && (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)) - CAM_ERR_RATE_LIMIT(CAM_ISP, + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID RDI:%d EOF received", i); if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) { CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d RDI fifo over flow", csid_hw->hw_intf->hw_idx); - /*Stop RDI path immediately */ + /* Stop RDI path immediately */ cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY, soc_info->reg_map[0].mem_base + csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr); } } + if (csid_hw->irq_debug_cnt >= CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) { + cam_ife_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en); + csid_hw->irq_debug_cnt = 0; + } + CAM_DBG(CAM_ISP, "IRQ Handling exit"); return IRQ_HANDLED; } diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h index b920602fc5d5e8f97dc5d8fa3e0e5fd07e6c7155..730528de2796ae558fbbf7a48f79c12febd96fcf 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h @@ -441,6 +441,9 @@ struct cam_ife_csid_path_cfg { * @csid_debug: 
csid debug information to enable the SOT, EOT, * SOF, EOF, measure etc in the csid hw * @clk_rate Clock rate + * @sof_irq_triggered: Flag is set on receiving event to enable sof irq + * incase of SOF freeze. + * @irq_debug_cnt: Counter to track sof irq's when above flag is set. * */ struct cam_ife_csid_hw { @@ -464,6 +467,8 @@ struct cam_ife_csid_hw { struct completion csid_rdin_complete[CAM_IFE_CSID_RDI_MAX]; uint64_t csid_debug; uint64_t clk_rate; + bool sof_irq_triggered; + uint32_t irq_debug_cnt; }; int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf, diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h index a578da3ece79cc6f480fd639e9eb20d1764171ff..b957d6913f776959ae8b00db118d30809bc8dcfb 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h @@ -238,6 +238,7 @@ struct cam_vfe_top_irq_evt_payload { * * @list: list_head node for the payload * @core_index: Index of VFE HW that generated this IRQ event + * @debug_status_0: Value of debug status_0 register at time of IRQ * @evt_id: IRQ event * @irq_reg_val: IRQ and Error register values, read when IRQ was * handled @@ -248,6 +249,7 @@ struct cam_vfe_top_irq_evt_payload { struct cam_vfe_bus_irq_evt_payload { struct list_head list; uint32_t core_index; + uint32_t debug_status_0; uint32_t evt_id; uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX]; uint32_t error_type; diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h index 992ffac5cc906dda9780d57a5f8ce850aa2c6db7..c810c1bf998cba8e3beacbfa62b3701803df6d5e 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h 
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h @@ -239,6 +239,8 @@ static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = { .addr_sync_cfg = 0x0000207C, .addr_sync_frame_hdr = 0x00002080, .addr_sync_no_sync = 0x00002084, + .debug_status_cfg = 0x0000226C, + .debug_status_0 = 0x00002270, }, .num_client = 20, .bus_client_reg = { diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c index 654f274f8079834744443eddb4f3a29784a1bd7d..620349faef6048f00c5b75959e2d8002bd0fe251 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c @@ -36,6 +36,8 @@ static const char drv_name[] = "vfe_bus"; #define CAM_VFE_BUS_VER2_PAYLOAD_MAX 256 +#define CAM_VFE_BUS_SET_DEBUG_REG 0x82 + #define CAM_VFE_RDI_BUS_DEFAULT_WIDTH 0xFF01 #define CAM_VFE_RDI_BUS_DEFAULT_STRIDE 0xFF01 #define CAM_VFE_BUS_INTRA_CLIENT_MASK 0x3 @@ -208,6 +210,7 @@ struct cam_vfe_bus_ver2_priv { uint32_t irq_handle; uint32_t error_irq_handle; + void *tasklet_info; }; static int cam_vfe_bus_process_cmd( @@ -1348,6 +1351,99 @@ static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node, return rc; } + +static int cam_vfe_bus_err_bottom_half(void *ctx_priv, + void *evt_payload_priv) +{ + struct cam_vfe_bus_irq_evt_payload *evt_payload; + struct cam_vfe_bus_ver2_common_data *common_data; + uint32_t val = 0; + + if (!ctx_priv || !evt_payload_priv) + return -EINVAL; + + evt_payload = evt_payload_priv; + common_data = evt_payload->ctx; + + val = evt_payload->debug_status_0; + CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val); + + if (val & 0x01) + CAM_INFO(CAM_ISP, "RDI 0 violation"); + + if (val & 0x02) + CAM_INFO(CAM_ISP, "RDI 1 violation"); + + if (val & 0x04) + 
CAM_INFO(CAM_ISP, "RDI 2 violation"); + + if (val & 0x08) + CAM_INFO(CAM_ISP, "VID Y 1:1 UBWC violation"); + + if (val & 0x010) + CAM_INFO(CAM_ISP, "VID C 1:1 UBWC violation"); + + if (val & 0x020) + CAM_INFO(CAM_ISP, "VID YC 4:1 violation"); + + if (val & 0x040) + CAM_INFO(CAM_ISP, "VID YC 16:1 violation"); + + if (val & 0x080) + CAM_INFO(CAM_ISP, "FD Y violation"); + + if (val & 0x0100) + CAM_INFO(CAM_ISP, "FD C violation"); + + if (val & 0x0200) + CAM_INFO(CAM_ISP, "RAW DUMP violation"); + + if (val & 0x0400) + CAM_INFO(CAM_ISP, "PDAF violation"); + + if (val & 0x0800) + CAM_INFO(CAM_ISP, "STATs HDR BE violation"); + + if (val & 0x01000) + CAM_INFO(CAM_ISP, "STATs HDR BHIST violation"); + + if (val & 0x02000) + CAM_INFO(CAM_ISP, "STATs TINTLESS BG violation"); + + if (val & 0x04000) + CAM_INFO(CAM_ISP, "STATs BF violation"); + + if (val & 0x08000) + CAM_INFO(CAM_ISP, "STATs AWB BG UBWC violation"); + + if (val & 0x010000) + CAM_INFO(CAM_ISP, "STATs BHIST violation"); + + if (val & 0x020000) + CAM_INFO(CAM_ISP, "STATs RS violation"); + + if (val & 0x040000) + CAM_INFO(CAM_ISP, "STATs CS violation"); + + if (val & 0x080000) + CAM_INFO(CAM_ISP, "STATs IHIST violation"); + + if (val & 0x0100000) + CAM_INFO(CAM_ISP, "DISP Y 1:1 UBWC violation"); + + if (val & 0x0200000) + CAM_INFO(CAM_ISP, "DISP C 1:1 UBWC violation"); + + if (val & 0x0400000) + CAM_INFO(CAM_ISP, "DISP YC 4:1 violation"); + + if (val & 0x0800000) + CAM_INFO(CAM_ISP, "DISP YC 16:1 violation"); + + cam_vfe_bus_put_evt_payload(common_data, &evt_payload); + return 0; +} + static int cam_vfe_bus_init_wm_resource(uint32_t index, struct cam_vfe_bus_ver2_priv *ver2_bus_priv, struct cam_vfe_bus_ver2_hw_info *ver2_hw_info, @@ -2048,6 +2144,7 @@ static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args, } mutex_unlock(&rsrc_data->common_data->bus_mutex); + ver2_bus_priv->tasklet_info = acq_args->tasklet; rsrc_data->num_wm = num_wm; rsrc_node->res_id = out_acquire_args->out_port_info->res_type; 
rsrc_node->tasklet_info = acq_args->tasklet; @@ -2389,9 +2486,10 @@ static int cam_vfe_bus_ver2_handle_irq(uint32_t evt_id, static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id, struct cam_irq_th_payload *th_payload) { - int i = 0; + int i = 0, rc = 0; struct cam_vfe_bus_ver2_priv *bus_priv = th_payload->handler_priv; + struct cam_vfe_bus_irq_evt_payload *evt_payload; CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ"); for (i = 0; i < th_payload->num_registers; i++) { @@ -2402,8 +2500,25 @@ static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id, cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller, bus_priv->error_irq_handle); - /* Returning error stops from enqueuing bottom half */ - return -EFAULT; + rc = cam_vfe_bus_get_evt_payload(&bus_priv->common_data, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot get payload"); + return rc; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->core_index = bus_priv->common_data.core_index; + evt_payload->evt_id = evt_id; + evt_payload->ctx = &bus_priv->common_data; + evt_payload->debug_status_0 = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->debug_status_0); + + th_payload->evt_payload_priv = evt_payload; + + return rc; } static void cam_vfe_bus_update_ubwc_meta_addr( @@ -3161,15 +3276,19 @@ static int cam_vfe_bus_init_hw(void *hw_priv, bus_error_irq_mask, bus_priv, cam_vfe_bus_error_irq_top_half, - NULL, - NULL, - NULL); + cam_vfe_bus_err_bottom_half, + bus_priv->tasklet_info, + &tasklet_bh_api); if (bus_priv->irq_handle <= 0) { CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ"); return -EFAULT; } + /*Set Debug Registers*/ + cam_io_w_mb(CAM_VFE_BUS_SET_DEBUG_REG, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->debug_status_cfg); + /* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */ cam_io_w_mb(0x0, bus_priv->common_data.mem_base + 
bus_priv->common_data.common_reg->addr_sync_frame_hdr); diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h index 02ba3f67267afde4394efb18b0f78b1de370f58f..73b7eb2dc59d40bfa789824a6a295f3e07b5afe7 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h @@ -87,6 +87,8 @@ struct cam_vfe_bus_ver2_reg_offset_common { uint32_t addr_sync_cfg; uint32_t addr_sync_frame_hdr; uint32_t addr_sync_no_sync; + uint32_t debug_status_cfg; + uint32_t debug_status_0; }; /* diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c index b554fe48de3c29c59c36a7c86f6baf822e852fd3..1da2d2e9588ea30a05498275aa102f49cad67497 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c @@ -24,6 +24,8 @@ #include "cam_cdm_util.h" #include "cam_cpas_api.h" +#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2 + struct cam_vfe_mux_camif_data { void __iomem *mem_base; struct cam_hw_intf *hw_intf; @@ -40,6 +42,7 @@ struct cam_vfe_mux_camif_data { uint32_t last_pixel; uint32_t last_line; bool enable_sof_irq_debug; + uint32_t irq_debug_cnt; }; static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern) @@ -208,6 +211,8 @@ static int cam_vfe_camif_resource_start( uint32_t epoch0_irq_mask; uint32_t epoch1_irq_mask; uint32_t computed_epoch_line_cfg; + uint32_t camera_hw_version = 0; + int rc = 0; if (!camif_res) { CAM_ERR(CAM_ISP, "Error! 
Invalid input arguments"); @@ -247,16 +252,50 @@ static int cam_vfe_camif_resource_start( rsrc_data->common_reg->module_ctrl[ CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd); + /* get the HW version */ + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + + if (rc) { + CAM_ERR(CAM_ISP, "Couldn't find HW version. rc: %d", rc); + return rc; + } + /* epoch config */ - epoch0_irq_mask = ((rsrc_data->last_line - rsrc_data->first_line) / 2) + - rsrc_data->first_line; - epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & 0xFFFF; - computed_epoch_line_cfg = (epoch0_irq_mask << 16) | epoch1_irq_mask; - cam_io_w_mb(computed_epoch_line_cfg, - rsrc_data->mem_base + rsrc_data->camif_reg->epoch_irq); - CAM_DBG(CAM_ISP, "first_line:%u last_line:%u epoch_line_cfg: 0x%x", - rsrc_data->first_line, rsrc_data->last_line, - computed_epoch_line_cfg); + switch (camera_hw_version) { + case CAM_CPAS_TITAN_175_V101: + case CAM_CPAS_TITAN_175_V100: + epoch0_irq_mask = ((rsrc_data->last_line - + rsrc_data->first_line) / 2) + + rsrc_data->first_line; + epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & + 0xFFFF; + computed_epoch_line_cfg = (epoch0_irq_mask << 16) | + epoch1_irq_mask; + cam_io_w_mb(computed_epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_reg->epoch_irq); + CAM_DBG(CAM_ISP, "first_line: %u\n" + "last_line: %u\n" + "epoch_line_cfg: 0x%x", + rsrc_data->first_line, + rsrc_data->last_line, + computed_epoch_line_cfg); + break; + case CAM_CPAS_TITAN_170_V100: + case CAM_CPAS_TITAN_170_V110: + case CAM_CPAS_TITAN_170_V120: + cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_reg->epoch_irq); + break; + default: + cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_reg->epoch_irq); + CAM_WARN(CAM_ISP, "Hardware version not proper: 0x%x", + camera_hw_version); + break; + } camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; @@ -266,6 +305,10 @@ static int cam_vfe_camif_resource_start( 
CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx, rsrc_data->reg_data->reg_update_cmd_data); + /* disable sof irq debug flag */ + rsrc_data->enable_sof_irq_debug = false; + rsrc_data->irq_debug_cnt = 0; + CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx); return 0; } @@ -440,11 +483,21 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv, switch (payload->evt_id) { case CAM_ISP_HW_EVENT_SOF: if (irq_status0 & camif_priv->reg_data->sof_irq_mask) { - if (camif_priv->enable_sof_irq_debug) - CAM_ERR_RATE_LIMIT(CAM_ISP, "Received SOF"); - else + if ((camif_priv->enable_sof_irq_debug) && + (camif_priv->irq_debug_cnt <= + CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF"); + + camif_priv->irq_debug_cnt++; + if (camif_priv->irq_debug_cnt == + CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) { + camif_priv->enable_sof_irq_debug = + false; + camif_priv->irq_debug_cnt = 0; + } + } else { CAM_DBG(CAM_ISP, "Received SOF"); - + } ret = CAM_VFE_IRQ_STATUS_SUCCESS; } break; diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c index 3d0266d6fb1388aabb3775f0851f0b6f06479cd9..5dc87633991f0c8d1078d261c62c0ca8abeed83c 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c +++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c @@ -25,7 +25,7 @@ static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx, uint64_t ctxt_to_hw_map = (uint64_t)ctx->ctxt_to_hw_map; struct cam_lrme_context *lrme_ctx = ctx->ctx_priv; - CAM_DBG(CAM_LRME, "Enter"); + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); rc = cam_context_acquire_dev_to_hw(ctx, cmd); if (rc) { @@ -46,7 +46,7 @@ static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx, { int rc = 0; - CAM_DBG(CAM_LRME, "Enter"); + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); rc = cam_context_release_dev_to_hw(ctx, cmd); if 
(rc) { @@ -64,7 +64,7 @@ static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx, { int rc = 0; - CAM_DBG(CAM_LRME, "Enter"); + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); rc = cam_context_start_dev_to_hw(ctx, cmd); if (rc) { @@ -82,7 +82,7 @@ static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx, { int rc; - CAM_DBG(CAM_LRME, "Enter"); + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); rc = cam_context_prepare_dev_to_hw(ctx, cmd); if (rc) { @@ -98,6 +98,8 @@ static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx, { int rc; + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + rc = cam_context_flush_dev_to_hw(ctx, cmd); if (rc) CAM_ERR(CAM_LRME, "Failed to flush device"); @@ -109,7 +111,7 @@ static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx, { int rc = 0; - CAM_DBG(CAM_LRME, "Enter"); + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); rc = cam_context_stop_dev_to_hw(ctx); if (rc) { @@ -127,7 +129,7 @@ static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx, { int rc = 0; - CAM_DBG(CAM_LRME, "Enter"); + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL); if (rc) { @@ -182,6 +184,7 @@ static struct cam_ctx_ops /* Acquired */ { .ioctl_ops = { + .config_dev = __cam_lrme_ctx_config_dev_in_activated, .release_dev = __cam_lrme_ctx_release_dev_in_acquired, .start_dev = __cam_lrme_ctx_start_dev_in_acquired, }, diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c index efb3ccb198a5e4e71bfd5df38573413ca54fe55c..49d04d11bb439294000a2fe46f631bb1fd17fb31 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c +++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c @@ -765,6 +765,12 @@ static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args) return -EINVAL; } + rc 
= hw_device->hw_intf.hw_ops.process_cmd( + hw_device->hw_intf.hw_priv, + CAM_LRME_HW_CMD_DUMP_REGISTER, + &g_lrme_hw_mgr.debugfs_entry.dump_register, + sizeof(bool)); + return rc; } @@ -963,6 +969,35 @@ static int cam_lrme_mgr_hw_config(void *hw_mgr_priv, return rc; } +static int cam_lrme_mgr_create_debugfs_entry(void) +{ + int rc = 0; + + g_lrme_hw_mgr.debugfs_entry.dentry = + debugfs_create_dir("camera_lrme", NULL); + if (!g_lrme_hw_mgr.debugfs_entry.dentry) { + CAM_ERR(CAM_LRME, "failed to create dentry"); + return -ENOMEM; + } + + if (!debugfs_create_bool("dump_register", + 0644, + g_lrme_hw_mgr.debugfs_entry.dentry, + &g_lrme_hw_mgr.debugfs_entry.dump_register)) { + CAM_ERR(CAM_LRME, "failed to create dump register entry"); + rc = -ENOMEM; + goto err; + } + + return rc; + +err: + debugfs_remove_recursive(g_lrme_hw_mgr.debugfs_entry.dentry); + g_lrme_hw_mgr.debugfs_entry.dentry = NULL; + return rc; +} + + int cam_lrme_mgr_register_device( struct cam_hw_intf *lrme_hw_intf, struct cam_iommu_handle *device_iommu, @@ -1113,6 +1148,8 @@ int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb; + cam_lrme_mgr_create_debugfs_entry(); + CAM_DBG(CAM_LRME, "Hw mgr init done"); return rc; } diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h index 09321d26c4ae0af039a558e75b5ed6e284d03714..b0e0cd1eba94c0b89273c1b23d5badd5c4e0045f 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h +++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h @@ -52,12 +52,23 @@ enum cam_lrme_hw_mgr_ctx_priority { /** * struct cam_lrme_mgr_work_data : HW Mgr work data * - * hw_device : Pointer to the hw device + * @hw_device : Pointer to the hw device */ struct cam_lrme_mgr_work_data { struct cam_lrme_device *hw_device; }; +/** + * struct cam_lrme_debugfs_entry : debugfs entry 
struct + * + * @dentry : entry of debugfs + * @dump_register : flag to dump registers + */ +struct cam_lrme_debugfs_entry { + struct dentry *dentry; + bool dump_register; +}; + /** * struct cam_lrme_device : LRME HW device * @@ -98,6 +109,7 @@ struct cam_lrme_device { * @frame_req : List of frame request to use * @lrme_caps : LRME capabilities * @event_cb : IRQ callback function + * @debugfs_entry : debugfs entry to set debug prop */ struct cam_lrme_hw_mgr { uint32_t device_count; @@ -110,6 +122,7 @@ struct cam_lrme_hw_mgr { struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX]; struct cam_lrme_query_cap_cmd lrme_caps; cam_hw_event_cb_func event_cb; + struct cam_lrme_debugfs_entry debugfs_entry; }; int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf, diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c index 595bb8182c8fbf58f88b188f2d69f14a96a2c0b9..022e1a7fcf1efbb258ab4611f31bc3712ab789e4 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c +++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c @@ -14,6 +14,20 @@ #include "cam_lrme_hw_soc.h" #include "cam_smmu_api.h" +static void cam_lrme_dump_registers(void __iomem *base) +{ + /* dump the clc registers */ + cam_io_dump(base, 0x60, (0xc0 - 0x60) / 0x4); + /* dump the fe and we registers */ + cam_io_dump(base, 0x200, (0x29c - 0x200) / 0x4); + cam_io_dump(base, 0x2f0, (0x330 - 0x2f0) / 0x4); + cam_io_dump(base, 0x500, (0x5b4 - 0x500) / 0x4); + cam_io_dump(base, 0x700, (0x778 - 0x700) / 0x4); + cam_io_dump(base, 0x800, (0x878 - 0x800) / 0x4); + /* dump lrme sw registers, interrupts */ + cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4); +} + static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer, uint32_t *index, uint32_t reg_offset, uint32_t reg_value) { @@ -64,7 +78,8 @@ static void 
cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf, cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x0); - else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY) + else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY || + io_buf->io_cfg->format == CAM_FORMAT_PLAIN8) cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x1); @@ -567,6 +582,8 @@ static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw) lrme_core->state); } + cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base); + CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery"); lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY; rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET); @@ -610,6 +627,9 @@ static int cam_lrme_hw_util_process_reg_update( lrme_core->req_proc = lrme_core->req_submit; lrme_core->req_submit = NULL; + if (lrme_core->dump_flag) + cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base); + return 0; } @@ -654,13 +674,13 @@ void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, cam_io_w_mb(0xFFFF, soc_info->reg_map[0].mem_base + hw_info->titan_reg.top_irq_mask); - cam_io_w_mb(0xFFFF, + cam_io_w_mb(0xFFFFF, soc_info->reg_map[0].mem_base + hw_info->bus_wr_reg.common_reg.irq_mask_0); - cam_io_w_mb(0xFFFF, + cam_io_w_mb(0xFFFFF, soc_info->reg_map[0].mem_base + hw_info->bus_wr_reg.common_reg.irq_mask_1); - cam_io_w_mb(0xFFFF, + cam_io_w_mb(0xFFFFF, soc_info->reg_map[0].mem_base + hw_info->bus_rd_reg.common_reg.irq_mask); break; @@ -952,6 +972,7 @@ int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args, } lrme_core->req_submit = frame_req; + mutex_unlock(&lrme_hw->hw_mutex); CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu", frame_req->req_id); @@ -1235,6 +1256,14 @@ int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type, break; } + case CAM_LRME_HW_CMD_DUMP_REGISTER: { + struct cam_lrme_core *lrme_core = + 
(struct cam_lrme_core *)lrme_hw->core_info; + lrme_core->dump_flag = *(bool *)cmd_args; + CAM_DBG(CAM_LRME, "dump_flag %d", lrme_core->dump_flag); + break; + } + default: break; } diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h index 05d82f03ce1c1bd1d4f8518748a23fc1b5cc30c1..c0786f53c19c92e97c5f4fcb0b1d407525a85527 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h +++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h @@ -137,6 +137,7 @@ struct cam_lrme_core { struct cam_lrme_frame_request *req_submit; struct cam_lrme_cdm_info *hw_cdm_info; uint32_t hw_idx; + bool dump_flag; }; /** diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h index d3d068725cfe792f0484927b49ed7342bf4c0445..4cd643f0413ae6d24c3ae06a9250bd5eb56b28fe 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h +++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h @@ -65,11 +65,13 @@ enum cam_lrme_cb_type { * @CAM_LRME_HW_CMD_prepare_hw_update : Prepare HW update * @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback * @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW + * @CAM_LRME_HW_CMD_DUMP_REGISTER : dump register values */ enum cam_lrme_hw_cmd_type { CAM_LRME_HW_CMD_PREPARE_HW_UPDATE, CAM_LRME_HW_CMD_REGISTER_CB, CAM_LRME_HW_CMD_SUBMIT, + CAM_LRME_HW_CMD_DUMP_REGISTER, }; /** diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c index 457480aaccdc0870e47f8d75c93e38518178b6cd..6e0093364390d756238df2076b72201b775a61d8 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c +++ 
b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c @@ -244,37 +244,42 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len) if (!tbl.bufq[idx].active) return -EPERM; - mutex_lock(&tbl.bufq[idx].q_lock); if (buf_handle != tbl.bufq[idx].buf_handle) { rc = -EINVAL; goto exit_func; } - dmabuf = tbl.bufq[idx].dma_buf; - if (!dmabuf) { - CAM_ERR(CAM_MEM, "Invalid DMA buffer pointer"); + if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)) { rc = -EINVAL; goto exit_func; } - if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) { - if (!tbl.bufq[idx].kmdvaddr) { - rc = cam_mem_util_map_cpu_va(dmabuf, - &kvaddr, &klen); - if (rc) - goto exit_func; - tbl.bufq[idx].kmdvaddr = kvaddr; + if (!tbl.bufq[idx].kmdvaddr) { + mutex_lock(&tbl.bufq[idx].q_lock); + dmabuf = tbl.bufq[idx].dma_buf; + if (!dmabuf) { + CAM_ERR(CAM_MEM, "Invalid DMA buffer pointer"); + rc = -EINVAL; + goto release_mutex; } - } else { - rc = -EINVAL; - goto exit_func; + + rc = cam_mem_util_map_cpu_va(dmabuf, + &kvaddr, &klen); + if (rc) + goto release_mutex; + + tbl.bufq[idx].kmdvaddr = kvaddr; + mutex_unlock(&tbl.bufq[idx].q_lock); } *vaddr_ptr = tbl.bufq[idx].kmdvaddr; *len = tbl.bufq[idx].len; -exit_func: + return rc; + +release_mutex: mutex_unlock(&tbl.bufq[idx].q_lock); +exit_func: return rc; } EXPORT_SYMBOL(cam_mem_get_cpu_buf); diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c index 3311f29fba1ee0e27043173b21a26ecce04648e1..d11e1c74e5e30c6c40ec5f8d42b1985dda7bcd80 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c @@ -1282,7 +1282,8 @@ static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link) rc = dev->ops->link_setup(&link_data); if (rc) CAM_ERR(CAM_CRM, - "Unlink failed dev_hdl %d", + "Unlink failed dev name %s hdl %x", + dev->dev_info.name, 
dev->dev_hdl); } dev->dev_hdl = 0; @@ -2347,8 +2348,8 @@ static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link) /* Destroy the link handle */ rc = cam_destroy_device_hdl(link->link_hdl); if (rc < 0) { - CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x", - rc, link->link_hdl); + CAM_ERR(CAM_CRM, "error destroying link hdl %x rc %d", + link->link_hdl, rc); } mutex_unlock(&link->lock); @@ -2549,8 +2550,7 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info) rc = __cam_req_mgr_unlink(link); /* Free curent link and put back into session's free pool of links */ - if (!rc) - __cam_req_mgr_unreserve_link(cam_session, link); + __cam_req_mgr_unreserve_link(cam_session, link); done: mutex_unlock(&g_crm_core_dev->crm_lock); diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c index 543e3329fc523ed68a9a5ec691f1b0ce2de64b30..0d21064afed768a47805310c9297bbc597883085 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c @@ -584,7 +584,6 @@ EXPORT_SYMBOL(cam_unregister_subdev); static int cam_req_mgr_remove(struct platform_device *pdev) { cam_req_mgr_core_device_deinit(); - cam_mem_mgr_deinit(); cam_req_mgr_util_deinit(); cam_media_device_cleanup(); cam_video_device_cleanup(); @@ -624,12 +623,6 @@ static int cam_req_mgr_probe(struct platform_device *pdev) goto req_mgr_util_fail; } - rc = cam_mem_mgr_init(); - if (rc) { - CAM_ERR(CAM_CRM, "mem mgr init failed"); - goto mem_mgr_init_fail; - } - rc = cam_req_mgr_core_device_init(); if (rc) { CAM_ERR(CAM_CRM, "core device setup failed"); @@ -654,8 +647,6 @@ static int cam_req_mgr_probe(struct platform_device *pdev) return rc; req_mgr_core_fail: - cam_mem_mgr_deinit(); -mem_mgr_init_fail: cam_req_mgr_util_deinit(); req_mgr_util_fail: mutex_destroy(&g_dev.dev_lock); diff --git 
a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c index b203948ae39efe914223ff05203c3b4792b508ad..80dc2711aa15243454b70446606866aebdc8602a 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c @@ -869,8 +869,8 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, struct cam_cci_ctrl *c_ctrl) { int32_t rc = 0; - uint32_t val = 0, i = 0; - unsigned long rem_jiffies; + uint32_t val = 0, i = 0, j = 0; + unsigned long rem_jiffies, flags; int32_t read_words = 0, exp_words = 0; int32_t index = 0, first_byte = 0, total_read_words = 0; enum cci_i2c_master_t master; @@ -989,11 +989,13 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, val = 1 << ((master * 2) + queue); cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR); + exp_words = ((read_cfg->num_byte / 4) + 1); + CAM_DBG(CAM_CCI, "waiting for threshold [exp_words %d]", exp_words); - while (exp_words != total_read_words) { + while (total_read_words != exp_words) { rem_jiffies = wait_for_completion_timeout( - &cci_dev->cci_master_info[master].reset_complete, + &cci_dev->cci_master_info[master].th_complete, CCI_TIMEOUT); if (!rem_jiffies) { rc = -ETIMEDOUT; @@ -1012,6 +1014,14 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, read_words = cam_io_r_mb(base + CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100); + if (read_words <= 0) { + CAM_DBG(CAM_CCI, "FIFO Buffer lvl is 0"); + continue; + } + + j++; + CAM_DBG(CAM_CCI, "Iteration: %u read_words %d", j, read_words); + total_read_words += read_words; while (read_words > 0) { val = cam_io_r_mb(base + @@ -1033,8 +1043,55 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, } read_words--; } + + CAM_DBG(CAM_CCI, "Iteraion:%u total_read_words %d", + j, total_read_words); + + spin_lock_irqsave(&cci_dev->lock_status, flags); + if 
(cci_dev->irq_status1) { + CAM_DBG(CAM_CCI, "clear irq_status1:%x", + cci_dev->irq_status1); + cam_io_w_mb(cci_dev->irq_status1, + base + CCI_IRQ_CLEAR_1_ADDR); + cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR); + cci_dev->irq_status1 = 0; + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + + if (total_read_words == exp_words) { + /* + * This wait is for RD_DONE irq, if RD_DONE is + * triggered we will call complete on both threshold + * & read done waits. As part of the threshold wait + * we will be draining the entire buffer out. This + * wait is to compensate for the complete invoked for + * RD_DONE exclusively. + */ + rem_jiffies = wait_for_completion_timeout( + &cci_dev->cci_master_info[master].reset_complete, + CCI_TIMEOUT); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + val = cam_io_r_mb(base + + CCI_I2C_M0_READ_BUF_LEVEL_ADDR + + master * 0x100); + CAM_ERR(CAM_CCI, + "Failed to receive RD_DONE irq rc = %d FIFO buf_lvl:0x%x", + rc, val); + #ifdef DUMP_CCI_REGISTERS + cam_cci_dump_registers(cci_dev, + master, queue); + #endif + cam_cci_flush_queue(cci_dev, master); + goto rel_mutex; + } + break; + } } + CAM_DBG(CAM_CCI, "Burst read successful words_read %d", + total_read_words); + rel_mutex: mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]); return rc; @@ -1166,7 +1223,8 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd, val = 1 << ((master * 2) + queue); cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR); - CAM_DBG(CAM_CCI, "wait_for_completion_timeout"); + CAM_DBG(CAM_CCI, + "waiting_for_rd_done [exp_words: %d]", exp_words); rc = wait_for_completion_timeout( &cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT); @@ -1221,7 +1279,6 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd, } rel_mutex: mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]); - return rc; } @@ -1400,23 +1457,34 @@ static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd, } read_bytes = read_cfg->num_byte; + + /* + * To avoid any conflicts 
due to back to back trigger of + * THRESHOLD irq's, we reinit the threshold wait before + * we load the burst read cmd. + */ + reinit_completion(&cci_dev->cci_master_info[master].th_complete); + + CAM_DBG(CAM_CCI, "Bytes to read %u", read_bytes); do { - if (read_bytes > CCI_I2C_MAX_BYTE_COUNT) + if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT; else read_cfg->num_byte = read_bytes; - if (read_cfg->num_byte > CCI_READ_MAX) + if (read_cfg->num_byte >= CCI_READ_MAX) { + cci_dev->is_burst_read = true; rc = cam_cci_burst_read(sd, c_ctrl); - else + } else { + cci_dev->is_burst_read = false; rc = cam_cci_read(sd, c_ctrl); - + } if (rc) { CAM_ERR(CAM_CCI, "failed to read rc:%d", rc); goto ERROR; } - if (read_bytes > CCI_I2C_MAX_BYTE_COUNT) { + if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) { read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT / read_cfg->data_type); read_cfg->data += CCI_I2C_MAX_BYTE_COUNT; @@ -1427,6 +1495,7 @@ static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd, } while (read_bytes); ERROR: + cci_dev->is_burst_read = false; return rc; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c index a763d3ffb39eee0b1a439bfe435b89a7797e9345..3e1c5e19ec248b33767b6f61af8d93da70867c77 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c @@ -67,15 +67,12 @@ irqreturn_t cam_cci_irq(int irq_num, void *data) &cci_dev->soc_info; void __iomem *base = soc_info->reg_map[0].mem_base; unsigned long flags; - bool burst_read_assert = false; + bool rd_done_th_assert = false; irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR); irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR); - cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR); - cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR); - cam_io_w_mb(0x1, base + 
CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR); - CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1); + if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) { if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) { cci_dev->cci_master_info[MASTER_0].reset_pending = @@ -94,18 +91,23 @@ irqreturn_t cam_cci_irq(int irq_num, void *data) if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) && (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) { cci_dev->cci_master_info[MASTER_0].status = 0; + rd_done_th_assert = true; + complete(&cci_dev->cci_master_info[MASTER_0].th_complete); complete(&cci_dev->cci_master_info[MASTER_0].reset_complete); - burst_read_assert = true; } if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) && - (!burst_read_assert)) { + (!rd_done_th_assert)) { cci_dev->cci_master_info[MASTER_0].status = 0; + rd_done_th_assert = true; + if (cci_dev->is_burst_read) + complete( + &cci_dev->cci_master_info[MASTER_0].th_complete); complete(&cci_dev->cci_master_info[MASTER_0].reset_complete); } if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) && - (!burst_read_assert)) { + (!rd_done_th_assert)) { cci_dev->cci_master_info[MASTER_0].status = 0; - complete(&cci_dev->cci_master_info[MASTER_0].reset_complete); + complete(&cci_dev->cci_master_info[MASTER_0].th_complete); } if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) { struct cam_cci_master_info *cci_master_info; @@ -144,18 +146,23 @@ irqreturn_t cam_cci_irq(int irq_num, void *data) if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) && (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) { cci_dev->cci_master_info[MASTER_1].status = 0; + rd_done_th_assert = true; + complete(&cci_dev->cci_master_info[MASTER_1].th_complete); complete(&cci_dev->cci_master_info[MASTER_1].reset_complete); - burst_read_assert = true; } if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) && - (!burst_read_assert)) { + (!rd_done_th_assert)) { cci_dev->cci_master_info[MASTER_1].status = 0; + 
rd_done_th_assert = true; + if (cci_dev->is_burst_read) + complete( + &cci_dev->cci_master_info[MASTER_1].th_complete); complete(&cci_dev->cci_master_info[MASTER_1].reset_complete); } if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) && - (!burst_read_assert)) { + (!rd_done_th_assert)) { cci_dev->cci_master_info[MASTER_1].status = 0; - complete(&cci_dev->cci_master_info[MASTER_1].reset_complete); + complete(&cci_dev->cci_master_info[MASTER_1].th_complete); } if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) { struct cam_cci_master_info *cci_master_info; @@ -191,6 +198,12 @@ irqreturn_t cam_cci_irq(int irq_num, void *data) &cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1], flags); } + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE) + CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_0"); + + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE) + CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_1"); + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) { cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE; cam_io_w_mb(CCI_M0_RESET_RMSK, @@ -213,6 +226,19 @@ irqreturn_t cam_cci_irq(int irq_num, void *data) base + CCI_HALT_REQ_ADDR); CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0); } + + if ((rd_done_th_assert) || (!cci_dev->is_burst_read)) { + cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR); + CAM_DBG(CAM_CCI, "clear irq_status0:%x irq_status1:%x", + irq_status0, irq_status1); + } else { + spin_lock_irqsave(&cci_dev->lock_status, flags); + cci_dev->irq_status1 |= irq_status1; + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + + cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR); + cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR); return IRQ_HANDLED; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h index 66345192df1a9fbd09deb5bcb4d930c1a0935657..eee2da11d2119db831e3321155a2bb2e7647617c 100644 --- 
a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h @@ -139,6 +139,7 @@ struct cam_cci_master_info { uint8_t reset_pending; struct mutex mutex; struct completion reset_complete; + struct completion th_complete; struct mutex mutex_q[NUM_QUEUES]; struct completion report_q[NUM_QUEUES]; atomic_t done_pending[NUM_QUEUES]; @@ -194,6 +195,11 @@ enum cam_cci_state_t { * @cci_wait_sync_cfg: CCI sync config * @cycles_per_us: Cycles per micro sec * @payload_size: CCI packet payload size + * @irq_status1: Store irq_status1 to be cleared after + * draining FIFO buffer for burst read + * @lock_status: to protect changes to irq_status1 + * @is_burst_read: Flag to determine if we are performing + * a burst read operation or not */ struct cci_device { struct v4l2_subdev subdev; @@ -218,6 +224,9 @@ struct cci_device { uint8_t payload_size; char device_name[20]; uint32_t cpas_handle; + uint32_t irq_status1; + spinlock_t lock_status; + bool is_burst_read; }; enum cam_cci_i2c_cmd_type { diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h index c21afc9558a028cab51fc5fa347c7bc53540580d..027a0501dcae57e5aa57c45b3f67c83de1a18925 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h @@ -56,15 +56,17 @@ #define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000 #define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000 #define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD 0x100000 +#define CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE 0x200000 #define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100 #define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10 #define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6 #define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000 #define 
CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1 #define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD 0x10000 +#define CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE 0x20000 #define CCI_I2C_M0_RD_THRESHOLD_ADDR 0x00000120 #define CCI_I2C_M1_RD_THRESHOLD_ADDR 0x00000220 -#define CCI_I2C_RD_THRESHOLD_VALUE 0x38 +#define CCI_I2C_RD_THRESHOLD_VALUE 0x30 #define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00 #define DEBUG_TOP_REG_START 0x0 diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c index 6099ec1542cdeb25bb7b4efeae1a91cfd5fc0a30..da714af3fde279002600800a8655e1c91b4fc505 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c @@ -199,6 +199,8 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev) mutex_init(&new_cci_dev->cci_master_info[i].mutex); init_completion( &new_cci_dev->cci_master_info[i].reset_complete); + init_completion( + &new_cci_dev->cci_master_info[i].th_complete); for (j = 0; j < NUM_QUEUES; j++) { mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]); @@ -208,6 +210,7 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev) &new_cci_dev->cci_master_info[i].lock_q[j]); } } + spin_lock_init(&new_cci_dev->lock_status); } static void cam_cci_init_default_clk_params(struct cci_device *cci_dev, diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c index b0b4db53e4940961570a975f91f3d123d4637969..aaf6ba45c56ef05e832494bbb906bd6d67ca9214 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c @@ -352,8 +352,10 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev) CAM_DBG(CAM_CSIPHY, 
"Do Nothing"); break; } - usleep_range(reg_array[lane_pos][i].delay*1000, - reg_array[lane_pos][i].delay*1000 + 1000); + if (reg_array[lane_pos][i].delay > 0) { + usleep_range(reg_array[lane_pos][i].delay*1000, + reg_array[lane_pos][i].delay*1000 + 10); + } } lane_mask >>= 1; lane_pos++; diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h index 055c5dfad7fca683cba6d1a048e58c0bcf29f24a..0ebaa4619deba91c0c683d26f0f019ac82d2c9c7 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h @@ -165,7 +165,7 @@ struct csiphy_reg_t {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0700, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, @@ -195,7 +195,7 @@ struct csiphy_reg_t {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, @@ -211,13 +211,13 @@ struct csiphy_reg_t {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0600, 0x80, 0x00, 
CSIPHY_DEFAULT_PARAMS}, {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0660, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, }, diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c index dbeb4af4820ed9d59478ec3a91920ed6793d9662..6d8820abb7d70449568919691d2d8f73f68ff597 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c @@ -261,7 +261,10 @@ static int cam_eeprom_i2c_driver_remove(struct i2c_client *client) for (i = 0; i < soc_info->num_clk; i++) devm_clk_put(soc_info->dev, soc_info->clk[i]); + mutex_destroy(&(e_ctrl->eeprom_mutex)); kfree(soc_private); + kfree(e_ctrl->io_master_info.cci_client); + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); kfree(e_ctrl); return 0; @@ -392,6 +395,8 @@ static int cam_eeprom_spi_driver_remove(struct spi_device *sdev) kfree(soc_private->power_info.gpio_num_info); kfree(soc_private); } + mutex_destroy(&(e_ctrl->eeprom_mutex)); + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); kfree(e_ctrl); return 0; @@ -487,8 +492,11 @@ static int cam_eeprom_platform_driver_remove(struct platform_device *pdev) for (i = 0; i < soc_info->num_clk; i++) devm_clk_put(soc_info->dev, soc_info->clk[i]); + mutex_destroy(&(e_ctrl->eeprom_mutex)); kfree(soc_info->soc_private); kfree(e_ctrl->io_master_info.cci_client); + platform_set_drvdata(pdev, NULL); + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); kfree(e_ctrl); return 0; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c 
b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c index ee58e7e17a6c995c817fbe6f39ae2da870d29c5f..9af1f51204854ee685c963c189a59421dada217e 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c @@ -371,12 +371,12 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, if (flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) { - if (fctrl->flash_state != - CAM_FLASH_STATE_CONFIG) { + if (fctrl->flash_state == + CAM_FLASH_STATE_START) { CAM_WARN(CAM_FLASH, - "Cannot apply Start Dev:Prev state: %d", + "Wrong state :Prev state: %d", fctrl->flash_state); - return rc; + return -EINVAL; } rc = cam_flash_prepare(fctrl, true); if (rc) { @@ -387,8 +387,27 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, rc = cam_flash_high(fctrl, flash_data); if (rc) CAM_ERR(CAM_FLASH, - "FLASH ON failed : %d", - rc); + "FLASH ON failed : %d", rc); + } + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIRELOW) { + if (fctrl->flash_state == + CAM_FLASH_STATE_START) { + CAM_WARN(CAM_FLASH, + "Wrong state :Prev state: %d", + fctrl->flash_state); + return -EINVAL; + } + rc = cam_flash_prepare(fctrl, true); + if (rc) { + CAM_ERR(CAM_FLASH, + "Enable Regulator Failed rc = %d", rc); + return rc; + } + rc = cam_flash_low(fctrl, flash_data); + if (rc) + CAM_ERR(CAM_FLASH, + "TORCH ON failed : %d", rc); } if (flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) { @@ -522,7 +541,6 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, goto apply_setting_err; } } else if (flash_data->opcode == CAM_PKT_NOP_OPCODE) { - flash_data->opcode = 0; CAM_DBG(CAM_FLASH, "NOP Packet"); } else { rc = -EINVAL; @@ -618,7 +636,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) CAM_FLASH_STATE_CONFIG; break; case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: - CAM_DBG(CAM_FLASH, "Widget Flash Operation"); + CAM_DBG(CAM_FLASH, "INIT 
Fire Operation"); flash_operation_info = (struct cam_flash_set_on_off *) cmd_buf; fctrl->nrt_info.cmn_attr.count = diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c index d9b5f6406058b0fca7bc54085b05ade6bd5936b8..f8be3de85708b47f7a8e51320d6c326c373fadf9 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c @@ -149,17 +149,15 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, goto release_mutex; } - rc = cam_flash_prepare(fctrl, true); - if (rc) { - CAM_ERR(CAM_FLASH, - "Enable Regulator Failed rc = %d", rc); - goto release_mutex; - } - rc = cam_flash_apply_setting(fctrl, 0); - if (rc) { - CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc); - goto release_mutex; + if (fctrl->is_regulator_enabled == false) { + rc = cam_flash_prepare(fctrl, true); + if (rc) { + CAM_ERR(CAM_FLASH, + "Enable Regulator Failed rc = %d", rc); + goto release_mutex; + } } + fctrl->flash_state = CAM_FLASH_STATE_START; break; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c index d44ab04054235091300e8e02f3083aa35cc2d3a4..a46e96f1f241a5fd1b282a44a393131471b5a428 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c @@ -662,6 +662,15 @@ void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl) o_ctrl->bridge_intf.session_hdl = -1; } + if (o_ctrl->i2c_mode_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_mode_data); + + if (o_ctrl->i2c_calib_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_calib_data); + + if (o_ctrl->i2c_init_data.is_settings_valid == 1) + 
delete_request(&o_ctrl->i2c_init_data); + kfree(power_info->power_setting); kfree(power_info->power_down_setting); power_info->power_setting = NULL; @@ -779,6 +788,16 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg) power_info->power_down_setting = NULL; power_info->power_down_setting_size = 0; power_info->power_setting_size = 0; + + if (o_ctrl->i2c_mode_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_mode_data); + + if (o_ctrl->i2c_calib_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_calib_data); + + if (o_ctrl->i2c_init_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_init_data); + break; case CAM_STOP_DEV: if (o_ctrl->cam_ois_state != CAM_OIS_START) { diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c index c5438c917bc0e5a8e9a14cfa8b1d8176346d3f55..4525bb5bce25448543ae5992f5f47fb3f850d21e 100644 --- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c +++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c @@ -18,6 +18,7 @@ #include #include "cam_sync_util.h" #include "cam_debug_util.h" +#include "cam_common_util.h" struct sync_device *sync_dev; @@ -221,6 +222,11 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status) return -EINVAL; } + if (!atomic_dec_and_test(&row->ref_cnt)) { + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return 0; + } + row->state = status; cam_sync_util_dispatch_signaled_cb(sync_obj, status); @@ -284,6 +290,12 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj) return -EINVAL; } + if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs) + != num_objs) { + CAM_ERR(CAM_SYNC, "The obj list has duplicate fence"); + return -EINVAL; + } + do { idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS); if (idx >= CAM_SYNC_MAX_OBJS) @@ -309,6 +321,46 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj) return 0; } +int 
cam_sync_get_obj_ref(int32_t sync_obj) +{ + struct sync_table_row *row = NULL; + + if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) + return -EINVAL; + + row = sync_dev->sync_table + sync_obj; + + spin_lock(&sync_dev->row_spinlocks[sync_obj]); + + if (row->state != CAM_SYNC_STATE_ACTIVE) { + spin_unlock(&sync_dev->row_spinlocks[sync_obj]); + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %d", + sync_obj); + return -EINVAL; + } + + atomic_inc(&row->ref_cnt); + spin_unlock(&sync_dev->row_spinlocks[sync_obj]); + CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj); + + return 0; +} + +int cam_sync_put_obj_ref(int32_t sync_obj) +{ + struct sync_table_row *row = NULL; + + if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) + return -EINVAL; + + row = sync_dev->sync_table + sync_obj; + atomic_dec(&row->ref_cnt); + CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj); + + return 0; +} + int cam_sync_destroy(int32_t sync_obj) { CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj); @@ -405,6 +457,8 @@ static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl) k_ioctl->size)) return -EFAULT; + /* need to get ref for UMD signaled fences */ + cam_sync_get_obj_ref(sync_signal.sync_obj); return cam_sync_signal(sync_signal.sync_obj, sync_signal.sync_state); } diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h index 8ce9b4e61b10e31c8161f7708cd9121594c8e6fd..c735d51fe46250a54af7b169a5a6d919d8c4b92e 100644 --- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h +++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h @@ -100,6 +100,29 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status); */ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj); +/** + * @brief: get ref count of sync obj + * + * This function will increment ref count for the sync object, and the ref + * count will be decremented when this sync object is signaled. 
+ * + * @param sync_obj: sync object + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_get_obj_ref(int32_t sync_obj); + +/** + * @brief: put ref count of sync obj + * + * This function will decrement ref count for the sync object. + * + * @param sync_obj: sync object + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_put_obj_ref(int32_t sync_obj); + /** * @brief: Destroys a sync object * diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h index 38dab42a56cc65a132bcca314f8cce850800491d..eb2fb34fc33c1980e321f50cd7ad2404b3c0a0c0 100644 --- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h +++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h @@ -139,6 +139,7 @@ struct sync_user_payload { * @signaled : Completion variable on which block calls will wait * @callback_list : Linked list of kernel callbacks registered * @user_payload_list : LInked list of user space payloads registered + * @ref_cnt : ref count of the number of usage of the fence. 
*/ struct sync_table_row { char name[CAM_SYNC_OBJ_NAME_LEN]; @@ -153,6 +154,7 @@ struct sync_table_row { struct completion signaled; struct list_head callback_list; struct list_head user_payload_list; + atomic_t ref_cnt; }; /** diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c index 49a9d2f39974d1b52a82a30db0559853a5aeac8b..0a059184311d21faf418f126193509d8eb05230a 100644 --- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c +++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c @@ -49,6 +49,7 @@ int cam_sync_init_row(struct sync_table_row *table, row->sync_id = idx; row->state = CAM_SYNC_STATE_ACTIVE; row->remaining = 0; + atomic_set(&row->ref_cnt, 0); init_completion(&row->signaled); INIT_LIST_HEAD(&row->callback_list); INIT_LIST_HEAD(&row->user_payload_list); @@ -175,6 +176,12 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx) idx); return -EINVAL; } + + if (row->state == CAM_SYNC_STATE_ACTIVE) + CAM_WARN(CAM_SYNC, + "Destroying an active sync object name:%s id:%i", + row->name, row->sync_id); + row->state = CAM_SYNC_STATE_INVALID; /* Object's child and parent objects will be added into this list */ @@ -217,6 +224,11 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx) continue; } + if (child_row->state == CAM_SYNC_STATE_ACTIVE) + CAM_WARN(CAM_SYNC, + "Warning: destroying active child sync obj = %d", + child_info->sync_id); + cam_sync_util_cleanup_parents_list(child_row, SYNC_LIST_CLEAN_ONE, idx); @@ -241,6 +253,11 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx) continue; } + if (parent_row->state == CAM_SYNC_STATE_ACTIVE) + CAM_WARN(CAM_SYNC, + "Warning: destroying active parent sync obj = %d", + parent_info->sync_id); + cam_sync_util_cleanup_children_list(parent_row, SYNC_LIST_CLEAN_ONE, idx); diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c 
b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c index 043545c0069801514efab096a3a2020ca0d72dc6..bba12cf106dfbe9c1eafc5abc06bbc9ccf5817cf 100644 --- a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c +++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c @@ -33,3 +33,25 @@ int cam_common_util_get_string_index(const char **strings, return -EINVAL; } + +uint32_t cam_common_util_remove_duplicate_arr(int32_t *arr, uint32_t num) +{ + int i, j; + uint32_t wr_idx = 1; + + if (!arr) { + CAM_ERR(CAM_UTIL, "Null input array"); + return 0; + } + + for (i = 1; i < num; i++) { + for (j = 0; j < wr_idx ; j++) { + if (arr[i] == arr[j]) + break; + } + if (j == wr_idx) + arr[wr_idx++] = arr[i]; + } + + return wr_idx; +} diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h index 337a26f4875afcb8a411eb4e43ac5541a6f90439..3e1281b625674de04c2cecc4a89574dda34603b7 100644 --- a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h +++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h @@ -32,4 +32,18 @@ int cam_common_util_get_string_index(const char **strings, uint32_t num_strings, char *matching_string, uint32_t *index); +/** + * cam_common_util_remove_duplicate_arr() + * + * @brief Move all the unique integers to the start of + * the array and return the number of unique integers + * + * @array: Pointer to the first integer of array + * @num: Number of elements in array + * + * @return: Number of unique integers in array + */ +uint32_t cam_common_util_remove_duplicate_arr(int32_t *array, + uint32_t num); + #endif /* _CAM_COMMON_UTIL_H_ */ diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h index 1ed7056cd09f651bc1ba22fa92404fb1a98b6021..9093517de1e49f87803a16bebeaf9194e8d88243 100644 --- a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h +++ 
b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h @@ -106,9 +106,22 @@ const char *cam_get_module_name(unsigned int module_id); #define CAM_INFO(__module, fmt, args...) \ pr_info("CAM_INFO: %s: %s: %d " fmt "\n", \ cam_get_module_name(__module), __func__, __LINE__, ##args) + +/* + * CAM_INFO_RATE_LIMIT + * @brief : This Macro will print info logs with ratelimit + * + * @__module : Respective module id which is been calling this Macro + * @fmt : Formatted string which needs to be print in log + * @args : Arguments which needs to be print in log + */ +#define CAM_INFO_RATE_LIMIT(__module, fmt, args...) \ + pr_err_ratelimited("CAM_INFO: %s: %s: %d " fmt "\n", \ + cam_get_module_name(__module), __func__, __LINE__, ##args) + /* * CAM_DBG - * @brief : This Macro will print debug logs when enabled using GROUP + * @brief : This Macro will print debug logs when enabled using GROUP * * @__module : Respective module id which is been calling this Macro * @fmt : Formatted string which needs to be print in log @@ -119,7 +132,7 @@ const char *cam_get_module_name(unsigned int module_id); /* * CAM_ERR_RATE_LIMIT - * @brief : This Macro will prevent error print logs with ratelimit + * @brief : This Macro will print error print logs with ratelimit */ #define CAM_ERR_RATE_LIMIT(__module, fmt, args...) 
\ pr_err_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n", \ diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index 63c4fc1c1493a842d6b321b01859be02b8cef417..83ed68654dec6149686f1b0fcff921a69f471775 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -166,6 +166,7 @@ struct npu_thermalctrl { struct npu_irq { char *name; int irq; + int irq_type; }; struct npu_device { @@ -239,6 +240,6 @@ int npu_set_uc_power_level(struct npu_device *npu_dev, uint32_t pwr_level); int fw_init(struct npu_device *npu_dev); -void fw_deinit(struct npu_device *npu_dev, bool fw_alive, bool ssr); +void fw_deinit(struct npu_device *npu_dev, bool ssr); #endif /* _NPU_COMMON_H */ diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c index df58639df2637ba016d3159744e094c5aec02643..89d71f6a3f2148bd0190fe7b47ca93aea4b73be6 100644 --- a/drivers/media/platform/msm/npu/npu_debugfs.c +++ b/drivers/media/platform/msm/npu/npu_debugfs.c @@ -374,7 +374,7 @@ static ssize_t npu_debug_ctrl_write(struct file *file, pr_info("error in fw_init\n"); } else if (strcmp(buf, "off") == 0) { pr_info("triggering fw_deinit\n"); - fw_deinit(npu_dev, true, false); + fw_deinit(npu_dev, false); } else if (strcmp(buf, "ssr") == 0) { pr_info("trigger error irq\n"); if (npu_enable_core_power(npu_dev)) diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index 9e68687fe6286b6336a63422708a2d67269a7d31..43891c7fe938993b91c7100f75748373a04c6e47 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -166,9 +166,9 @@ static struct npu_reg npu_saved_bw_registers[] = { }; static const struct npu_irq npu_irq_info[NPU_MAX_IRQ] = { - {"ipc_irq", 0}, - {"error_irq", 0}, - {"wdg_bite_irq", 0}, + {"ipc_irq", 0, IRQF_TRIGGER_HIGH}, + {"error_irq", 0, IRQF_TRIGGER_RISING | IRQF_ONESHOT}, + 
{"wdg_bite_irq", 0, IRQF_TRIGGER_RISING | IRQF_ONESHOT}, }; /* ------------------------------------------------------------------------- @@ -933,6 +933,7 @@ static int npu_load_network(struct npu_client *client, unsigned long arg) { struct msm_npu_load_network_ioctl req; + struct msm_npu_unload_network_ioctl unload_req; void __user *argp = (void __user *)arg; int ret = 0; @@ -948,21 +949,24 @@ static int npu_load_network(struct npu_client *client, ret = npu_host_load_network(client, &req); if (ret) { pr_err("network load failed: %d\n", ret); - return -EFAULT; + return ret; } ret = copy_to_user(argp, &req, sizeof(req)); if (ret) { pr_err("fail to copy to user\n"); - return -EFAULT; + ret = -EFAULT; + unload_req.network_hdl = req.network_hdl; + npu_host_unload_network(client, &unload_req); } - return 0; + return ret; } static int npu_load_network_v2(struct npu_client *client, unsigned long arg) { struct msm_npu_load_network_ioctl_v2 req; + struct msm_npu_unload_network_ioctl unload_req; void __user *argp = (void __user *)arg; struct msm_npu_patch_info_v2 *patch_info = NULL; int ret; @@ -985,23 +989,34 @@ static int npu_load_network_v2(struct npu_client *client, if (!patch_info) return -ENOMEM; - copy_from_user(patch_info, + ret = copy_from_user(patch_info, (void __user *)req.patch_info, req.patch_info_num * sizeof(*patch_info)); + if (ret) { + pr_err("fail to copy patch info\n"); + kfree(patch_info); + return -EFAULT; + } } pr_debug("network load with perf request %d\n", req.perf_mode); ret = npu_host_load_network_v2(client, &req, patch_info); + + kfree(patch_info); if (ret) { pr_err("network load failed: %d\n", ret); - } else { - ret = copy_to_user(argp, &req, sizeof(req)); - if (ret) - pr_err("fail to copy to user\n"); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("fail to copy to user\n"); + ret = -EFAULT; + unload_req.network_hdl = req.network_hdl; + npu_host_unload_network(client, &unload_req); } - kfree(patch_info); 
return ret; } @@ -1105,21 +1120,30 @@ static int npu_exec_network_v2(struct npu_client *client, if (!patch_buf_info) return -ENOMEM; - copy_from_user(patch_buf_info, + ret = copy_from_user(patch_buf_info, (void __user *)req.patch_buf_info, req.patch_buf_info_num * sizeof(*patch_buf_info)); + if (ret) { + pr_err("fail to copy patch buf info\n"); + kfree(patch_buf_info); + return -EFAULT; + } } ret = npu_host_exec_network_v2(client, &req, patch_buf_info); + + kfree(patch_buf_info); if (ret) { pr_err("npu_host_exec_network failed\n"); - } else { - ret = copy_to_user(argp, &req, sizeof(req)); - if (ret) - pr_err("fail to copy to user\n"); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("fail to copy to user\n"); + ret = -EFAULT; } - kfree(patch_buf_info); return ret; } @@ -1470,7 +1494,7 @@ static int npu_irq_init(struct npu_device *npu_dev) memcpy(npu_dev->irq, npu_irq_info, sizeof(npu_irq_info)); for (i = 0; i < NPU_MAX_IRQ; i++) { - irq_type = IRQF_TRIGGER_RISING | IRQF_ONESHOT; + irq_type = npu_irq_info[i].irq_type; npu_dev->irq[i].irq = platform_get_irq_byname( npu_dev->pdev, npu_dev->irq[i].name); if (npu_dev->irq[i].irq < 0) { diff --git a/drivers/media/platform/msm/npu/npu_firmware.h b/drivers/media/platform/msm/npu/npu_firmware.h index 949dad10003603b77147d300dc9026d984b3c753..3d63213bf13e7f06e1e4ba88b0c27524b8065ac5 100644 --- a/drivers/media/platform/msm/npu/npu_firmware.h +++ b/drivers/media/platform/msm/npu/npu_firmware.h @@ -31,8 +31,10 @@ #define REG_NPU_HOST_CTRL_VALUE NPU_GPR2 /* Simulates an interrupt for FW->HOST, used for pre-silicon */ #define REG_FW_TO_HOST_EVENT NPU_GPR3 +/* Read/Written by both host and dsp for sync between driver and dsp */ +#define REG_HOST_DSP_CTRL_STATUS NPU_GPR4 /* Data value for debug */ -#define REG_NPU_FW_DEBUG_DATA NPU_GPR4 +#define REG_NPU_FW_DEBUG_DATA NPU_GPR13 /* Started job count */ #define REG_FW_JOB_CNT_START NPU_GPR14 @@ -87,6 +89,30 @@ #define 
HOST_CTRL_STATUS_FW_PAUSE_VAL \ (1 << HOST_CTRL_STATUS_FW_PAUSE) + +/* NPU HOST DSP Control/Status Register */ +/* notification of power up */ +/* following bits are set by host and read by dsp */ +#define HOST_DSP_CTRL_STATUS_PWR_UP_BIT 0 +/* notification of power dwn */ +#define HOST_DSP_CTRL_STATUS_PWR_DWN_BIT 1 +/* following bits are set by dsp and read by host */ +/* notification of power up acknowlegement*/ +#define HOST_DSP_CTRL_STATUS_PWR_UP_ACK_BIT 4 +/* notification of power down acknowlegement*/ +#define HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_BIT 5 + + +/* 32 bit values of the bit fields above */ +#define HOST_DSP_CTRL_STATUS_PWR_UP_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_UP_BIT) +#define HOST_DSP_CTRL_STATUS_PWR_DWN_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_DWN_BIT) +#define HOST_DSP_CTRL_STATUS_PWR_UP_ACK_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_UP_ACK_BIT) +#define HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_BIT) + /* Queue table header definition */ struct hfi_queue_tbl_header { uint32_t qtbl_version; /* queue table version number */ diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h index 3e71c53187c056dfd32aec499b2ff211b2ce5bd9..1c61849a6cb89965c93748b93b631860d961ddd1 100644 --- a/drivers/media/platform/msm/npu/npu_hw.h +++ b/drivers/media/platform/msm/npu/npu_hw.h @@ -23,6 +23,8 @@ #define NPU_CACHE_ATTR_IDn(n) (0x00100800+0x4*(n)) #define NPU_MASTERn_IPC_IRQ_IN_CTRL(n) (0x00101008+0x1000*(n)) #define NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S 4 +#define NPU_MASTERn_IPC_IRQ_OUT_CTRL(n) (0x00101004+0x1000*(n)) +#define NPU_MASTER0_IPC_IRQ_OUT_CTRL__IRQ_TYPE_PULSE 4 #define NPU_GPR0 (0x00100100) #define NPU_MASTERn_ERROR_IRQ_STATUS(n) (0x00101010+0x1000*(n)) #define NPU_MASTERn_ERROR_IRQ_INCLUDE(n) (0x00101014+0x1000*(n)) @@ -40,6 +42,7 @@ #define NPU_GPR2 (0x00100108) #define NPU_GPR3 (0x0010010C) #define NPU_GPR4 (0x00100110) +#define NPU_GPR13 (0x00100134) #define 
NPU_GPR14 (0x00100138) #define NPU_GPR15 (0x0010013C) diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c index c233bd964161b06057866b7dc483ba2ef6343557..169b2e349322a3f5db2b080c4de495fbfb2bbdc0 100644 --- a/drivers/media/platform/msm/npu/npu_hw_access.c +++ b/drivers/media/platform/msm/npu/npu_hw_access.c @@ -147,6 +147,13 @@ int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev) return ret; } +int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev) +{ + npu_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_OUT_CTRL(1), 0x8); + + return 0; +} + /* ------------------------------------------------------------------------- * Functions - ION Memory * ------------------------------------------------------------------------- @@ -171,7 +178,7 @@ static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client pr_err("ion buf %x has been mapped\n"); ret_val = NULL; } else { - ret_val = kmalloc(sizeof(struct npu_ion_buf), GFP_KERNEL); + ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL); if (ret_val) { ret_val->fd = buf_hdl; ret_val->size = size; diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h index 96dc48af3e482a27b590e18c8d376f1d8b322667..c2fb3c081dfb6d1bc5dd480e1a39797af718c4d4 100644 --- a/drivers/media/platform/msm/npu/npu_hw_access.h +++ b/drivers/media/platform/msm/npu/npu_hw_access.h @@ -35,6 +35,7 @@ #define IPC_ADDR npu_ipc_addr() #define INTERRUPT_ACK(npu_dev, num) npu_interrupt_ack(npu_dev, num) #define INTERRUPT_RAISE_NPU(npu_dev) npu_interrupt_raise_m0(npu_dev) +#define INTERRUPT_RAISE_DSP(npu_dev) npu_interrupt_raise_dsp(npu_dev) /* ------------------------------------------------------------------------- * Data Structures @@ -67,6 +68,7 @@ bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr); void *npu_ipc_addr(void); void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num); int32_t npu_interrupt_raise_m0(struct 
npu_device *npu_dev); +int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev); struct workqueue_struct *npu_create_wq(struct npu_host_ctx *host_ctx, const char *name, wq_hdlr_fn hdlr, struct work_struct *irq_work); diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index 6315156a4b2b29d3d6e98160370974b54863af58..55a1315554fcd277d24746a80f664d9e7c486fbf 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -41,7 +41,8 @@ */ static void host_irq_wq(struct work_struct *work); static void turn_off_fw_logging(struct npu_device *npu_dev); -static int wait_for_fw_ready(struct npu_device *npu_dev, uint32_t status_bits); +static int wait_for_status_ready(struct npu_device *npu_dev, + uint32_t status_reg, uint32_t status_bits); static struct npu_network *alloc_network(struct npu_host_ctx *ctx, struct npu_client *client); static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx, @@ -59,6 +60,7 @@ static int npu_send_network_cmd(struct npu_device *npu_dev, static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx, void *cmd_ptr); static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt); +static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up); /* ------------------------------------------------------------------------- * Function Definitions - Init / Deinit @@ -132,7 +134,7 @@ int fw_init(struct npu_device *npu_dev) /* Keep reading ctrl status until NPU is ready */ pr_debug("waiting for status ready from fw\n"); - if (wait_for_fw_ready(npu_dev, + if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS, FW_CTRL_STATUS_MAIN_THREAD_READY_BIT)) { ret = -EPERM; goto wait_fw_ready_fail; @@ -151,6 +153,8 @@ int fw_init(struct npu_device *npu_dev) mutex_unlock(&host_ctx->lock); pr_debug("firmware init complete\n"); + npu_notify_dsp(npu_dev, true); + /* Set logging state */ if (!npu_hw_log_enabled()) { pr_debug("fw logging 
disabled\n"); @@ -173,7 +177,7 @@ int fw_init(struct npu_device *npu_dev) return ret; } -void fw_deinit(struct npu_device *npu_dev, bool fw_alive, bool ssr) +void fw_deinit(struct npu_device *npu_dev, bool ssr) { struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; struct ipc_cmd_shutdown_pkt cmd_shutdown_pkt; @@ -199,7 +203,7 @@ void fw_deinit(struct npu_device *npu_dev, bool fw_alive, bool ssr) npu_disable_irq(npu_dev); - if (fw_alive) { + if (!ssr) { /* Command header */ cmd_shutdown_pkt.header.cmd_type = NPU_IPC_CMD_SHUTDOWN; cmd_shutdown_pkt.header.size = @@ -217,7 +221,8 @@ void fw_deinit(struct npu_device *npu_dev, bool fw_alive, bool ssr) } else { /* Keep reading ctrl status until NPU shuts down */ pr_debug("waiting for shutdown status from fw\n"); - if (wait_for_fw_ready(npu_dev, + if (wait_for_status_ready(npu_dev, + REG_NPU_FW_CTRL_STATUS, FW_CTRL_STATUS_SHUTDOWN_DONE_VAL)) { pr_err("wait for fw shutdown timedout\n"); ret = -ETIMEDOUT; @@ -225,6 +230,8 @@ void fw_deinit(struct npu_device *npu_dev, bool fw_alive, bool ssr) } } + npu_notify_dsp(npu_dev, false); + npu_disable_post_pil_clocks(npu_dev); npu_disable_sys_cache(npu_dev); subsystem_put_local(host_ctx->subsystem_handle); @@ -299,19 +306,16 @@ static int host_error_hdlr(struct npu_device *npu_dev) { struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; struct npu_network *network = NULL; - bool fw_alive = true; struct npu_kevent kevt; int i; if ((host_ctx->wdg_irq_sts == 0) && (host_ctx->err_irq_sts == 0)) return 0; - if (host_ctx->wdg_irq_sts) { + if (host_ctx->wdg_irq_sts) pr_info("watchdog irq triggered\n"); - fw_alive = false; - } - fw_deinit(npu_dev, fw_alive, true); + fw_deinit(npu_dev, true); host_ctx->wdg_irq_sts = 0; host_ctx->err_irq_sts = 0; @@ -375,7 +379,8 @@ static void turn_off_fw_logging(struct npu_device *npu_dev) pr_err("npu_host_ipc_send_cmd failed\n"); } -static int wait_for_fw_ready(struct npu_device *npu_dev, uint32_t status_bits) +static int wait_for_status_ready(struct 
npu_device *npu_dev, + uint32_t status_reg, uint32_t status_bits) { uint32_t ctrl_sts = 0; uint32_t wait_cnt = 0, max_wait_ms; @@ -384,9 +389,9 @@ static int wait_for_fw_ready(struct npu_device *npu_dev, uint32_t status_bits) max_wait_ms = (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? NW_DEBUG_TIMEOUT_MS : NPU_FW_TIMEOUT_MS; - /* keep reading ctrl status until NPU is ready */ + /* keep reading status register until bits are set */ while ((ctrl_sts & status_bits) != status_bits) { - ctrl_sts = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS); + ctrl_sts = REGR(npu_dev, status_reg); msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS); wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS; if (wait_cnt >= max_wait_ms) { @@ -395,10 +400,38 @@ static int wait_for_fw_ready(struct npu_device *npu_dev, uint32_t status_bits) return -EPERM; } } - pr_debug("status %x ready from fw received\n", status_bits); + pr_debug("status %x[reg %x] ready received\n", status_bits, status_reg); return 0; } +static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up) +{ + uint32_t ack_val, notify_val; + int ret = 0; + + if (pwr_up) { + notify_val = HOST_DSP_CTRL_STATUS_PWR_UP_VAL; + ack_val = HOST_DSP_CTRL_STATUS_PWR_UP_ACK_VAL; + } else { + notify_val = HOST_DSP_CTRL_STATUS_PWR_DWN_VAL; + ack_val = HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_VAL; + } + + REGW(npu_dev, REG_HOST_DSP_CTRL_STATUS, + notify_val); + /* Read back to flush register for dsp to read */ + REGR(npu_dev, REG_HOST_DSP_CTRL_STATUS); + + INTERRUPT_RAISE_DSP(npu_dev); + + ret = wait_for_status_ready(npu_dev, REG_HOST_DSP_CTRL_STATUS, + ack_val); + if (ret) + pr_warn("No response from dsp\n"); + + return ret; +} + /* ------------------------------------------------------------------------- * Function Definitions - Network Management * ------------------------------------------------------------------------- @@ -539,7 +572,9 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) exe_rsp_pkt->network_hdl); break; } + network->cmd_pending 
= false; + network->cmd_ret_status = exe_rsp_pkt->header.status; if (!network->cmd_async) { complete(&network->cmd_done); @@ -586,6 +621,8 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) network->stats_buf_size = stats_size; network->cmd_pending = false; + network->cmd_ret_status = exe_rsp_pkt->header.status; + if (network->cmd_async) { pr_debug("async cmd, queue event\n"); kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE; @@ -626,6 +663,8 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) } network->network_hdl = load_rsp_pkt->network_hdl; network->cmd_pending = false; + network->cmd_ret_status = load_rsp_pkt->header.status; + complete(&network->cmd_done); break; } @@ -647,6 +686,8 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) } network->cmd_pending = false; + network->cmd_ret_status = unload_rsp_pkt->header.status; + complete(&network->cmd_done); break; } @@ -763,10 +804,12 @@ static int npu_send_network_cmd(struct npu_device *npu_dev, ((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type, network->id); network->cmd_async = async; + network->cmd_ret_status = 0; + network->cmd_pending = true; ret = npu_host_ipc_send_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, cmd_ptr); - if (!ret) - network->cmd_pending = true; + if (ret) + network->cmd_pending = false; } mutex_unlock(&host_ctx->lock); @@ -927,6 +970,10 @@ int32_t npu_host_load_network(struct npu_client *client, goto error_free_network; } + ret = network->cmd_ret_status; + if (ret) + goto error_free_network; + load_ioctl->network_hdl = network->network_hdl; return ret; @@ -934,7 +981,7 @@ int32_t npu_host_load_network(struct npu_client *client, error_free_network: free_network(host_ctx, network->id); err_deinit_fw: - fw_deinit(npu_dev, true, false); + fw_deinit(npu_dev, false); return ret; } @@ -1030,6 +1077,10 @@ int32_t npu_host_load_network_v2(struct npu_client *client, goto error_free_network; } + ret = network->cmd_ret_status; + if (ret) + goto 
error_free_network; + load_ioctl->network_hdl = network->network_hdl; return ret; @@ -1038,7 +1089,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client, kfree(load_packet); free_network(host_ctx, network->id); err_deinit_fw: - fw_deinit(npu_dev, true, false); + fw_deinit(npu_dev, false); return ret; } @@ -1092,7 +1143,7 @@ int32_t npu_host_unload_network(struct npu_client *client, * handle is unloaded on the firmware side */ free_network(host_ctx, network->id); - fw_deinit(npu_dev, true, false); + fw_deinit(npu_dev, false); return ret; } @@ -1164,10 +1215,16 @@ int32_t npu_host_exec_network(struct npu_client *client, pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE time out\n"); /* dump debug stats */ npu_dump_debug_timeout_stats(npu_dev); + network->cmd_pending = false; + + /* treat execution timed out as ssr */ + fw_deinit(npu_dev, true); ret = -ETIMEDOUT; } else if (network->fw_error) { ret = -EIO; pr_err("execute cmd returns with error\n"); + } else { + ret = network->cmd_ret_status; } return ret; @@ -1250,16 +1307,24 @@ int32_t npu_host_exec_network_v2(struct npu_client *client, pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n"); /* dump debug stats */ npu_dump_debug_timeout_stats(npu_dev); + network->cmd_pending = false; + /* treat execution timed out as ssr */ + fw_deinit(npu_dev, true); ret = -ETIMEDOUT; } else if (network->fw_error) { ret = -EIO; pr_err("execute cmd returns with error\n"); } else { - exec_ioctl->stats_buf_size = network->stats_buf_size; - if (copy_to_user((void __user *)exec_ioctl->stats_buf_addr, - network->stats_buf, exec_ioctl->stats_buf_size)) { - pr_err("copy stats to user failed\n"); - exec_ioctl->stats_buf_size = 0; + ret = network->cmd_ret_status; + if (!ret) { + exec_ioctl->stats_buf_size = network->stats_buf_size; + if (copy_to_user( + (void __user *)exec_ioctl->stats_buf_addr, + network->stats_buf, + exec_ioctl->stats_buf_size)) { + pr_err("copy stats to user failed\n"); + exec_ioctl->stats_buf_size = 0; + } } } 
@@ -1297,7 +1362,7 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev) ret = -ETIMEDOUT; } - fw_deinit(npu_dev, true, false); + fw_deinit(npu_dev, false); return ret; } diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index 0a49893d69dcb5922c3bdd6b7d65236464923fca..20116957e8bf6775d809f1a4ea7fd6c96a63a72a 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -57,6 +57,7 @@ struct npu_network { bool fw_error; bool cmd_pending; bool cmd_async; + int cmd_ret_status; struct completion cmd_done; struct npu_client *client; }; diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c index 611c52cfe9d0d7c17dc8d826919ded5c39339acd..3f63c7cbfcd4759de924b7e49f6d6fd338625216 100644 --- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c @@ -352,13 +352,14 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, */ /* Decoder parameters */ int width, height, lcu_size, fps, dpb_bpp; - bool unified_dpb_opb, dpb_compression_enabled, + bool unified_dpb_opb, dpb_compression_enabled = true, opb_compression_enabled = false, llc_ref_read_l2_cache_enabled = false, llc_top_line_buf_enabled = false; fp_t dpb_read_compression_factor, dpb_opb_scaling_ratio, dpb_write_compression_factor, opb_write_compression_factor, qsmmu_bw_overhead_factor; + bool is_h264_category = true; /* Derived parameters */ int lcu_per_frame, collocated_bytes_per_lcu, tnbr_per_lcu; @@ -423,18 +424,19 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, motion_vector_complexity = FP(integer_part, frac_part, 100); - dpb_write_compression_factor = !dpb_compression_enabled ? FP_ONE : - __compression_ratio(__lut(width, height, fps), dpb_bpp); - - dpb_write_compression_factor = d->use_dpb_read ? 
- dpb_read_compression_factor : - dpb_write_compression_factor; + dpb_write_compression_factor = dpb_read_compression_factor; opb_write_compression_factor = opb_compression_enabled ? dpb_write_compression_factor : FP_ONE; + if (d->codec == HAL_VIDEO_CODEC_HEVC || + d->codec == HAL_VIDEO_CODEC_VP9) { + /* H264, VP8, MPEG2 use the same settings */ + /* HEVC, VP9 use the same setting */ + is_h264_category = false; + } if (d->use_sys_cache) { llc_ref_read_l2_cache_enabled = true; - if (d->codec == HAL_VIDEO_CODEC_H264) + if (is_h264_category) llc_top_line_buf_enabled = true; } @@ -464,16 +466,15 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, ddr.vsp_write = fp_div(fp_mult(FP_INT(bitrate), vsp_write_factor), FP_INT(8)); - ddr.collocated_read = FP_INT(lcu_per_frame * - collocated_bytes_per_lcu * fps / bps(1)); + ddr.collocated_read = fp_div(FP_INT(lcu_per_frame * + collocated_bytes_per_lcu * fps), FP_INT(bps(1))); ddr.collocated_write = ddr.collocated_read; - y_bw_no_ubwc_8bpp = fp_div(fp_div(fp_mult( - FP_INT((int)(width * height)), FP_INT((int)(256 * fps))), - FP_INT(32 * 8)), FP_INT(1000 * 1000)); - y_bw_no_ubwc_10bpp = fp_div(fp_div(fp_mult( - FP_INT((int)(width * height)), FP_INT((int)(256 * fps))), - FP_INT(48 * 4)), FP_INT(1000 * 1000)); + y_bw_no_ubwc_8bpp = fp_div(fp_mult( + FP_INT((int)(width * height)), FP_INT((int)fps)), + FP_INT(1000 * 1000)); + y_bw_no_ubwc_10bpp = fp_div(fp_mult(y_bw_no_ubwc_8bpp, FP_INT(256)), + FP_INT(192)); ddr.dpb_read = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; ddr.dpb_read = fp_div(fp_mult(ddr.dpb_read, @@ -488,16 +489,15 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, dpb_total = ddr.dpb_read + ddr.dpb_write; if (llc_ref_read_l2_cache_enabled) { - ddr.dpb_read = fp_div(ddr.dpb_read, - d->codec == HAL_VIDEO_CODEC_H264 ? FP(1, 15, 100) : - FP(1, 30, 100)); - llc.dpb_read = dpb_total - ddr.dpb_read; + ddr.dpb_read = fp_div(ddr.dpb_read, is_h264_category ? 
+ FP(1, 15, 100) : FP(1, 30, 100)); + llc.dpb_read = dpb_total - ddr.dpb_write - ddr.dpb_read; } ddr.opb_read = FP_ZERO; ddr.opb_write = unified_dpb_opb ? FP_ZERO : (dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp); - ddr.opb_write = fp_div(ddr.opb_write, + ddr.opb_write = fp_div(fp_mult(dpb_factor, ddr.opb_write), fp_mult(dpb_opb_scaling_ratio, opb_write_compression_factor)); ddr.line_buffer_read = FP_INT(tnbr_per_lcu * @@ -518,7 +518,8 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, qsmmu_bw_overhead_factor = FP(1, 3, 100); ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor); - llc.total = llc.dpb_read + llc.line_buffer_read + llc.line_buffer_write; + llc.total = llc.dpb_read + llc.line_buffer_read + + llc.line_buffer_write + ddr.total; /* Dump all the variables for easier debugging */ if (debug) { @@ -606,8 +607,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, /* Encoder Parameters */ int width, height, fps, lcu_size, bitrate, lcu_per_frame, collocated_bytes_per_lcu, tnbr_per_lcu, dpb_bpp, - original_color_format, dpb_ubwc_tile_width, - dpb_ubwc_tile_height, vertical_tile_width; + original_color_format, vertical_tile_width; bool work_mode_1, original_compression_enabled, low_power, rotation, cropping_or_scaling, b_frames_enabled = false, @@ -617,6 +617,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, fp_t bins_to_bit_factor, dpb_compression_factor, original_compression_factor, + original_compression_factor_y, y_bw_no_ubwc_8bpp, y_bw_no_ubwc_10bpp, input_compression_factor, ref_y_read_bw_factor, ref_cbcr_read_bw_factor, @@ -653,19 +654,18 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, fps = d->fps; width = max(d->input_width, BASELINE_DIMENSIONS.width); height = max(d->input_height, BASELINE_DIMENSIONS.height); - bitrate = __lut(width, height, fps)->bitrate; + bitrate = d->bitrate > 0 ? 
d->bitrate / 1000000 : + __lut(width, height, fps)->bitrate; lcu_size = d->lcu_size; lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * DIV_ROUND_UP(height, lcu_size); - tnbr_per_lcu = lcu_size == 16 ? 128 : - lcu_size == 32 ? 64 : 128; + tnbr_per_lcu = 16; - y_bw_no_ubwc_8bpp = fp_div(fp_div(fp_mult( - FP_INT((int)(width * height)), FP_INT((int)(256 * fps))), - FP_INT(32 * 8)), FP_INT(1000 * 1000)); - y_bw_no_ubwc_10bpp = fp_div(fp_div(fp_mult( - FP_INT((int)(width * height)), FP_INT((int)(256 * fps))), - FP_INT(48 * 4)), FP_INT(1000 * 1000)); + y_bw_no_ubwc_8bpp = fp_div(fp_mult( + FP_INT((int)(width * height)), FP_INT(fps)), + FP_INT(1000 * 1000)); + y_bw_no_ubwc_10bpp = fp_div(fp_mult(y_bw_no_ubwc_8bpp, + FP_INT(256)), FP_INT(192)); b_frames_enabled = d->b_frames_enabled; original_color_format = d->num_formats >= 1 ? @@ -679,8 +679,6 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, low_power = d->power_mode == VIDC_POWER_LOW; bins_to_bit_factor = work_mode_1 ? FP_INT(0) : FP_INT(4); - dpb_ubwc_tile_width = dpb_bpp == 8 ? 32 : 48; - dpb_ubwc_tile_height = dpb_bpp == 8 ? 8 : 4; if (d->use_sys_cache) { llc_ref_chroma_cache_enabled = true; @@ -709,13 +707,24 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, input_compression_factor = FP(integer_part, frac_part, 100); - original_compression_factor = - original_compression_enabled ? d->use_dpb_read ? - dpb_compression_factor : input_compression_factor : - FP_ONE; + original_compression_factor = original_compression_factor_y = + !original_compression_enabled ? 
FP_ONE : + __compression_ratio(__lut(width, height, fps), dpb_bpp); + /* use input cr if it is valid (not 1), otherwise use lut */ + if (original_compression_enabled && + input_compression_factor != FP_ONE) { + original_compression_factor = input_compression_factor; + /* Luma usually has lower compression factor than Chroma, + * input cf is overall cf, add 1.08 factor for Luma cf + */ + original_compression_factor_y = + input_compression_factor > FP(1, 8, 100) ? + fp_div(input_compression_factor, FP(1, 8, 100)) : + input_compression_factor; + } mese_read_factor = fp_div(FP_INT((width * height * fps)/4), - original_compression_factor); + original_compression_factor_y); mese_read_factor = fp_div(fp_mult(mese_read_factor, FP(2, 53, 100)), FP_INT(1000 * 1000)); @@ -726,8 +735,8 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, collocated_bytes_per_lcu = lcu_size == 16 ? 16 : lcu_size == 32 ? 64 : 256; - ddr.collocated_read = FP_INT(lcu_per_frame * - collocated_bytes_per_lcu * fps / bps(1)); + ddr.collocated_read = fp_div(FP_INT(lcu_per_frame * + collocated_bytes_per_lcu * fps), FP_INT(bps(1))); ddr.collocated_write = ddr.collocated_read; @@ -765,7 +774,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, ddr.orig_read = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; ddr.orig_read = fp_div(fp_mult(ddr.orig_read, FP(1, 50, 100)), - input_compression_factor); + original_compression_factor); ddr.line_buffer_read = FP_INT(tnbr_per_lcu * lcu_per_frame * fps / bps(1)); @@ -777,13 +786,12 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, } ddr.mese_read = dpb_bpp == 8 ? 
y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; - ddr.mese_read = fp_div(ddr.mese_read, - fp_mult(FP(1, 37, 100), original_compression_factor)) + - mese_read_factor; + ddr.mese_read = fp_div(fp_mult(ddr.mese_read, FP(1, 37, 100)), + original_compression_factor_y) + mese_read_factor; ddr.mese_write = FP_INT((width * height)/512) + fp_div(FP_INT((width * height)/4), - original_compression_factor) + + original_compression_factor_y) + FP_INT((width * height)/128); ddr.mese_write = fp_div(fp_mult(ddr.mese_write, FP_INT(fps)), FP_INT(1000 * 1000)); @@ -796,10 +804,9 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, ddr.line_buffer_read + ddr.line_buffer_write + ddr.mese_read + ddr.mese_write; - llc.total = llc.ref_read_crcb + llc.line_buffer; - qsmmu_bw_overhead_factor = FP(1, 3, 100); ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor); + llc.total = llc.ref_read_crcb + llc.line_buffer + ddr.total; if (debug) { struct dump dump[] = { @@ -811,6 +818,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, {"cropping or scaling", "%d", cropping_or_scaling}, {"low power mode", "%d", low_power}, {"work Mode", "%d", work_mode_1}, + {"B frame enabled", "%d", b_frames_enabled}, {"original frame format", "%#x", original_color_format}, {"original compression enabled", "%d", original_compression_enabled}, @@ -831,6 +839,8 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, {"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor}, {"original compression factor", DUMP_FP_FMT, original_compression_factor}, + {"original compression factor y", DUMP_FP_FMT, + original_compression_factor_y}, {"mese read factor", DUMP_FP_FMT, mese_read_factor}, {"qsmmu_bw_overhead_factor", diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index f1bfeb316d944d3e5a599e71984637204fa06ac7..a3387ed4d190f7235ac15daeef30fde41a530fc6 100644 --- 
a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -54,6 +54,7 @@ static int color_format[] = { HFI_COLOR_FORMAT_YUV420_TP10_UBWC, /*P010 10bit format*/ [ilog2(HAL_COLOR_FORMAT_P010)] = HFI_COLOR_FORMAT_P010, + [ilog2(HAL_COLOR_FORMAT_NV12_512)] = HFI_COLOR_FORMAT_NV12, }; static int nal_type[] = { diff --git a/drivers/media/platform/msm/vidc/msm_cvp.c b/drivers/media/platform/msm/vidc/msm_cvp.c index d1705b0c1af43a128fd311786420a2b39e09d29a..768efeb26471663629f239346f8b759fc9a28cfe 100644 --- a/drivers/media/platform/msm/vidc/msm_cvp.c +++ b/drivers/media/platform/msm/vidc/msm_cvp.c @@ -304,8 +304,9 @@ static int msm_cvp_request_power(struct msm_vidc_inst *inst, inst->clk_data.min_freq = max(power->clock_cycles_a, power->clock_cycles_b); - inst->clk_data.ddr_bw = power->ddr_bw; - inst->clk_data.sys_cache_bw = power->sys_cache_bw; + /* convert client provided bps into kbps as expected by driver */ + inst->clk_data.ddr_bw = power->ddr_bw / 1000; + inst->clk_data.sys_cache_bw = power->sys_cache_bw / 1000; rc = msm_cvp_scale_clocks_and_bus(inst); if (rc) { dprintk(VIDC_ERR, diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index 376f204815cc27393b3b0b18b751ef9b66c47fe6..e25689f79f360418f3d9d52dcee525de83d22e5c 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -496,7 +496,34 @@ static const struct of_device_id msm_vidc_dt_match[] = { {.compatible = "qcom,msm-vidc,mem-cdsp"}, {} }; +static int msm_vidc_register_video_device(enum session_type sess_type, + int nr, struct msm_vidc_core *core, struct device *dev) +{ + int rc = 0; + core->vdev[sess_type].vdev.release = + msm_vidc_release_video_device; + core->vdev[sess_type].vdev.fops = &msm_v4l2_vidc_fops; + core->vdev[sess_type].vdev.ioctl_ops = &msm_v4l2_ioctl_ops; + core->vdev[sess_type].vdev.vfl_dir = VFL_DIR_M2M; + 
core->vdev[sess_type].type = sess_type; + core->vdev[sess_type].vdev.v4l2_dev = &core->v4l2_dev; + rc = video_register_device(&core->vdev[sess_type].vdev, + VFL_TYPE_GRABBER, nr); + if (rc) { + dprintk(VIDC_ERR, "Failed to register the video device\n"); + return rc; + } + video_set_drvdata(&core->vdev[sess_type].vdev, core); + dev = &core->vdev[sess_type].vdev.dev; + rc = device_create_file(dev, &dev_attr_link_name); + if (rc) { + dprintk(VIDC_ERR, "Failed to create video device file\n"); + video_unregister_device(&core->vdev[sess_type].vdev); + return rc; + } + return 0; +} static int msm_vidc_probe_vidc_device(struct platform_device *pdev) { int rc = 0; @@ -536,75 +563,29 @@ static int msm_vidc_probe_vidc_device(struct platform_device *pdev) } /* setup the decoder device */ - core->vdev[MSM_VIDC_DECODER].vdev.release = - msm_vidc_release_video_device; - core->vdev[MSM_VIDC_DECODER].vdev.fops = &msm_v4l2_vidc_fops; - core->vdev[MSM_VIDC_DECODER].vdev.ioctl_ops = &msm_v4l2_ioctl_ops; - core->vdev[MSM_VIDC_DECODER].vdev.vfl_dir = VFL_DIR_M2M; - core->vdev[MSM_VIDC_DECODER].type = MSM_VIDC_DECODER; - core->vdev[MSM_VIDC_DECODER].vdev.v4l2_dev = &core->v4l2_dev; - rc = video_register_device(&core->vdev[MSM_VIDC_DECODER].vdev, - VFL_TYPE_GRABBER, nr); + rc = msm_vidc_register_video_device(MSM_VIDC_DECODER, + nr, core, dev); if (rc) { - dprintk(VIDC_ERR, "Failed to register video decoder device"); - goto err_dec_register; - } - - video_set_drvdata(&core->vdev[MSM_VIDC_DECODER].vdev, core); - dev = &core->vdev[MSM_VIDC_DECODER].vdev.dev; - rc = device_create_file(dev, &dev_attr_link_name); - if (rc) { - dprintk(VIDC_ERR, - "Failed to create link name sysfs for decoder"); - goto err_dec_attr_link_name; + dprintk(VIDC_ERR, "Failed to register video decoder\n"); + goto err_dec; } /* setup the encoder device */ - core->vdev[MSM_VIDC_ENCODER].vdev.release = - msm_vidc_release_video_device; - core->vdev[MSM_VIDC_ENCODER].vdev.fops = &msm_v4l2_vidc_fops; - 
core->vdev[MSM_VIDC_ENCODER].vdev.ioctl_ops = &msm_v4l2_ioctl_ops; - core->vdev[MSM_VIDC_ENCODER].vdev.vfl_dir = VFL_DIR_M2M; - core->vdev[MSM_VIDC_ENCODER].type = MSM_VIDC_ENCODER; - core->vdev[MSM_VIDC_ENCODER].vdev.v4l2_dev = &core->v4l2_dev; - rc = video_register_device(&core->vdev[MSM_VIDC_ENCODER].vdev, - VFL_TYPE_GRABBER, nr + 1); + rc = msm_vidc_register_video_device(MSM_VIDC_ENCODER, + nr + 1, core, dev); if (rc) { - dprintk(VIDC_ERR, "Failed to register video encoder device"); - goto err_enc_register; - } - - video_set_drvdata(&core->vdev[MSM_VIDC_ENCODER].vdev, core); - dev = &core->vdev[MSM_VIDC_ENCODER].vdev.dev; - rc = device_create_file(dev, &dev_attr_link_name); - if (rc) { - dprintk(VIDC_ERR, - "Failed to create link name sysfs for encoder"); - goto err_enc_attr_link_name; + dprintk(VIDC_ERR, "Failed to register video encoder\n"); + goto err_enc; } /* setup the cvp device */ - core->vdev[MSM_VIDC_CVP].vdev.release = - msm_vidc_release_video_device; - core->vdev[MSM_VIDC_CVP].vdev.fops = &msm_v4l2_vidc_fops; - core->vdev[MSM_VIDC_CVP].vdev.ioctl_ops = &msm_v4l2_ioctl_ops; - core->vdev[MSM_VIDC_CVP].vdev.vfl_dir = VFL_DIR_M2M; - core->vdev[MSM_VIDC_CVP].type = MSM_VIDC_CVP; - core->vdev[MSM_VIDC_CVP].vdev.v4l2_dev = &core->v4l2_dev; - rc = video_register_device(&core->vdev[MSM_VIDC_CVP].vdev, - VFL_TYPE_GRABBER, nr + 2); - if (rc) { - dprintk(VIDC_ERR, "Failed to register video cvp device"); - goto err_cvp_register; - } - - video_set_drvdata(&core->vdev[MSM_VIDC_CVP].vdev, core); - dev = &core->vdev[MSM_VIDC_CVP].vdev.dev; - rc = device_create_file(dev, &dev_attr_link_name); - if (rc) { - dprintk(VIDC_ERR, - "Failed to create link name sysfs for cvp"); - goto err_cvp_attr_link_name; + if (core->resources.domain_cvp) { + rc = msm_vidc_register_video_device(MSM_VIDC_CVP, + nr + 2, core, dev); + if (rc) { + dprintk(VIDC_ERR, "Failed to register video CVP\n"); + goto err_cvp; + } } /* finish setting up the 'core' */ @@ -661,21 +642,20 @@ static int 
msm_vidc_probe_vidc_device(struct platform_device *pdev) err_fail_sub_device_probe: vidc_hfi_deinitialize(core->hfi_type, core->device); err_cores_exceeded: - device_remove_file(&core->vdev[MSM_VIDC_CVP].vdev.dev, + if (core->resources.domain_cvp) { + device_remove_file(&core->vdev[MSM_VIDC_CVP].vdev.dev, &dev_attr_link_name); -err_cvp_attr_link_name: - video_unregister_device(&core->vdev[MSM_VIDC_CVP].vdev); -err_cvp_register: + video_unregister_device(&core->vdev[MSM_VIDC_CVP].vdev); + } +err_cvp: device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev, &dev_attr_link_name); -err_enc_attr_link_name: video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev); -err_enc_register: +err_enc: device_remove_file(&core->vdev[MSM_VIDC_DECODER].vdev.dev, &dev_attr_link_name); -err_dec_attr_link_name: video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev); -err_dec_register: +err_dec: v4l2_device_unregister(&core->v4l2_dev); err_v4l2_register: sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group); @@ -745,9 +725,11 @@ static int msm_vidc_remove(struct platform_device *pdev) venus_boot_deinit(); vidc_hfi_deinitialize(core->hfi_type, core->device); - device_remove_file(&core->vdev[MSM_VIDC_CVP].vdev.dev, + if (core->resources.domain_cvp) { + device_remove_file(&core->vdev[MSM_VIDC_CVP].vdev.dev, &dev_attr_link_name); - video_unregister_device(&core->vdev[MSM_VIDC_CVP].vdev); + video_unregister_device(&core->vdev[MSM_VIDC_CVP].vdev); + } device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev, &dev_attr_link_name); video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev); diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index f91ed9e7bb933a53776f62ff6c8ac1b8ce2dd81e..b480f05e3d093454f544d170c517268f6e3b9f6b 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -81,6 +81,8 @@ static const char *const vp9_level[] = { "4.1", "5.0", "5.1", + "6.0", + 
"6.1", }; static const char *const mpeg2_profile[] = { @@ -248,8 +250,8 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = { .name = "VP9 Level", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED, - .maximum = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51, - .default_value = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51, + .maximum = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61, + .default_value = V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61, .menu_skip_mask = 0, .qmenu = vp9_level, .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 23fc44e16c25d9ec68d55198cd6c87010eff34be..ce074ef26ec0d71025b3301005da04fb2dcf7208 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -282,6 +282,9 @@ int msm_vidc_g_fmt(void *instance, struct v4l2_format *f) case V4L2_PIX_FMT_NV12: color_format = COLOR_FMT_NV12; break; + case V4L2_PIX_FMT_NV12_512: + color_format = COLOR_FMT_NV12_512; + break; case V4L2_PIX_FMT_NV12_UBWC: color_format = COLOR_FMT_NV12_UBWC; break; @@ -303,6 +306,8 @@ int msm_vidc_g_fmt(void *instance, struct v4l2_format *f) inst->prop.width[port]); f->fmt.pix_mp.plane_fmt[0].reserved[0] = VENUS_Y_SCANLINES(color_format, inst->prop.height[port]); + f->fmt.pix_mp.plane_fmt[0].sizeimage = VENUS_BUFFER_SIZE(color_format, + inst->prop.width[port], inst->prop.height[port]); dprintk(VIDC_DBG, "g_fmt: %x : type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n", @@ -925,8 +930,10 @@ int msm_vidc_set_internal_config(struct msm_vidc_inst *inst) if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR || rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) && (codec != V4L2_PIX_FMT_VP8)) { - if (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && - mbps < CBR_MB_LIMIT) + if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && + mbps < CBR_MB_LIMIT) || + (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR && + mbps < 
CBR_VFR_MB_LIMIT)) hrd_buf_size.vbv_hdr_buf_size = 500; else hrd_buf_size.vbv_hdr_buf_size = 1000; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index a5dec9c9ef200806fe3dd1e60136b3237562ba4d..f1788d635b1c646feec2ad93ce2cd7ff0695075d 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -145,15 +145,21 @@ static int fill_dynamic_stats(struct msm_vidc_inst *inst, { struct recon_buf *binfo, *nextb; struct vidc_input_cr_data *temp, *next; - u32 min_cf = 0, max_cf = 0; - u32 min_input_cr = 0, max_input_cr = 0, min_cr = 0, max_cr = 0; + u32 min_cf = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR, max_cf = 0; + u32 min_input_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO, + max_input_cr = 0; + u32 min_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO, max_cr = 0; mutex_lock(&inst->reconbufs.lock); list_for_each_entry_safe(binfo, nextb, &inst->reconbufs.list, list) { - min_cr = min(min_cr, binfo->CR); - max_cr = max(max_cr, binfo->CR); - min_cf = min(min_cf, binfo->CF); - max_cf = max(max_cf, binfo->CF); + if (binfo->CR) { + min_cr = min(min_cr, binfo->CR); + max_cr = max(max_cr, binfo->CR); + } + if (binfo->CF) { + min_cf = min(min_cf, binfo->CF); + max_cf = max(max_cf, binfo->CF); + } } mutex_unlock(&inst->reconbufs.lock); @@ -292,16 +298,28 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) vote_data[i].output_height = max(inst->prop.height[CAPTURE_PORT], inst->prop.height[OUTPUT_PORT]); - vote_data[i].lcu_size = codec == V4L2_PIX_FMT_HEVC ? 32 : 16; + vote_data[i].lcu_size = (codec == V4L2_PIX_FMT_HEVC || + codec == V4L2_PIX_FMT_VP9) ? 
32 : 16; vote_data[i].b_frames_enabled = msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES) != 0; vote_data[i].fps = msm_vidc_get_fps(inst); + if (inst->session_type == MSM_VIDC_ENCODER) { + vote_data[i].bitrate = inst->clk_data.bitrate; + /* scale bitrate if operating rate is larger than fps */ + if (vote_data[i].fps > inst->prop.fps + && inst->prop.fps) { + vote_data[i].bitrate = vote_data[i].bitrate / + inst->prop.fps * vote_data[i].fps; + } + } vote_data[i].power_mode = 0; - if (msm_vidc_clock_voting || is_turbo || - inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) + if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW && + inst->session_type != MSM_VIDC_CVP) + vote_data[i].power_mode = VIDC_POWER_TURBO; + if (msm_vidc_clock_voting || is_turbo) vote_data[i].power_mode = VIDC_POWER_TURBO; if (msm_comm_get_stream_output_mode(inst) == @@ -344,56 +362,13 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) return rc; } -static inline int get_bufs_outside_fw(struct msm_vidc_inst *inst) -{ - u32 fw_out_qsize = 0, i = 0; - struct vb2_queue *q = NULL; - struct vb2_buffer *vb = NULL; - - /* - * DCVS always operates on Uncompressed buffers. - * For Decoders, FTB and Encoders, ETB. - */ - - if (inst->state >= MSM_VIDC_OPEN_DONE && - inst->state < MSM_VIDC_STOP_DONE) { - - /* - * For decoder, there will be some frames with client - * but not to be displayed. Ex : VP9 DECODE_ONLY frames. - * Hence don't count them. 
- */ - - if (inst->session_type == MSM_VIDC_DECODER) { - q = &inst->bufq[CAPTURE_PORT].vb2_bufq; - for (i = 0; i < q->num_buffers; i++) { - vb = q->bufs[i]; - if (vb && vb->state != VB2_BUF_STATE_ACTIVE && - vb->planes[0].bytesused) - fw_out_qsize++; - } - } else { - q = &inst->bufq[OUTPUT_PORT].vb2_bufq; - for (i = 0; i < q->num_buffers; i++) { - vb = q->bufs[i]; - if (vb && vb->state != VB2_BUF_STATE_ACTIVE) - fw_out_qsize++; - } - } - } - - return fw_out_qsize; -} - -static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) +static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst, + unsigned long freq) { int rc = 0; - int fw_pending_bufs = 0; - int total_output_buf = 0; - int min_output_buf = 0; - int buffers_outside_fw = 0; - struct msm_vidc_core *core; - struct hal_buffer_requirements *output_buf_req; + int bufs_with_fw = 0; + int bufs_with_client = 0; + struct hal_buffer_requirements *buf_reqs; struct clock_data *dcvs; if (!inst || !inst->core || !inst->core->device) { @@ -401,41 +376,35 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) return -EINVAL; } + /* assume no increment or decrement is required initially */ + inst->clk_data.dcvs_flags = 0; + if (!inst->clk_data.dcvs_mode || inst->batch.enable) { dprintk(VIDC_DBG, "Skip DCVS (dcvs %d, batching %d)\n", inst->clk_data.dcvs_mode, inst->batch.enable); - /* Request right clocks (load normal clocks) */ + /* update load (freq) with normal value */ inst->clk_data.load = inst->clk_data.load_norm; return 0; } dcvs = &inst->clk_data; - core = inst->core; - mutex_lock(&inst->lock); - buffers_outside_fw = get_bufs_outside_fw(inst); - - output_buf_req = get_buff_req_buffer(inst, - dcvs->buffer_type); - mutex_unlock(&inst->lock); - if (!output_buf_req) { - dprintk(VIDC_ERR, - "%s: No buffer requirement for buffer type %x\n", - __func__, dcvs->buffer_type); + if (is_decode_session(inst)) + bufs_with_fw = msm_comm_num_queued_bufs(inst, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + else + bufs_with_fw = 
msm_comm_num_queued_bufs(inst, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + /* +1 as one buffer is going to be queued after the function */ + bufs_with_fw += 1; + + buf_reqs = get_buff_req_buffer(inst, dcvs->buffer_type); + if (!buf_reqs) { + dprintk(VIDC_ERR, "%s: invalid buf type %d\n", + __func__, dcvs->buffer_type); return -EINVAL; } - - /* Total number of output buffers */ - total_output_buf = output_buf_req->buffer_count_actual; - - min_output_buf = output_buf_req->buffer_count_min; - - /* Buffers outside Display are with FW. */ - fw_pending_bufs = total_output_buf - buffers_outside_fw; - dprintk(VIDC_PROF, - "Counts : total_output_buf = %d Min buffers = %d fw_pending_bufs = %d buffers_outside_fw = %d\n", - total_output_buf, min_output_buf, fw_pending_bufs, - buffers_outside_fw); + bufs_with_client = buf_reqs->buffer_count_actual - bufs_with_fw; /* * PMS decides clock level based on below algo @@ -455,13 +424,22 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) * pipeline, request Right Clocks. 
*/ - if (buffers_outside_fw <= dcvs->max_threshold) + if (bufs_with_client <= dcvs->max_threshold) { dcvs->load = dcvs->load_high; - else if (fw_pending_bufs < min_output_buf) + dcvs->dcvs_flags |= MSM_VIDC_DCVS_INCR; + } else if (bufs_with_fw < buf_reqs->buffer_count_min) { dcvs->load = dcvs->load_low; - else + dcvs->dcvs_flags |= MSM_VIDC_DCVS_DECR; + } else { dcvs->load = dcvs->load_norm; + dcvs->dcvs_flags = 0; + } + dprintk(VIDC_PROF, + "DCVS: %x : total bufs %d outside fw %d max threshold %d with fw %d min bufs %d flags %#x\n", + hash32_ptr(inst->session), buf_reqs->buffer_count_actual, + bufs_with_client, dcvs->max_threshold, bufs_with_fw, + buf_reqs->buffer_count_min, dcvs->dcvs_flags); return rc; } @@ -521,37 +499,6 @@ static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core) return freq; } -static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst) -{ - struct vidc_freq_data *temp; - unsigned long freq = 0; - bool is_turbo = false; - - mutex_lock(&inst->freqs.lock); - list_for_each_entry(temp, &inst->freqs.list, list) { - freq = max(freq, temp->freq); - if (temp->turbo) { - is_turbo = true; - break; - } - } - mutex_unlock(&inst->freqs.lock); - - if (is_turbo) { - return msm_vidc_max_freq(inst->core); - } - /* If current requirement is within DCVS limits, try DCVS. 
*/ - - if (freq < inst->clk_data.load_norm) { - dprintk(VIDC_DBG, "Calling DCVS now\n"); - msm_dcvs_scale_clocks(inst); - freq = inst->clk_data.load; - } - dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq); - - return freq; -} - void msm_comm_free_freq_table(struct msm_vidc_inst *inst) { struct vidc_freq_data *temp, *next; @@ -753,7 +700,6 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, freq = max(vpp_cycles, vsp_cycles); freq = max(freq, fw_cycles); - dprintk(VIDC_DBG, "Update DCVS Load\n"); allowed_clks_tbl = core->resources.allowed_clks_tbl; for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) { rate = allowed_clks_tbl[i].clock_rate; @@ -767,10 +713,10 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate : dcvs->load_norm; - msm_dcvs_print_dcvs_stats(dcvs); - - dprintk(VIDC_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n", - __func__, inst, filled_len, freq); + dprintk(VIDC_PROF, + "%s: inst %pK: %x : filled len %d required freq %lu load_norm %lu\n", + __func__, inst, hash32_ptr(inst->session), + filled_len, freq, dcvs->load_norm); return freq; } @@ -783,6 +729,7 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core) struct msm_vidc_inst *temp = NULL; int rc = 0, i = 0; struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + bool increment, decrement; hdev = core->device; allowed_clks_tbl = core->resources.allowed_clks_tbl; @@ -793,6 +740,8 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core) } mutex_lock(&core->lock); + increment = false; + decrement = true; list_for_each_entry(temp, &core->instances, list) { if (temp->clk_data.core_id == VIDC_CORE_ID_1) @@ -820,20 +769,39 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core) freq_core_max = msm_vidc_max_freq(core); break; } + /* increment even if one session requested for it */ + if (temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR) + increment = true; + /* decrement only 
if all sessions requested for it */ + if (!(temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR)) + decrement = false; } + /* + * keep checking from lowest to highest rate until + * table rate >= requested rate + */ for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) { rate = allowed_clks_tbl[i].clock_rate; if (rate >= freq_core_max) break; } + if (increment) { + if (i > 0) + rate = allowed_clks_tbl[i-1].clock_rate; + } else if (decrement) { + if (i < (core->resources.allowed_clks_tbl_size - 1)) + rate = allowed_clks_tbl[i+1].clock_rate; + } core->min_freq = freq_core_max; core->curr_freq = rate; mutex_unlock(&core->lock); - dprintk(VIDC_PROF, "Min freq = %lu Current Freq = %lu\n", - core->min_freq, core->curr_freq); + dprintk(VIDC_PROF, + "%s: clock rate %lu requested %lu increment %d decrement %d\n", + __func__, core->curr_freq, core->min_freq, + increment, decrement); rc = call_hfi_op(hdev, scale_clocks, hdev->hfi_device_data, core->curr_freq); @@ -940,18 +908,17 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) } freq = call_core_op(inst->core, calc_freq, inst, filled_len); - - msm_vidc_update_freq_entry(inst, freq, device_addr, is_turbo); - - freq = msm_vidc_adjust_freq(inst); - inst->clk_data.min_freq = freq; + /* update dcvs flags */ + msm_dcvs_scale_clocks(inst, freq); - if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW || - msm_vidc_clock_voting) + if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW || is_turbo || + msm_vidc_clock_voting) { inst->clk_data.min_freq = msm_vidc_max_freq(inst->core); - else - inst->clk_data.min_freq = freq; + inst->clk_data.dcvs_flags = 0; + } + + msm_vidc_update_freq_entry(inst, freq, device_addr, is_turbo); msm_vidc_set_clocks(inst->core); @@ -1013,6 +980,12 @@ int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst) __func__, inst); return -EINVAL; } + + if (inst->session_type == MSM_VIDC_CVP) { + dprintk(VIDC_DBG, "%s: cvp session\n", __func__); + return 0; + } + count = 
inst->core->resources.codec_data_count; fourcc = inst->session_type == MSM_VIDC_DECODER ? inst->fmts[OUTPUT_PORT].fourcc : @@ -1028,6 +1001,12 @@ int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst) break; } } + + if (!inst->clk_data.entry) { + dprintk(VIDC_ERR, "%s No match found\n", __func__); + rc = -EINVAL; + } + return rc; } @@ -1039,7 +1018,7 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst) u64 total_freq = 0, rate = 0, load; int cycles; struct clock_data *dcvs; - struct hal_buffer_requirements *output_buf_req; + struct hal_buffer_requirements *buf_req; dprintk(VIDC_DBG, "Init DCVS Load\n"); @@ -1062,22 +1041,33 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst) dcvs->buffer_type = HAL_BUFFER_INPUT; dcvs->min_threshold = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT); + buf_req = get_buff_req_buffer(inst, HAL_BUFFER_INPUT); + if (buf_req) + dcvs->max_threshold = + buf_req->buffer_count_actual - + buf_req->buffer_count_min_host + 2; + else + dprintk(VIDC_ERR, + "%s: No bufer req for buffer type %x\n", + __func__, HAL_BUFFER_INPUT); + } else if (inst->session_type == MSM_VIDC_DECODER) { dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst); - output_buf_req = get_buff_req_buffer(inst, - dcvs->buffer_type); - if (!output_buf_req) { + buf_req = get_buff_req_buffer(inst, dcvs->buffer_type); + if (buf_req) + dcvs->max_threshold = + buf_req->buffer_count_actual - + buf_req->buffer_count_min_host + 2; + else dprintk(VIDC_ERR, "%s: No bufer req for buffer type %x\n", __func__, dcvs->buffer_type); - return; - } - dcvs->max_threshold = output_buf_req->buffer_count_actual - - output_buf_req->buffer_count_min_host + 2; dcvs->min_threshold = msm_vidc_get_extra_buff_count(inst, dcvs->buffer_type); } else { + dprintk(VIDC_ERR, "%s: invalid session type %#x\n", + __func__, inst->session_type); return; } @@ -1214,7 +1204,9 @@ int msm_vidc_decide_work_route(struct msm_vidc_inst *inst) if (slice_mode == 
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES || (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && - mbps < CBR_MB_LIMIT)) { + mbps < CBR_MB_LIMIT) || + (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR && + mbps < CBR_VFR_MB_LIMIT)) { pdata.video_work_route = 1; dprintk(VIDC_DBG, "Configured work route = 1"); } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 6778c1734b8c76008d8e3ed906ac1eeef73ca14a..6c19dba19b59b8031bdf491ede0e74fcf1878e38 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -300,6 +300,10 @@ int msm_comm_hal_to_v4l2(int id, int value) return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5; case HAL_VP9_LEVEL_51: return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51; + case HAL_VP9_LEVEL_6: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6; + case HAL_VP9_LEVEL_61: + return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61; case HAL_VP9_LEVEL_UNUSED: return V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED; default: @@ -749,11 +753,8 @@ enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst) return HAL_VIDEO_DECODER_PRIMARY; } - if (!is_decode_session(inst)) { - dprintk(VIDC_DBG, "%s: not a decode session %x\n", - __func__, hash32_ptr(inst->session)); + if (!is_decode_session(inst)) return HAL_VIDEO_DECODER_PRIMARY; - } if (inst->stream_output_mode == HAL_VIDEO_DECODER_SECONDARY) return HAL_VIDEO_DECODER_SECONDARY; @@ -942,6 +943,9 @@ enum hal_uncompressed_format msm_comm_get_hal_uncompressed(int fourcc) case V4L2_PIX_FMT_NV12: format = HAL_COLOR_FORMAT_NV12; break; + case V4L2_PIX_FMT_NV12_512: + format = HAL_COLOR_FORMAT_NV12_512; + break; case V4L2_PIX_FMT_NV21: format = HAL_COLOR_FORMAT_NV21; break; @@ -2236,7 +2240,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data) msm_vidc_handle_hw_error(core); if (response->status == VIDC_ERR_NOC_ERROR) { dprintk(VIDC_WARN, "Got NOC error"); - MSM_VIDC_ERROR(true); + MSM_VIDC_ERROR(false); 
} dprintk(VIDC_DBG, "Calling core_release\n"); @@ -2322,9 +2326,13 @@ struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( return NULL; } - q = &inst->bufq[port].vb2_bufq; mutex_lock(&inst->bufq[port].lock); found = false; + q = &inst->bufq[port].vb2_bufq; + if (!q->streaming) { + dprintk(VIDC_ERR, "port %d is not streaming", port); + goto unlock; + } list_for_each_entry(vb, &q->queued_list, queued_entry) { if (vb->state != VB2_BUF_STATE_ACTIVE) continue; @@ -2333,6 +2341,7 @@ struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( break; } } +unlock: mutex_unlock(&inst->bufq[port].lock); if (!found) { print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, mbuf); @@ -2343,28 +2352,52 @@ struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( } int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst, - struct vb2_buffer *vb) + struct msm_vidc_buffer *mbuf) { - u32 port; + struct vb2_buffer *vb2; + struct vb2_v4l2_buffer *vbuf; + u32 i, port; - if (!inst || !vb) { + if (!inst || !mbuf) { dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", - __func__, inst, vb); + __func__, inst, mbuf); return -EINVAL; } - if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (mbuf->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) port = CAPTURE_PORT; - } else if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + else if (mbuf->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) port = OUTPUT_PORT; - } else { - dprintk(VIDC_ERR, "%s: invalid type %d\n", - __func__, vb->type); + else return -EINVAL; - } + vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); + if (!vb2) + return -EINVAL; + + /* + * access vb2 buffer under q->lock and if streaming only to + * ensure the buffer was not free'd by vb2 framework while + * we are accessing it here. 
+ */ mutex_lock(&inst->bufq[port].lock); - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + if (inst->bufq[port].vb2_bufq.streaming) { + vbuf = to_vb2_v4l2_buffer(vb2); + vbuf->flags = mbuf->vvb.flags; + vb2->timestamp = mbuf->vvb.vb2_buf.timestamp; + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + vb2->planes[i].bytesused = + mbuf->vvb.vb2_buf.planes[i].bytesused; + vb2->planes[i].data_offset = + mbuf->vvb.vb2_buf.planes[i].data_offset; + } + vb2_buffer_done(vb2, VB2_BUF_STATE_DONE); + } else { + dprintk(VIDC_ERR, "%s: port %d is not streaming\n", + __func__, port); + } mutex_unlock(&inst->bufq[port].lock); return 0; @@ -2402,10 +2435,10 @@ bool heic_encode_session_supported(struct msm_vidc_inst *inst) n_bframes == 0 && n_pframes == 0) { if (inst->grid_enable > 0) { - if (!(inst->prop.height[CAPTURE_PORT] == - inst->prop.width[CAPTURE_PORT] && - inst->prop.width[CAPTURE_PORT] == - HEIC_GRID_DIMENSION)) + if (inst->prop.width[CAPTURE_PORT] < + HEIC_GRID_DIMENSION || + inst->prop.height[CAPTURE_PORT] < + HEIC_GRID_DIMENSION) return false; } return true; @@ -2438,12 +2471,11 @@ static void handle_ebd(enum hal_command_response cmd, void *data) { struct msm_vidc_cb_data_done *response = data; struct msm_vidc_buffer *mbuf; - struct vb2_buffer *vb, *vb2; + struct vb2_buffer *vb; struct msm_vidc_inst *inst; struct vidc_hal_ebd *empty_buf_done; - struct vb2_v4l2_buffer *vbuf; u32 planes[VIDEO_MAX_PLANES] = {0}; - u32 extra_idx = 0, i; + u32 extra_idx = 0; if (!response) { dprintk(VIDC_ERR, "Invalid response from vidc_hal\n"); @@ -2476,15 +2508,7 @@ static void handle_ebd(enum hal_command_response cmd, void *data) __func__, planes[0], planes[1]); goto exit; } - vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); - - /* - * take registeredbufs.lock to update mbuf & vb2 variables together - * so that both are in sync else if mbuf and vb2 variables are not - * in sync msm_comm_compare_vb2_planes() returns false for the - * right buffer due to data_offset field mismatch. 
- */ - mutex_lock(&inst->registeredbufs.lock); + mbuf->flags &= ~MSM_VIDC_FLAG_QUEUED; vb = &mbuf->vvb.vb2_buf; vb->planes[0].bytesused = response->input_done.filled_len; @@ -2510,18 +2534,6 @@ static void handle_ebd(enum hal_command_response cmd, void *data) if (extra_idx && extra_idx < VIDEO_MAX_PLANES) vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length; - if (vb2) { - vbuf = to_vb2_v4l2_buffer(vb2); - vbuf->flags |= mbuf->vvb.flags; - for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { - vb2->planes[i].bytesused = - mbuf->vvb.vb2_buf.planes[i].bytesused; - vb2->planes[i].data_offset = - mbuf->vvb.vb2_buf.planes[i].data_offset; - } - } - mutex_unlock(&inst->registeredbufs.lock); - update_recon_stats(inst, &empty_buf_done->recon_stats); msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr); /* @@ -2535,7 +2547,7 @@ static void handle_ebd(enum hal_command_response cmd, void *data) * in put_buffer. */ msm_comm_put_vidc_buffer(inst, mbuf); - msm_comm_vb2_buffer_done(inst, vb2); + msm_comm_vb2_buffer_done(inst, mbuf); msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD); kref_put_mbuf(mbuf); exit: @@ -2589,13 +2601,12 @@ static void handle_fbd(enum hal_command_response cmd, void *data) struct msm_vidc_cb_data_done *response = data; struct msm_vidc_buffer *mbuf; struct msm_vidc_inst *inst; - struct vb2_buffer *vb, *vb2; + struct vb2_buffer *vb; struct vidc_hal_fbd *fill_buf_done; - struct vb2_v4l2_buffer *vbuf; enum hal_buffer buffer_type; u64 time_usec = 0; u32 planes[VIDEO_MAX_PLANES] = {0}; - u32 extra_idx, i; + u32 extra_idx; if (!response) { dprintk(VIDC_ERR, "Invalid response from vidc_hal\n"); @@ -2623,7 +2634,6 @@ static void handle_fbd(enum hal_command_response cmd, void *data) __func__, planes[0], planes[1]); goto exit; } - vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); } else { if (handle_multi_stream_buffers(inst, fill_buf_done->packet_buffer1)) @@ -2632,14 +2642,7 @@ static void handle_fbd(enum hal_command_response cmd, void 
*data) &fill_buf_done->packet_buffer1); goto exit; } - - /* - * take registeredbufs.lock to update mbuf & vb2 variables together - * so that both are in sync else if mbuf and vb2 variables are not - * in sync msm_comm_compare_vb2_planes() returns false for the - * right buffer due to data_offset field mismatch. - */ - mutex_lock(&inst->registeredbufs.lock); + mbuf->flags &= ~MSM_VIDC_FLAG_QUEUED; vb = &mbuf->vvb.vb2_buf; if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME) @@ -2698,19 +2701,6 @@ static void handle_fbd(enum hal_command_response cmd, void *data) break; } - if (vb2) { - vbuf = to_vb2_v4l2_buffer(vb2); - vbuf->flags = mbuf->vvb.flags; - vb2->timestamp = mbuf->vvb.vb2_buf.timestamp; - for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { - vb2->planes[i].bytesused = - mbuf->vvb.vb2_buf.planes[i].bytesused; - vb2->planes[i].data_offset = - mbuf->vvb.vb2_buf.planes[i].data_offset; - } - } - mutex_unlock(&inst->registeredbufs.lock); - /* * dma cache operations need to be performed before dma_unmap * which is done inside msm_comm_put_vidc_buffer() @@ -2722,7 +2712,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data) * in put_buffer. 
*/ msm_comm_put_vidc_buffer(inst, mbuf); - msm_comm_vb2_buffer_done(inst, vb2); + msm_comm_vb2_buffer_done(inst, mbuf); msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD); kref_put_mbuf(mbuf); @@ -3274,7 +3264,11 @@ static int msm_comm_session_init(int flipped_state, return -EINVAL; } - msm_comm_init_clocks_and_bus_data(inst); + rc = msm_comm_init_clocks_and_bus_data(inst); + if (rc) { + dprintk(VIDC_ERR, "Failed to initialize clocks and bus data\n"); + goto exit; + } dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst); rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data, @@ -4251,6 +4245,29 @@ enum hal_buffer get_hal_buffer_type(unsigned int type, } } +int msm_comm_num_queued_bufs(struct msm_vidc_inst *inst, u32 type) +{ + int count = 0; + struct msm_vidc_buffer *mbuf; + + if (!inst) { + dprintk(VIDC_ERR, "%s: invalid params\n", __func__); + return 0; + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (mbuf->vvb.vb2_buf.type != type) + continue; + if (!(mbuf->flags & MSM_VIDC_FLAG_QUEUED)) + continue; + count++; + } + mutex_unlock(&inst->registeredbufs.lock); + + return count; +} + static int num_pending_qbufs(struct msm_vidc_inst *inst, u32 type) { int count = 0; @@ -4309,6 +4326,7 @@ static int msm_comm_qbuf_to_hfi(struct msm_vidc_inst *inst, dprintk(VIDC_ERR, "%s: Failed to qbuf: %d\n", __func__, rc); goto err_bad_input; } + mbuf->flags |= MSM_VIDC_FLAG_QUEUED; msm_vidc_debugfs_update(inst, e); err_bad_input: @@ -6100,7 +6118,6 @@ bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst, vb = &mbuf->vvb.vb2_buf; if (vb->planes[i].m.fd == vb2->planes[i].m.fd && - vb->planes[i].data_offset == vb2->planes[i].data_offset && vb->planes[i].length == vb2->planes[i].length) { return true; } @@ -6230,8 +6247,8 @@ struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes( int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) { - int rc; struct 
vb2_buffer *vb; + u32 port; if (!inst || !mbuf) { dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", @@ -6246,13 +6263,26 @@ int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, return -EINVAL; } - vb->planes[0].bytesused = 0; - rc = msm_comm_vb2_buffer_done(inst, vb); - if (rc) - print_vidc_buffer(VIDC_ERR, - "vb2_buffer_done failed for", inst, mbuf); + if (mbuf->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + port = CAPTURE_PORT; + else if (mbuf->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + port = OUTPUT_PORT; + else + return -EINVAL; - return rc; + mutex_lock(&inst->bufq[port].lock); + if (inst->bufq[port].vb2_bufq.streaming) { + vb->planes[0].bytesused = 0; + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } else { + dprintk(VIDC_ERR, "%s: port %d is not streaming\n", + __func__, port); + } + mutex_unlock(&inst->bufq[port].lock); + + return 0; } int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, @@ -6415,23 +6445,24 @@ struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, } mutex_lock(&inst->registeredbufs.lock); - if (inst->session_type == MSM_VIDC_DECODER) { + /* + * for encoder input, client may queue the same buffer with different + * fd before driver returned old buffer to the client. This buffer + * should be treated as new buffer Search the list with fd so that + * it will be treated as new msm_vidc_buffer. + */ + if (is_encode_session(inst) && vb2->type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { - if (msm_comm_compare_dma_planes(inst, mbuf, - dma_planes)) { + if (msm_comm_compare_vb2_planes(inst, mbuf, vb2)) { found = true; break; } } } else { - /* - * for encoder, client may queue the same buffer with different - * fd before driver returned old buffer to the client. This - * buffer should be treated as new buffer. Search the list with - * fd so that it will be treated as new msm_vidc_buffer. 
- */ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { - if (msm_comm_compare_vb2_planes(inst, mbuf, vb2)) { + if (msm_comm_compare_dma_planes(inst, mbuf, + dma_planes)) { found = true; break; } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 01791b3a5bfa9111230e9191f87d3f852c20eb4b..23def7663242f3a87e243ae500c26f7f90b3ee90 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -24,7 +24,8 @@ #define DEFAULT_FRAME_QUALITY 80 #define FRAME_QUALITY_STEP 1 #define HEIC_GRID_DIMENSION 512 -#define CBR_MB_LIMIT (1280*720/256*30) +#define CBR_MB_LIMIT (((1280+15)/16)*((720+15)/16)*30) +#define CBR_VFR_MB_LIMIT (((640+15)/16)*((480+15)/16)*30) struct vb2_buf_entry { struct list_head list; @@ -214,7 +215,7 @@ void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst, void handle_release_buffer_reference(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst, - struct vb2_buffer *vb); + struct msm_vidc_buffer *mbuf); int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst, @@ -250,4 +251,5 @@ void msm_comm_fetch_mark_data(struct msm_vidc_list *data_list, int msm_comm_release_mark_data(struct msm_vidc_inst *inst); int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); +int msm_comm_num_queued_bufs(struct msm_vidc_inst *inst, u32 type); #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index e088214ee7eaf2b688cf07a7dc8c671a724518e1..4705a27017232dec66576b88c2cfc9b97dd37c4d 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -328,6 +328,11 @@ struct batch_mode { u32 size; }; +enum dcvs_flags { + 
MSM_VIDC_DCVS_INCR = BIT(0), + MSM_VIDC_DCVS_DECR = BIT(1), +}; + struct clock_data { int buffer_counter; int load; @@ -354,6 +359,7 @@ struct clock_data { bool low_latency_mode; bool turbo_mode; u32 work_route; + u32 dcvs_flags; }; struct profile_data { @@ -502,6 +508,7 @@ void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type); enum msm_vidc_flags { MSM_VIDC_FLAG_DEFERRED = BIT(0), MSM_VIDC_FLAG_RBR_PENDING = BIT(1), + MSM_VIDC_FLAG_QUEUED = BIT(2), }; struct msm_vidc_buffer { diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c index 5d9e7698ebc6e261969eb810d4c482c4da8bf4c5..531ad8ceb67823bff3f4cbc0b00997e33e7c5d35 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c @@ -169,11 +169,19 @@ static struct msm_vidc_common_data sm6150_common_data[] = { }, { .key = "qcom,power-collapse-delay", - .value = 500, + .value = 1500, }, { .key = "qcom,hw-resp-timeout", - .value = 250, + .value = 1000, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, }, }; diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 0efa9b88848a231c55a6aecf9d7676da058b4f89..ac7c1ee8c63e6ab806e55cd534b2d3ab3f01c1c3 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -406,6 +406,8 @@ enum hal_vp9_level { HAL_VP9_LEVEL_41 = 0x00000080, HAL_VP9_LEVEL_5 = 0x00000100, HAL_VP9_LEVEL_51 = 0x00000200, + HAL_VP9_LEVEL_6 = 0x00000400, + HAL_VP9_LEVEL_61 = 0x00000800, }; struct hal_frame_rate { @@ -436,6 +438,7 @@ enum hal_uncompressed_format { HAL_COLOR_FORMAT_RGBA8888 = 0x00008000, HAL_COLOR_FORMAT_RGBA8888_UBWC = 0x00010000, HAL_COLOR_FORMAT_P010 = 0x00020000, + HAL_COLOR_FORMAT_NV12_512 = 0x00040000, HAL_UNUSED_COLOR = 0x10000000, }; @@ -1391,7 +1394,7 @@ struct vidc_bus_vote_data { 
enum hal_video_codec codec; enum hal_uncompressed_format color_formats[2]; int num_formats; /* 1 = DPB-OPB unified; 2 = split */ - int input_height, input_width, fps; + int input_height, input_width, fps, bitrate; int output_height, output_width; int compression_ratio; int complexity_factor; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 1a428fe9f07077e9c8c02fea1cfce723b247cb85..9f023bc6e1b7e4dafd861a4a0344bb4868dec6b1 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1945,6 +1945,7 @@ static int isp_initialize_modules(struct isp_device *isp) static void isp_detach_iommu(struct isp_device *isp) { + arm_iommu_detach_device(isp->dev); arm_iommu_release_mapping(isp->mapping); isp->mapping = NULL; } @@ -1961,8 +1962,7 @@ static int isp_attach_iommu(struct isp_device *isp) mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); if (IS_ERR(mapping)) { dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); - ret = PTR_ERR(mapping); - goto error; + return PTR_ERR(mapping); } isp->mapping = mapping; @@ -1977,7 +1977,8 @@ static int isp_attach_iommu(struct isp_device *isp) return 0; error: - isp_detach_iommu(isp); + arm_iommu_release_mapping(isp->mapping); + isp->mapping = NULL; return ret; } diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index 070bac36d766891dabeb01e1f1bf064eb989de58..2e2b8c409150547d9c998a10bf8f55ff881a10a2 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1280,7 +1280,7 @@ static int jpu_open(struct file *file) /* ...issue software reset */ ret = jpu_reset(jpu); if (ret) - goto device_prepare_rollback; + goto jpu_reset_rollback; } jpu->ref_count++; @@ -1288,6 +1288,8 @@ static int jpu_open(struct file *file) mutex_unlock(&jpu->mutex); return 0; +jpu_reset_rollback: + clk_disable_unprepare(jpu->clk); device_prepare_rollback: mutex_unlock(&jpu->mutex); 
v4l_prepare_rollback: diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index b3034f80163fb349d50dd7cd8da1b725741cc418..8ce6f9cff74633a5aab9fb87bbe37cca33919874 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -92,7 +92,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); */ int si470x_get_register(struct si470x_device *radio, int regnr) { - u16 buf[READ_REG_NUM]; + __be16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, @@ -117,7 +117,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr) int si470x_set_register(struct si470x_device *radio, int regnr) { int i; - u16 buf[WRITE_REG_NUM]; + __be16 buf[WRITE_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, @@ -147,7 +147,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr) static int si470x_get_all_registers(struct si470x_device *radio) { int i; - u16 buf[READ_REG_NUM]; + __be16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 72f381522cb26dfa64662ab0b6a4f61adc91a11c..a22828713c1cab4c481a09e7d1f507f4b6347949 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1824,11 +1824,11 @@ void rc_unregister_device(struct rc_dev *dev) if (!dev) return; - del_timer_sync(&dev->timer_keyup); - if (dev->driver_type == RC_DRIVER_IR_RAW) ir_raw_event_unregister(dev); + del_timer_sync(&dev->timer_keyup); + rc_free_rx_device(dev); device_del(&dev->dev); diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index ffbb178c6918e746b29b05ede7cbf5b80d253f79..2dbf632c10de350d72dc61f1a0b63343deee84ba 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -912,9 +912,12 @@ void vb2_buffer_done(struct vb2_buffer 
*vb, enum vb2_buffer_state state) dprintk(4, "done processing on buffer %d, state: %d\n", vb->index, state); - /* sync buffers */ - for (plane = 0; plane < vb->num_planes; ++plane) - call_void_memop(vb, finish, vb->planes[plane].mem_priv); + if (state != VB2_BUF_STATE_QUEUED && + state != VB2_BUF_STATE_REQUEUEING) { + /* sync buffers */ + for (plane = 0; plane < vb->num_planes; ++plane) + call_void_memop(vb, finish, vb->planes[plane].mem_priv); + } spin_lock_irqsave(&q->done_lock, flags); if (state == VB2_BUF_STATE_QUEUED || diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index a4803ac192bbc86f550d35f4e8eb236c77aeafd6..1d49a8dd4a374df623a7bfdddda4874ddeaf47ee 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -20,14 +20,6 @@ #include "mc.h" #define MC_INTSTATUS 0x000 -#define MC_INT_DECERR_MTS (1 << 16) -#define MC_INT_SECERR_SEC (1 << 13) -#define MC_INT_DECERR_VPR (1 << 12) -#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) -#define MC_INT_INVALID_SMMU_PAGE (1 << 10) -#define MC_INT_ARBITRATION_EMEM (1 << 9) -#define MC_INT_SECURITY_VIOLATION (1 << 8) -#define MC_INT_DECERR_EMEM (1 << 6) #define MC_INTMASK 0x004 @@ -248,12 +240,13 @@ static const char *const error_names[8] = { static irqreturn_t tegra_mc_irq(int irq, void *data) { struct tegra_mc *mc = data; - unsigned long status, mask; + unsigned long status; unsigned int bit; /* mask all interrupts to avoid flooding */ - status = mc_readl(mc, MC_INTSTATUS); - mask = mc_readl(mc, MC_INTMASK); + status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask; + if (!status) + return IRQ_NONE; for_each_set_bit(bit, &status, 32) { const char *error = status_names[bit] ?: "unknown"; @@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev) const struct of_device_id *match; struct resource *res; struct tegra_mc *mc; - u32 value; int err; match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); @@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device 
*pdev) WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); - value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | - MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | - MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM; - - mc_writel(mc, value, MC_INTMASK); + mc_writel(mc, mc->soc->intmask, MC_INTMASK); return 0; } diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h index ddb16676c3af4d99b59e62b84d77d0bdb30b5158..24e020b4609be7c571c0be7616491f46cb945994 100644 --- a/drivers/memory/tegra/mc.h +++ b/drivers/memory/tegra/mc.h @@ -14,6 +14,15 @@ #include +#define MC_INT_DECERR_MTS (1 << 16) +#define MC_INT_SECERR_SEC (1 << 13) +#define MC_INT_DECERR_VPR (1 << 12) +#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) +#define MC_INT_INVALID_SMMU_PAGE (1 << 10) +#define MC_INT_ARBITRATION_EMEM (1 << 9) +#define MC_INT_SECURITY_VIOLATION (1 << 8) +#define MC_INT_DECERR_EMEM (1 << 6) + static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) { return readl(mc->regs + offset); diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c index ba8fff3d66a655d0875a50f4abb7ea863ea0099c..6d2a5a849d928b1d25ee712ddf1e1535fb203ed7 100644 --- a/drivers/memory/tegra/tegra114.c +++ b/drivers/memory/tegra/tegra114.c @@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = { .atom_size = 32, .client_id_mask = 0x7f, .smmu = &tegra114_smmu_soc, + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | + MC_INT_DECERR_EMEM, }; diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c index 5a58e440f4a7bd58ec87cd2cbaa4b60993b74667..9f68a56f2727b9d045d5b7ac6076df7e9bf932ad 100644 --- a/drivers/memory/tegra/tegra124.c +++ b/drivers/memory/tegra/tegra124.c @@ -1020,6 +1020,9 @@ const struct tegra_mc_soc tegra124_mc_soc = { .smmu = &tegra124_smmu_soc, .emem_regs = tegra124_mc_emem_regs, .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), + .intmask = MC_INT_DECERR_MTS | 
MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, }; #endif /* CONFIG_ARCH_TEGRA_124_SOC */ @@ -1042,5 +1045,8 @@ const struct tegra_mc_soc tegra132_mc_soc = { .atom_size = 32, .client_id_mask = 0x7f, .smmu = &tegra132_smmu_soc, + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, }; #endif /* CONFIG_ARCH_TEGRA_132_SOC */ diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c index 5e144abe4c181eea8fb330ec9f68acb9735fe46f..47c78a6d8f00926d51d5724432a172938f36a2a9 100644 --- a/drivers/memory/tegra/tegra210.c +++ b/drivers/memory/tegra/tegra210.c @@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = { .atom_size = 64, .client_id_mask = 0xff, .smmu = &tegra210_smmu_soc, + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, }; diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c index b44737840e70c188344c3d51e5edc08ffb02b256..d0689428ea1a5b932e8b071508ec4de43d3f3b39 100644 --- a/drivers/memory/tegra/tegra30.c +++ b/drivers/memory/tegra/tegra30.c @@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = { .atom_size = 16, .client_id_mask = 0x7f, .smmu = &tegra30_smmu_soc, + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | + MC_INT_DECERR_EMEM, }; diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c index b0ca5a4c841e0c98828efbcebf42caca3223006b..c5528ae982f26ab90721fa33525d60cf5df731a9 100644 --- a/drivers/mfd/cros_ec.c +++ b/drivers/mfd/cros_ec.c @@ -112,7 +112,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev) mutex_init(&ec_dev->lock); - cros_ec_query_all(ec_dev); + err = cros_ec_query_all(ec_dev); + if (err) { + 
dev_err(dev, "Cannot identify the EC: error %d\n", err); + return err; + } if (ec_dev->irq) { err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread, diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index a0c44d16bf30c58559a6ed0489cb762c13593e32..c75daba57fd77ffd6bce5a78f88c7ab9c16498b2 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -102,15 +102,15 @@ static struct file *cxl_getfile(const char *name, d_instantiate(path.dentry, inode); file = alloc_file(&path, OPEN_FMODE(flags), fops); - if (IS_ERR(file)) - goto err_dput; + if (IS_ERR(file)) { + path_put(&path); + goto err_fs; + } file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = priv; return file; -err_dput: - path_put(&path); err_inode: iput(inode); err_fs: diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 9a47f6d5bc85767b7b10855702b6c767b9e3d5f4..13a2279e2cc7029f2fde5a011e670a93ea804430 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -67,7 +67,7 @@ #define QSEE_CE_CLK_100MHZ 100000000 #define CE_CLK_DIV 1000000 -#define QSEECOM_MAX_SG_ENTRY 512 +#define QSEECOM_MAX_SG_ENTRY 4096 #define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \ (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 20462d098ef9d044fbe5fe98f1ef21faffacbd9d..c1403ebc0053db2034ba5a3ad00af414511ab6c5 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -3467,6 +3467,91 @@ static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card, return ret; } +static void mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; + int ret; + + if ((card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) && + ctx->active_small_sector_read_reqs) { + ret = wait_event_interruptible(ctx->queue_empty_wq, + !ctx->active_reqs); + if (ret) { + pr_err("%s: failed while waiting for the CMDQ to be empty %s err 
(%d)\n", + mmc_hostname(host), __func__, ret); + BUG_ON(1); + } + /* clear the counter now */ + ctx->active_small_sector_read_reqs = 0; + /* + * If there were small sector (less than 8 sectors) read + * operations in progress then we have to wait for the + * outstanding requests to finish and should also have + * atleast 6 microseconds delay before queuing the DCMD + * request. + */ + udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD); + } +} + +static int mmc_blk_cmdq_issue_drv_op(struct mmc_card *card, struct request *req) +{ + struct mmc_queue_req *mq_rq; + u8 **ext_csd; + u32 status; + int ret; + + mq_rq = req_to_mmc_queue_req(req); + ext_csd = mq_rq->drv_op_data; + + ret = mmc_cmdq_halt_on_empty_queue(card->host); + if (ret) { + pr_err("%s: failed to halt on empty queue\n", + mmc_hostname(card->host)); + blk_end_request_all(req, ret); + mmc_put_card(card); + return ret; + } + + switch (mq_rq->drv_op) { + case MMC_DRV_OP_GET_EXT_CSD: + ret = mmc_get_ext_csd(card, ext_csd); + if (ret) { + pr_err("%s: failed to get ext_csd\n", + mmc_hostname(card->host)); + goto out_unhalt; + } + break; + case MMC_DRV_OP_GET_CARD_STATUS: + ret = mmc_send_status(card, &status); + if (ret) { + pr_err("%s: failed to get status\n", + mmc_hostname(card->host)); + goto out_unhalt; + } + ret = status; + break; + default: + pr_err("%s: unknown driver specific operation\n", + mmc_hostname(card->host)); + ret = -EINVAL; + break; + } + mq_rq->drv_op_result = ret; + ret = ret ? 
BLK_STS_IOERR : BLK_STS_OK; + +out_unhalt: + blk_end_request_all(req, ret); + ret = mmc_cmdq_halt(card->host, false); + if (ret) + pr_err("%s: %s: failed to unhalt\n", + mmc_hostname(card->host), __func__); + mmc_put_card(card); + + return ret; +} + static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) { int ret, err = 0; @@ -3510,42 +3595,26 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) } if (req) { - struct mmc_host *host = card->host; - struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; - - if ((req_op(req) == REQ_OP_FLUSH || - req_op(req) == REQ_OP_DISCARD) && - (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) && - ctx->active_small_sector_read_reqs) { - ret = wait_event_interruptible(ctx->queue_empty_wq, - !ctx->active_reqs); - if (ret) { - pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n", - mmc_hostname(host), __func__, ret); - BUG_ON(1); - } - /* clear the counter now */ - ctx->active_small_sector_read_reqs = 0; - /* - * If there were small sector (less than 8 sectors) read - * operations in progress then we have to wait for the - * outstanding requests to finish and should also have - * atleast 6 microseconds delay before queuing the DCMD - * request. 
- */ - udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD); - } - - if (req_op(req) == REQ_OP_DISCARD) { + switch (req_op(req)) { + case REQ_OP_DISCARD: + mmc_cmdq_wait_for_small_sector_read(card); ret = mmc_blk_cmdq_issue_discard_rq(mq, req); - } else if (req_op(req) == REQ_OP_SECURE_ERASE) { + break; + case REQ_OP_SECURE_ERASE: if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req); else ret = mmc_blk_cmdq_issue_discard_rq(mq, req); - } else if (req_op(req) == REQ_OP_FLUSH) { + break; + case REQ_OP_FLUSH: + mmc_cmdq_wait_for_small_sector_read(card); ret = mmc_blk_cmdq_issue_flush_rq(mq, req); - } else { + break; + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + ret = mmc_blk_cmdq_issue_drv_op(card, req); + break; + default: ret = mmc_blk_cmdq_issue_rw_rq(mq, req); /* * If issuing of the request fails with eitehr EBUSY or @@ -3568,6 +3637,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) */ goto out; } + break; } } @@ -4018,17 +4088,6 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) struct request *req; int ret; - mmc_get_card(card); - if (mmc_card_cmdq(card)) { - ret = mmc_cmdq_halt_on_empty_queue(card->host); - if (ret) { - pr_err("%s: halt failed while doing %s err (%d)\n", - mmc_hostname(card->host), __func__, - ret); - goto out; - } - } - /* Ask the block layer about the card status */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); if (IS_ERR(req)) @@ -4042,13 +4101,6 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) } blk_put_request(req); - if (mmc_card_cmdq(card)) { - if (mmc_cmdq_halt(card->host, false)) - pr_err("%s: %s: cmdq unhalt failed\n", - mmc_hostname(card->host), __func__); - } -out: - mmc_put_card(card); return ret; } DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, @@ -4072,18 +4124,6 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) if (!buf) return -ENOMEM; - mmc_get_card(card); - if 
(mmc_card_cmdq(card)) { - err = mmc_cmdq_halt_on_empty_queue(card->host); - if (err) { - pr_err("%s: halt failed while doing %s err (%d)\n", - mmc_hostname(card->host), __func__, - err); - mmc_put_card(card); - goto out_free_halt; - } - } - /* Ask the block layer for the EXT CSD */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); if (IS_ERR(req)) { @@ -4112,23 +4152,10 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) filp->private_data = buf; - if (mmc_card_cmdq(card)) { - if (mmc_cmdq_halt(card->host, false)) - pr_err("%s: %s: cmdq unhalt failed\n", - mmc_hostname(card->host), __func__); - } - - mmc_put_card(card); kfree(ext_csd); return 0; + out_free: - if (mmc_card_cmdq(card)) { - if (mmc_cmdq_halt(card->host, false)) - pr_err("%s: %s: cmdq unhalt failed\n", - mmc_hostname(card->host), __func__); - } - mmc_put_card(card); -out_free_halt: kfree(buf); return err; } diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index 13ef162cf066a63363106e40a513f3317205d10d..a8b9fee4d62a1e2c16a463cc02c0a8802c186a29 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -40,14 +40,18 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq, struct gpio_descs *reset_gpios = pwrseq->reset_gpios; if (!IS_ERR(reset_gpios)) { - int i; - int values[reset_gpios->ndescs]; + int i, *values; + int nvalues = reset_gpios->ndescs; - for (i = 0; i < reset_gpios->ndescs; i++) + values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL); + if (!values) + return; + + for (i = 0; i < nvalues; i++) values[i] = value; - gpiod_set_array_value_cansleep( - reset_gpios->ndescs, reset_gpios->desc, values); + gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values); + kfree(values); } } diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index e2dd4c03b20e430195addfe71daa965b2fd9ff55..804dc190b53c337108cfcb8fe73a2e896eece76c 100644 --- 
a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -208,6 +208,8 @@ void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card) host->max_req_size / 512)); blk_queue_max_segment_size(mq->queue, host->max_seg_size); blk_queue_max_segments(mq->queue, host->max_segs); + if (host->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); } static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) @@ -464,6 +466,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); + if (host->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); sema_init(&mq->thread_sem, 1); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 6a2cbbba29aadf317ad13c825c5c1602f0efd9fb..5252885e5cda5e87c9e3e49ce66bacf823e033b1 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1255,6 +1255,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) if (host->state == STATE_WAITING_CMD11_DONE) sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; + slot->mmc->actual_clock = 0; + if (!clock) { mci_writel(host, CLKENA, 0); mci_send_cmd(slot, sdmmc_cmd_bits, 0); @@ -1313,6 +1315,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) /* keep the last clock value that was requested from core */ slot->__clk_old = clock; + slot->mmc->actual_clock = div ? 
((host->bus_hz / div) >> 1) : + host->bus_hz; } host->current_speed = clock; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 71a07f8dbc71691b65697013a20feb81fa7b4bd6..2137eb53d059bce85f0e5a8d886a65e463d1f9dd 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -5150,19 +5150,50 @@ static int sdhci_msm_remove(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = pltfm_host->priv; struct sdhci_msm_pltfm_data *pdata = msm_host->pdata; + int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups; + int i; int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); - pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__); + pr_debug("%s: %s Enter\n", dev_name(&pdev->dev), __func__); if (!gpio_is_valid(msm_host->pdata->status_gpio)) device_remove_file(&pdev->dev, &msm_host->polling); + + device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr); device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw); pm_runtime_disable(&pdev->dev); + if (msm_host->pm_qos_group_enable) { + struct sdhci_msm_pm_qos_group *group; + + for (i = 0; i < nr_groups; i++) + cancel_delayed_work_sync( + &msm_host->pm_qos[i].unvote_work); + + device_remove_file(&msm_host->pdev->dev, + &msm_host->pm_qos_group_enable_attr); + device_remove_file(&msm_host->pdev->dev, + &msm_host->pm_qos_group_status_attr); + + for (i = 0; i < nr_groups; i++) { + group = &msm_host->pm_qos[i]; + pm_qos_remove_request(&group->req); + } + } + + if (msm_host->pm_qos_irq.enabled) { + cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work); + device_remove_file(&pdev->dev, + &msm_host->pm_qos_irq.enable_attr); + device_remove_file(&pdev->dev, + &msm_host->pm_qos_irq.status_attr); + pm_qos_remove_request(&msm_host->pm_qos_irq.req); + } + if (msm_host->pm_qos_wq) destroy_workqueue(msm_host->pm_qos_wq); + sdhci_remove_host(host, dead); - sdhci_pltfm_free(pdev); 
sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false); @@ -5173,6 +5204,9 @@ static int sdhci_msm_remove(struct platform_device *pdev) sdhci_msm_bus_cancel_work_and_set_vote(host, 0); sdhci_msm_bus_unregister(msm_host); } + + sdhci_pltfm_free(pdev); + return 0; } diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 56e2e177644d6c3d9b0301b50ed780c51509a9bb..3f4f4aea0e8b6dfe3f3c10082db7d87219cc8aec 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -122,7 +122,11 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) return ret; - denali->clk_x_rate = clk_get_rate(dt->clk); + /* + * Hardcode the clock rate for the backward compatibility. + * This works for both SOCFPGA and UniPhier. + */ + denali->clk_x_rate = 200000000; ret = denali_init(denali); if (ret) diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 4005b427023c31f48a562494a8a3258135e362b0..16deba1a2385876ae8002eab2c6df14f1294a576 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -342,9 +342,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, case NAND_CMD_READID: case NAND_CMD_PARAM: { + /* + * For READID, read 8 bytes that are currently used. + * For PARAM, read all 3 copies of 256-bytes pages. 
+ */ + int len = 8; int timing = IFC_FIR_OP_RB; - if (command == NAND_CMD_PARAM) + if (command == NAND_CMD_PARAM) { timing = IFC_FIR_OP_RBCD; + len = 256 * 3; + } ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | @@ -354,12 +361,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, &ifc->ifc_nand.nand_fcr0); ifc_out32(column, &ifc->ifc_nand.row3); - /* - * although currently it's 8 bytes for READID, we always read - * the maximum 256 bytes(for PARAM) - */ - ifc_out32(256, &ifc->ifc_nand.nand_fbcr); - ifc_nand_ctrl->read_bytes = 256; + ifc_out32(len, &ifc->ifc_nand.nand_fbcr); + ifc_nand_ctrl->read_bytes = len; set_addr(mtd, 0, 0, 0); fsl_ifc_run_command(mtd); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 00245b73c224cbd1eb8343b664f2fc1fd6fc81cb..15aedb64a02be03bd15e4726f824d6b19946e54a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1687,6 +1687,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_upper_unlink; } + bond->nest_level = dev_get_nest_level(bond_dev) + 1; + /* If the mode uses primary, then the following is handled by * bond_change_active_slave(). 
*/ @@ -1734,7 +1736,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_mode_uses_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); - bond->nest_level = dev_get_nest_level(bond_dev); netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, @@ -3379,6 +3380,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } +static int bond_get_nest_level(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + + return bond->nest_level; +} + static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3387,7 +3395,7 @@ static void bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; - spin_lock(&bond->stats_lock); + spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev)); memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); @@ -4182,6 +4190,7 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_neigh_setup = bond_neigh_setup, .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, + .ndo_get_lock_subclass = bond_get_nest_level, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, @@ -4680,6 +4689,7 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->nest_level = SINGLE_DEPTH_NESTING; netdev_lockdep_set_classes(bond_dev); list_add_tail(&bond->bond_list, &bn->dev_list); diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 61084ba69a99f2ce2b1992d22608539c5675eba4..3d154eb63dcf2557f27b7564c966f37e176f6aa5 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option) static int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { - if 
(!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { - netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", - newval->string); - /* disable arp monitoring */ - bond->params.arp_interval = 0; - /* set miimon to default value */ - bond->params.miimon = BOND_DEFAULT_MIIMON; - netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", - bond->params.miimon); + if (!bond_mode_uses_arp(newval->value)) { + if (bond->params.arp_interval) { + netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", + newval->string); + /* disable arp monitoring */ + bond->params.arp_interval = 0; + } + + if (!bond->params.miimon) { + /* set miimon to default value */ + bond->params.miimon = BOND_DEFAULT_MIIMON; + netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", + bond->params.miimon); + } } if (newval->value == BOND_MODE_ALB) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 5d4e61741476660b925e80a81ca1d41c17587f3b..ca3fa82316c2a9940865c4b7b056f76fd268db55 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1073,7 +1073,8 @@ static void m_can_chip_config(struct net_device *dev) } else { /* Version 3.1.x or 3.2.x */ - cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | + CCCR_NISO); /* Only 3.2.x has NISO Bit implemented */ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 3c51a884db87bc90e71d5df8d5b0a91eadf69cdb..fa689854f16b310012f2240f2a9ed98486e809ff 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2"); #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ +#define PCIEFD_FW_VERSION(x, 
y, z) (((u32)(x) << 24) | \ + ((u32)(y) << 16) | \ + ((u32)(z) << 8)) + /* System Control Registers Bits */ #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ @@ -783,6 +787,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev, "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, hw_ver_major, hw_ver_minor, hw_ver_sub); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and + * 64-bit logical addresses: this workaround forces usage of 32-bit + * DMA addresses only when such a fw is detected. + */ + if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) < + PCIEFD_FW_VERSION(3, 3, 0)) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + dev_warn(&pdev->dev, + "warning: can't set DMA mask %llxh (err %d)\n", + DMA_BIT_MASK(32), err); + } +#endif + /* stop system clock */ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, PCIEFD_REG_SYS_CTL_CLR); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b00358297424604634489a9b106bf313c06b7fc9..d0846ae9e0e4084f3d57358c29d705d1c933a8a4 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -1071,6 +1071,7 @@ static void ems_usb_disconnect(struct usb_interface *intf) usb_free_urb(dev->intr_urb); kfree(dev->intr_in_buffer); + kfree(dev->tx_msg_buffer); } } diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 89aec07c225f58d26a80ce4795afbaa6c19d9d84..5a24039733efd23255142c4abc0d2b758d188554 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -2,6 +2,7 @@ * * Copyright (C) 2012 - 2014 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. + * Copyright (C) 2017 Sandvik Mining and Construction Oy * * Description: * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 
@@ -25,8 +26,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -101,7 +104,7 @@ enum xcan_reg { #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ - XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) + XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) /* CAN register bit shift - XCAN___SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ @@ -118,6 +121,7 @@ enum xcan_reg { /** * struct xcan_priv - This definition define CAN driver instance * @can: CAN private data structure. + * @tx_lock: Lock for synchronizing TX interrupt handling * @tx_head: Tx CAN packets ready to send on the queue * @tx_tail: Tx CAN packets successfully sended on the queue * @tx_max: Maximum number packets the driver can send @@ -132,6 +136,7 @@ enum xcan_reg { */ struct xcan_priv { struct can_priv can; + spinlock_t tx_lock; unsigned int tx_head; unsigned int tx_tail; unsigned int tx_max; @@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +#define XCAN_CAP_WATERMARK 0x0001 +struct xcan_devtype_data { + unsigned int caps; +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev) usleep_range(500, 10000); } + /* reset clears FIFOs */ + priv->tx_head = 0; + priv->tx_tail = 0; + return 0; } @@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; + unsigned long flags; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; @@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); can_put_echo_skb(skb, 
ndev, priv->tx_head % priv->tx_max); + + spin_lock_irqsave(&priv->tx_lock, flags); + priv->tx_head++; /* Write the Frame to Xilinx CAN TX FIFO */ @@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) stats->tx_bytes += cf->can_dlc; } + /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ + if (priv->tx_max > 1) + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); + /* Check if the TX buffer is full */ if ((priv->tx_head - priv->tx_tail) == priv->tx_max) netif_stop_queue(ndev); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; } @@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev) return 1; } +/** + * xcan_current_error_state - Get current error state from HW + * @ndev: Pointer to net_device structure + * + * Checks the current CAN error state from the HW. Note that this + * only checks for ERROR_PASSIVE and ERROR_WARNING. + * + * Return: + * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE + * otherwise. + */ +static enum can_state xcan_current_error_state(struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); + + if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) + return CAN_STATE_ERROR_PASSIVE; + else if (status & XCAN_SR_ERRWRN_MASK) + return CAN_STATE_ERROR_WARNING; + else + return CAN_STATE_ERROR_ACTIVE; +} + +/** + * xcan_set_error_state - Set new CAN error state + * @ndev: Pointer to net_device structure + * @new_state: The new CAN state to be set + * @cf: Error frame to be populated or NULL + * + * Set new CAN error state for the device, updating statistics and + * populating the error frame if given. 
+ */ +static void xcan_set_error_state(struct net_device *ndev, + enum can_state new_state, + struct can_frame *cf) +{ + struct xcan_priv *priv = netdev_priv(ndev); + u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); + u32 txerr = ecr & XCAN_ECR_TEC_MASK; + u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; + + priv->can.state = new_state; + + if (cf) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + switch (new_state) { + case CAN_STATE_ERROR_PASSIVE: + priv->can.can_stats.error_passive++; + if (cf) + cf->data[1] = (rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + break; + case CAN_STATE_ERROR_WARNING: + priv->can.can_stats.error_warning++; + if (cf) + cf->data[1] |= (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + break; + case CAN_STATE_ERROR_ACTIVE: + if (cf) + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; + break; + default: + /* non-ERROR states are handled elsewhere */ + WARN_ON(1); + break; + } +} + +/** + * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX + * @ndev: Pointer to net_device structure + * + * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if + * the performed RX/TX has caused it to drop to a lesser state and set + * the interface state accordingly. + */ +static void xcan_update_error_state_after_rxtx(struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + enum can_state old_state = priv->can.state; + enum can_state new_state; + + /* changing error state due to successful frame RX/TX can only + * occur from these states + */ + if (old_state != CAN_STATE_ERROR_WARNING && + old_state != CAN_STATE_ERROR_PASSIVE) + return; + + new_state = xcan_current_error_state(ndev); + + if (new_state != old_state) { + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(ndev, &cf); + + xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); + + if (skb) { + struct net_device_stats *stats = &ndev->stats; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + } +} + /** * xcan_err_interrupt - error frame Isr * @ndev: net_device pointer @@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; - u32 err_status, status, txerr = 0, rxerr = 0; + u32 err_status; skb = alloc_can_err_skb(ndev, &cf); err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); - txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; - rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & - XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); - status = priv->read_reg(priv, XCAN_SR_OFFSET); if (isr & XCAN_IXR_BSOFF_MASK) { priv->can.state = CAN_STATE_BUS_OFF; @@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; - } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - priv->can.can_stats.error_passive++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (rxerr > 127) ? - CAN_ERR_CRTL_RX_PASSIVE : - CAN_ERR_CRTL_TX_PASSIVE; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } - } else if (status & XCAN_SR_ERRWRN_MASK) { - priv->can.state = CAN_STATE_ERROR_WARNING; - priv->can.can_stats.error_warning++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= (txerr > rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } + } else { + enum can_state new_state = xcan_current_error_state(ndev); + + xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); } /* Check for Arbitration lost interrupt */ @@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) if (isr & XCAN_IXR_RXOFLW_MASK) { stats->rx_over_errors++; stats->rx_errors++; - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; @@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) isr = priv->read_reg(priv, XCAN_ISR_OFFSET); while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { - if (isr & XCAN_IXR_RXOK_MASK) { - priv->write_reg(priv, XCAN_ICR_OFFSET, - XCAN_IXR_RXOK_MASK); - work_done += xcan_rx(ndev); - } else { - priv->write_reg(priv, XCAN_ICR_OFFSET, - XCAN_IXR_RXNEMP_MASK); - break; - } + work_done += xcan_rx(ndev); priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } - if (work_done) + if (work_done) { can_led_event(ndev, CAN_LED_EVENT_RX); + xcan_update_error_state_after_rxtx(ndev); + } if (work_done < quota) { napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); + ier |= XCAN_IXR_RXNEMP_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); } return work_done; @@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; + unsigned int frames_in_fifo; + int frames_sent = 1; /* TXOK => at least 1 frame was sent */ + unsigned long flags; + int retries = 0; + + /* Synchronize with xmit as we need to know the exact number + * of frames in the FIFO to stay in sync due to the TXFEMP + * handling. + * This also prevents a race between netif_wake_queue() and + * netif_stop_queue(). 
+ */ + spin_lock_irqsave(&priv->tx_lock, flags); + + frames_in_fifo = priv->tx_head - priv->tx_tail; + + if (WARN_ON_ONCE(frames_in_fifo == 0)) { + /* clear TXOK anyway to avoid getting back here */ + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return; + } + + /* Check if 2 frames were sent (TXOK only means that at least 1 + * frame was sent). + */ + if (frames_in_fifo > 1) { + WARN_ON(frames_in_fifo > priv->tx_max); + + /* Synchronize TXOK and isr so that after the loop: + * (1) isr variable is up-to-date at least up to TXOK clear + * time. This avoids us clearing a TXOK of a second frame + * but not noticing that the FIFO is now empty and thus + * marking only a single frame as sent. + * (2) No TXOK is left. Having one could mean leaving a + * stray TXOK as we might process the associated frame + * via TXFEMP handling as we read TXFEMP *after* TXOK + * clear to satisfy (1). + */ + while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + } - while ((priv->tx_head - priv->tx_tail > 0) && - (isr & XCAN_IXR_TXOK_MASK)) { + if (isr & XCAN_IXR_TXFEMP_MASK) { + /* nothing in FIFO anymore */ + frames_sent = frames_in_fifo; + } + } else { + /* single frame in fifo, just clear TXOK */ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + } + + while (frames_sent--) { can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max); priv->tx_tail++; stats->tx_packets++; - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } - can_led_event(ndev, CAN_LED_EVENT_TX); + netif_wake_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + can_led_event(ndev, CAN_LED_EVENT_TX); + xcan_update_error_state_after_rxtx(ndev); } /** @@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) struct net_device *ndev = (struct net_device *)dev_id; struct xcan_priv *priv = 
netdev_priv(ndev); u32 isr, ier; + u32 isr_errors; /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); @@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) xcan_tx_interrupt(ndev, isr); /* Check for the type of error interrupt and Processing it */ - if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { - priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | - XCAN_IXR_ARBLST_MASK)); + isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); + if (isr_errors) { + priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ - if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { + if (isr & XCAN_IXR_RXNEMP_MASK) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); + ier &= ~XCAN_IXR_RXNEMP_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } @@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) static void xcan_chip_stop(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 ier; /* Disable interrupts and leave the can in configuration mode */ - ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~XCAN_INTR_ALL; - priv->write_reg(priv, XCAN_IER_OFFSET, ier); - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); + set_reset_mode(ndev); priv->can.state = CAN_STATE_STOPPED; } @@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = { */ static int __maybe_unused xcan_suspend(struct device *dev) { - if (!device_may_wakeup(dev)) - return pm_runtime_force_suspend(dev); + struct net_device *ndev = dev_get_drvdata(dev); - return 0; + if (netif_running(ndev)) { + netif_stop_queue(ndev); + 
netif_device_detach(ndev); + xcan_chip_stop(ndev); + } + + return pm_runtime_force_suspend(dev); } /** @@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev) */ static int __maybe_unused xcan_resume(struct device *dev) { - if (!device_may_wakeup(dev)) - return pm_runtime_force_resume(dev); + struct net_device *ndev = dev_get_drvdata(dev); + int ret; - return 0; + ret = pm_runtime_force_resume(dev); + if (ret) { + dev_err(dev, "pm_runtime_force_resume failed on resume\n"); + return ret; + } + + if (netif_running(ndev)) { + ret = xcan_chip_start(ndev); + if (ret) { + dev_err(dev, "xcan_chip_start failed on resume\n"); + return ret; + } + + netif_device_attach(ndev); + netif_start_queue(ndev); + } + return 0; } /** @@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); - if (netif_running(ndev)) { - netif_stop_queue(ndev); - netif_device_detach(ndev); - } - - priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); - priv->can.state = CAN_STATE_SLEEPING; - clk_disable_unprepare(priv->bus_clk); clk_disable_unprepare(priv->can_clk); @@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); int ret; - u32 isr, status; ret = clk_prepare_enable(priv->bus_clk); if (ret) { @@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) return ret; } - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); - status = priv->read_reg(priv, XCAN_SR_OFFSET); - - if (netif_running(ndev)) { - if (isr & XCAN_IXR_BSOFF_MASK) { - priv->can.state = CAN_STATE_BUS_OFF; - priv->write_reg(priv, XCAN_SRR_OFFSET, - XCAN_SRR_RESET_MASK); - } else if ((status & XCAN_SR_ESTAT_MASK) == - XCAN_SR_ESTAT_MASK) { - priv->can.state = 
CAN_STATE_ERROR_PASSIVE; - } else if (status & XCAN_SR_ERRWRN_MASK) { - priv->can.state = CAN_STATE_ERROR_WARNING; - } else { - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - netif_device_attach(ndev); - netif_start_queue(ndev); - } - return 0; } @@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) }; +static const struct xcan_devtype_data xcan_zynq_data = { + .caps = XCAN_CAP_WATERMARK, +}; + +/* Match table for OF platform binding */ +static const struct of_device_id xcan_of_match[] = { + { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, + { .compatible = "xlnx,axi-can-1.00.a", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, xcan_of_match); + /** * xcan_probe - Platform registration call * @pdev: Handle to the platform device structure @@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev) struct resource *res; /* IO mem resources */ struct net_device *ndev; struct xcan_priv *priv; + const struct of_device_id *of_id; + int caps = 0; void __iomem *addr; - int ret, rx_max, tx_max; + int ret, rx_max, tx_max, tx_fifo_depth; /* Get the virtual base address for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev) goto err; } - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); + ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &tx_fifo_depth); if (ret < 0) goto err; @@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev) if (ret < 0) goto err; + of_id = of_match_device(xcan_of_match, &pdev->dev); + if (of_id) { + const struct xcan_devtype_data *devtype_data = of_id->data; + + if (devtype_data) + caps = devtype_data->caps; + } + + /* There is no way to directly figure out how many frames have been + * sent when the TXOK interrupt is processed. 
If watermark programming + * is supported, we can have 2 frames in the FIFO and use TXFEMP + * to determine if 1 or 2 frames have been sent. + * Theoretically we should be able to use TXFWMEMP to determine up + * to 3 frames, but it seems that after putting a second frame in the + * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less + * than 2 frames in FIFO) is set anyway with no TXOK (a frame was + * sent), which is not a sensible state - possibly TXFWMEMP is not + * completely synchronized with the rest of the bits? + */ + if (caps & XCAN_CAP_WATERMARK) + tx_max = min(tx_fifo_depth, 2); + else + tx_max = 1; + /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) @@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev) CAN_CTRLMODE_BERR_REPORTING; priv->reg_base = addr; priv->tx_max = tx_max; + spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ ndev->irq = platform_get_irq(pdev, 0); @@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, - priv->tx_max); + tx_fifo_depth, priv->tx_max); return 0; @@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev) return 0; } -/* Match table for OF platform binding */ -static const struct of_device_id xcan_of_match[] = { - { .compatible = "xlnx,zynq-can-1.0", }, - { .compatible = "xlnx,axi-can-1.00.a", }, - { /* end of list */ }, -}; -MODULE_DEVICE_TABLE(of, xcan_of_match); - static struct platform_driver xcan_driver = { .probe = xcan_probe, .remove = xcan_remove, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 5ada7a41449c46b2dd8161f806ef55f05cdf2fc9..9645c8f05c7fa1c570110f7ee7e2ce9c93823ba6 100644 --- a/drivers/net/dsa/qca8k.c +++ 
b/drivers/net/dsa/qca8k.c @@ -473,7 +473,7 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) static void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) { - u32 mask = QCA8K_PORT_STATUS_TXMAC; + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; /* Port 0 and 6 have no internal PHY */ if ((port > 0) && (port < 6)) @@ -490,6 +490,7 @@ qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int ret, i, phy_mode = -1; + u32 mask; /* Make sure that port 0 is the cpu port */ if (!dsa_is_cpu_port(ds, 0)) { @@ -515,7 +516,10 @@ qca8k_setup(struct dsa_switch *ds) if (ret < 0) return ret; - /* Enable CPU Port */ + /* Enable CPU Port, force it to maximum bandwidth and full-duplex */ + mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW | + QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX; + qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask); qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); @@ -584,6 +588,47 @@ qca8k_setup(struct dsa_switch *ds) return 0; } +static void +qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg; + + /* Force fixed-link setting for CPU port, skip others. 
*/ + if (!phy_is_pseudo_fixed_link(phy)) + return; + + /* Set port speed */ + switch (phy->speed) { + case 10: + reg = QCA8K_PORT_STATUS_SPEED_10; + break; + case 100: + reg = QCA8K_PORT_STATUS_SPEED_100; + break; + case 1000: + reg = QCA8K_PORT_STATUS_SPEED_1000; + break; + default: + dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n", + port, phy->speed); + return; + } + + /* Set duplex mode */ + if (phy->duplex == DUPLEX_FULL) + reg |= QCA8K_PORT_STATUS_DUPLEX; + + /* Force flow control */ + if (dsa_is_cpu_port(ds, port)) + reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW; + + /* Force link down before changing MAC options */ + qca8k_port_set_status(priv, port, 0); + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); + qca8k_port_set_status(priv, port, 1); +} + static int qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) { @@ -832,6 +877,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds) static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, + .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, .phy_read = qca8k_phy_read, .phy_write = qca8k_phy_write, @@ -863,6 +909,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) return -ENOMEM; priv->bus = mdiodev->bus; + priv->dev = &mdiodev->dev; /* read the switches ID register */ id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); @@ -934,6 +981,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, qca8k_suspend, qca8k_resume); static const struct of_device_id qca8k_of_match[] = { + { .compatible = "qca,qca8334" }, { .compatible = "qca,qca8337" }, { /* sentinel */ }, }; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1cf8a920d4ffc5ed84b8fd0948fa088b2b3ebf1b..613fe5c50236c50cfbc6659b5a7d895b21409ec9 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -51,8 +51,10 @@ #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) 
-#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0) -#define QCA8K_PORT_STATUS_SPEED_S 0 +#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) +#define QCA8K_PORT_STATUS_SPEED_10 0 +#define QCA8K_PORT_STATUS_SPEED_100 0x1 +#define QCA8K_PORT_STATUS_SPEED_1000 0x2 #define QCA8K_PORT_STATUS_TXMAC BIT(2) #define QCA8K_PORT_STATUS_RXMAC BIT(3) #define QCA8K_PORT_STATUS_TXFLOW BIT(4) @@ -165,6 +167,7 @@ struct qca8k_priv { struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; struct dsa_switch *ds; struct mutex reg_mutex; + struct device *dev; }; struct qca8k_mib_desc { diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 52beba8c7a39990d3a9d6fb5910c689905a3a6a9..e3b7a71fcad940bad61802a2c77157b9b965713d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -331,6 +331,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); + io_sq->dma_addr_bits = ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
sizeof(struct ena_eth_io_tx_desc) : diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 1b45cd73a258f05211bfc2ca6e69124c5347bda8..119777986ea48ab26f148b372dd9702613422092 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1128,14 +1128,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; - pdata->hw_if.config_tx_flow_control(pdata); pdata->tx_pause = pdata->phy.tx_pause; + pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; - pdata->hw_if.config_rx_flow_control(pdata); pdata->rx_pause = pdata->phy.rx_pause; + pdata->hw_if.config_rx_flow_control(pdata); } /* Speed support */ diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 567ee54504bcd6eba897009259f691b74b77609e..5e5022fa1d047be078be911bc4f6cd0631f04de7 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; + int err; alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; netif_device_attach(alx->dev); - return __alx_open(alx, true); + + rtnl_lock(); + err = __alx_open(alx, true); + rtnl_unlock(); + + return err; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 4f3845a581269e84738ed4f2f487d1dd755efc2b..68470c7c630a8e86bfbb9e2e97b54b0f60ad7a68 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1062,7 +1062,8 @@ static int bcm_enet_open(struct net_device *dev) val = enet_readl(priv, ENET_CTL_REG); val |= 
ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); - enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dmac_writel(priv, priv->dma_chan_en_mask, ENETDMAC_CHANCFG, priv->rx_chan); @@ -1773,7 +1774,9 @@ static int bcm_enet_probe(struct platform_device *pdev) ret = PTR_ERR(priv->mac_clk); goto out; } - clk_prepare_enable(priv->mac_clk); + ret = clk_prepare_enable(priv->mac_clk); + if (ret) + goto out_put_clk_mac; /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; @@ -1805,9 +1808,11 @@ static int bcm_enet_probe(struct platform_device *pdev) if (IS_ERR(priv->phy_clk)) { ret = PTR_ERR(priv->phy_clk); priv->phy_clk = NULL; - goto out_put_clk_mac; + goto out_disable_clk_mac; } - clk_prepare_enable(priv->phy_clk); + ret = clk_prepare_enable(priv->phy_clk); + if (ret) + goto out_put_clk_phy; } /* do minimal hardware init to be able to probe mii bus */ @@ -1901,13 +1906,16 @@ static int bcm_enet_probe(struct platform_device *pdev) out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); - if (priv->phy_clk) { + if (priv->phy_clk) clk_disable_unprepare(priv->phy_clk); + +out_put_clk_phy: + if (priv->phy_clk) clk_put(priv->phy_clk); - } -out_put_clk_mac: +out_disable_clk_mac: clk_disable_unprepare(priv->mac_clk); +out_put_clk_mac: clk_put(priv->mac_clk); out: free_netdev(dev); @@ -2752,7 +2760,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev) ret = PTR_ERR(priv->mac_clk); goto out_unmap; } - clk_enable(priv->mac_clk); + ret = clk_prepare_enable(priv->mac_clk); + if (ret) + goto out_put_clk; priv->rx_chan = 0; priv->tx_chan = 1; @@ -2773,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) ret = register_netdev(dev); if (ret) - goto out_put_clk; + goto out_disable_clk; netif_carrier_off(dev); platform_set_drvdata(pdev, dev); @@ -2782,6 +2792,9 @@ static int 
bcm_enetsw_probe(struct platform_device *pdev) return 0; +out_disable_clk: + clk_disable_unprepare(priv->mac_clk); + out_put_clk: clk_put(priv->mac_clk); @@ -2813,6 +2826,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); + clk_disable_unprepare(priv->mac_clk); + clk_put(priv->mac_clk); + free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 1e856e8b9a92dc57753944a1e142654617150ae9..0fff2432ab4cdff1983adf518acdb29ad84267fe 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1851,8 +1851,8 @@ static int bcm_sysport_open(struct net_device *dev) if (!priv->is_lite) priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); else - priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & - GIB_FCS_STRIP); + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index a2006f5fc26f8f6f1c213acebe4e834b6a71c57c..86ae751ccb5c16d268c05bbdeeafa35f7a2515f9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -277,7 +277,8 @@ struct bcm_rsb { #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) -#define GIB_FCS_STRIP (1 << 6) +#define GIB_FCS_STRIP_SHIFT 6 +#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT) #define GIB_LCL_LOOP_EN (1 << 7) #define GIB_LCL_LOOP_TXEN (1 << 8) #define GIB_RMT_LOOP_EN (1 << 9) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 
bfd2d0382f4cfca593e4de42b9852b278978d8c3..94931318587c141c81658c6088c2ce76f9654896 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5927,6 +5927,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } mutex_unlock(&bp->hwrm_cmd_lock); + if (!BNXT_SINGLE_PF(bp)) + return 0; + diff = link_info->support_auto_speeds ^ link_info->advertising; if ((link_info->support_auto_speeds | diff) != link_info->support_auto_speeds) { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 9a8ef630466f2f12d1053ed71f0f4b264ab08260..1b1d2a67f412c50819ab84900a30f3b285440f2f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -9279,6 +9279,15 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_restore_clk(tp); + /* Increase the core clock speed to fix tx timeout issue for 5762 + * with 100Mbps link speed. + */ + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | + TG3_CPMU_MAC_ORIDE_ENABLE); + } + /* Reprobe ASF enable state. 
*/ tg3_flag_clear(tp, ENABLE_ASF); tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 2220c771092b46e8fb583d46ea99d5829e1793d0..678835136bf8069326067feaa46f8465db4e38d4 100755 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) if (delta > TSU_NSEC_MAX_VAL) { gem_tsu_get_time(&bp->ptp_clock_info, &now); - if (sign) - now = timespec64_sub(now, then); - else - now = timespec64_add(now, then); + now = timespec64_add(now, then); gem_tsu_set_time(&bp->ptp_clock_info, (const struct timespec64 *)&now); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 6a015362c34066bf054aeb0f5f7168fbee190858..bf291e90cdb0f44fb56f0769182c409c27662ba6 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -51,6 +51,7 @@ #include #include #include +#include #include "common.h" #include "cxgb3_ioctl.h" @@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (t.qset_idx >= nqsets) return -EINVAL; + t.qset_idx = array_index_nospec(t.qset_idx, nqsets); q = &adapter->params.sge.qset[q1 + t.qset_idx]; t.rspq_size = q->rspq_size; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ff7a70ffafc65f22fd3ef51416b6487d74f37824..c133491ad9fa0ab3e5bbd3a5356ff64e8e856627 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1272,8 +1272,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) /* We need to alloc a vport for main NIC of PF */ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; - if (hdev->num_tqps < num_vport) - num_vport = 
hdev->num_tqps; + if (hdev->num_tqps < num_vport) { + dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", + hdev->num_tqps, num_vport); + return -EINVAL; + } /* Alloc the same number of TQPs for every vport */ tqp_per_vport = hdev->num_tqps / num_vport; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index d1e4dcec5db27a68617796e4aa5ab2c39a6d5f91..69726908e72c49e1cf532abce4e4cc4561718670 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1598,6 +1598,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) @@ -1605,6 +1606,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc_cb[i].reuse_flag = 0; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, @@ -2881,6 +2883,8 @@ static int __init hns3_init_module(void) client.ops = &client_ops; + INIT_LIST_HEAD(&client.node); + ret = hnae3_register_client(&client); if (ret) return ret; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7a226537877b29a877c86ebbb6a4d4699a44db2c..6265ce8915b66132f4e8aee12388491b517b8689 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3558,15 +3558,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) } break; case e1000_pch_spt: - if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { - /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHZ; - incvalue = 
INCVALUE_24MHZ; - shift = INCVALUE_SHIFT_24MHZ; - adapter->cc.shift = shift; - break; - } - return -EINVAL; + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + break; case e1000_pch_cnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99d47b5de90e4680c9af7aaba5bfbd..ef242dbae116b5afbb2f88bfe0aeab6cc82f65a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -337,6 +337,8 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) **/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { + struct sk_buff *skb; + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; @@ -349,9 +351,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) * within a second it is reasonable to assume that we never will. */ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { - dev_kfree_skb_any(pf->ptp_tx_skb); + skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + + /* Free the skb after we clear the bitlock */ + dev_kfree_skb_any(skb); pf->tx_hwtstamp_timeouts++; } } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6ca580cdfd843e792783daedb085835838358180..1c027f9d9af54cfca8faceca572fb10dc207ba76 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8376,12 +8376,17 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) if (is_valid_ether_addr(addr)) rar_high |= E1000_RAH_AV; - if (hw->mac.type == e1000_82575) + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; - else + break; + default: rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; + break; + } } 
wr32(E1000_RAL(index), rar_low); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 20a8018d41ef6b9875ca1003b6cc46e5dfecc7ee..b68d94b49a8a690416eb9e44db7301906ce07327 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2211,9 +2211,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED 1 -#define IXGBE_XDP_TX 2 +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_buff *xdp); @@ -2242,7 +2243,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); if (!err) - result = IXGBE_XDP_TX; + result = IXGBE_XDP_REDIR; else result = IXGBE_XDP_CONSUMED; break; @@ -2302,7 +2303,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; @@ -2342,8 +2343,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -IXGBE_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2415,7 +2418,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & IXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; /* Force memory writes to complete before 
letting h/w @@ -2423,8 +2429,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); - - xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 90ecc4b0646210bc9daba9ac15eb47e1b4574b32..90be4385bf368c446c020289008f7800de738f78 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3737,6 +3737,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) return -EPERM; ether_addr_copy(hw->mac.addr, addr->sa_data); + ether_addr_copy(hw->mac.perm_addr, addr->sa_data); ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d28f873169a9041129853cdd1e82f0f64878a669..3deaa341331370bf7e88e8e3583d7b4f0fd2b104 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1959,7 +1959,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); index = rx_desc - rxq->descs; data = rxq->buf_virt_addr[index]; - phys_addr = rx_desc->buf_phys_addr; + phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction; if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index b97a55c827ebc7eb8109a293bc86ee419d42563b..ab2a9dbb46c7fcd5743169b8b18be8e1a4fb0a6a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -472,10 +472,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, { const struct mlx4_en_frag_info *frag_info = priv->frag_info; unsigned int truesize = 0; + bool release = true; int nr, frag_size; struct page 
*page; dma_addr_t dma; - bool release; /* Collect used fragments while replacing them in the HW descriptors */ for (nr = 0;; frags++) { @@ -498,7 +498,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, release = page_count(page) != 1 || page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); - } else { + } else if (!priv->rx_headroom) { + /* rx_headroom for non XDP setup is always 0. + * When XDP is set, the above condition will + * guarantee page is always released. + */ u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); frags->page_offset += sz_align; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index a069fcc823c30f765d65d5b638b4449b2e41acd1..b26da0952a4dd6a13a60bff63797a6d88d1d98b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2957,7 +2957,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, u32 srqn = qp_get_srqn(qpc) & 0xffffff; int use_srq = (qp_get_srqn(qpc) >> 24) & 1; struct res_srq *srq; - int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; + int local_qpn = vhcr->in_modifier & 0xffffff; err = adjust_qp_sched_queue(dev, slave, qpc, inbox); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3efe45bc247127bfbf15567c5204c91d1c396ea3..cf94fdf25155f9882eb3d5d3395644a22259b982 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -801,6 +801,7 @@ static void cmd_work_handler(struct work_struct *work) unsigned long flags; bool poll_cmd = ent->polling; int alloc_ret; + int cmd_mode; sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); @@ -847,6 +848,7 @@ static void cmd_work_handler(struct work_struct *work) set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); + cmd_mode = cmd->mode; if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); @@ -871,7 +873,7 @@ static void cmd_work_handler(struct work_struct *work) iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point */ - if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { + if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); @@ -1272,7 +1274,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, { struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - char outlen_str[8]; + char outlen_str[8] = {0}; int outlen; void *ptr; int err; @@ -1287,8 +1289,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, if (copy_from_user(outlen_str, buf, count)) return -EFAULT; - outlen_str[7] = 0; - err = sscanf(outlen_str, "%d", &outlen); if (err < 0) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 12d3ced611145858c2ac67141b3d1179ad7f59f9..e87923e046c98cbc7b07971c707c40b6c02c7e41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) HLIST_HEAD(del_list); spin_lock_bh(&priv->fs.arfs.arfs_lock); mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { - if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) - break; if (!work_pending(&arfs_rule->arfs_work) && rps_may_expire_flow(priv->netdev, arfs_rule->rxq, arfs_rule->flow_id, arfs_rule->filter_id)) { hlist_del_init(&arfs_rule->hlist); 
hlist_add_head(&arfs_rule->hlist, &del_list); + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) + break; } } spin_unlock_bh(&priv->fs.arfs.arfs_lock); @@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, skb->protocol != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; + if (skb->encapsulation) + return -EPROTONOSUPPORT; + arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); if (!arfs_t) return -EPROTONOSUPPORT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 84dd63e740414e75d5aa5ac1ebf871c801718bda..27040009d87a67b71f26e77f6bdf7ceb4ae3cd9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -545,6 +545,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv, void mlx5e_timestamp_init(struct mlx5e_priv *priv) { struct mlx5e_tstamp *tstamp = &priv->tstamp; + u64 overflow_cycles; u64 ns; u64 frac = 0; u32 dev_freq; @@ -569,10 +570,17 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least once every wrap around. + * The period is calculated as the minimum between max HW cycles count + * (The clock source mask) and max amount of cycles that can be + * multiplied by clock multiplier where the result doesn't exceed + * 64bits. 
*/ - ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, + overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult); + overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1); + + ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles, frac, &frac); - do_div(ns, NSEC_PER_SEC / 2 / HZ); + do_div(ns, NSEC_PER_SEC / HZ); tstamp->overflow_period = ns; INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 337ce94237946b7bc34d51896585ac2cc15e386d..bf34264c734b5c1a0ac3210cfca401fe2737de29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2626,7 +2626,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2637,7 +2637,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -4127,7 +4127,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); #if IS_ENABLED(CONFIG_MLX5_ESWITCH) - if (MLX5_VPORT_MANAGER(mdev)) + if (MLX5_ESWITCH_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4273,7 +4273,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4300,7 +4300,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) 
queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4483,7 +4483,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; #ifdef CONFIG_MLX5_ESWITCH - if (MLX5_VPORT_MANAGER(mdev)) { + if (MLX5_ESWITCH_MANAGER(mdev)) { rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 4727e7390834f1e3cc77dcfd821867792df69159..281911698f72fbff76be3f2d98d6b69ddab3438e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -710,7 +710,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) return false; rep = rpriv->rep; @@ -724,8 +724,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_eswitch_rep *rep; + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) + return false; + + rep = rpriv->rep; if (rep && rep->vport != FDB_UPLINK_VPORT) return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 82e37250ed01c4c65a4e6ed5a4486027bc613137..f697084937c381113632317f263d0af6abef3eee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1535,7 +1535,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (!ESW_ALLOWED(esw)) return 0; - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + if 
(!MLX5_ESWITCH_MANAGER(esw->dev) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; @@ -1616,7 +1616,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; esw_info(dev, @@ -1689,7 +1689,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) + if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 565c8b7a399af7559c910faabbec4e8d99738bb6..10bf770675f32aa24c7b005f8b786cc288bf9120 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -39,6 +39,8 @@ #include #include "lib/mpfs.h" +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) + enum { SRIOV_NONE, SRIOV_LEGACY, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d9fd8570b07c8344bb1dc04934392e20f7628245..c699055c0ffdecde6ddaac99a10b2f6dbe1af4ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -912,8 +912,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; + if(!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 
33e5ff081e36ee14e58558bbd5e4e18ae1e4ea8f..dd05cf14884543db45abdf932645d685e4ee6714 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -36,6 +36,7 @@ #include "mlx5_core.h" #include "fs_core.h" #include "fs_cmd.h" +#include "eswitch.h" #include "diag/fs_tracepoint.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ @@ -2211,7 +2212,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { err = init_fdb_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 2c71557d1cee724c6b12b2033133060661dabbfc..d69897a1e2cedbe53abffc35cf6d245387b273d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -34,6 +34,7 @@ #include #include #include "mlx5_core.h" +#include "eswitch.h" #include "../../mlxfw/mlxfw.h" static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, @@ -152,13 +153,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) } if (MLX5_CAP_GEN(dev, vport_group_manager) && - MLX5_CAP_GEN(dev, eswitch_flow_table)) { + MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 7cb67122e8b5f04371651e1c1e2757acb281a36e..22811ecd8fcde1bc96a15087753ed09257a7c5ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -34,6 +34,7 @@ #include #include #include "mlx5_core.h" +#include 
"eswitch.h" #include "lib/mpfs.h" /* HW L2 Table (MPFS) management */ @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); struct mlx5_mpfs *mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) u32 index; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index e07061f565d6432d1c6c88b78468e038f20572cc..ccb6287aeeb74445db238f11d91cdde970ee771a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -641,7 +641,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; @@ -653,7 +653,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 
2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671..a0674962f02c4d2a35d05c98f84436967703101c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return err; } +enable_vfs_hca: for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) } out: - mlx5_eswitch_disable_sriov(dev->priv.eswitch); + if (MLX5_ESWITCH_MANAGER(dev)) + mlx5_eswitch_disable_sriov(dev->priv.eswitch); if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 42a6afcaae03ffc233af1852c986a3f78f097dd3..7924f241e3ad068ed68ef295e967384ac559a08d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -912,8 +912,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, int err; /* No need to continue if only VLAN flags were changed */ - if (mlxsw_sp_port_vlan->bridge_port) + if (mlxsw_sp_port_vlan->bridge_port) { + mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); return 0; + } err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); if (err) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 8f6ccc0c39e5e8682aa7ac677f576b4cdd970d6f..b306961b02fdf40aef89db7d5ed1e535a98a65a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -700,9 +700,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, - ARRAY_SIZE(p_local->local_chassis_id)); + sizeof(p_local->local_chassis_id)); memcpy(params->lldp_local.local_port_id, p_local->local_port_id, - ARRAY_SIZE(p_local->local_port_id)); + sizeof(p_local->local_port_id)); } static void @@ -714,9 +714,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, - ARRAY_SIZE(p_remote->peer_chassis_id)); + sizeof(p_remote->peer_chassis_id)); memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, - ARRAY_SIZE(p_remote->peer_port_id)); + sizeof(p_remote->peer_port_id)); } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 58a689fb04db6c494f9ba3b6452f53a0d0a6fb69..ef237469972676637d0e40657635be2ad88e07b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1782,7 +1782,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) DP_INFO(p_hwfn, "Failed to update driver state\n"); rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, - QED_OV_ESWITCH_VEB); + QED_OV_ESWITCH_NONE); if (rc) DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 27832885a87fdb6614d4d646d5ae25e45a253f8a..2c958921dfb36f876594b6b0f7f22d67628731fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -779,6 +779,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + if (is_kdump_kernel()) { + DP_INFO(cdev, + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", + cdev->int_params.in.min_msix_cnt); + cdev->int_params.in.num_vectors = + cdev->int_params.in.min_msix_cnt; + } + rc = qed_set_int_mode(cdev, false); if (rc) { DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 3f40b1de79570991bf76bfe42d1eb9f2720882b2..d08fe350ab6cd3f925ce2de8768c893b82796c93 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4396,6 +4396,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, static int qed_sriov_enable(struct qed_dev *cdev, int num) { struct qed_iov_vf_init_params params; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -4408,8 +4410,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { - struct qed_hwfn *hwfn = &cdev->hwfns[j]; - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + hwfn = &cdev->hwfns[j]; + ptt = qed_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, @@ -4445,6 +4447,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } + hwfn = QED_LEADING_HWFN(cdev); + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(cdev, "Failed to update eswitch mode\n"); + qed_ptt_release(hwfn, ptt); + return num; err: diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 9b2280badaf77666ceab5cf0409f484ed08719b8..475f6ae5d4b396341caa959cb7475f03b2752750 100644 --- 
a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; - if (!ptp) - return -EIO; + if (!ptp) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; + } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 30edf85628891d2ae684cc3e79ccedb9f59f7cf0..fb5bed4d159ecbae735a4952ed39c5a16832f881 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -496,7 +496,8 @@ EXPORT_SYMBOL(rmnet_get_qmi_pt); void *rmnet_get_qos_pt(struct net_device *dev) { if (dev) - return ((struct rmnet_priv *)netdev_priv(dev))->qos_info; + return rcu_dereference( + ((struct rmnet_priv *)netdev_priv(dev))->qos_info); return NULL; } @@ -520,14 +521,9 @@ struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id) struct rmnet_endpoint *ep; if (port) { - struct net_device *dev; - ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id); - if (ep) { - dev = ep->egress_dev; - - return dev; - } + if (ep) + return ep->egress_dev; } return NULL; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 2def5e5d3cd09c028e41cd36bdbfa983fcdc0681..3124e0765f440a227ed6036bb9d7de0772447358 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -107,7 +107,7 @@ struct rmnet_priv { struct rmnet_pcpu_stats __percpu *pcpu_stats; struct gro_cells gro_cells; struct rmnet_priv_stats stats; - void *qos_info; + void __rcu *qos_info; }; int rmnet_is_real_dev_registered(const struct 
net_device *real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 244dd1f41841499484cb0223e02b52df051770bd..dcb02284dc2815b4d034176c9bb6a4ef1330d099 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -300,6 +300,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) if (!skb) goto done; + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + dev = skb->dev; port = rmnet_get_port(dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index e965dbea2e7aa9f1982bfa7e5f8c3b256e474220..106c270ea933579561df4630ed4f1c79d9657d1b 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -16,6 +16,7 @@ #include #include +#include #include #include "rmnet_config.h" #include "rmnet_handlers.h" @@ -61,12 +62,19 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_priv *priv; + int ip_type; + u32 mark; + unsigned int len; priv = netdev_priv(dev); if (priv->real_dev) { + ip_type = (ip_hdr(skb)->version == 4) ? 
+ AF_INET : AF_INET6; + mark = skb->mark; + len = skb->len; trace_rmnet_xmit_skb(skb); - qmi_rmnet_burst_fc_check(dev, skb); rmnet_egress_handler(skb); + qmi_rmnet_burst_fc_check(dev, ip_type, mark, len); } else { this_cpu_inc(priv->pcpu_stats->stats.tx_drops); kfree_skb(skb); @@ -111,12 +119,15 @@ static int rmnet_vnd_init(struct net_device *dev) static void rmnet_vnd_uninit(struct net_device *dev) { struct rmnet_priv *priv = netdev_priv(dev); + void *qos; gro_cells_destroy(&priv->gro_cells); free_percpu(priv->pcpu_stats); - qmi_rmnet_qos_exit(dev); - priv->qos_info = NULL; + qos = priv->qos_info; + RCU_INIT_POINTER(priv->qos_info, NULL); + synchronize_rcu(); + qmi_rmnet_qos_exit(dev, qos); } static void rmnet_get_stats64(struct net_device *dev, @@ -150,6 +161,14 @@ static void rmnet_get_stats64(struct net_device *dev, s->tx_dropped = total_stats.tx_drops; } +static u16 rmnet_vnd_select_queue(struct net_device *dev, + struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + return 0; +} + static const struct net_device_ops rmnet_vnd_ops = { .ndo_start_xmit = rmnet_vnd_start_xmit, .ndo_change_mtu = rmnet_vnd_change_mtu, @@ -159,6 +178,7 @@ static const struct net_device_ops rmnet_vnd_ops = { .ndo_init = rmnet_vnd_init, .ndo_uninit = rmnet_vnd_uninit, .ndo_get_stats64 = rmnet_get_stats64, + .ndo_select_queue = rmnet_vnd_select_queue, }; static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9866d2e34cdd8165fce6ad75c859566496c272d5..1a9a382bf1c4b764ed32257061e5ab801be5d134 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -51,7 +51,7 @@ #include #include "dwmac1000.h" -#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ @@ -914,6 
+914,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) static int stmmac_init_phy(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_cnt = priv->plat->tx_queues_to_use; struct phy_device *phydev; char phy_id_fmt[MII_BUS_ID_SIZE + 3]; char bus_id[MII_BUS_ID_SIZE]; @@ -954,6 +955,15 @@ static int stmmac_init_phy(struct net_device *dev) phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); + /* + * Half-duplex mode not supported with multiqueue + * half-duplex can only works with single queue + */ + if (tx_cnt > 1) + phydev->supported &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + /* * Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 8d375e51a5265104d515f5622696dc2e0cae1ea2..6a393b16a1fcaf924473e7a56968d69e57f12f7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pcim_enable_device(pdev); + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev, static void stmmac_pci_remove(struct pci_dev *pdev) { stmmac_dvr_remove(&pdev->dev); + pci_disable_device(pdev); } -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); +static int stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + return 0; +} + +static int 
stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); /* synthetic ID, no official vendor */ #define PCI_VENDOR_ID_STMMAC 0x700 diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index fa607d062cb3130eff15295f61a8efce7c6a969c..15cd086e3f4718c2f2c6ceaabd9d6ac285b73736 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -59,8 +59,7 @@ #include #include "sungem.h" -/* Stripping FCS is causing problems, disabled for now */ -#undef STRIP_FCS +#define STRIP_FCS #define DEFAULT_MSG (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ @@ -434,7 +433,7 @@ static int gem_rxmac_reset(struct gem *gp) writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | @@ -759,7 +758,6 @@ static int gem_rx(struct gem *gp, int work_to_do) struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; - __sum16 csum; if (netif_msg_rx_status(gp)) printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", @@ -854,9 +852,13 @@ static int gem_rx(struct gem *gp, int work_to_do) skb = copy_skb; } - csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); - skb->csum = csum_unfold(csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (likely(dev->features & NETIF_F_RXCSUM)) { + __sum16 csum; + + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); + skb->csum = csum_unfold(csum); + skb->ip_summed = 
CHECKSUM_COMPLETE; + } skb->protocol = eth_type_trans(skb, gp->dev); napi_gro_receive(&gp->napi, skb); @@ -1760,7 +1762,7 @@ static void gem_init_dma(struct gem *gp) writel(0, gp->regs + TXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); @@ -2986,8 +2988,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); /* We can do scatter/gather and HW checksum */ - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; - dev->features |= dev->hw_features | NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + dev->features = dev->hw_features; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 18013645e76c8be4a460e50d7edd31abda29900f..0c1adad7415da7d9b858925d0ec5715e9ca7dfec 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) } dev = bus_find_device(&platform_bus_type, NULL, node, match); - of_node_put(node); + if (!dev) { + dev_err(dev, "unable to find platform device for %pOF\n", node); + goto out; + } + priv = dev_get_drvdata(dev); priv->cpsw_phy_sel(priv, phy_mode, slave); put_device(dev); +out: + of_node_put(node); } EXPORT_SYMBOL_GPL(cpsw_phy_sel); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index fbc825ac97ab3f9a91b3021aa8f8149fe9ed6249..cb51448389a17e035a8271738baa766107fe72a1 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -474,7 +474,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git 
a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 01017dd88802bb8829f7d4c29baf80692676e77a..e33a6c672a0a4357e565fa5e1f1b5d12e28d0088 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -207,7 +207,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -void rndis_set_subchannel(struct work_struct *w); +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -724,6 +724,8 @@ struct net_device_context { struct hv_device *device_ctx; /* netvsc_device */ struct netvsc_device __rcu *nvdev; + /* list of netvsc net_devices */ + struct list_head list; /* reconfigure work */ struct delayed_work dwork; /* last reconfig time */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4647ecbe6f36df2f9410c3474fe18717ed089eb2..806239b89990d31820d54faa98d8e6e892e53437 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -62,6 +62,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } +/* Worker to setup sub channels on initial setup + * Initial hotplug event occurs in softirq context + * and can't wait for channels. 
+ */ +static void netvsc_subchan_work(struct work_struct *w) +{ + struct netvsc_device *nvdev = + container_of(w, struct netvsc_device, subchan_work); + struct rndis_device *rdev; + int i, ret; + + /* Avoid deadlock with device removal already under RTNL */ + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (rdev) { + ret = rndis_set_subchannel(rdev->ndev, nvdev); + if (ret == 0) { + netif_device_attach(rdev->ndev); + } else { + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + rtnl_unlock(); +} + static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -78,7 +113,7 @@ static struct netvsc_device *alloc_net_device(void) init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); return net_device; } @@ -1215,6 +1250,7 @@ int netvsc_poll(struct napi_struct *napi, int budget) struct hv_device *device = netvsc_channel_to_device(channel); struct net_device *ndev = hv_get_drvdata(device); int work_done = 0; + int ret; /* If starting a new interval */ if (!nvchan->desc) @@ -1226,16 +1262,18 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If send of pending receive completions suceeded - * and did not exhaust NAPI budget this time - * and not doing busy poll + /* Send any pending receive completions */ + ret = send_recv_completions(ndev, net_device, nvchan); + + /* If it did not exhaust NAPI budget this time + * and not doing busy poll * then re-enable host interrupts - * and reschedule if ring is not empty. + * and reschedule if ring is not empty + * or sending receive completion failed. 
*/ - if (send_recv_completions(ndev, net_device, nvchan) == 0 && - work_done < budget && + if (work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound) && + (ret || hv_end_read(&channel->inbound)) && napi_schedule_prep(napi)) { hv_begin_read(&channel->inbound); __napi_schedule(napi); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 6890478a085167379d4f2bf2a8ac38a83982dd1a..6a77ef38c5495cc62b4c547c95f42a22051cdf2a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -66,6 +66,8 @@ static int debug = -1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +static LIST_HEAD(netvsc_dev_list); + static void netvsc_change_rx_flags(struct net_device *net, int change) { struct net_device_context *ndev_ctx = netdev_priv(net); @@ -911,8 +913,20 @@ static int netvsc_attach(struct net_device *ndev, if (IS_ERR(nvdev)) return PTR_ERR(nvdev); - /* Note: enable and attach happen when sub-channels setup */ + if (nvdev->num_chn > 1) { + ret = rndis_set_subchannel(ndev, nvdev); + + /* if unavailable, just proceed with one queue */ + if (ret) { + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + /* In any case device is now ready */ + netif_device_attach(ndev); + + /* Note: enable and attach happen when sub-channels setup */ netif_carrier_off(ndev); if (netif_running(ndev)) { @@ -1737,13 +1751,10 @@ static void netvsc_link_change(struct work_struct *w) static struct net_device *get_netvsc_bymac(const u8 *mac) { - struct net_device *dev; - - ASSERT_RTNL(); + struct net_device_context *ndev_ctx; - for_each_netdev(&init_net, dev) { - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); if (ether_addr_equal(mac, dev->perm_addr)) return dev; @@ -1754,25 +1765,18 @@ static struct net_device 
*get_netvsc_bymac(const u8 *mac) static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) { + struct net_device_context *net_device_ctx; struct net_device *dev; - ASSERT_RTNL(); + dev = netdev_master_upper_dev_get(vf_netdev); + if (!dev || dev->netdev_ops != &device_ops) + return NULL; /* not a netvsc device */ - for_each_netdev(&init_net, dev) { - struct net_device_context *net_device_ctx; + net_device_ctx = netdev_priv(dev); + if (!rtnl_dereference(net_device_ctx->nvdev)) + return NULL; /* device is removed */ - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ - - net_device_ctx = netdev_priv(dev); - if (!rtnl_dereference(net_device_ctx->nvdev)) - continue; /* device is removed */ - - if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) - return dev; /* a match */ - } - - return NULL; + return dev; } /* Called when VF is injecting data into network stack. @@ -2035,6 +2039,9 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + if (nvdev->num_chn > 1) + schedule_work(&nvdev->subchan_work); + /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | NETIF_F_HIGHDMA | NETIF_F_SG | @@ -2050,15 +2057,19 @@ static int netvsc_probe(struct hv_device *dev, else net->max_mtu = ETH_DATA_LEN; - ret = register_netdev(net); + rtnl_lock(); + ret = register_netdevice(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); goto register_failed; } - return ret; + list_add(&net_device_ctx->list, &netvsc_dev_list); + rtnl_unlock(); + return 0; register_failed: + rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: free_percpu(net_device_ctx->vf_stats); @@ -2104,6 +2115,7 @@ static int netvsc_remove(struct hv_device *dev) rndis_filter_device_remove(dev, nvdev); unregister_netdevice(net); + list_del(&ndev_ctx->list); rtnl_unlock(); rcu_read_unlock(); diff --git a/drivers/net/hyperv/rndis_filter.c 
b/drivers/net/hyperv/rndis_filter.c index d1ae184008b41866d642561c21b5ead9983c449b..cb03a6ea076a66c7ca856e912ed42a97741e05da 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1055,29 +1055,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ -void rndis_set_subchannel(struct work_struct *w) +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) { - struct netvsc_device *nvdev - = container_of(w, struct netvsc_device, subchan_work); struct nvsp_message *init_packet = &nvdev->channel_init_pkt; - struct net_device_context *ndev_ctx; - struct rndis_device *rdev; - struct net_device *ndev; - struct hv_device *hv_dev; + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hv_dev = ndev_ctx->device_ctx; + struct rndis_device *rdev = nvdev->extension; int i, ret; - if (!rtnl_trylock()) { - schedule_work(w); - return; - } - - rdev = nvdev->extension; - if (!rdev) - goto unlock; /* device was removed */ - - ndev = rdev->ndev; - ndev_ctx = netdev_priv(ndev); - hv_dev = ndev_ctx->device_ctx; + ASSERT_RTNL(); memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; @@ -1091,13 +1077,13 @@ void rndis_set_subchannel(struct work_struct *w) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); - goto failed; + return ret; } wait_for_completion(&nvdev->channel_init_wait); if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "sub channel request failed\n"); - goto failed; + return -EIO; } nvdev->num_chn = 1 + @@ -1116,21 +1102,7 @@ void rndis_set_subchannel(struct work_struct *w) for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) ndev_ctx->tx_table[i] = i % nvdev->num_chn; - netif_device_attach(ndev); - rtnl_unlock(); - 
return; - -failed: - /* fallback to only primary channel */ - for (i = 1; i < nvdev->num_chn; i++) - netif_napi_del(&nvdev->chan_table[i].napi); - - nvdev->max_chn = 1; - nvdev->num_chn = 1; - - netif_device_attach(ndev); -unlock: - rtnl_unlock(); + return 0; } static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, @@ -1321,21 +1293,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, netif_napi_add(net, &net_device->chan_table[i].napi, netvsc_poll, NAPI_POLL_WEIGHT); - if (net_device->num_chn > 1) - schedule_work(&net_device->subchan_work); + return net_device; out: - /* if unavailable, just proceed with one queue */ - if (ret) { - net_device->max_chn = 1; - net_device->num_chn = 1; - } - - /* No sub channels, device is ready */ - if (net_device->num_chn == 1) - netif_device_attach(net); - - return net_device; + /* setting up multiple channels failed */ + net_device->max_chn = 1; + net_device->num_chn = 1; err_dev_remv: rndis_filter_device_remove(dev, net_device); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index c74893c1e6200849fcd6293e19a7813e21a6ebae..e7f7a1a002ee05bace0a05aab920eb3537a1d40b 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -546,7 +546,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->dev = dev; ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; - ipvlan_adjust_mtu(ipvlan, phy_dev); + if (!tb[IFLA_MTU]) + ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); /* If the port-id base is at the MAX value, then wrap it around and diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0831b7142df7a334b889296688cc3218d8efa2a1..0c5b68e7da51aa8d0c7c73e7c6b3cc632b1e3510 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -218,7 +218,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) static int 
mdio_mux_iproc_remove(struct platform_device *pdev) { - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index dba6d17ad885e5e66076c4741e357f482bfbe8ff..47d2ef2fb9b33102b9b96805b54fa1743ec2aaf1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -511,7 +511,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) * negotiation may already be done and aneg interrupt may not be * generated. */ - if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { + if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { err = phy_aneg_done(phydev); if (err > 0) { trigger = true; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index a1e7ea4d4b16ed3a87a2fb8507918f1b6e640b55..a174d05a975226c22ff50b5085bc7689083ebfa6 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1686,11 +1686,8 @@ EXPORT_SYMBOL(genphy_loopback); static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - /* The default values for phydev->supported are provided by the PHY - * driver "features" member, we want to reset to sane defaults first - * before supporting higher speeds. 
- */ - phydev->supported &= PHY_DEFAULT_FEATURES; + phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | + PHY_10BT_FEATURES); switch (max_speed) { default: diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 1b2fe74a44eab81b0c2b690745cbbaa8df53e645..e4a6ed88b9cf0253efdfd98a53aa7d9ec799f94d 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -561,6 +561,8 @@ void phylink_destroy(struct phylink *pl) { if (pl->sfp_bus) sfp_unregister_upstream(pl->sfp_bus); + if (!IS_ERR(pl->link_gpio)) + gpiod_put(pl->link_gpio); cancel_work_sync(&pl->resolve); kfree(pl); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 3d4f7959dabb9c39e17754df4f72013c89743d5a..b1b3d8f7e67dd052eae618e33698c633751df60a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev) priv->presvd_phy_advertise); /* Restore BMCR */ + if (priv->presvd_phy_bmcr & BMCR_ANENABLE) + priv->presvd_phy_bmcr |= BMCR_ANRESTART; + asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, priv->presvd_phy_bmcr); - mii_nway_restart(&dev->mii); priv->presvd_phy_advertise = 0; priv->presvd_phy_bmcr = 0; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 9881edc568ba7803a967ae2608b93d36e27da979..9e3f632e22f1411dda03da5284e54b1de77a0836 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1216,6 +1216,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } + + tasklet_schedule(&dev->bh); } return ret; @@ -3197,6 +3199,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) pkt_cnt = 0; count = 0; length = 0; + spin_lock_irqsave(&tqp->lock, flags); for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { if (skb_is_gso(skb)) { if (pkt_cnt) { @@ -3205,7 +3208,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) } count = 1; 
length = skb->len - TX_OVERHEAD; - skb2 = skb_dequeue(tqp); + __skb_unlink(skb, tqp); + spin_unlock_irqrestore(&tqp->lock, flags); goto gso_skb; } @@ -3214,6 +3218,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); pkt_cnt++; } + spin_unlock_irqrestore(&tqp->lock, flags); /* copy to a single skb */ skb = alloc_skb(skb_totallen, GFP_ATOMIC); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b23ee948e7c955dc3c5a64302bf7940a8c6d8be3..6d3811c869fdddeadd3110f320b2ee5cb2c18f96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1245,12 +1245,14 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index aa88b640cb6c22c7e897f1f309dbc31d37c13769..0fa64cc1a01182d0592b13997d2a3e4cae24f018 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3959,7 +3959,8 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif - 
napi_disable(&tp->napi); + if (!test_bit(RTL8152_UNPLUG, &tp->flags)) + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3d9c5b35a4a753e4052fd41686f5332bcbc43ed9..13d39a72fe0d0c48feb054c0140b827aa0041f24 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, flush = 0; out: - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } @@ -638,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -/* Add new entry to forwarding table -- assumes lock held */ +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, + const u8 *mac, __u16 state, + __be32 src_vni, __u8 ndm_flags) +{ + struct vxlan_fdb *f; + + f = kmalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + f->state = state; + f->flags = ndm_flags; + f->updated = f->used = jiffies; + f->vni = src_vni; + INIT_LIST_HEAD(&f->remotes); + memcpy(f->eth_addr, mac, ETH_ALEN); + + return f; +} + static int vxlan_fdb_create(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u8 ndm_flags, + struct vxlan_fdb **fdb) +{ + struct vxlan_rdst *rd = NULL; + struct vxlan_fdb *f; + int rc; + + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) + return -ENOSPC; + + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); + if (!f) + return -ENOMEM; + + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) { + kfree(f); + return rc; + } + + ++vxlan->addrcnt; + hlist_add_head_rcu(&f->hlist, + vxlan_fdb_head(vxlan, 
mac, src_vni)); + + *fdb = f; + + return 0; +} + +/* Add new entry to forwarding table -- assumes lock held */ +static int vxlan_fdb_update(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, @@ -689,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->cfg.addrmax && - vxlan->addrcnt >= vxlan->cfg.addrmax) - return -ENOSPC; - /* Disallow replace to add a multicast entry */ if ((flags & NLM_F_REPLACE) && (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) return -EOPNOTSUPP; netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - f = kmalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return -ENOMEM; - - notify = 1; - f->state = state; - f->flags = ndm_flags; - f->updated = f->used = jiffies; - f->vni = src_vni; - INIT_LIST_HEAD(&f->remotes); - memcpy(f->eth_addr, mac, ETH_ALEN); - - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - if (rc < 0) { - kfree(f); + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, + vni, ifindex, ndm_flags, &f); + if (rc < 0) return rc; - } - - ++vxlan->addrcnt; - hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac, src_vni)); + notify = 1; } if (notify) { @@ -743,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head) kfree(f); } -static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, + bool do_notify) { netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); + if (do_notify) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -865,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EAFNOSUPPORT; spin_lock_bh(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, addr, &ip, 
ndm->ndm_state, flags, + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); @@ -899,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, goto out; } - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); out: return 0; @@ -1008,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev, /* close off race between vxlan_flush and incoming packets */ if (netif_running(dev)) - vxlan_fdb_create(vxlan, src_mac, src_ip, + vxlan_fdb_update(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, @@ -2362,7 +2395,7 @@ static void vxlan_cleanup(unsigned long arg) "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } else if (time_before(timeout, next_timer)) next_timer = timeout; } @@ -2413,7 +2446,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); spin_unlock_bh(&vxlan->hash_lock); } @@ -2467,7 +2500,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } } spin_unlock_bh(&vxlan->hash_lock); @@ -3159,6 +3192,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f = NULL; int err; err = vxlan_dev_configure(net, dev, conf, false, extack); @@ -3172,24 +3206,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, err = vxlan_fdb_create(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_EXCL 
| NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) return err; } err = register_netdevice(dev); + if (err) + goto errout; + + err = rtnl_configure_link(dev, NULL); if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); - return err; + unregister_netdevice(dev); + goto errout; } + /* notify default fdb entry */ + if (f) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); + list_add(&vxlan->next, &vn->vxlan_list); return 0; +errout: + if (f) + vxlan_fdb_destroy(vxlan, f, false); + return err; } static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], @@ -3418,6 +3463,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct vxlan_rdst *dst = &vxlan->default_dst; struct vxlan_rdst old_dst; struct vxlan_config conf; + struct vxlan_fdb *f = NULL; int err; err = vxlan_nl2conf(tb, data, @@ -3446,16 +3492,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], err = vxlan_fdb_create(vxlan, all_zeros_mac, &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_CREATE | NLM_F_APPEND, vxlan->cfg.dst_port, dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; } + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); } spin_unlock_bh(&vxlan->hash_lock); } diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 5d80be213fac0ec9c05d01bc49c6bfd190e68a16..869f276cc1d8d3446b1589cb6546b597a152bd13 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -68,12 +68,14 @@ enum CountryCode { CTRY_AUSTRALIA = 36, CTRY_AUSTRIA = 40, CTRY_AZERBAIJAN = 31, + CTRY_BAHAMAS = 44, CTRY_BAHRAIN = 48, CTRY_BANGLADESH = 50, CTRY_BARBADOS = 52, CTRY_BELARUS = 112, CTRY_BELGIUM = 56, CTRY_BELIZE = 84, + CTRY_BERMUDA = 60, 
CTRY_BOLIVIA = 68, CTRY_BOSNIA_HERZ = 70, CTRY_BRAZIL = 76, @@ -159,6 +161,7 @@ enum CountryCode { CTRY_ROMANIA = 642, CTRY_RUSSIA = 643, CTRY_SAUDI_ARABIA = 682, + CTRY_SERBIA = 688, CTRY_SERBIA_MONTENEGRO = 891, CTRY_SINGAPORE = 702, CTRY_SLOVAKIA = 703, @@ -170,11 +173,13 @@ enum CountryCode { CTRY_SWITZERLAND = 756, CTRY_SYRIA = 760, CTRY_TAIWAN = 158, + CTRY_TANZANIA = 834, CTRY_THAILAND = 764, CTRY_TRINIDAD_Y_TOBAGO = 780, CTRY_TUNISIA = 788, CTRY_TURKEY = 792, CTRY_UAE = 784, + CTRY_UGANDA = 800, CTRY_UKRAINE = 804, CTRY_UNITED_KINGDOM = 826, CTRY_UNITED_STATES = 840, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index bdd2b4d61f2f0f797dc40f9fe49b5d549ed6c129..15bbd1e0d912f5c799661b5adbb6c48e99da2819 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -35,6 +35,7 @@ enum EnumRd { FRANCE_RES = 0x31, FCC3_FCCA = 0x3A, FCC3_WORLD = 0x3B, + FCC3_ETSIC = 0x3F, ETSI1_WORLD = 0x37, ETSI3_ETSIA = 0x32, @@ -44,6 +45,7 @@ enum EnumRd { ETSI4_ETSIC = 0x38, ETSI5_WORLD = 0x39, ETSI6_WORLD = 0x34, + ETSI8_WORLD = 0x3D, ETSI_RESERVED = 0x33, MKK1_MKKA = 0x40, @@ -59,6 +61,7 @@ enum EnumRd { MKK1_MKKA1 = 0x4A, MKK1_MKKA2 = 0x4B, MKK1_MKKC = 0x4C, + APL2_FCCA = 0x4D, APL3_FCCA = 0x50, APL1_WORLD = 0x52, @@ -67,6 +70,7 @@ enum EnumRd { APL1_ETSIC = 0x55, APL2_ETSIC = 0x56, APL5_WORLD = 0x58, + APL13_WORLD = 0x5A, APL6_WORLD = 0x5B, APL7_FCCA = 0x5C, APL8_WORLD = 0x5D, @@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, {FCC3_FCCA, CTL_FCC, CTL_FCC}, {FCC3_WORLD, CTL_FCC, CTL_ETSI}, + {FCC3_ETSIC, CTL_FCC, CTL_ETSI}, {FCC4_FCCA, CTL_FCC, CTL_FCC}, {FCC5_FCCA, CTL_FCC, CTL_FCC}, {FCC6_FCCA, CTL_FCC, CTL_FCC}, @@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, /* 
XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, @@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC1_FCCA, CTL_FCC, CTL_FCC}, {APL1_WORLD, CTL_FCC, CTL_ETSI}, {APL2_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_FCCA, CTL_FCC, CTL_FCC}, {APL3_WORLD, CTL_FCC, CTL_ETSI}, {APL4_WORLD, CTL_FCC, CTL_ETSI}, {APL5_WORLD, CTL_FCC, CTL_ETSI}, + {APL13_WORLD, CTL_ETSI, CTL_ETSI}, {APL6_WORLD, CTL_ETSI, CTL_ETSI}, {APL8_WORLD, CTL_ETSI, CTL_ETSI}, {APL9_WORLD, CTL_ETSI, CTL_ETSI}, @@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, {CTRY_BAHRAIN, APL6_WORLD, "BH"}, {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, {CTRY_BARBADOS, FCC2_WORLD, "BB"}, @@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, {CTRY_BELIZE, APL1_ETSIC, "BZ"}, + {CTRY_BERMUDA, FCC3_FCCA, "BM"}, {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, {CTRY_BRAZIL, FCC3_WORLD, "BR"}, @@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_ROMANIA, NULL1_WORLD, "RO"}, {CTRY_RUSSIA, NULL1_WORLD, "RU"}, {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, + {CTRY_SERBIA, ETSI1_WORLD, "RS"}, {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, {CTRY_SINGAPORE, APL6_WORLD, "SG"}, {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, @@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, {CTRY_SYRIA, NULL1_WORLD, "SY"}, {CTRY_TAIWAN, APL3_FCCA, "TW"}, + {CTRY_TANZANIA, APL1_WORLD, "TZ"}, {CTRY_THAILAND, FCC3_WORLD, "TH"}, {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, {CTRY_TURKEY, ETSI3_WORLD, "TR"}, + {CTRY_UGANDA, FCC3_WORLD, "UG"}, {CTRY_UKRAINE, NULL1_WORLD, "UA"}, {CTRY_UAE, NULL1_WORLD, 
"AE"}, {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index cc8bfbc4b76a1e15624325f0f7d351a0cddbe11d..63d8a99e404cc33372ee5c54abe3a483bec13e70 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -393,7 +393,9 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4) | BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) | - BIT(IEEE80211_STYPE_DISASSOC >> 4), + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_REASSOC_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | @@ -1145,6 +1147,26 @@ static void wil_print_crypto(struct wil6210_priv *wil, c->control_port_no_encrypt); } +static const char * +wil_get_auth_type_name(enum nl80211_auth_type auth_type) +{ + switch (auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + return "OPEN_SYSTEM"; + case NL80211_AUTHTYPE_SHARED_KEY: + return "SHARED_KEY"; + case NL80211_AUTHTYPE_FT: + return "FT"; + case NL80211_AUTHTYPE_NETWORK_EAP: + return "NETWORK_EAP"; + case NL80211_AUTHTYPE_SAE: + return "SAE"; + case NL80211_AUTHTYPE_AUTOMATIC: + return "AUTOMATIC"; + default: + return "unknown"; + } +} static void wil_print_connect_params(struct wil6210_priv *wil, struct cfg80211_connect_params *sme) { @@ -1158,11 +1180,82 @@ static void wil_print_connect_params(struct wil6210_priv *wil, if (sme->ssid) print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 16, 1, sme->ssid, sme->ssid_len, true); + if (sme->prev_bssid) + wil_info(wil, " Previous BSSID=%pM\n", sme->prev_bssid); + wil_info(wil, " Auth Type: %s\n", + wil_get_auth_type_name(sme->auth_type)); wil_info(wil, " Privacy: %s\n", sme->privacy ? 
"secure" : "open"); wil_info(wil, " PBSS: %d\n", sme->pbss); wil_print_crypto(wil, &sme->crypto); } +static int wil_ft_connect(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); + struct wmi_ft_auth_cmd auth_cmd; + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) { + wil_err(wil, "FT: FW does not support FT roaming\n"); + return -EOPNOTSUPP; + } + + if (!sme->prev_bssid) { + wil_err(wil, "FT: prev_bssid was not set\n"); + return -EINVAL; + } + + if (ether_addr_equal(sme->prev_bssid, sme->bssid)) { + wil_err(wil, "FT: can not roam to same AP\n"); + return -EINVAL; + } + + if (!test_bit(wil_vif_fwconnected, vif->status)) { + wil_err(wil, "FT: roam while not connected\n"); + return -EINVAL; + } + + if (vif->privacy != sme->privacy) { + wil_err(wil, "FT: privacy mismatch, current (%d) roam (%d)\n", + vif->privacy, sme->privacy); + return -EINVAL; + } + + if (sme->pbss) { + wil_err(wil, "FT: roam is not valid for PBSS\n"); + return -EINVAL; + } + + memset(&auth_cmd, 0, sizeof(auth_cmd)); + auth_cmd.channel = sme->channel->hw_value - 1; + ether_addr_copy(auth_cmd.bssid, sme->bssid); + + if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) + if (wil->force_edmg_channel) { + rc = wil_spec2wmi_ch(wil->force_edmg_channel, + &auth_cmd.channel); + if (rc) + wil_err(wil, "FT: wmi channel for channel %d not found", + wil->force_edmg_channel); + } + + wil_info(wil, "FT: roaming\n"); + + set_bit(wil_vif_ft_roam, vif->status); + rc = wmi_send(wil, WMI_FT_AUTH_CMDID, vif->mid, + &auth_cmd, sizeof(auth_cmd)); + if (rc == 0) + mod_timer(&vif->connect_timer, + jiffies + msecs_to_jiffies(5000)); + else + clear_bit(wil_vif_ft_roam, vif->status); + + return rc; +} + static int wil_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) @@ -1175,14 +1268,23 @@ 
static int wil_cfg80211_connect(struct wiphy *wiphy, const u8 *rsn_eid; int ch; int rc = 0; + bool is_ft_roam = false; + u8 network_type; enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid); wil_print_connect_params(wil, sme); - if (test_bit(wil_vif_fwconnecting, vif->status) || + if (sme->auth_type == NL80211_AUTHTYPE_FT) + is_ft_roam = true; + if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC && test_bit(wil_vif_fwconnected, vif->status)) - return -EALREADY; + is_ft_roam = true; + + if (!is_ft_roam) + if (test_bit(wil_vif_fwconnecting, vif->status) || + test_bit(wil_vif_fwconnected, vif->status)) + return -EALREADY; if (sme->ie_len > WMI_MAX_IE_LEN) { wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len); @@ -1192,8 +1294,13 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rsn_eid = sme->ie ? cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : NULL; - if (sme->privacy && !rsn_eid) + if (sme->privacy && !rsn_eid) { wil_info(wil, "WSC connection\n"); + if (is_ft_roam) { + wil_err(wil, "No WSC with FT roam\n"); + return -EINVAL; + } + } if (sme->pbss) bss_type = IEEE80211_BSS_TYPE_PBSS; @@ -1215,6 +1322,45 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, vif->privacy = sme->privacy; vif->pbss = sme->pbss; + rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); + if (rc) + goto out; + + switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) { + case WLAN_CAPABILITY_DMG_TYPE_AP: + network_type = WMI_NETTYPE_INFRA; + break; + case WLAN_CAPABILITY_DMG_TYPE_PBSS: + network_type = WMI_NETTYPE_P2P; + break; + default: + wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n", + bss->capability); + rc = -EINVAL; + goto out; + } + + ch = bss->channel->hw_value; + if (ch == 0) { + wil_err(wil, "BSS at unknown frequency %dMhz\n", + bss->channel->center_freq); + rc = -EOPNOTSUPP; + goto out; + } + + if (is_ft_roam) { + if (network_type != WMI_NETTYPE_INFRA) { + wil_err(wil, "FT: Unsupported 
BSS type, capability= 0x%04x\n", + bss->capability); + rc = -EINVAL; + goto out; + } + rc = wil_ft_connect(wiphy, ndev, sme); + if (rc == 0) + vif->bss = bss; + goto out; + } + if (vif->privacy) { /* For secure assoc, remove old keys */ rc = wmi_del_cipher_key(vif, 0, bss->bssid, @@ -1231,28 +1377,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, } } - /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info - * elements. Send it also in case it's empty, to erase previously set - * ies in FW. - */ - rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); - if (rc) - goto out; - /* WMI_CONNECT_CMD */ memset(&conn, 0, sizeof(conn)); - switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) { - case WLAN_CAPABILITY_DMG_TYPE_AP: - conn.network_type = WMI_NETTYPE_INFRA; - break; - case WLAN_CAPABILITY_DMG_TYPE_PBSS: - conn.network_type = WMI_NETTYPE_P2P; - break; - default: - wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n", - bss->capability); - goto out; - } + conn.network_type = network_type; if (vif->privacy) { if (rsn_eid) { /* regular secure connection */ conn.dot11_auth_mode = WMI_AUTH11_SHARED; @@ -1272,14 +1399,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, conn.ssid_len = min_t(u8, ssid_eid[1], 32); memcpy(conn.ssid, ssid_eid+2, conn.ssid_len); - - ch = bss->channel->hw_value; - if (ch == 0) { - wil_err(wil, "BSS at unknown frequency %dMhz\n", - bss->channel->center_freq); - rc = -EOPNOTSUPP; - goto out; - } conn.channel = ch - 1; if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) @@ -1485,9 +1604,9 @@ wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid, return &wil->sta[cid]; } -static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, - struct wil_sta_info *cs, - struct key_params *params) +void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, + struct wil_sta_info *cs, + struct key_params *params) { struct wil_tid_crypto_rx_single *cc; int tid; @@ 
-1570,13 +1689,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, params->seq_len, params->seq); if (IS_ERR(cs)) { - wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n", - mac_addr, key_usage_str[key_usage], key_index, - params->seq_len, params->seq); - return -EINVAL; + /* in FT, sta info may not be available as add_key may be + * sent by host before FW sends WMI_CONNECT_EVENT + */ + if (!test_bit(wil_vif_ft_roam, vif->status)) { + wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n", + mac_addr, key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + return -EINVAL; + } } - wil_del_rx_key(key_index, key_usage, cs); + if (!IS_ERR(cs)) + wil_del_rx_key(key_index, key_usage, cs); if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { wil_err(wil, @@ -1589,7 +1714,10 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len, params->key, key_usage); - if (!rc) + if (!rc && !IS_ERR(cs)) + /* in FT set crypto will take place upon receiving + * WMI_RING_EN_EVENTID event + */ wil_set_crypto_rx(key_index, key_usage, cs, params); return rc; @@ -1752,21 +1880,36 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b) } /* internal functions for device reset and starting AP */ -static int _wil_cfg80211_set_ies(struct wil6210_vif *vif, - struct cfg80211_beacon_data *bcon) +static u8 * +_wil_cfg80211_get_proberesp_ies(const u8 *proberesp, u16 proberesp_len, + u16 *ies_len) { - int rc; - u16 len = 0, proberesp_len = 0; - u8 *ies = NULL, *proberesp = NULL; + u8 *ies = NULL; - if (bcon->probe_resp) { + if (proberesp) { struct ieee80211_mgmt *f = - (struct ieee80211_mgmt *)bcon->probe_resp; + (struct ieee80211_mgmt *)proberesp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - proberesp = f->u.probe_resp.variable; - proberesp_len = bcon->probe_resp_len - hlen; + + ies = f->u.probe_resp.variable; + if (ies_len) + *ies_len = proberesp_len - hlen; } + + 
return ies; +} + +static int _wil_cfg80211_set_ies(struct wil6210_vif *vif, + struct cfg80211_beacon_data *bcon) +{ + int rc; + u16 len = 0, proberesp_len = 0; + u8 *ies = NULL, *proberesp; + + proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp, + bcon->probe_resp_len, + &proberesp_len); rc = _wil_cfg80211_merge_extra_ies(proberesp, proberesp_len, bcon->proberesp_ies, @@ -1810,6 +1953,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO); + u16 proberesp_len = 0; + u8 *proberesp; + bool ft = false; if (pbss) wmi_nettype = WMI_NETTYPE_P2P; @@ -1822,6 +1968,25 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, wil_set_recovery_state(wil, fw_recovery_idle); + proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp, + bcon->probe_resp_len, + &proberesp_len); + /* check that the probe response IEs has a MDE */ + if ((proberesp && proberesp_len > 0 && + cfg80211_find_ie(WLAN_EID_MOBILITY_DOMAIN, + proberesp, + proberesp_len))) + ft = true; + + if (ft) { + if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, + wil->fw_capabilities)) { + wil_err(wil, "FW does not support FT roaming\n"); + return -ENOTSUPP; + } + set_bit(wil_vif_ft_roam, vif->status); + } + mutex_lock(&wil->mutex); if (!wil_has_other_active_ifaces(wil, ndev, true, false)) { @@ -1983,6 +2148,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, mutex_lock(&wil->mutex); wmi_pcp_stop(vif); + clear_bit(wil_vif_ft_roam, vif->status); if (last) __wil_down(wil); @@ -2002,8 +2168,9 @@ static int wil_cfg80211_add_station(struct wiphy *wiphy, struct wil6210_vif *vif = ndev_to_vif(dev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "add station %pM aid %d mid %d\n", - mac, params->aid, vif->mid); + wil_dbg_misc(wil, "add station %pM aid %d mid %d mask 0x%x set 0x%x\n", + mac, params->aid, vif->mid, + params->sta_flags_mask, 
params->sta_flags_set); if (!disable_ap_sme) { wil_err(wil, "not supported with AP SME enabled\n"); @@ -2383,6 +2550,54 @@ static void wil_cfg80211_reg_notify(struct wiphy *wiphy, memcpy(wil->regdomain, request->alpha2, 2); } +static int +wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_update_ft_ies_params *ftie) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(dev); + struct cfg80211_bss *bss; + struct wmi_ft_reassoc_cmd reassoc; + int rc = 0; + + wil_dbg_misc(wil, "update ft ies, mid=%d\n", vif->mid); + wil_hex_dump_misc("FT IE ", DUMP_PREFIX_OFFSET, 16, 1, + ftie->ie, ftie->ie_len, true); + + if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) { + wil_err(wil, "FW does not support FT roaming\n"); + return -EOPNOTSUPP; + } + + rc = wmi_update_ft_ies(vif, ftie->ie_len, ftie->ie); + if (rc) + return rc; + + if (!test_bit(wil_vif_ft_roam, vif->status)) + /* vif is not roaming */ + return 0; + + /* wil_vif_ft_roam is set. 
wil_cfg80211_update_ft_ies is used as + * a trigger for reassoc + */ + + bss = vif->bss; + if (!bss) { + wil_err(wil, "FT: bss is NULL\n"); + return -EINVAL; + } + + memset(&reassoc, 0, sizeof(reassoc)); + ether_addr_copy(reassoc.bssid, bss->bssid); + + rc = wmi_send(wil, WMI_FT_REASSOC_CMDID, vif->mid, + &reassoc, sizeof(reassoc)); + if (rc) + wil_err(wil, "FT: reassoc failed (%d)\n", rc); + + return rc; +} + static const struct cfg80211_ops wil_cfg80211_ops = { .add_virtual_intf = wil_cfg80211_add_iface, .del_virtual_intf = wil_cfg80211_del_iface, @@ -2418,6 +2633,7 @@ static const struct cfg80211_ops wil_cfg80211_ops = { .resume = wil_cfg80211_resume, .sched_scan_start = wil_cfg80211_sched_scan_start, .sched_scan_stop = wil_cfg80211_sched_scan_stop, + .update_ft_ies = wil_cfg80211_update_ft_ies, }; static void wil_wiphy_init(struct wiphy *wiphy) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index bb7a7fdfa75506a7ca95808117b03215fec134bf..852ee04aab8e8e2021098febe5808782695ddf92 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -726,32 +726,6 @@ struct dentry *wil_debugfs_create_ioblob(const char *name, return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob); } -/*---reset---*/ -static ssize_t wil_write_file_reset(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) -{ - struct wil6210_priv *wil = file->private_data; - struct net_device *ndev = wil->main_ndev; - - /** - * BUG: - * this code does NOT sync device state with the rest of system - * use with care, debug only!!! 
- */ - rtnl_lock(); - dev_close(ndev); - ndev->flags &= ~IFF_UP; - rtnl_unlock(); - wil_reset(wil, true); - - return len; -} - -static const struct file_operations fops_reset = { - .write = wil_write_file_reset, - .open = simple_open, -}; - /*---write channel 1..4 to rxon for it, 0 to rxoff---*/ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, size_t len, loff_t *ppos) @@ -1264,6 +1238,9 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data) int num_active; int num_free; + if (!rbm->buff_arr) + return -EINVAL; + seq_printf(s, " size = %zu\n", rbm->size); seq_printf(s, " free_list_empty_cnt = %lu\n", rbm->free_list_empty_cnt); @@ -1734,6 +1711,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) char *status = "unknown"; u8 aid = 0; u8 mid; + bool sta_connected = false; switch (p->status) { case wil_sta_unused: @@ -1748,8 +1726,20 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) break; } mid = (p->status != wil_sta_unused) ? 
p->mid : U8_MAX; - seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status, - mid, aid); + if (mid < wil->max_vifs) { + struct wil6210_vif *vif = wil->vifs[mid]; + + if (vif->wdev.iftype == NL80211_IFTYPE_STATION && + p->status == wil_sta_connected) + sta_connected = true; + } + /* print roam counter only for connected stations */ + if (sta_connected) + seq_printf(s, "[%d] %pM connected (roam counter %d) MID %d AID %d\n", + i, p->addr, p->stats.ft_roams, mid, aid); + else + seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, + p->addr, status, mid, aid); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); @@ -2491,7 +2481,6 @@ static const struct { {"bf", 0444, &fops_bf}, {"ssid", 0644, &fops_ssid}, {"mem_val", 0644, &fops_memread}, - {"reset", 0244, &fops_reset}, {"rxon", 0244, &fops_rxon}, {"tx_mgmt", 0244, &fops_txmgmt}, {"wmi_send", 0244, &fops_wmi}, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index d749ed8d3bcf47df83dd953792f305846ba7bff2..6c8f8c60e6a0eef34a2143d140d57222e9b721d3 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -80,9 +80,9 @@ static const struct kernel_param_ops mtu_max_ops = { module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444); MODULE_PARM_DESC(mtu_max, " Max MTU value."); -static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; -static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT; -static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT; +static uint rx_ring_order; +static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT; +static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT; static int ring_order_set(const char *val, const struct kernel_param *kp) { @@ -362,6 +362,8 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, vif->bss = NULL; } clear_bit(wil_vif_fwconnecting, vif->status); + clear_bit(wil_vif_ft_roam, vif->status); + break; case NL80211_IFTYPE_AP: 
case NL80211_IFTYPE_P2P_GO: @@ -1716,6 +1718,11 @@ int __wil_up(struct wil6210_priv *wil) return rc; /* Rx RING. After MAC and beacon */ + if (rx_ring_order == 0) + rx_ring_order = wil->hw_version < HW_VER_TALYN_MB ? + WIL_RX_RING_SIZE_ORDER_DEFAULT : + WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT; + rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 7020e5f48578572adfebcbbcd3ca344730a4105b..e74f48bfb5055997bda98699ef7655df06c9f8ec 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1085,6 +1085,88 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, return rc; } +static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid, + int tid) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + int rc; + struct wmi_vring_cfg_cmd cmd = { + .action = cpu_to_le32(WMI_VRING_CMD_MODIFY), + .vring_cfg = { + .tx_sw_ring = { + .max_mpdu_size = + cpu_to_le16(wil_mtu2macbuf(mtu_max)), + .ring_size = 0, + }, + .ringid = ring_id, + .cidxtid = mk_cidxtid(cid, tid), + .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, + .mac_ctrl = 0, + .to_resolution = 0, + .agg_max_wsize = 0, + .schd_params = { + .priority = cpu_to_le16(0), + .timeslot_us = cpu_to_le16(0xfff), + }, + }, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_vring_cfg_done_event cmd; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; + struct wil_ring *vring = &wil->ring_tx[ring_id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; + + wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id, + cid, tid); + lockdep_assert_held(&wil->mutex); + + if (!vring->va) { + wil_err(wil, "Tx ring [%d] not allocated\n", ring_id); + return -EINVAL; + } + + if (wil->ring2cid_tid[ring_id][0] != cid || + wil->ring2cid_tid[ring_id][1] != tid) { + wil_err(wil, "ring info does not match cid=%u 
tid=%u\n", + wil->ring2cid_tid[ring_id][0], + wil->ring2cid_tid[ring_id][1]); + } + + cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); + + rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), + WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); + if (rc) + goto fail; + + if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "Tx modify failed, status 0x%02x\n", + reply.cmd.status); + rc = -EINVAL; + goto fail; + } + + /* set BA aggregation window size to 0 to force a new BA with the + * new AP + */ + txdata->agg_wsize = 0; + if (txdata->dot1x_open && agg_wsize >= 0) + wil_addba_tx_request(wil, ring_id, agg_wsize); + + return 0; +fail: + spin_lock_bh(&txdata->lock); + txdata->dot1x_open = false; + txdata->enabled = 0; + spin_unlock_bh(&txdata->lock); + wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; + wil->ring2cid_tid[ring_id][1] = 0; + return rc; +} + int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) { struct wil6210_priv *wil = vif_to_wil(vif); @@ -2307,6 +2389,7 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast; wil->txrx_ops.tx_init = wil_tx_init; wil->txrx_ops.tx_fini = wil_tx_fini; + wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init; wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index e44629f56b51609cbb61f4883642fa223df94c2b..f53c356cc78449e8e6fca0efa2d1cda9c3d0abd7 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -358,8 +358,8 @@ static int wil_init_rx_sring(struct wil6210_priv *wil, struct wil_status_ring *sring = &wil->srings[ring_id]; int rc; - wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size, - ring_id); + wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", + status_ring_size, 
ring_id); memset(&sring->rx_data, 0, sizeof(sring->rx_data)); @@ -748,6 +748,16 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id, return rc; } +static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id, + int cid, int tid) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + + wil_err(wil, "ring modify is not supported for EDMA\n"); + + return -EOPNOTSUPP; +} + /* This function is used only for RX SW reorder */ static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid, struct sk_buff *skb, struct wil_net_stats *stats) @@ -1601,6 +1611,7 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil) wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma; wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma; wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma; + wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init_edma; wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index a7fe9292fda380015cbd8d9b0017f12d4e6f5fd9..343516a03a1e4098dc9c561845c358b25e3c460f 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -23,9 +23,9 @@ #define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN) #define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX) /* RX sring order should be bigger than RX ring order */ -#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11) +#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12) #define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12) -#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536) +#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600) #define WIL_DEFAULT_RX_STATUS_RING_ID 0 #define WIL_RX_DESC_RING_ID 0 diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index e03ab272daa848f71e6f8b169919e8d0c59cdfd0..48136a1f8d8492bedbca66e1addb9642cbf0d4de 100644 --- 
a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -83,6 +83,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL_TX_Q_LEN_DEFAULT (4000) #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) +#define WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT (11) #define WIL_TX_RING_SIZE_ORDER_DEFAULT (12) #define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7) #define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */ @@ -451,6 +452,15 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid) *tid = (cidxtid >> 4) & 0xf; } +/** + * wil_cid_valid - check cid is valid + * @cid: CID value + */ +static inline bool wil_cid_valid(u8 cid) +{ + return (cid >= 0 && cid < WIL6210_MAX_CID); +} + struct wil6210_mbox_ring { u32 base; u16 entry_size; /* max. size of mbox entry, incl. all headers */ @@ -578,6 +588,7 @@ struct wil_net_stats { unsigned long rx_csum_err; u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; + u32 ft_roams; /* relevant in STA mode */ }; /** @@ -600,6 +611,8 @@ struct wil_txrx_ops { struct wil_ctx *ctx); int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif, struct wil_ring *ring, struct sk_buff *skb); + int (*tx_ring_modify)(struct wil6210_vif *vif, int ring_id, + int cid, int tid); irqreturn_t (*irq_tx)(int irq, void *cookie); /* RX ops */ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); @@ -823,6 +836,7 @@ extern u8 led_polarity; enum wil6210_vif_status { wil_vif_fwconnecting, wil_vif_fwconnected, + wil_vif_ft_roam, wil_vif_status_last /* keep last */ }; @@ -1230,6 +1244,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index, int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring); +int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv 
*wil, u32 *t_m, u32 *t_r); int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, @@ -1351,6 +1366,9 @@ void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil); void wil_rx_handle(struct wil6210_priv *wil, int *quota); void wil6210_unmask_irq_rx(struct wil6210_priv *wil); void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil); +void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, + struct wil_sta_info *cs, + struct key_params *params); int wil_iftype_nl2wmi(enum nl80211_iftype type); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index ba0efb11687693c43fc94e1d6be94e749110163f..f2c93c2a0645567daa2cd18d73271c7604a4cc8c 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -228,6 +228,14 @@ struct blink_on_off_time led_blink_time[] = { {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS}, }; +struct auth_no_hdr { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[0]; +} __packed; + u8 led_polarity = LED_POLARITY_LOW_ACTIVE; /** @@ -469,6 +477,12 @@ static const char *cmdid2name(u16 cmdid) return "WMI_LINK_STATS_CMD"; case WMI_SW_TX_REQ_EXT_CMDID: return "WMI_SW_TX_REQ_EXT_CMDID"; + case WMI_FT_AUTH_CMDID: + return "WMI_FT_AUTH_CMD"; + case WMI_FT_REASSOC_CMDID: + return "WMI_FT_REASSOC_CMD"; + case WMI_UPDATE_FT_IES_CMDID: + return "WMI_UPDATE_FT_IES_CMD"; default: return "Untracked CMD"; } @@ -607,6 +621,12 @@ static const char *eventid2name(u16 eventid) return "WMI_LINK_STATS_CONFIG_DONE_EVENT"; case WMI_LINK_STATS_EVENTID: return "WMI_LINK_STATS_EVENT"; + case WMI_COMMAND_NOT_SUPPORTED_EVENTID: + return "WMI_COMMAND_NOT_SUPPORTED_EVENT"; + case WMI_FT_AUTH_STATUS_EVENTID: + return "WMI_FT_AUTH_STATUS_EVENT"; + case WMI_FT_REASSOC_STATUS_EVENTID: + return "WMI_FT_REASSOC_STATUS_EVENT"; default: return "Untracked EVENT"; } @@ -1176,6 +1196,9 @@ static void 
wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len) struct wmi_ring_en_event *evt = d; u8 vri = evt->ring_index; struct wireless_dev *wdev = vif_to_wdev(vif); + struct wil_sta_info *sta; + int cid; + struct key_params params; wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid); @@ -1184,13 +1207,33 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len) return; } - if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme) - /* in AP mode with disable_ap_sme, this is done by - * wil_cfg80211_change_station() + if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme || + test_bit(wil_vif_ft_roam, vif->status)) + /* in AP mode with disable_ap_sme that is not FT, + * this is done by wil_cfg80211_change_station() */ wil->ring_tx_data[vri].dot1x_open = true; if (vri == vif->bcast_ring) /* no BA for bcast */ return; + + cid = wil->ring2cid_tid[vri][0]; + if (!wil_cid_valid(cid)) { + wil_err(wil, "invalid cid %d for vring %d\n", cid, vri); + return; + } + + /* In FT mode we get key but not store it as it is received + * before WMI_CONNECT_EVENT received from FW. 
+ * wil_set_crypto_rx is called here to reset the security PN + */ + sta = &wil->sta[cid]; + if (test_bit(wil_vif_ft_roam, vif->status)) { + memset(¶ms, 0, sizeof(params)); + wil_set_crypto_rx(0, WMI_KEY_USE_PAIRWISE, sta, ¶ms); + if (wdev->iftype != NL80211_IFTYPE_AP) + clear_bit(wil_vif_ft_roam, vif->status); + } + if (agg_wsize >= 0) wil_addba_tx_request(wil, vri, agg_wsize); } @@ -1504,6 +1547,283 @@ wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len) evt->payload, payload_size); } +/** + * find cid and ringid for the station vif + * + * return error, if other interfaces are used or ring was not found + */ +static int wil_find_cid_ringid_sta(struct wil6210_priv *wil, + struct wil6210_vif *vif, + int *cid, + int *ringid) +{ + struct wil_ring *ring; + struct wil_ring_tx_data *txdata; + int min_ring_id = wil_get_min_tx_ring_id(wil); + int i; + u8 lcid; + + if (!(vif->wdev.iftype == NL80211_IFTYPE_STATION || + vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) { + wil_err(wil, "invalid interface type %d\n", vif->wdev.iftype); + return -EINVAL; + } + + /* In the STA mode, it is expected to have only one ring + * for the AP we are connected to. + * find it and return the cid associated with it. 
+ */ + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { + ring = &wil->ring_tx[i]; + txdata = &wil->ring_tx_data[i]; + if (!ring->va || !txdata->enabled || txdata->mid != vif->mid) + continue; + + lcid = wil->ring2cid_tid[i][0]; + if (lcid >= WIL6210_MAX_CID) /* skip BCAST */ + continue; + + wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid); + *cid = lcid; + *ringid = i; + return 0; + } + + wil_dbg_wmi(wil, "find sta cid while no rings active?\n"); + + return -ENOENT; +} + +static void +wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); + struct wmi_ft_auth_status_event *data = d; + int ie_len = len - offsetof(struct wmi_ft_auth_status_event, ie_info); + int rc, cid = 0, ringid = 0; + struct cfg80211_ft_event_params ft; + u16 d_len; + /* auth_alg(u16) + auth_transaction(u16) + status_code(u16) */ + const size_t auth_ie_offset = sizeof(u16) * 3; + struct auth_no_hdr *auth = (struct auth_no_hdr *)data->ie_info; + + /* check the status */ + if (ie_len >= 0 && data->status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "FT: auth failed. status %d\n", data->status); + goto fail; + } + + if (ie_len < auth_ie_offset) { + wil_err(wil, "FT: auth event too short, len %d\n", len); + goto fail; + } + + d_len = le16_to_cpu(data->ie_len); + if (d_len != ie_len) { + wil_err(wil, + "FT: auth ie length mismatch, d_len %d should be %d\n", + d_len, ie_len); + goto fail; + } + + if (!test_bit(wil_vif_ft_roam, wil->status)) { + wil_err(wil, "FT: Not in roaming state\n"); + goto fail; + } + + if (le16_to_cpu(auth->auth_transaction) != 2) { + wil_err(wil, "FT: auth error. auth_transaction %d\n", + le16_to_cpu(auth->auth_transaction)); + goto fail; + } + + if (le16_to_cpu(auth->auth_alg) != WLAN_AUTH_FT) { + wil_err(wil, "FT: auth error. 
auth_alg %d\n", + le16_to_cpu(auth->auth_alg)); + goto fail; + } + + wil_dbg_wmi(wil, "FT: Auth to %pM successfully\n", data->mac_addr); + wil_hex_dump_wmi("FT Auth ies : ", DUMP_PREFIX_OFFSET, 16, 1, + data->ie_info, d_len, true); + + /* find cid and ringid */ + rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid); + if (rc) { + wil_err(wil, "No valid cid found\n"); + goto fail; + } + + if (vif->privacy) { + /* For secure assoc, remove old keys */ + rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr, + WMI_KEY_USE_PAIRWISE); + if (rc) { + wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n"); + goto fail; + } + rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr, + WMI_KEY_USE_RX_GROUP); + if (rc) { + wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n"); + goto fail; + } + } + + memset(&ft, 0, sizeof(ft)); + ft.ies = data->ie_info + auth_ie_offset; + ft.ies_len = d_len - auth_ie_offset; + ft.target_ap = data->mac_addr; + cfg80211_ft_event(ndev, &ft); + + return; + +fail: + wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false); +} + +static void +wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); + struct wiphy *wiphy = wil_to_wiphy(wil); + struct wmi_ft_reassoc_status_event *data = d; + int ies_len = len - offsetof(struct wmi_ft_reassoc_status_event, + ie_info); + int rc = -ENOENT, cid = 0, ringid = 0; + int ch; /* channel number (primary) */ + u8 spec_ch = 0; /* spec channel number */ + size_t assoc_req_ie_len = 0, assoc_resp_ie_len = 0; + u8 *assoc_req_ie = NULL, *assoc_resp_ie = NULL; + /* capinfo(u16) + listen_interval(u16) + current_ap mac addr + IEs */ + const size_t assoc_req_ie_offset = sizeof(u16) * 2 + ETH_ALEN; + /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ + const size_t assoc_resp_ie_offset = sizeof(u16) * 3; + u16 d_len; + int freq; + struct cfg80211_roam_info info; + + if (ies_len < 0) { + 
wil_err(wil, "ft reassoc event too short, len %d\n", len); + goto fail; + } + + wil_dbg_wmi(wil, "Reasoc Status event: status=%d, aid=%d", + data->status, data->aid); + wil_dbg_wmi(wil, " mac_addr=%pM, beacon_ie_len=%d", + data->mac_addr, data->beacon_ie_len); + wil_dbg_wmi(wil, " reassoc_req_ie_len=%d, reassoc_resp_ie_len=%d", + le16_to_cpu(data->reassoc_req_ie_len), + le16_to_cpu(data->reassoc_resp_ie_len)); + + d_len = le16_to_cpu(data->beacon_ie_len) + + le16_to_cpu(data->reassoc_req_ie_len) + + le16_to_cpu(data->reassoc_resp_ie_len); + if (d_len != ies_len) { + wil_err(wil, + "ft reassoc ie length mismatch, d_len %d should be %d\n", + d_len, ies_len); + goto fail; + } + + /* check the status */ + if (data->status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "ft reassoc failed. status %d\n", data->status); + goto fail; + } + + /* find cid and ringid */ + rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid); + if (rc) { + wil_err(wil, "No valid cid found\n"); + goto fail; + } + + ch = data->channel + 1; + if (data->edmg_channel && + test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) + wil_wmi2spec_ch(data->edmg_channel, &spec_ch); + if (spec_ch) + wil_info(wil, "FT: Roam %pM EDMG channel [%d] primary channel [%d] cid %d aid %d\n", + data->mac_addr, spec_ch, ch, cid, data->aid); + else + wil_info(wil, "FT: Roam %pM channel [%d] cid %d aid %d\n", + data->mac_addr, ch, cid, data->aid); + + wil_hex_dump_wmi("reassoc AI : ", DUMP_PREFIX_OFFSET, 16, 1, + data->ie_info, len - sizeof(*data), true); + + /* figure out IE's */ + if (le16_to_cpu(data->reassoc_req_ie_len) > assoc_req_ie_offset) { + assoc_req_ie = &data->ie_info[assoc_req_ie_offset]; + assoc_req_ie_len = le16_to_cpu(data->reassoc_req_ie_len) - + assoc_req_ie_offset; + } + if (le16_to_cpu(data->reassoc_resp_ie_len) <= assoc_resp_ie_offset) { + wil_err(wil, "FT: reassoc resp ie len is too short, len %d\n", + le16_to_cpu(data->reassoc_resp_ie_len)); + goto fail; + } + + assoc_resp_ie = 
&data->ie_info[le16_to_cpu(data->reassoc_req_ie_len) + + assoc_resp_ie_offset]; + assoc_resp_ie_len = le16_to_cpu(data->reassoc_resp_ie_len) - + assoc_resp_ie_offset; + + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "FT: status_resetting, cancel reassoc event\n"); + /* no need for cleanup, wil_reset will do that */ + return; + } + + mutex_lock(&wil->mutex); + + /* ring modify to set the ring for the roamed AP settings */ + wil_dbg_wmi(wil, + "ft modify tx config for connection CID %d ring %d\n", + cid, ringid); + + rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0); + if (rc) { + wil_err(wil, "modify TX for CID %d MID %d ring %d failed (%d)\n", + cid, vif->mid, ringid, rc); + mutex_unlock(&wil->mutex); + goto fail; + } + + /* Update the driver STA members with the new bss */ + wil->sta[cid].aid = data->aid; + wil->sta[cid].stats.ft_roams++; + ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid); + mutex_unlock(&wil->mutex); + del_timer_sync(&vif->connect_timer); + + cfg80211_ref_bss(wiphy, vif->bss); + if (spec_ch) + freq = ieee80211_channel_to_frequency(spec_ch, + NL80211_BAND_60GHZ); + else + freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ); + + memset(&info, 0, sizeof(info)); + info.channel = ieee80211_get_channel(wiphy, freq); + info.bss = vif->bss; + info.req_ie = assoc_req_ie; + info.req_ie_len = assoc_req_ie_len; + info.resp_ie = assoc_resp_ie; + info.resp_ie_len = assoc_resp_ie_len; + cfg80211_roamed(ndev, &info, GFP_KERNEL); + vif->bss = NULL; + + return; + +fail: + wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false); +} + /** * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" @@ -1542,6 +1862,8 @@ static const struct { {WMI_TOF_CHANNEL_INFO_EVENTID, wmi_evt_ignore}, {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result}, {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats}, + {WMI_FT_AUTH_STATUS_EVENTID, 
wmi_evt_auth_status}, + {WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status}, }; /* @@ -2147,6 +2469,40 @@ int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie) return rc; } +int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + u16 len; + struct wmi_update_ft_ies_cmd *cmd; + int rc; + + if (!ie) + ie_len = 0; + + len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len; + if (len < ie_len) { + wil_err(wil, "wraparound. ie len %d\n", ie_len); + return -EINVAL; + } + + cmd = kzalloc(len, GFP_KERNEL); + if (!cmd) { + rc = -ENOMEM; + goto out; + } + + cmd->ie_len = cpu_to_le16(ie_len); + memcpy(cmd->ie_info, ie, ie_len); + rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len); + kfree(cmd); + +out: + if (rc) + wil_err(wil, "update ft ies failed : %d\n", rc); + + return rc; +} + /** * wmi_rxon - turn radio on/off * @on: turn on if true, off otherwise diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index c07fca09818f5361990bdb1b0f63b41a993b741a..97168245c4d3bd4f8e2e9ef4f710ad54b20b897a 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -2393,6 +2393,7 @@ struct wmi_ft_reassoc_status_event { __le16 beacon_ie_len; __le16 reassoc_req_ie_len; __le16 reassoc_resp_ie_len; + u8 reserved[4]; u8 ie_info[0]; } __packed; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index cd587325e2867915165d278e89174594d0b6b02b..dd6e27513cc1bb8395a7b79b9e391c10bcec79f2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), 
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c index 6a8e67c19250ef5f3cf1302d5b54eee025b819d6..15a2086b2fb70fb2fbbfa0d7a2a19eed98c53309 100644 --- a/drivers/net/wireless/cnss2/bus.c +++ b/drivers/net/wireless/cnss2/bus.c @@ -32,8 +32,8 @@ enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id) { switch (device_id) { case QCA6174_DEVICE_ID: - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: + case QCA6390_DEVICE_ID: return CNSS_BUS_PCI; default: cnss_pr_err("Unknown device_id: 0x%lx\n", device_id); diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h index 203de2316a25bc023be9bd20d9089b9bb9ef886d..05ab66453ef18ba293594a425f9cb043e0ec90d0 100644 --- a/drivers/net/wireless/cnss2/bus.h +++ b/drivers/net/wireless/cnss2/bus.h @@ -22,8 +22,6 @@ #define QCA6174_REV3_2_VERSION 0x5030000 #define QCA6290_VENDOR_ID 0x17CB #define QCA6290_DEVICE_ID 0x1100 -#define QCA6290_EMULATION_VENDOR_ID 0x168C -#define QCA6290_EMULATION_DEVICE_ID 0xABCD #define QCA6390_VENDOR_ID 0x17CB #define QCA6390_DEVICE_ID 0x1101 diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 69d4b3ec20f4c84a0146dd5ed3ad2b6f24f281ec..44c48c503f06d5877565f77e1d120baf0498a3a2 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -900,8 +900,8 @@ static int cnss_do_recovery(struct cnss_plat_data *plat_priv, goto self_recovery; break; case CNSS_REASON_RDDM: - clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); cnss_bus_collect_dump_info(plat_priv, false); + clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); break; case CNSS_REASON_DEFAULT: case CNSS_REASON_TIMEOUT: @@ -1384,7 +1384,6 @@ int 
cnss_register_ramdump(struct cnss_plat_data *plat_priv) case QCA6174_DEVICE_ID: ret = cnss_register_ramdump_v1(plat_priv); break; - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: ret = cnss_register_ramdump_v2(plat_priv); @@ -1403,7 +1402,6 @@ void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv) case QCA6174_DEVICE_ID: cnss_unregister_ramdump_v1(plat_priv); break; - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: cnss_unregister_ramdump_v2(plat_priv); @@ -1474,7 +1472,6 @@ static ssize_t cnss_fs_ready_store(struct device *dev, } switch (plat_priv->device_id) { - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: break; diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index cd4444ec88d6d0bdf68524319adcf8eef26f4a1e..05681493f52079a240b0c2bb5c78efd35d8621bd 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -50,6 +50,12 @@ #define FW_ASSERT_TIMEOUT 5000 +#ifdef CONFIG_CNSS_EMULATION +#define EMULATION_HW 1 +#else +#define EMULATION_HW 0 +#endif + static DEFINE_SPINLOCK(pci_link_down_lock); static unsigned int pci_link_down_panic; @@ -64,6 +70,21 @@ MODULE_PARM_DESC(fbc_bypass, "Bypass firmware download when loading WLAN driver"); #endif +#ifdef CONFIG_CNSS2_DEBUG +#ifdef CONFIG_CNSS_EMULATION +static unsigned int mhi_timeout = 90000; +#else +static unsigned int mhi_timeout; +#endif +module_param(mhi_timeout, uint, 0600); +MODULE_PARM_DESC(mhi_timeout, + "Timeout for MHI operation in milliseconds"); + +#define MHI_TIMEOUT_OVERWRITE_MS mhi_timeout +#else +#define MHI_TIMEOUT_OVERWRITE_MS 0 +#endif + static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save) { struct pci_dev *pci_dev = pci_priv->pci_dev; @@ -229,6 +250,27 @@ int cnss_pci_link_down(struct device *dev) } EXPORT_SYMBOL(cnss_pci_link_down); +int cnss_pci_is_device_down(struct device *dev) +{ + struct 
cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + struct cnss_pci_data *pci_priv; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL\n"); + return -ENODEV; + } + + pci_priv = plat_priv->bus_priv; + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL\n"); + return -ENODEV; + } + + return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) | + pci_priv->pci_link_down_ind; +} +EXPORT_SYMBOL(cnss_pci_is_device_down); + int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv) { int ret = 0; @@ -568,7 +610,6 @@ int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv) case QCA6174_DEVICE_ID: ret = cnss_qca6174_powerup(pci_priv); break; - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: ret = cnss_qca6290_powerup(pci_priv); @@ -595,7 +636,6 @@ int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv) case QCA6174_DEVICE_ID: ret = cnss_qca6174_shutdown(pci_priv); break; - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: ret = cnss_qca6290_shutdown(pci_priv); @@ -622,7 +662,6 @@ int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv) case QCA6174_DEVICE_ID: cnss_qca6174_crash_shutdown(pci_priv); break; - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: cnss_qca6290_crash_shutdown(pci_priv); @@ -649,7 +688,6 @@ int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv) case QCA6174_DEVICE_ID: ret = cnss_qca6174_ramdump(pci_priv); break; - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: ret = cnss_qca6290_ramdump(pci_priv); @@ -2110,6 +2148,9 @@ int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv) if (fbc_bypass) return 0; + if (MHI_TIMEOUT_OVERWRITE_MS) + pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS; + ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT); if (ret) goto out; @@ -2242,7 +2283,6 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, ret); cnss_power_off_device(plat_priv); break; - case 
QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: ret = cnss_pci_enable_msi(pci_priv); @@ -2253,6 +2293,8 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, cnss_pci_disable_msi(pci_priv); goto disable_bus; } + if (EMULATION_HW) + break; ret = cnss_suspend_pci_link(pci_priv); if (ret) cnss_pr_err("Failed to suspend PCI link, err = %d\n", @@ -2295,7 +2337,6 @@ static void cnss_pci_remove(struct pci_dev *pci_dev) cnss_pci_free_fw_mem(pci_priv); switch (pci_dev->device) { - case QCA6290_EMULATION_DEVICE_ID: case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: cnss_pci_unregister_mhi(pci_priv); @@ -2318,8 +2359,6 @@ static void cnss_pci_remove(struct pci_dev *pci_dev) static const struct pci_device_id cnss_pci_id_table[] = { { QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID }, - { QCA6290_EMULATION_VENDOR_ID, QCA6290_EMULATION_DEVICE_ID, - PCI_ANY_ID, PCI_ANY_ID }, { QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID }, { QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID }, { 0 } diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index d0b35ad520ff65034e607068400bed2414dda88c..6a38b66cbb38854e0ba59ed6d6e30c71d4003f71 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -22,8 +22,17 @@ #define WLFW_SERVICE_INS_ID_V01 1 #define WLFW_CLIENT_ID 0x4b4e454c #define MAX_BDF_FILE_NAME 11 -#define DEFAULT_BDF_FILE_NAME "bdwlan.elf" -#define BDF_FILE_NAME_PREFIX "bdwlan.e" +#define ELF_BDF_FILE_NAME "bdwlan.elf" +#define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e" +#define BIN_BDF_FILE_NAME "bdwlan.bin" +#define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b" +#define DUMMY_BDF_FILE_NAME "bdwlan.dmy" + +enum cnss_bdf_type { + CNSS_BDF_BIN, + CNSS_BDF_ELF, + CNSS_BDF_DUMMY = 255, +}; #ifdef CONFIG_CNSS2_DEBUG static unsigned int qmi_timeout = 10000; @@ -42,17 +51,12 @@ static bool daemon_support; module_param(daemon_support, bool, 0600); MODULE_PARM_DESC(daemon_support, "User 
space has cnss-daemon support or not"); -static bool bdf_bypass; +static unsigned int bdf_type = CNSS_BDF_ELF; #ifdef CONFIG_CNSS2_DEBUG -module_param(bdf_bypass, bool, 0600); -MODULE_PARM_DESC(bdf_bypass, "If BDF is not found, send dummy BDF to FW"); +module_param(bdf_type, uint, 0600); +MODULE_PARM_DESC(bdf_type, "Type of board data file to be downloaded"); #endif -enum cnss_bdf_type { - CNSS_BDF_BIN, - CNSS_BDF_ELF, -}; - static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode) { switch (mode) { @@ -443,18 +447,33 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv) return -ENOMEM; } - if (plat_priv->board_info.board_id == 0xFF) - snprintf(filename, sizeof(filename), DEFAULT_BDF_FILE_NAME); - else - snprintf(filename, sizeof(filename), - BDF_FILE_NAME_PREFIX "%02x", - plat_priv->board_info.board_id); - - if (bdf_bypass) { - cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n"); - temp = filename; + switch (bdf_type) { + case CNSS_BDF_ELF: + if (plat_priv->board_info.board_id == 0xFF) + snprintf(filename, sizeof(filename), ELF_BDF_FILE_NAME); + else + snprintf(filename, sizeof(filename), + ELF_BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + break; + case CNSS_BDF_BIN: + if (plat_priv->board_info.board_id == 0xFF) + snprintf(filename, sizeof(filename), BIN_BDF_FILE_NAME); + else + snprintf(filename, sizeof(filename), + BIN_BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + break; + case CNSS_BDF_DUMMY: + cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n"); + snprintf(filename, sizeof(filename), DUMMY_BDF_FILE_NAME); + temp = DUMMY_BDF_FILE_NAME; remaining = MAX_BDF_FILE_NAME; goto bypass_bdf; + default: + cnss_pr_err("Invalid BDF type: %d\n", bdf_type); + ret = -EINVAL; + goto err_req_fw; } ret = request_firmware(&fw_entry, filename, &plat_priv->plat_dev->dev); @@ -479,7 +498,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv) req->data_valid = 1; req->end_valid = 1; 
req->bdf_type_valid = 1; - req->bdf_type = CNSS_BDF_ELF; + req->bdf_type = bdf_type; if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) { req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01; @@ -534,7 +553,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv) return 0; err_send: - if (!bdf_bypass) + if (bdf_type != CNSS_BDF_DUMMY) release_firmware(fw_entry); err_req_fw: CNSS_ASSERT(0); @@ -1327,14 +1346,17 @@ int cnss_qmi_init(struct cnss_plat_data *plat_priv) ret = qmi_handle_init(&plat_priv->qmi_wlfw, QMI_WLFW_MAX_RECV_BUF_SIZE, &qmi_wlfw_ops, qmi_wlfw_msg_handlers); - if (ret < 0) + if (ret < 0) { cnss_pr_err("Failed to initialize QMI handle, err: %d\n", ret); + goto out; + } ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01, - WLFW_SERVICE_VERS_V01, 0); + WLFW_SERVICE_VERS_V01, WLFW_SERVICE_INS_ID_V01); if (ret < 0) cnss_pr_err("Failed to add QMI lookup, err: %d\n", ret); +out: return ret; } diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c index 3890ed0f8abf713f52ee8b4a857fc87d17065eaf..f7d91eeaf89407cdb73d6ed39bb1520b4775b6f0 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c @@ -607,6 +607,42 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .offset = offsetof(struct wlfw_ind_register_req_msg_v01, cal_done_enable), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + qdss_trace_req_mem_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + qdss_trace_req_mem_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset 
= offsetof(struct wlfw_ind_register_req_msg_v01, + qdss_trace_save_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + qdss_trace_save_enable), + }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, @@ -2230,6 +2266,24 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { .offset = offsetof(struct wlfw_host_cap_req_msg_v01, mem_cfg_mode), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_duration_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_duration), + }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, @@ -2605,3 +2659,472 @@ struct qmi_elem_info wlfw_cal_done_ind_msg_v01_ei[] = { .tlv_type = QMI_COMMON_TLV_TYPE, }, }; + +struct qmi_elem_info wlfw_qdss_trace_req_mem_ind_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_qdss_trace_req_mem_ind_msg_v01, + total_size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_mem_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_qdss_trace_mem_info_req_msg_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_qdss_trace_mem_info_req_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = 
NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_mem_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_qdss_trace_mem_info_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_save_ind_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_qdss_trace_save_ind_msg_v01, + source), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_qdss_trace_save_ind_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_save_ind_msg_v01, + file_name_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_save_ind_msg_v01, + file_name), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_data_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_qdss_trace_data_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_data_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 
1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), 
+ .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct + wlfw_qdss_trace_data_resp_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_config_download_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = 
offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_req_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_config_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_qdss_trace_config_download_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_mode_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_mode_req_msg_v01, + mode_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_qdss_trace_mode_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_qdss_trace_mode_req_msg_v01, + mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_qdss_trace_mode_req_msg_v01, + option_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_qdss_trace_mode_req_msg_v01, + option), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_qdss_trace_mode_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = 
sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_qdss_trace_mode_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_shutdown_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_shutdown_req_msg_v01, + shutdown_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_shutdown_req_msg_v01, + shutdown), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_shutdown_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_shutdown_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h index 30e16f7920a271bf984301a74b9b0f1fc100f8fe..e745a5307afa44d5b7f2b13e634bc17c83c7e216 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h @@ -21,6 +21,7 @@ #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 +#define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01 0x0044 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A #define QMI_WLFW_CAL_DONE_IND_V01 0x003E #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 @@ -31,10 +32,14 @@ #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 #define 
QMI_WLFW_M3_INFO_RESP_V01 0x003C #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 +#define QMI_WLFW_QDSS_TRACE_START_RESP_V01 0x0045 #define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027 #define QMI_WLFW_XO_CAL_IND_V01 0x003D #define QMI_WLFW_INI_RESP_V01 0x002F #define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026 +#define QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01 0x0040 +#define QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01 0x003F +#define QMI_WLFW_SHUTDOWN_RESP_V01 0x0043 #define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033 #define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028 #define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 @@ -44,6 +49,7 @@ #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 +#define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_V01 0x0044 #define QMI_WLFW_REJUVENATE_IND_V01 0x0039 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B #define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031 @@ -51,16 +57,22 @@ #define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036 #define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C #define QMI_WLFW_FW_READY_IND_V01 0x0021 +#define QMI_WLFW_QDSS_TRACE_SAVE_IND_V01 0x0041 +#define QMI_WLFW_QDSS_TRACE_MEM_INFO_RESP_V01 0x0040 #define QMI_WLFW_MSA_READY_RESP_V01 0x002E +#define QMI_WLFW_QDSS_TRACE_DATA_REQ_V01 0x0042 #define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029 #define QMI_WLFW_INI_REQ_V01 0x002F #define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025 #define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A #define QMI_WLFW_MSA_INFO_RESP_V01 0x002D #define QMI_WLFW_MSA_READY_REQ_V01 0x002E +#define QMI_WLFW_QDSS_TRACE_DATA_RESP_V01 0x0042 #define QMI_WLFW_CAP_RESP_V01 0x0024 +#define QMI_WLFW_QDSS_TRACE_START_REQ_V01 0x0045 #define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A #define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 +#define QMI_WLFW_SHUTDOWN_REQ_V01 0x0043 #define QMI_WLFW_VBATT_REQ_V01 0x0032 #define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 #define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 @@ -131,6 +143,13 @@ enum wlfw_mem_type_enum_v01 { 
WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, }; +enum wlfw_qdss_trace_mode_enum_v01 { + WLFW_QDSS_TRACE_MODE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_QDSS_TRACE_OFF_V01 = 0, + QMI_WLFW_QDSS_TRACE_ON_V01 = 1, + WLFW_QDSS_TRACE_MODE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) @@ -238,9 +257,13 @@ struct wlfw_ind_register_req_msg_v01 { u8 xo_cal_enable; u8 cal_done_enable_valid; u8 cal_done_enable; + u8 qdss_trace_req_mem_enable_valid; + u8 qdss_trace_req_mem_enable; + u8 qdss_trace_save_enable_valid; + u8 qdss_trace_save_enable; }; -#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 54 +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 62 extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[]; struct wlfw_ind_register_resp_msg_v01 { @@ -599,9 +622,11 @@ struct wlfw_host_cap_req_msg_v01 { u32 mem_bucket; u8 mem_cfg_mode_valid; u8 mem_cfg_mode; + u8 cal_duration_valid; + u16 cal_duration; }; -#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 194 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[]; struct wlfw_host_cap_resp_msg_v01 { @@ -724,4 +749,113 @@ struct wlfw_cal_done_ind_msg_v01 { #define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 extern struct qmi_elem_info wlfw_cal_done_ind_msg_v01_ei[]; +struct wlfw_qdss_trace_req_mem_ind_msg_v01 { + u32 total_size; +}; + +#define WLFW_QDSS_TRACE_REQ_MEM_IND_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_qdss_trace_req_mem_ind_msg_v01_ei[]; + +struct wlfw_qdss_trace_mem_info_req_msg_v01 { + u64 addr; + u32 size; +}; + +#define WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +extern struct qmi_elem_info wlfw_qdss_trace_mem_info_req_msg_v01_ei[]; + +struct wlfw_qdss_trace_mem_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_QDSS_TRACE_MEM_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct 
qmi_elem_info wlfw_qdss_trace_mem_info_resp_msg_v01_ei[]; + +struct wlfw_qdss_trace_save_ind_msg_v01 { + u32 source; + u32 total_size; + u8 file_name_valid; + char file_name[QMI_WLFW_MAX_STR_LEN_V01 + 1]; +}; + +#define WLFW_QDSS_TRACE_SAVE_IND_MSG_V01_MAX_MSG_LEN 33 +extern struct qmi_elem_info wlfw_qdss_trace_save_ind_msg_v01_ei[]; + +struct wlfw_qdss_trace_data_req_msg_v01 { + u32 seg_id; +}; + +#define WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_qdss_trace_data_req_msg_v01_ei[]; + +struct wlfw_qdss_trace_data_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_QDSS_TRACE_DATA_RESP_MSG_V01_MAX_MSG_LEN 6174 +extern struct qmi_elem_info wlfw_qdss_trace_data_resp_msg_v01_ei[]; + +struct wlfw_qdss_trace_config_download_req_msg_v01 { + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6167 +extern struct qmi_elem_info wlfw_qdss_trace_config_download_req_msg_v01_ei[]; + +struct wlfw_qdss_trace_config_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_qdss_trace_config_download_resp_msg_v01_ei[]; + +struct wlfw_qdss_trace_mode_req_msg_v01 { + u8 mode_valid; + enum wlfw_qdss_trace_mode_enum_v01 mode; + u8 option_valid; + u32 option; +}; + +#define WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN 14 +extern struct qmi_elem_info wlfw_qdss_trace_mode_req_msg_v01_ei[]; + +struct wlfw_qdss_trace_mode_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_QDSS_TRACE_MODE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info 
wlfw_qdss_trace_mode_resp_msg_v01_ei[]; + +struct wlfw_shutdown_req_msg_v01 { + u8 shutdown_valid; + u8 shutdown; +}; + +#define WLFW_SHUTDOWN_REQ_MSG_V01_MAX_MSG_LEN 4 +extern struct qmi_elem_info wlfw_shutdown_req_msg_v01_ei[]; + +struct wlfw_shutdown_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_SHUTDOWN_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_shutdown_resp_msg_v01_ei[]; + #endif diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c index d9444c23139a869dc3074569936f3cda884eccf7..49bf8ce742e8019a2dcdaa443e6d51d6b2e8888e 100644 --- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c +++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c @@ -14,6 +14,7 @@ #include #include #include +#include static DEFINE_SPINLOCK(alloc_lock); @@ -23,7 +24,7 @@ static DEFINE_SPINLOCK(alloc_lock); struct wcnss_prealloc { int occupied; - unsigned int size; + size_t size; void *ptr; #ifdef CONFIG_SLUB_DEBUG unsigned long stack_trace[WCNSS_MAX_STACK_TRACE]; @@ -143,7 +144,7 @@ static inline void wcnss_prealloc_save_stack_trace(struct wcnss_prealloc *entry) {} #endif -void *wcnss_prealloc_get(unsigned int size) +void *wcnss_prealloc_get(size_t size) { int i = 0; unsigned long flags; @@ -200,7 +201,7 @@ void wcnss_prealloc_check_memory_leak(void) j++; } - pr_err("Size: %u, addr: %pK, backtrace:\n", + pr_err("Size: %zu, addr: %pK, backtrace:\n", wcnss_allocs[i].size, wcnss_allocs[i].ptr); print_stack_trace(&wcnss_allocs[i].trace, 1); } diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 73da5e63a609296816414b9069bb6154cc55efe0..2c80c722fecad3f5b43b7f7ae8f26a037e4ee43a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -177,6 +177,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg 
iwl9260_killer_2ac_cfg = { + .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", .fw_name_pre = IWL9260A_FW_PRE, @@ -266,6 +277,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { .soc_latency = 5000, }; +const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9460", .fw_name_pre = IWL9000A_FW_PRE, @@ -326,6 +365,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; +const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = 
IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 85fe1a928adc961b3ccfcf160ab209d4e2ad637c..70f3c327eb4abc3bc3ea03973aacb56d8ac19794 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -470,6 +470,7 @@ extern const struct iwl_cfg iwl8265_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; @@ -477,10 +478,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const 
struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 9a8605abb00a99da41ed36638e32f4a16a363bba..4cbc6cb8bf89e979a5f00227c98c8e879d95aeb7 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -543,6 +543,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, @@ -552,6 +555,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, 
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, @@ -576,6 +580,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, @@ -602,6 +608,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -628,6 +636,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, @@ -654,6 +664,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 
0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -680,6 +692,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -706,6 +720,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -741,6 +757,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, @@ -769,6 +787,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 
0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -795,6 +815,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a06b6612b6583d6b5efa1d2396bc2060a2ffa37f..ca99c3cf41c21b8b400bae2e8874c45f29365bbd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -901,6 +901,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) } def_rxq = trans_pcie->rxq; + cancel_work_sync(&rba->rx_alloc); + spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index f4f2b9b27e3269a4c36f9ab462baad462ad330b2..50890cab8807bf81282ce817edde16a9ac969836 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -644,6 +644,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) MWIFIEX_FUNC_SHUTDOWN); } + if (adapter->workqueue) + flush_workqueue(adapter->workqueue); + mwifiex_usb_free(card); mwifiex_dbg(adapter, FATAL, diff --git a/drivers/net/wireless/marvell/mwifiex/util.c 
b/drivers/net/wireless/marvell/mwifiex/util.c index 0cd68ffc2c74dc1c849e8011b84208ebf74a6730..51ccf10f44132eeff9bdc9c347664b37f0e93a22 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, s8 nflr) { struct mwifiex_histogram_data *phist_data = priv->hist_data; + s8 nf = -nflr; + s8 rssi = snr - nflr; atomic_inc(&phist_data->num_samples); atomic_inc(&phist_data->rx_rate[rx_rate]); - atomic_inc(&phist_data->snr[snr]); - atomic_inc(&phist_data->noise_flr[128 + nflr]); - atomic_inc(&phist_data->sig_str[nflr - snr]); + atomic_inc(&phist_data->snr[snr + 128]); + atomic_inc(&phist_data->noise_flr[nf + 128]); + atomic_inc(&phist_data->sig_str[rssi + 128]); } /* function to reset histogram data during init/reset */ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 93256f8bc0b536627c6bde4ab499778bfc7c9c8b..ec82c1c3f12e419642ffddcb9ad06b60571d25cc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -483,18 +483,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) } -void rtl_deinit_deferred_work(struct ieee80211_hw *hw) +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) { struct rtl_priv *rtlpriv = rtl_priv(hw); del_timer_sync(&rtlpriv->works.watchdog_timer); - cancel_delayed_work(&rtlpriv->works.watchdog_wq); - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); - cancel_delayed_work(&rtlpriv->works.ps_work); - cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); - cancel_delayed_work(&rtlpriv->works.fwevt_wq); - cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); + cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); + if (ips_wq) + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + else + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); + 
cancel_delayed_work_sync(&rtlpriv->works.ps_work); + cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq); + cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq); + cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq); } EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index b56d1b7f556730c695ad9b665692936468655d6a..cbbb5be36a096bdb1b3655dedd111aa5ec5ef7d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw); void rtl_deinit_rfkill(struct ieee80211_hw *hw); void rtl_watch_dog_timer_callback(unsigned long data); -void rtl_deinit_deferred_work(struct ieee80211_hw *hw); +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq); bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index c53cbf3d52bdd960a44f7be0d49a5f067ac80560..b01123138797dda5ad439d78a0c0a59445695d7d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -130,7 +130,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context, firmware->size); rtlpriv->rtlhal.wowlan_fwsize = firmware->size; } - rtlpriv->rtlhal.fwsize = firmware->size; release_firmware(firmware); } @@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) /* reset sec info */ rtl_cam_reset_sec_info(hw); - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); } rtlpriv->intf_ops->adapter_stop(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index d7331225c5f3df1d9a018cf796acb52d50fa47b6..457a0f725c8aa598c14ea80b38e6d4a6d4183c48 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2359,7 +2359,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } rtlpriv->cfg->ops->disable_interrupt(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 07ee3096f50e25f2a6cc264c9341982277f1017a..f6d00613c53d9f1a5bfcb332f82cb4b9a27c0cf5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -66,7 +66,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); /*<1> Stop all timer */ - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, true); /*<2> Disable Interrupt */ rtlpriv->cfg->ops->disable_interrupt(hw); @@ -287,7 +287,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); spin_lock(&rtlpriv->locks.ips_lock); if (ppsc->inactiveps) { diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 5590d07d0918bf7015874bd27b061c097509c068..820c42ff53841bb920e3ad43152997e353844600 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1150,7 +1150,7 @@ void rtl_usb_disconnect(struct usb_interface *intf) ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } /*deinit rfkill */ diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 
070dfd68bb836281890e70a18ca9bed8f9cd7770..120b0ff545c176410998d13149b0616c9d928a5a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -557,28 +557,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, u32 content_size) { struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops; - struct bl_header bl_hdr; + struct bl_header *bl_hdr; u32 write_addr, write_len; int status; - bl_hdr.flags = 0; - bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode); - bl_hdr.check_sum = cpu_to_le32( - *(u32 *)&flash_content[CHECK_SUM_OFFSET]); - bl_hdr.flash_start_address = cpu_to_le32( - *(u32 *)&flash_content[ADDR_OFFSET]); - bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); + bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL); + if (!bl_hdr) + return -ENOMEM; + + bl_hdr->flags = 0; + bl_hdr->image_no = cpu_to_le32(adapter->priv->coex_mode); + bl_hdr->check_sum = + cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]); + bl_hdr->flash_start_address = + cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]); + bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); write_len = sizeof(struct bl_header); if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) { write_addr = PING_BUFFER_ADDRESS; status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } else { write_addr = PING_BUFFER_ADDRESS >> 16; @@ -587,20 +591,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto fail; } write_addr = RSI_SD_REQUEST_MASTER | (PING_BUFFER_ADDRESS & 0xFFFF); status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed 
to load Version/CRC structure\n", __func__); - return status; + goto fail; } } - return 0; + status = 0; +fail: + kfree(bl_hdr); + return status; } static u32 read_flash_capacity(struct rsi_hw *adapter) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 370161ca2a1c34097cd8e1fd0f1b0502b2d842ee..0362967874aac45f19ff0cff9a68b9cf69d70d8e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -161,7 +161,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) int err; struct mmc_card *card = pfunction->card; struct mmc_host *host = card->host; - s32 bit = (fls(host->ocr_avail) - 1); u8 cmd52_resp; u32 clock, resp, i; u16 rca; @@ -181,7 +180,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) msleep(20); /* Initialize the SDIO card */ - host->ios.vdd = bit; host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.power_mode = MMC_POWER_UP; @@ -970,17 +968,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, /*This function resets and re-initializes the chip.*/ static void rsi_reset_chip(struct rsi_hw *adapter) { - __le32 data; + u8 *data; u8 sdio_interrupt_status = 0; u8 request = 1; int ret; + data = kzalloc(sizeof(u32), GFP_KERNEL); + if (!data) + return; + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to write SDIO wakeup register\n", __func__); - return; + goto err; } msleep(20); ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, @@ -988,7 +990,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter) if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - return; + goto err; } rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", __func__, sdio_interrupt_status); @@ -998,17 +1000,17 @@ static void rsi_reset_chip(struct 
rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return; + goto err; } - data = TA_HOLD_THREAD_VALUE; + put_unaligned_le32(TA_HOLD_THREAD_VALUE, data); if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER, - (u8 *)&data, 4)) { + data, 4)) { rsi_dbg(ERR_ZONE, "%s: Unable to hold Thread-Arch processor threads\n", __func__); - return; + goto err; } /* This msleep will ensure Thread-Arch processor to go to hold @@ -1029,6 +1031,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter) * read write operations to complete for chip reset. */ msleep(500); +err: + kfree(data); + return; } /** diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 90339203920016dfc4fc90c8d8d2aa979ee18c27..6788fbbdd166e5aa77c8ff5597868ac81b0d6590 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -85,7 +85,7 @@ enum sdio_interrupt_type { #define TA_SOFT_RST_CLR 0 #define TA_SOFT_RST_SET BIT(0) #define TA_PC_ZERO 0 -#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF) +#define TA_HOLD_THREAD_VALUE 0xF #define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF) #define TA_BASE_ADDR 0x2200 #define MISC_CFG_BASE_ADDR 0x4105 diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index f8a1fea64e2567c6b368e2fc93a667b1a9bbb0ae..219d1a86b92ecb3f48491ab8034ef62e74790d38 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -406,6 +406,11 @@ static int wl1271_suspend(struct device *dev) mmc_pm_flag_t sdio_flags; int ret = 0; + if (!wl) { + dev_err(dev, "no wilink module was probed\n"); + goto out; + } + dev_dbg(dev, "wl1271 suspend. 
wow_enabled: %d\n", wl->wow_enabled); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f07b9c9bb5ba8155ec959d441e2c4af9ec40095f..dfc076f9ee4b582ada02f5a946fcef18ae91d2ad 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,7 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_load_q); static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); struct netfront_stats { @@ -239,7 +240,7 @@ static void rx_refill_timeout(unsigned long data) static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < - (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); + (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) @@ -790,7 +791,7 @@ static int xennet_get_responses(struct netfront_queue *queue, RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(queue, cons); grant_ref_t ref = xennet_get_rx_ref(queue, cons); - int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); + int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); int slots = 1; int err = 0; unsigned long ret; @@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); xenbus_switch_state(dev, XenbusStateInitialising); + wait_event(module_load_q, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4cac4755abefc48f9d04c640944455dfa136d160..a67d037165104db83dea64ab0bf99f05ee64e8db 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); * Represents an NVM Express device. 
Each nvme_dev is a PCI function. */ struct nvme_dev { - struct nvme_queue **queues; + struct nvme_queue *queues; struct blk_mq_tag_set tagset; struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; @@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = data; - struct nvme_queue *nvmeq = dev->queues[0]; + struct nvme_queue *nvmeq = &dev->queues[0]; WARN_ON(hctx_idx != 0); WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); @@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = data; - struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; + struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; if (!nvmeq->tags) nvmeq->tags = &dev->tagset.tags[hctx_idx]; @@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, struct nvme_dev *dev = set->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); int queue_idx = (set == &dev->tagset) ? 
hctx_idx + 1 : 0; - struct nvme_queue *nvmeq = dev->queues[queue_idx]; + struct nvme_queue *nvmeq = &dev->queues[queue_idx]; BUG_ON(!nvmeq); iod->nvmeq = nvmeq; @@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx) { struct nvme_dev *dev = to_nvme_dev(ctrl); - struct nvme_queue *nvmeq = dev->queues[0]; + struct nvme_queue *nvmeq = &dev->queues[0]; struct nvme_command c; memset(&c, 0, sizeof(c)); @@ -1146,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq) if (nvmeq->sq_cmds) dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); } static void nvme_free_queues(struct nvme_dev *dev, int lowest) @@ -1154,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) int i; for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { - struct nvme_queue *nvmeq = dev->queues[i]; dev->ctrl.queue_count--; - dev->queues[i] = NULL; - nvme_free_queue(nvmeq); + nvme_free_queue(&dev->queues[i]); } } @@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) { - struct nvme_queue *nvmeq = dev->queues[0]; + struct nvme_queue *nvmeq = &dev->queues[0]; - if (!nvmeq) - return; if (nvme_suspend_queue(nvmeq)) return; @@ -1246,13 +1241,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, return 0; } -static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth, int node) +static int nvme_alloc_queue(struct nvme_dev *dev, int qid, + int depth, int node) { - struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, - node); - if (!nvmeq) - return NULL; + struct nvme_queue *nvmeq = &dev->queues[qid]; + + if (dev->ctrl.queue_count > qid) + return 0; nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), &nvmeq->cq_dma_addr, GFP_KERNEL); 
@@ -1271,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->q_depth = depth; nvmeq->qid = qid; nvmeq->cq_vector = -1; - dev->queues[qid] = nvmeq; dev->ctrl.queue_count++; - return nvmeq; + return 0; free_cqdma: dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); free_nvmeq: - kfree(nvmeq); - return NULL; + return -ENOMEM; } static int queue_request_irq(struct nvme_queue *nvmeq) @@ -1468,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) if (result < 0) return result; - nvmeq = dev->queues[0]; - if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, - dev_to_node(dev->dev)); - if (!nvmeq) - return -ENOMEM; - } + result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, + dev_to_node(dev->dev)); + if (result) + return result; + nvmeq = &dev->queues[0]; aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; @@ -1505,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev) for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { /* vector == qid - 1, match nvme_create_queue */ - if (!nvme_alloc_queue(dev, i, dev->q_depth, + if (nvme_alloc_queue(dev, i, dev->q_depth, pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { ret = -ENOMEM; break; @@ -1514,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev) max = min(dev->max_qid, dev->ctrl.queue_count - 1); for (i = dev->online_queues; i <= max; i++) { - ret = nvme_create_queue(dev->queues[i], i); + ret = nvme_create_queue(&dev->queues[i], i); if (ret) break; } @@ -1770,7 +1761,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) static int nvme_setup_io_queues(struct nvme_dev *dev) { - struct nvme_queue *adminq = dev->queues[0]; + struct nvme_queue *adminq = &dev->queues[0]; struct pci_dev *pdev = to_pci_dev(dev->dev); int result, nr_io_queues; unsigned long size; @@ -1896,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) retry: timeout = ADMIN_TIMEOUT; for (; i > 0; 
i--, sent++) - if (nvme_delete_queue(dev->queues[i], opcode)) + if (nvme_delete_queue(&dev->queues[i], opcode)) break; while (sent--) { @@ -2081,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) queues = dev->online_queues - 1; for (i = dev->ctrl.queue_count - 1; i > 0; i--) - nvme_suspend_queue(dev->queues[i]); + nvme_suspend_queue(&dev->queues[i]); if (dead) { /* A device might become IO incapable very soon during @@ -2089,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * queue_count can be 0 here. */ if (dev->ctrl.queue_count) - nvme_suspend_queue(dev->queues[0]); + nvme_suspend_queue(&dev->queues[0]); } else { nvme_disable_io_queues(dev, queues); nvme_disable_admin_queue(dev, shutdown); @@ -2345,7 +2336,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); if (!dev) return -ENOMEM; - dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *), + + dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue), GFP_KERNEL, node); if (!dev->queues) goto free; @@ -2519,6 +2511,9 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) static void nvme_error_resume(struct pci_dev *pdev) { + struct nvme_dev *dev = pci_get_drvdata(pdev); + + flush_work(&dev->ctrl.reset_work); pci_cleanup_aer_uncorrect_error_status(pdev); } @@ -2562,6 +2557,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ + .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 
93a082e0bdd4e078c978c1db62c401780fc14e62..48a831d58e7aeab2f628bb43cc9c84339f6bb5a2 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -796,7 +796,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, if (error) { dev_err(ctrl->ctrl.device, "prop_get NVME_REG_CAP failed\n"); - goto out_cleanup_queue; + goto out_stop_queue; } ctrl->ctrl.sqsize = @@ -804,23 +804,25 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); if (error) - goto out_cleanup_queue; + goto out_stop_queue; ctrl->ctrl.max_hw_sectors = (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); error = nvme_init_identify(&ctrl->ctrl); if (error) - goto out_cleanup_queue; + goto out_stop_queue; error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); if (error) - goto out_cleanup_queue; + goto out_stop_queue; return 0; +out_stop_queue: + nvme_rdma_stop_queue(&ctrl->queues[0]); out_cleanup_queue: if (new) blk_cleanup_queue(ctrl->ctrl.admin_q); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 8e21211b904b3291faebaf82d4b02c3e31fd3f3b..b7a5d1065378db75f48766c9d5caf003f99d1256 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod { struct work_struct work; } __aligned(sizeof(unsigned long long)); +/* desired maximum for a single sequence - if sg list allows it */ #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) -#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE) enum nvmet_fcp_datadir { NVMET_FCP_NODATA, @@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod { struct nvme_fc_cmd_iu cmdiubuf; struct nvme_fc_ersp_iu rspiubuf; dma_addr_t rspdma; + struct scatterlist *next_sg; struct scatterlist *data_sg; int data_sg_cnt; u32 total_length; @@ -1000,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 
INIT_LIST_HEAD(&newrec->assoc_list); kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); - newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS, - template->max_sgl_segments); + newrec->max_sg_cnt = template->max_sgl_segments; ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { @@ -1717,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) ((fod->io_dir == NVMET_FCP_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE)); /* note: write from initiator perspective */ + fod->next_sg = fod->data_sg; return 0; @@ -1874,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod, u8 op) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; + struct scatterlist *sg = fod->next_sg; unsigned long flags; - u32 tlen; + u32 remaininglen = fod->total_length - fod->offset; + u32 tlen = 0; int ret; fcpreq->op = op; fcpreq->offset = fod->offset; fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; - tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE, - (fod->total_length - fod->offset)); + /* + * for next sequence: + * break at a sg element boundary + * attempt to keep sequence length capped at + * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to + * be longer if a single sg element is larger + * than that amount. This is done to avoid creating + * a new sg list to use for the tgtport api. 
+ */ + fcpreq->sg = sg; + fcpreq->sg_cnt = 0; + while (tlen < remaininglen && + fcpreq->sg_cnt < tgtport->max_sg_cnt && + tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { + fcpreq->sg_cnt++; + tlen += sg_dma_len(sg); + sg = sg_next(sg); + } + if (tlen < remaininglen && fcpreq->sg_cnt == 0) { + fcpreq->sg_cnt++; + tlen += min_t(u32, sg_dma_len(sg), remaininglen); + sg = sg_next(sg); + } + if (tlen < remaininglen) + fod->next_sg = sg; + else + fod->next_sg = NULL; + fcpreq->transfer_length = tlen; fcpreq->transferred_length = 0; fcpreq->fcp_error = 0; fcpreq->rsplen = 0; - fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE]; - fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE); - /* * If the last READDATA request: check if LLDD supports * combined xfr with response. diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d12e5de78e700018dc58965157b21ff2639f34c5..2afafd5d8915088de1533881247fcb67b4a8aa4e 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1049,6 +1049,8 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, /* setup the first byte with lsb bits from nvmem */ rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); + if (rc) + goto err; *b++ |= GENMASK(bit_offset - 1, 0) & v; /* setup rest of the byte if any */ @@ -1067,11 +1069,16 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, /* setup the last byte with msb bits from nvmem */ rc = nvmem_reg_read(nvmem, cell->offset + cell->bytes - 1, &v, 1); + if (rc) + goto err; *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; } return buf; +err: + kfree(buf); + return ERR_PTR(rc); } /** diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 5596fdedbb9471c49b4bb73fbc57b5b03170521f..ea03f1ec12a47713f3633816cb6216a92965a629 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -695,7 +695,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) return ret; } - if (ep->ops && 
ep->ops->get_clk_resources) { + if (ep->ops && ep->ops->get_clk_resources && + ep->ops->init_clk_resources) { ret = ep->ops->get_clk_resources(ep); if (ret) return ret; diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index caea7c618207aae6b2e7c1822ddfa55a724103fa..4523d7e1bcb9c7d66528105306bf3b688c8a1e05 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -1091,6 +1091,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct pci_bus *pbus; struct pci_dev *pdev; struct cpumask *dest; + unsigned long flags; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct { @@ -1182,14 +1183,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * the channel callback directly when channel->target_cpu is * the current CPU. When the higher level interrupt code * calls us with interrupt enabled, let's add the - * local_bh_disable()/enable() to avoid race. + * local_irq_save()/restore() to avoid race: + * hv_pci_onchannelcallback() can also run in tasklet. 
*/ - local_bh_disable(); + local_irq_save(flags); if (hbus->hdev->channel->target_cpu == smp_processor_id()) hv_pci_onchannelcallback(hbus); - local_bh_enable(); + local_irq_restore(flags); if (hpdev->state == hv_pcichild_ejecting) { dev_err_once(&hbus->hdev->device, diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 06bbe471ef803ece5ec4a3567233393ff47de959..59cec9e6c8a569772c97a500a7c9ee7e787ff100 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,12 @@ #define GEN2_SPEED 0x2 #define GEN3_SPEED 0x3 +#define MSM_PCIE_IOMMU_PRESENT BIT(0) +#define MSM_PCIE_IOMMU_S1_BYPASS BIT(1) +#define MSM_PCIE_IOMMU_FAST BIT(2) +#define MSM_PCIE_IOMMU_ATOMIC BIT(3) +#define MSM_PCIE_IOMMU_FORCE_COHERENT BIT(4) + #define PHY_READY_TIMEOUT_COUNT 10 #define XMLH_LINK_UP 0x400 #define MAX_LINK_RETRIES 5 @@ -587,7 +594,6 @@ struct msm_pcie_dev_t { bool clk_power_manage_en; bool aux_clk_sync; bool aer_enable; - bool smmu_exist; uint32_t smmu_sid_base; uint32_t n_fts; uint32_t max_link_speed; @@ -649,6 +655,14 @@ struct msm_pcie_dev_t { struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM]; }; +struct msm_root_dev_t { + struct msm_pcie_dev_t *pcie_dev; + struct pci_dev *pci_dev; + uint32_t iommu_cfg; + dma_addr_t iommu_base; + size_t iommu_size; +}; + /* debug mask sys interface */ static int msm_pcie_debug_mask; module_param_named(debug_mask, msm_pcie_debug_mask, @@ -1245,8 +1259,6 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev) dev->msi_gicm_base); PCIE_DBG_FS(dev, "bus_client: %d\n", dev->bus_client); - PCIE_DBG_FS(dev, "smmu does %s exist\n", - dev->smmu_exist ? 
"" : "not"); PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n", dev->smmu_sid_base); PCIE_DBG_FS(dev, "n_fts: %d\n", @@ -5912,13 +5924,6 @@ static int msm_pcie_probe(struct platform_device *pdev) "AUX clock frequency is %s 19.2MHz.\n", msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not"); - msm_pcie_dev[rc_idx].smmu_exist = - of_property_read_bool((&pdev->dev)->of_node, - "qcom,smmu-exist"); - PCIE_DBG(&msm_pcie_dev[rc_idx], - "SMMU does %s exist.\n", - msm_pcie_dev[rc_idx].smmu_exist ? "" : "not"); - msm_pcie_dev[rc_idx].smmu_sid_base = 0; ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base", &msm_pcie_dev[rc_idx].smmu_sid_base); @@ -6333,6 +6338,200 @@ static int msm_pcie_remove(struct platform_device *pdev) return ret; } +static int msm_pci_iommu_parse_dt(struct msm_root_dev_t *root_dev) +{ + int ret; + struct msm_pcie_dev_t *pcie_dev = root_dev->pcie_dev; + struct pci_dev *pci_dev = root_dev->pci_dev; + struct device_node *pci_of_node = pci_dev->dev.of_node; + + ret = of_property_read_u32(pci_of_node, "qcom,iommu-cfg", + &root_dev->iommu_cfg); + if (ret) { + PCIE_DBG(pcie_dev, "PCIe: RC%d: no iommu-cfg present in DT\n", + pcie_dev->rc_idx); + return 0; + } + + if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_S1_BYPASS) { + root_dev->iommu_base = 0; + root_dev->iommu_size = PAGE_SIZE; + } else { + u64 iommu_range[2]; + + ret = of_property_count_elems_of_size(pci_of_node, + "qcom,iommu-range", + sizeof(iommu_range)); + if (ret != 1) { + PCIE_ERR(pcie_dev, + "invalid entry for iommu address: %d\n", + ret); + return ret; + } + + ret = of_property_read_u64_array(pci_of_node, + "qcom,iommu-range", + iommu_range, 2); + if (ret) { + PCIE_ERR(pcie_dev, + "failed to get iommu address: %d\n", ret); + return ret; + } + + root_dev->iommu_base = (dma_addr_t)iommu_range[0]; + root_dev->iommu_size = (size_t)iommu_range[1]; + } + + PCIE_DBG(pcie_dev, + "iommu-cfg: 0x%x iommu-base: %pad iommu-size: 0x%zx\n", + root_dev->iommu_cfg, &root_dev->iommu_base, + 
root_dev->iommu_size); + + return 0; +} + +static int msm_pci_iommu_init(struct msm_root_dev_t *root_dev) +{ + int ret; + struct dma_iommu_mapping *mapping; + struct msm_pcie_dev_t *pcie_dev = root_dev->pcie_dev; + struct pci_dev *pci_dev = root_dev->pci_dev; + + ret = msm_pci_iommu_parse_dt(root_dev); + if (ret) + return ret; + + if (!(root_dev->iommu_cfg & MSM_PCIE_IOMMU_PRESENT)) + return 0; + + mapping = arm_iommu_create_mapping(&pci_bus_type, root_dev->iommu_base, + root_dev->iommu_size); + if (IS_ERR_OR_NULL(mapping)) { + ret = mapping ? PTR_ERR(mapping) : -ENOMEM; + PCIE_ERR(pcie_dev, + "PCIe: RC%d: Failed to create IOMMU mapping (%d)\n", + pcie_dev->rc_idx, ret); + return ret; + } + + if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_S1_BYPASS) { + int iommu_s1_bypass = 1; + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &iommu_s1_bypass); + if (ret) { + PCIE_ERR(pcie_dev, + "PCIe: RC%d: failed to set attribute S1_BYPASS: %d\n", + pcie_dev->rc_idx, ret); + goto release_mapping; + } + } + + if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_FAST) { + int iommu_fast = 1; + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_FAST, + &iommu_fast); + if (ret) { + PCIE_ERR(pcie_dev, + "PCIe: RC%d: failed to set attribute FAST: %d\n", + pcie_dev->rc_idx, ret); + goto release_mapping; + } + } + + if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_ATOMIC) { + int iommu_atomic = 1; + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_ATOMIC, + &iommu_atomic); + if (ret) { + PCIE_ERR(pcie_dev, + "PCIe: RC%d: failed to set attribute ATOMIC: %d\n", + pcie_dev->rc_idx, ret); + goto release_mapping; + } + } + + if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_FORCE_COHERENT) { + int iommu_force_coherent = 1; + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT, + &iommu_force_coherent); + if (ret) { + PCIE_ERR(pcie_dev, + "PCIe: RC%d: failed to set attribute FORCE_COHERENT: %d\n", + pcie_dev->rc_idx, ret); + goto release_mapping; + } + } + + ret = 
arm_iommu_attach_device(&pci_dev->dev, mapping); + if (ret) { + PCIE_ERR(pcie_dev, + "PCIe: RC%d: failed to iommu attach device (%d)\n", + pcie_dev->rc_idx, ret); + goto release_mapping; + } + + PCIE_DBG(pcie_dev, "PCIe: RC%d: successful iommu attach\n", + pcie_dev->rc_idx); + return 0; + +release_mapping: + arm_iommu_release_mapping(mapping); + + return ret; +} + +int msm_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + int ret; + struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(pci_dev->bus); + struct msm_root_dev_t *root_dev; + + PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI Probe\n", pcie_dev->rc_idx); + + if (!pci_dev->dev.of_node) + return -ENODEV; + + root_dev = devm_kzalloc(&pci_dev->dev, sizeof(*root_dev), GFP_KERNEL); + if (!root_dev) + return -ENOMEM; + + root_dev->pcie_dev = pcie_dev; + root_dev->pci_dev = pci_dev; + dev_set_drvdata(&pci_dev->dev, root_dev); + + ret = msm_pci_iommu_init(root_dev); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); + if (ret) { + PCIE_ERR(pcie_dev, "DMA set mask failed (%d)\n", ret); + return ret; + } + + return 0; +} + +static struct pci_device_id msm_pci_device_id[] = { + {PCI_DEVICE(0x17cb, 0x0108)}, + {0}, +}; + +static struct pci_driver msm_pci_driver = { + .name = "pci-msm-rc", + .id_table = msm_pci_device_id, + .probe = msm_pci_probe, +}; + static const struct of_device_id msm_pcie_match[] = { { .compatible = "qcom,pci-msm", }, @@ -6416,6 +6615,10 @@ static int __init pcie_init(void) msm_pcie_debugfs_init(); + ret = pci_register_driver(&msm_pci_driver); + if (ret) + return ret; + ret = platform_driver_register(&msm_pcie_driver); return ret; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 087645116ecb222bd44a3e90eb9bcbc5e3fc25a8..c78fd9c2cf8cf672021ea28789d869ac6ee629d6 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -686,7 +686,6 @@ static int xgene_pcie_probe_bridge(struct platform_device
*pdev) bus = bridge->bus; - pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 05832b597e536e3db9744bc4cff9e9661bc5ee80..46c2ee2caf281680aa280f1a386f4d059d110888 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -863,6 +863,13 @@ struct controller *pcie_init(struct pcie_device *dev) if (pdev->hotplug_user_indicators) slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP); + /* + * We assume no Thunderbolt controllers support Command Complete events, + * but some controllers falsely claim they do. + */ + if (pdev->is_thunderbolt) + slot_cap |= PCI_EXP_SLTCAP_NCCS; + ctrl->slot_cap = slot_cap; mutex_init(&ctrl->ctrl_lock); init_waitqueue_head(&ctrl->queue); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a8da543b3814b312a99af0ef1f3c3ef18e089478..4708eb9df71b0ebb7548b87e4cb4238a0b900b6a 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus) union acpi_object *obj; struct pci_host_bridge *bridge; - if (acpi_pci_disabled || !bus->bridge) + if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge)) return; acpi_pci_slot_enumerate(bus); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 00fa4278c1f49c22d97451759ff0708016523830..c3f0473d1afa2aacaf4729bc8887bf137d299170 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -305,13 +305,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!val) { - if (pci_is_enabled(pdev)) - pci_disable_device(pdev); - else - result = -EIO; - } else + device_lock(dev); + if (dev->driver) + result = -EBUSY; + else if (val) result = pci_enable_device(pdev); + else if (pci_is_enabled(pdev)) + 
pci_disable_device(pdev); + else + result = -EIO; + device_unlock(dev); return result < 0 ? result : count; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f285cd74088ec47151b7852341884fd685ba1c83..4bccaf688aad7c7d5ed73b89b1b4f118c0a6690b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -516,12 +516,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev) if (bridge->release_fn) bridge->release_fn(bridge); + + pci_free_resource_list(&bridge->windows); } static void pci_release_host_bridge_dev(struct device *dev) { devm_pci_release_host_bridge_dev(dev); - pci_free_host_bridge(to_pci_host_bridge(dev)); + kfree(to_pci_host_bridge(dev)); } struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) diff --git a/drivers/perf/qcom_llcc_pmu.c b/drivers/perf/qcom_llcc_pmu.c index e4063970193626c499ea870d50a409f51218b3bb..065151302468ef7a52bebea8021db4a5e8cb8ade 100644 --- a/drivers/perf/qcom_llcc_pmu.c +++ b/drivers/perf/qcom_llcc_pmu.c @@ -148,6 +148,7 @@ static int qcom_llcc_pmu_probe(struct platform_device *pdev) .start = qcom_llcc_event_start, .stop = qcom_llcc_event_stop, .read = qcom_llcc_event_read, + .events_across_hotplug = 1, }; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lagg-base"); diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index b1ca838dd80a1c2843083b9c16d532be6ce5d40d..e61e2f8c91ce862c09123b9c0f391f51fb32bf6e 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -576,8 +576,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_child_of_node(np_config, np) { ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); - if (ret < 0) + if (ret < 0) { + of_node_put(np); break; + } } } diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 30443446d31de686ef99bb5d91bffede229c3094..82473d18075e15e354cbdddcdcf3866063c53ed9 100644 --- 
a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -198,4 +198,11 @@ config PINCTRL_SDXPRAIRIE the Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SDXPRAIRIE platform. +config PINCTRL_SLPI + tristate "Qualcomm Technologies, Inc SLPI pin controller driver" + depends on GPIOLIB && OF + help + This is the pinctrl, pinmux and pinconf driver for the + SLPI pin controller block. + endif diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 567ac4b0844e82924951b653eb017a923946e04b..fe98640c1fbccb9b99a4e7b1d09abe478e11f775 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_PINCTRL_SDMSHRIKE) += pinctrl-sdmshrike.o obj-$(CONFIG_PINCTRL_SM6150) += pinctrl-sm6150.o obj-$(CONFIG_PINCTRL_SDXPRAIRIE) += pinctrl-sdxprairie.o obj-$(CONFIG_PINCTRL_SDMMAGPIE) += pinctrl-sdmmagpie.o +obj-$(CONFIG_PINCTRL_SLPI) += pinctrl-slpi.o diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index ed11b59348fb45a0ff88e78fb39623bdf6d3ac84..fef0970abaf24687ee60b252d61176ca14f38295 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1390,6 +1390,12 @@ static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl) static int msm_gpiochip_to_irq(struct gpio_chip *chip, unsigned int offset) { struct irq_fwspec fwspec; + struct irq_domain *domain = chip->irqdomain; + int virq; + + virq = irq_find_mapping(domain, offset); + if (virq) + return virq; fwspec.fwnode = of_node_to_fwnode(chip->of_node); fwspec.param[0] = offset; diff --git a/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c b/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c index 48e20ce254be176143add7198a5d936aa601ecd5..f6e6f8aa075fb2b4d1e3399c7e2c187bc64f7445 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c +++ b/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c @@ -1440,14 +1440,14 @@ static const struct msm_pingroup sdmmagpie_groups[] = { [116] = 
PINGROUP(116, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), [117] = PINGROUP(117, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), [118] = PINGROUP(118, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), - [119] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0), - [120] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6), - [121] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3), - [122] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0), - [123] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6), - [124] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3), - [125] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0), - [126] = UFS_RESET(ufs_reset, 0x9f000), + [119] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x19a000, 15, 0), + [120] = SDC_QDSD_PINGROUP(sdc1_clk, 0x19a000, 13, 6), + [121] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x19a000, 11, 3), + [122] = SDC_QDSD_PINGROUP(sdc1_data, 0x19a000, 9, 0), + [123] = SDC_QDSD_PINGROUP(sdc2_clk, 0x998000, 14, 6), + [124] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x998000, 11, 3), + [125] = SDC_QDSD_PINGROUP(sdc2_data, 0x998000, 9, 0), + [126] = UFS_RESET(ufs_reset, 0x19f000), }; static struct msm_dir_conn sdmmagpie_dir_conn[] = { diff --git a/drivers/pinctrl/qcom/pinctrl-slpi.c b/drivers/pinctrl/qcom/pinctrl-slpi.c new file mode 100644 index 0000000000000000000000000000000000000000..a89573ea3d4a4e5cf8d6871b704d3967c0a1c7a8 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-slpi.c @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../core.h" +#include "../pinctrl-utils.h" + +/** + * struct slpi_pin - SLPI pin definition + * @name: Name of the pin. + * @ctl_reg: Offset of the register holding control bits for this group. + * @io_reg: Offset of the register holding input/output bits for this group. + * @pull_bit: Offset in @ctl_reg for the bias configuration. + * @mux_bit: Offset in @ctl_reg for the pinmux function selection. + * @drv_bit: Offset in @ctl_reg for the drive strength configuration. + * @oe_bit: Offset in @ctl_reg for controlling output enable. + * @in_bit: Offset in @io_reg for the input bit value. + * @out_bit: Offset in @io_reg for the output bit value. + */ +struct slpi_pin { + void __iomem *base; + unsigned int offset; + + unsigned int ctl_reg; + unsigned int io_reg; + + unsigned int pull_bit:5; + unsigned int mux_bit:5; + unsigned int drv_bit:5; + unsigned int oe_bit:5; + unsigned int in_bit:5; + unsigned int out_bit:5; +}; + + +/* The index of each function in slpi_pin_functions[] array */ +enum slpi_pin_func_index { + SLPI_PIN_FUNC_INDEX_GPIO = 0x00, + SLPI_PIN_FUNC_INDEX_FUNC1 = 0x01, + SLPI_PIN_FUNC_INDEX_FUNC2 = 0x02, + SLPI_PIN_FUNC_INDEX_FUNC3 = 0x03, + SLPI_PIN_FUNC_INDEX_FUNC4 = 0x04, + SLPI_PIN_FUNC_INDEX_FUNC5 = 0x05, +}; + +#define SLPI_PIN_FUNC_GPIO "gpio" +#define SLPI_PIN_FUNC_FUNC1 "func1" +#define SLPI_PIN_FUNC_FUNC2 "func2" +#define SLPI_PIN_FUNC_FUNC3 "func3" +#define SLPI_PIN_FUNC_FUNC4 "func4" +#define SLPI_PIN_FUNC_FUNC5 "func5" + +static const char *const slpi_gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", +}; + +static const char *const slpi_pin_functions[] = { + [SLPI_PIN_FUNC_INDEX_GPIO] = SLPI_PIN_FUNC_GPIO, + [SLPI_PIN_FUNC_INDEX_FUNC1] = SLPI_PIN_FUNC_FUNC1, + [SLPI_PIN_FUNC_INDEX_FUNC2] = SLPI_PIN_FUNC_FUNC2, + 
[SLPI_PIN_FUNC_INDEX_FUNC3] = SLPI_PIN_FUNC_FUNC3, + [SLPI_PIN_FUNC_INDEX_FUNC4] = SLPI_PIN_FUNC_FUNC4, + [SLPI_PIN_FUNC_INDEX_FUNC5] = SLPI_PIN_FUNC_FUNC5, +}; + +static unsigned int slpi_read(struct slpi_pin *pin, u32 reg) +{ + return readl_relaxed(pin->base + pin->offset + reg); +} + +static void slpi_write(u32 val, struct slpi_pin *pin, u32 reg) +{ + return writel_relaxed(val, pin->base + pin->offset + reg); +} + +static int slpi_get_groups_count(struct pinctrl_dev *pctldev) +{ + /* Every PIN is a group */ + return pctldev->desc->npins; +} + +static const char *slpi_get_group_name(struct pinctrl_dev *pctldev, + unsigned int pin) +{ + return pctldev->desc->pins[pin].name; +} + +static int slpi_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int pin, + const unsigned int **pins, + unsigned int *num_pins) +{ + *pins = &pctldev->desc->pins[pin].number; + *num_pins = 1; + return 0; +} + +static const struct pinctrl_ops slpi_pinctrl_ops = { + .get_groups_count = slpi_get_groups_count, + .get_group_name = slpi_get_group_name, + .get_group_pins = slpi_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_group, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int slpi_get_functions_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(slpi_pin_functions); +} + +static const char *slpi_get_function_name(struct pinctrl_dev *pctldev, + unsigned int function) +{ + return slpi_pin_functions[function]; +} + +static int slpi_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int function, + const char * const **groups, + unsigned int * const num_groups) +{ + *groups = slpi_gpio_groups; + *num_groups = pctldev->desc->npins; + return 0; +} + +static int slpi_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned int function, + unsigned int pin_index) +{ + struct slpi_pin *pin; + u32 val; + + pin = pctldev->desc->pins[pin_index].drv_data; + + if (WARN_ON(function >= ARRAY_SIZE(slpi_pin_functions))) + return -EINVAL; + + val = 
slpi_read(pin, pin->ctl_reg); + val &= ~(0x7 << pin->mux_bit); + val |= function << pin->mux_bit; + slpi_write(val, pin, pin->ctl_reg); + + return 0; +} + +static const struct pinmux_ops slpi_pinmux_ops = { + .get_functions_count = slpi_get_functions_count, + .get_function_name = slpi_get_function_name, + .get_function_groups = slpi_get_function_groups, + .set_mux = slpi_pinmux_set_mux, +}; + +static int slpi_config_reg(const struct slpi_pin *pin, + unsigned int param, + unsigned int *mask, + unsigned int *bit) +{ + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_BIAS_BUS_HOLD: + case PIN_CONFIG_BIAS_PULL_UP: + *bit = pin->pull_bit; + *mask = 3; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + *bit = pin->drv_bit; + *mask = 7; + break; + case PIN_CONFIG_OUTPUT: + case PIN_CONFIG_INPUT_ENABLE: + *bit = pin->oe_bit; + *mask = 1; + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +#define MSM_NO_PULL 0 +#define MSM_PULL_DOWN 1 +#define MSM_KEEPER 2 +#define MSM_PULL_UP 3 + +static unsigned int slpi_regval_to_drive(u32 val) +{ + return (val + 1) * 2; +} + +static int slpi_config_group_get(struct pinctrl_dev *pctldev, + unsigned int pin_index, + unsigned long *config) +{ + unsigned int param = pinconf_to_config_param(*config); + struct slpi_pin *pin; + unsigned int mask; + unsigned int arg; + unsigned int bit; + int ret; + u32 val; + + pin = pctldev->desc->pins[pin_index].drv_data; + ret = slpi_config_reg(pin, param, &mask, &bit); + if (ret < 0) + return ret; + + val = slpi_read(pin, pin->ctl_reg); + arg = (val >> bit) & mask; + + /* Convert register value to pinconf value */ + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + arg = arg == MSM_NO_PULL; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + arg = arg == MSM_PULL_DOWN; + break; + case PIN_CONFIG_BIAS_BUS_HOLD: + arg = arg == MSM_KEEPER; + break; + case PIN_CONFIG_BIAS_PULL_UP: + arg = arg == MSM_PULL_UP; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + arg = 
slpi_regval_to_drive(arg); + break; + case PIN_CONFIG_OUTPUT: + /* Pin is not output */ + if (!arg) + return -EINVAL; + + val = slpi_read(pin, pin->io_reg); + arg = !!(val & BIT(pin->in_bit)); + break; + case PIN_CONFIG_INPUT_ENABLE: + /* Pin is output */ + if (arg) + return -EINVAL; + arg = 1; + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int slpi_config_group_set(struct pinctrl_dev *pctldev, + unsigned int pin_index, + unsigned long *configs, + unsigned int num_configs) +{ + struct slpi_pin *pin; + unsigned int param; + unsigned int mask; + unsigned int arg; + unsigned int bit; + int ret; + u32 val; + int i; + + pin = pctldev->desc->pins[pin_index].drv_data; + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + ret = slpi_config_reg(pin, param, &mask, &bit); + if (ret < 0) + return ret; + + /* Convert pinconf values to register values */ + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + arg = MSM_NO_PULL; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + arg = MSM_PULL_DOWN; + break; + case PIN_CONFIG_BIAS_BUS_HOLD: + arg = MSM_KEEPER; + break; + case PIN_CONFIG_BIAS_PULL_UP: + arg = MSM_PULL_UP; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + /* Check for invalid values */ + if (arg > 16 || arg < 2 || (arg % 2) != 0) + arg = -1; + else + arg = (arg / 2) - 1; + break; + case PIN_CONFIG_OUTPUT: + /* set output value */ + val = slpi_read(pin, pin->io_reg); + if (arg) + val |= BIT(pin->out_bit); + else + val &= ~BIT(pin->out_bit); + slpi_write(val, pin, pin->io_reg); + + /* enable output */ + arg = 1; + break; + case PIN_CONFIG_INPUT_ENABLE: + /* disable output */ + arg = 0; + break; + default: + dev_err(pctldev->dev, "Unsupported config parameter: %x\n", + param); + return -EINVAL; + } + + /* Range-check user-supplied value */ + if (arg & ~mask) { + dev_err(pctldev->dev, "config %x: %x is invalid\n", + param, 
arg); + return -EINVAL; + } + + val = slpi_read(pin, pin->ctl_reg); + val &= ~(mask << bit); + val |= arg << bit; + slpi_write(val, pin, pin->ctl_reg); + } + + return 0; +} + +static const struct pinconf_ops slpi_pinconf_ops = { + .is_generic = true, + .pin_config_group_get = slpi_config_group_get, + .pin_config_group_set = slpi_config_group_set, +}; + +static int slpi_pinctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pinctrl_dev *pctldev; + struct pinctrl_pin_desc *pindesc; + struct pinctrl_desc *pctrldesc; + struct slpi_pin *pin, *pins; + struct resource *res; + int ret, npins, i; + void __iomem *base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + ret = of_property_read_u32(dev->of_node, "qcom,num-pins", &npins); + if (ret < 0) + return ret; + + pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL); + if (!pindesc) + return -ENOMEM; + + if (WARN_ON(npins > ARRAY_SIZE(slpi_gpio_groups))) npins = ARRAY_SIZE(slpi_gpio_groups); + + pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + + pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL); + if (!pctrldesc) + return -ENOMEM; + + pctrldesc->pctlops = &slpi_pinctrl_ops; + pctrldesc->pmxops = &slpi_pinmux_ops; + pctrldesc->confops = &slpi_pinconf_ops; + pctrldesc->owner = THIS_MODULE; + pctrldesc->name = dev_name(&pdev->dev); + pctrldesc->pins = pindesc; + pctrldesc->npins = npins; + + for (i = 0; i < npins; i++, pindesc++) { + pin = &pins[i]; + pindesc->drv_data = pin; + pindesc->number = i; + pindesc->name = slpi_gpio_groups[i]; + + pin->base = base; + pin->offset = i * 0x1000; + pin->ctl_reg = 0x0; + pin->io_reg = 0x4; + + pin->pull_bit = 0; + pin->out_bit = 1; + pin->mux_bit = 2; + pin->oe_bit = 9; + pin->drv_bit = 6; + pin->in_bit = 0; + } + + pctldev = devm_pinctrl_register(&pdev->dev, pctrldesc, NULL); + if (IS_ERR(pctldev)) { + dev_err(dev, "Failed 
to register pinctrl device\n"); + return PTR_ERR(pctldev); + } + + return 0; +} + +static const struct of_device_id slpi_pinctrl_of_match[] = { + { .compatible = "qcom,slpi-pinctrl" }, /* Generic */ + { }, +}; + +MODULE_DEVICE_TABLE(of, slpi_pinctrl_of_match); + +static struct platform_driver slpi_pinctrl_driver = { + .driver = { + .name = "qcom-slpi-pinctrl", + .of_match_table = slpi_pinctrl_of_match, + }, + .probe = slpi_pinctrl_probe, +}; + +static int __init slpi_pinctrl_init(void) +{ + return platform_driver_register(&slpi_pinctrl_driver); +} +arch_initcall(slpi_pinctrl_init); + +static void __exit slpi_pinctrl_exit(void) +{ + platform_driver_unregister(&slpi_pinctrl_driver); +} +module_exit(slpi_pinctrl_exit); + +MODULE_DESCRIPTION("QTI SLPI GPIO pin control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 34ffbe2f626c49c1c6f6c53dd5ef18cb470f04d2..32c1977c87aeaf513eae2f2df873a26ff517127d 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -55,6 +55,27 @@ config SPS_SUPPORT_NDP_BAM help No-Data-Path BAM is used to improve BAM performance. +config EP_PCIE + bool "PCIe Endpoint mode support" + select GENERIC_ALLOCATOR + help + PCIe controller is in endpoint mode. + It supports the APIs to clients as a service layer, and allows + clients to enable/disable PCIe link, configure the address + mapping for the access to host memory, trigger wake interrupt + on host side to wake up host, and trigger MSI to host side. + +config EP_PCIE_HW + bool "PCIe Endpoint HW driver" + depends on EP_PCIE + help + PCIe endpoint HW specific implementation. + It supports: + 1. link training with Root Complex. + 2. Address mapping. + 3. Sideband signaling. + 4. Power management. 
+ config USB_BAM bool "USB BAM Driver" depends on SPS && USB_GADGET diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile index 12149fde88fe84605564115d2dc93f30521d7d84..29735cc6b4f878e7040695bc1a3f0fdc0ce864f6 100644 --- a/drivers/platform/msm/Makefile +++ b/drivers/platform/msm/Makefile @@ -9,5 +9,6 @@ obj-$(CONFIG_USB_BAM) += usb_bam.o obj-$(CONFIG_GSI) += gsi/ obj-$(CONFIG_IPA) += ipa/ obj-$(CONFIG_IPA3) += ipa/ +obj-$(CONFIG_EP_PCIE) += ep_pcie/ obj-$(CONFIG_MSM_11AD) += msm_11ad/ obj-$(CONFIG_SEEMP_CORE) += seemp_core/ diff --git a/drivers/platform/msm/ep_pcie/Makefile b/drivers/platform/msm/ep_pcie/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0567e1539fcae9e73e920691d5e0446b1c706532 --- /dev/null +++ b/drivers/platform/msm/ep_pcie/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_EP_PCIE) += ep_pcie.o +obj-$(CONFIG_EP_PCIE_HW) += ep_pcie_core.o ep_pcie_phy.o ep_pcie_dbg.o diff --git a/drivers/platform/msm/ep_pcie/ep_pcie.c b/drivers/platform/msm/ep_pcie/ep_pcie.c new file mode 100644 index 0000000000000000000000000000000000000000..9b5d2acbb22d2721ebd1c99d276a270835f2cd95 --- /dev/null +++ b/drivers/platform/msm/ep_pcie/ep_pcie.c @@ -0,0 +1,205 @@ +/* Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * MSM PCIe endpoint service layer. 
+ */ +#include +#include +#include +#include +#include +#include +#include "ep_pcie_com.h" + +LIST_HEAD(head); + +int ep_pcie_register_drv(struct ep_pcie_hw *handle) +{ + struct ep_pcie_hw *present; + bool new = true; + + if (WARN_ON(!handle)) + return -EINVAL; + + list_for_each_entry(present, &head, node) { + if (present->device_id == handle->device_id) { + new = false; + break; + } + } + + if (new) { + list_add(&handle->node, &head); + pr_debug("ep_pcie:%s: register a new driver for device 0x%x\n", + __func__, handle->device_id); + return 0; + } + pr_debug( + "ep_pcie:%s: driver to register for device 0x%x has already existed\n", + __func__, handle->device_id); + return -EEXIST; +} +EXPORT_SYMBOL(ep_pcie_register_drv); + +int ep_pcie_deregister_drv(struct ep_pcie_hw *handle) +{ + struct ep_pcie_hw *present; + bool found = false; + + if (WARN_ON(!handle)) + return -EINVAL; + + list_for_each_entry(present, &head, node) { + if (present->device_id == handle->device_id) { + found = true; + list_del(&handle->node); + break; + } + } + + if (found) { + pr_debug("ep_pcie:%s: deregistered driver for device 0x%x\n", + __func__, handle->device_id); + return 0; + } + pr_err("ep_pcie:%s: driver for device 0x%x does not exist\n", + __func__, handle->device_id); + return -EEXIST; +} +EXPORT_SYMBOL(ep_pcie_deregister_drv); + +struct ep_pcie_hw *ep_pcie_get_phandle(u32 id) +{ + struct ep_pcie_hw *present; + + list_for_each_entry(present, &head, node) { + if (present->device_id == id) { + pr_debug("ep_pcie:%s: found driver for device 0x%x\n", + __func__, id); + return present; + } + } + + pr_debug("ep_pcie:%s: driver for device 0x%x does not exist\n", + __func__, id); + return NULL; +} +EXPORT_SYMBOL(ep_pcie_get_phandle); + +int ep_pcie_register_event(struct ep_pcie_hw *phandle, + struct ep_pcie_register_event *reg) +{ + if (phandle) + return phandle->register_event(reg); + + return ep_pcie_core_register_event(reg); +} +EXPORT_SYMBOL(ep_pcie_register_event); + +int 
ep_pcie_deregister_event(struct ep_pcie_hw *phandle) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->deregister_event(); +} +EXPORT_SYMBOL(ep_pcie_deregister_event); + +enum ep_pcie_link_status ep_pcie_get_linkstatus(struct ep_pcie_hw *phandle) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->get_linkstatus(); +} +EXPORT_SYMBOL(ep_pcie_get_linkstatus); + +int ep_pcie_config_outbound_iatu(struct ep_pcie_hw *phandle, + struct ep_pcie_iatu entries[], + u32 num_entries) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->config_outbound_iatu(entries, num_entries); +} +EXPORT_SYMBOL(ep_pcie_config_outbound_iatu); + +int ep_pcie_get_msi_config(struct ep_pcie_hw *phandle, + struct ep_pcie_msi_config *cfg) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->get_msi_config(cfg); +} +EXPORT_SYMBOL(ep_pcie_get_msi_config); + +int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->trigger_msi(idx); +} +EXPORT_SYMBOL(ep_pcie_trigger_msi); + +int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->wakeup_host(); +} +EXPORT_SYMBOL(ep_pcie_wakeup_host); + +int ep_pcie_config_db_routing(struct ep_pcie_hw *phandle, + struct ep_pcie_db_config chdb_cfg, + struct ep_pcie_db_config erdb_cfg) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->config_db_routing(chdb_cfg, erdb_cfg); +} +EXPORT_SYMBOL(ep_pcie_config_db_routing); + +int ep_pcie_enable_endpoint(struct ep_pcie_hw *phandle, + enum ep_pcie_options opt) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->enable_endpoint(opt); +} +EXPORT_SYMBOL(ep_pcie_enable_endpoint); + +int ep_pcie_disable_endpoint(struct ep_pcie_hw *phandle) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->disable_endpoint(); +} +EXPORT_SYMBOL(ep_pcie_disable_endpoint); + +int ep_pcie_mask_irq_event(struct ep_pcie_hw 
*phandle, + enum ep_pcie_irq_event event, + bool enable) +{ + if (WARN_ON(!phandle)) + return -EINVAL; + + return phandle->mask_irq_event(event, enable); +} +EXPORT_SYMBOL(ep_pcie_mask_irq_event); diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h new file mode 100644 index 0000000000000000000000000000000000000000..36d49e4eb4d3f4a4ce9be0dd420a7dc6a22fdb15 --- /dev/null +++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h @@ -0,0 +1,422 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __EP_PCIE_COM_H +#define __EP_PCIE_COM_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCIE20_PARF_SYS_CTRL 0x00 +#define PCIE20_PARF_DB_CTRL 0x10 +#define PCIE20_PARF_PM_CTRL 0x20 +#define PCIE20_PARF_PM_STTS 0x24 +#define PCIE20_PARF_PHY_CTRL 0x40 +#define PCIE20_PARF_PHY_REFCLK 0x4C +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PCIE20_PARF_TEST_BUS 0xE4 +#define PCIE20_PARF_MHI_BASE_ADDR_LOWER 0x178 +#define PCIE20_PARF_MHI_BASE_ADDR_UPPER 0x17c +#define PCIE20_PARF_MSI_GEN 0x188 +#define PCIE20_PARF_DEBUG_INT_EN 0x190 +#define PCIE20_PARF_MHI_IPA_DBS 0x198 +#define PCIE20_PARF_MHI_IPA_CDB_TARGET_LOWER 0x19C +#define PCIE20_PARF_MHI_IPA_EDB_TARGET_LOWER 0x1A0 +#define PCIE20_PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1A4 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8 +#define PCIE20_PARF_Q2A_FLUSH 0x1AC +#define PCIE20_PARF_LTSSM 0x1B0 +#define PCIE20_PARF_CFG_BITS 0x210 +#define PCIE20_PARF_LTR_MSI_EXIT_L1SS 0x214 +#define PCIE20_PARF_INT_ALL_STATUS 0x224 +#define PCIE20_PARF_INT_ALL_CLEAR 0x228 +#define PCIE20_PARF_INT_ALL_MASK 0x22C +#define PCIE20_PARF_SLV_ADDR_MSB_CTRL 0x2C0 +#define PCIE20_PARF_DBI_BASE_ADDR 0x350 +#define PCIE20_PARF_DBI_BASE_ADDR_HI 0x354 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE_HI 0x35C +#define PCIE20_PARF_ATU_BASE_ADDR 0x634 +#define PCIE20_PARF_ATU_BASE_ADDR_HI 0x638 +#define PCIE20_PARF_DEVICE_TYPE 0x1000 + +#define PCIE20_ELBI_VERSION 0x00 +#define PCIE20_ELBI_SYS_CTRL 0x04 +#define PCIE20_ELBI_SYS_STTS 0x08 +#define PCIE20_ELBI_CS2_ENABLE 0xA4 + +#define PCIE20_DEVICE_ID_VENDOR_ID 0x00 +#define PCIE20_COMMAND_STATUS 0x04 +#define PCIE20_CLASS_CODE_REVISION_ID 0x08 +#define PCIE20_BIST_HDR_TYPE 0x0C +#define PCIE20_BAR0 0x10 +#define PCIE20_SUBSYSTEM 0x2c +#define PCIE20_CAP_ID_NXT_PTR 0x40 +#define PCIE20_CON_STATUS 0x44 +#define PCIE20_MSI_CAP_ID_NEXT_CTRL 0x50 +#define PCIE20_MSI_LOWER 0x54 +#define 
PCIE20_MSI_UPPER 0x58 +#define PCIE20_MSI_DATA 0x5C +#define PCIE20_MSI_MASK 0x60 +#define PCIE20_DEVICE_CAPABILITIES 0x74 +#define PCIE20_MASK_EP_L1_ACCPT_LATENCY 0xE00 +#define PCIE20_MASK_EP_L0S_ACCPT_LATENCY 0x1C0 +#define PCIE20_LINK_CAPABILITIES 0x7C +#define PCIE20_MASK_CLOCK_POWER_MAN 0x40000 +#define PCIE20_MASK_L1_EXIT_LATENCY 0x38000 +#define PCIE20_MASK_L0S_EXIT_LATENCY 0x7000 +#define PCIE20_CAP_LINKCTRLSTATUS 0x80 +#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 +#define PCIE20_LINK_CONTROL2_LINK_STATUS2 0xA0 +#define PCIE20_L1SUB_CAPABILITY 0x154 +#define PCIE20_L1SUB_CONTROL1 0x158 +#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C +#define PCIE20_MASK_ACK_N_FTS 0xff00 +#define PCIE20_MISC_CONTROL_1 0x8BC + +#define PCIE20_PLR_IATU_VIEWPORT 0x900 +#define PCIE20_PLR_IATU_CTRL1 0x904 +#define PCIE20_PLR_IATU_CTRL2 0x908 +#define PCIE20_PLR_IATU_LBAR 0x90C +#define PCIE20_PLR_IATU_UBAR 0x910 +#define PCIE20_PLR_IATU_LAR 0x914 +#define PCIE20_PLR_IATU_LTAR 0x918 +#define PCIE20_PLR_IATU_UTAR 0x91c + +#define PCIE20_IATU_BASE(n) (n * 0x200) + +#define PCIE20_IATU_O_CTRL1(n) (PCIE20_IATU_BASE(n) + 0x00) +#define PCIE20_IATU_O_CTRL2(n) (PCIE20_IATU_BASE(n) + 0x04) +#define PCIE20_IATU_O_LBAR(n) (PCIE20_IATU_BASE(n) + 0x08) +#define PCIE20_IATU_O_UBAR(n) (PCIE20_IATU_BASE(n) + 0x0c) +#define PCIE20_IATU_O_LAR(n) (PCIE20_IATU_BASE(n) + 0x10) +#define PCIE20_IATU_O_LTAR(n) (PCIE20_IATU_BASE(n) + 0x14) +#define PCIE20_IATU_O_UTAR(n) (PCIE20_IATU_BASE(n) + 0x18) + +#define PCIE20_IATU_I_CTRL1(n) (PCIE20_IATU_BASE(n) + 0x100) +#define PCIE20_IATU_I_CTRL2(n) (PCIE20_IATU_BASE(n) + 0x104) +#define PCIE20_IATU_I_LBAR(n) (PCIE20_IATU_BASE(n) + 0x108) +#define PCIE20_IATU_I_UBAR(n) (PCIE20_IATU_BASE(n) + 0x10c) +#define PCIE20_IATU_I_LAR(n) (PCIE20_IATU_BASE(n) + 0x110) +#define PCIE20_IATU_I_LTAR(n) (PCIE20_IATU_BASE(n) + 0x114) +#define PCIE20_IATU_I_UTAR(n) (PCIE20_IATU_BASE(n) + 0x118) + +#define PCIE20_MHICFG 0x110 +#define PCIE20_BHI_EXECENV 0x228 +#define PCIE20_MHIVER 
0x108 +#define PCIE20_MHICTRL 0x138 +#define PCIE20_MHISTATUS 0x148 +#define PCIE20_BHI_VERSION_LOWER 0x200 +#define PCIE20_BHI_VERSION_UPPER 0x204 +#define PCIE20_BHI_INTVEC 0x220 + +#define PCIE20_AUX_CLK_FREQ_REG 0xB40 + +#define PERST_TIMEOUT_US_MIN 1000 +#define PERST_TIMEOUT_US_MAX 1000 +#define PERST_CHECK_MAX_COUNT 30000 +#define LINK_UP_TIMEOUT_US_MIN 1000 +#define LINK_UP_TIMEOUT_US_MAX 1000 +#define LINK_UP_CHECK_MAX_COUNT 30000 +#define BME_TIMEOUT_US_MIN 1000 +#define BME_TIMEOUT_US_MAX 1000 +#define BME_CHECK_MAX_COUNT 30000 +#define PHY_STABILIZATION_DELAY_US_MIN 1000 +#define PHY_STABILIZATION_DELAY_US_MAX 1000 +#define REFCLK_STABILIZATION_DELAY_US_MIN 1000 +#define REFCLK_STABILIZATION_DELAY_US_MAX 1000 +#define PHY_READY_TIMEOUT_COUNT 30000 +#define MSI_EXIT_L1SS_WAIT 10 +#define MSI_EXIT_L1SS_WAIT_MAX_COUNT 100 +#define XMLH_LINK_UP 0x400 +#define PARF_XMLH_LINK_UP 0x40000000 + +#define MAX_PROP_SIZE 32 +#define MAX_MSG_LEN 80 +#define MAX_NAME_LEN 80 +#define MAX_IATU_ENTRY_NUM 2 + +#define EP_PCIE_LOG_PAGES 50 +#define EP_PCIE_MAX_VREG 2 +#define EP_PCIE_MAX_CLK 7 +#define EP_PCIE_MAX_PIPE_CLK 1 +#define EP_PCIE_MAX_RESET 2 + +#define EP_PCIE_ERROR -30655 +#define EP_PCIE_LINK_DOWN 0xFFFFFFFF + +#define EP_PCIE_OATU_INDEX_MSI 1 +#define EP_PCIE_OATU_INDEX_CTRL 2 +#define EP_PCIE_OATU_INDEX_DATA 3 + +#define EP_PCIE_OATU_UPPER 0x100 + +#define EP_PCIE_GEN_DBG(x...) do { \ + if (ep_pcie_get_debug_mask()) \ + pr_alert(x); \ + else \ + pr_debug(x); \ + } while (0) + +#define EP_PCIE_DBG(dev, fmt, arg...) do { \ + if ((dev)->ipc_log_ful) \ + ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \ + if (ep_pcie_get_debug_mask()) \ + pr_alert("%s: " fmt, __func__, arg); \ + } while (0) + +#define EP_PCIE_DBG2(dev, fmt, arg...) 
do { \ + if ((dev)->ipc_log_sel) \ + ipc_log_string((dev)->ipc_log_sel, \ + "DBG1:%s: " fmt, __func__, arg); \ + if ((dev)->ipc_log_ful) \ + ipc_log_string((dev)->ipc_log_ful, \ + "DBG2:%s: " fmt, __func__, arg); \ + if (ep_pcie_get_debug_mask()) \ + pr_alert("%s: " fmt, __func__, arg); \ + } while (0) + +#define EP_PCIE_DBG_FS(fmt, arg...) pr_alert("%s: " fmt, __func__, arg) + +#define EP_PCIE_DUMP(dev, fmt, arg...) do { \ + if ((dev)->ipc_log_dump) \ + ipc_log_string((dev)->ipc_log_dump, \ + "DUMP:%s: " fmt, __func__, arg); \ + if (ep_pcie_get_debug_mask()) \ + pr_alert("%s: " fmt, __func__, arg); \ + } while (0) + +#define EP_PCIE_INFO(dev, fmt, arg...) do { \ + if ((dev)->ipc_log_sel) \ + ipc_log_string((dev)->ipc_log_sel, \ + "INFO:%s: " fmt, __func__, arg); \ + if ((dev)->ipc_log_ful) \ + ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \ + pr_info("%s: " fmt, __func__, arg); \ + } while (0) + +#define EP_PCIE_ERR(dev, fmt, arg...) do { \ + if ((dev)->ipc_log_sel) \ + ipc_log_string((dev)->ipc_log_sel, \ + "ERR:%s: " fmt, __func__, arg); \ + if ((dev)->ipc_log_ful) \ + ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \ + pr_err("%s: " fmt, __func__, arg); \ + } while (0) + +enum ep_pcie_res { + EP_PCIE_RES_PARF, + EP_PCIE_RES_PHY, + EP_PCIE_RES_MMIO, + EP_PCIE_RES_MSI, + EP_PCIE_RES_DM_CORE, + EP_PCIE_RES_ELBI, + EP_PCIE_RES_IATU, + EP_PCIE_MAX_RES, +}; + +enum ep_pcie_irq { + EP_PCIE_INT_PM_TURNOFF, + EP_PCIE_INT_DSTATE_CHANGE, + EP_PCIE_INT_L1SUB_TIMEOUT, + EP_PCIE_INT_LINK_UP, + EP_PCIE_INT_LINK_DOWN, + EP_PCIE_INT_BRIDGE_FLUSH_N, + EP_PCIE_INT_BME, + EP_PCIE_INT_GLOBAL, + EP_PCIE_MAX_IRQ, +}; + +enum ep_pcie_gpio { + EP_PCIE_GPIO_PERST, + EP_PCIE_GPIO_WAKE, + EP_PCIE_GPIO_CLKREQ, + EP_PCIE_GPIO_MDM2AP, + EP_PCIE_MAX_GPIO, +}; + +struct ep_pcie_gpio_info_t { + char *name; + u32 num; + bool out; + u32 on; + u32 init; +}; + +struct ep_pcie_vreg_info_t { + struct regulator *hdl; + char *name; + u32 max_v; + u32 min_v; + u32 
opt_mode; + bool required; +}; + +struct ep_pcie_clk_info_t { + struct clk *hdl; + char *name; + u32 freq; + bool required; +}; + +struct ep_pcie_reset_info_t { + struct reset_control *hdl; + char *name; + bool required; +}; + +struct ep_pcie_res_info_t { + char *name; + struct resource *resource; + void __iomem *base; +}; + +struct ep_pcie_irq_info_t { + char *name; + u32 num; +}; + +/* phy info structure */ +struct ep_pcie_phy_info_t { + u32 offset; + u32 val; + u32 delay; + u32 direction; +}; + +/* pcie endpoint device structure */ +struct ep_pcie_dev_t { + struct platform_device *pdev; + struct regulator *gdsc; + struct ep_pcie_vreg_info_t vreg[EP_PCIE_MAX_VREG]; + struct ep_pcie_gpio_info_t gpio[EP_PCIE_MAX_GPIO]; + struct ep_pcie_clk_info_t clk[EP_PCIE_MAX_CLK]; + struct ep_pcie_clk_info_t pipeclk[EP_PCIE_MAX_PIPE_CLK]; + struct ep_pcie_reset_info_t reset[EP_PCIE_MAX_RESET]; + struct ep_pcie_irq_info_t irq[EP_PCIE_MAX_IRQ]; + struct ep_pcie_res_info_t res[EP_PCIE_MAX_RES]; + + void __iomem *parf; + void __iomem *phy; + void __iomem *mmio; + void __iomem *msi; + void __iomem *dm_core; + void __iomem *elbi; + void __iomem *iatu; + + struct msm_bus_scale_pdata *bus_scale_table; + u32 bus_client; + u32 link_speed; + bool active_config; + bool aggregated_irq; + bool mhi_a7_irq; + u32 dbi_base_reg; + u32 slv_space_reg; + u32 phy_status_reg; + u32 phy_init_len; + struct ep_pcie_phy_info_t *phy_init; + bool perst_enum; + + u32 rev; + u32 phy_rev; + void *ipc_log_sel; + void *ipc_log_ful; + void *ipc_log_dump; + struct mutex setup_mtx; + struct mutex ext_mtx; + spinlock_t ext_lock; + unsigned long ext_save_flags; + + spinlock_t isr_lock; + unsigned long isr_save_flags; + ulong linkdown_counter; + ulong linkup_counter; + ulong bme_counter; + ulong pm_to_counter; + ulong d0_counter; + ulong d3_counter; + ulong perst_ast_counter; + ulong perst_deast_counter; + ulong wake_counter; + ulong msi_counter; + ulong global_irq_counter; + + bool dump_conf; + bool 
config_mmio_init; + bool enumerated; + enum ep_pcie_link_status link_status; + bool perst_deast; + bool power_on; + bool suspending; + bool l23_ready; + bool l1ss_enabled; + struct ep_pcie_msi_config msi_cfg; + bool no_notify; + bool client_ready; + + struct ep_pcie_register_event *event_reg; + struct work_struct handle_perst_work; + struct work_struct handle_bme_work; + struct work_struct handle_d3cold_work; +}; + +extern struct ep_pcie_dev_t ep_pcie_dev; +extern struct ep_pcie_hw hw_drv; + +static inline void ep_pcie_write_mask(void __iomem *addr, + u32 clear_mask, u32 set_mask) +{ + u32 val; + + val = (readl_relaxed(addr) & ~clear_mask) | set_mask; + writel_relaxed(val, addr); + /* ensure register write goes through before next regiser operation */ + wmb(); +} + +static inline void ep_pcie_write_reg(void __iomem *base, u32 offset, u32 value) +{ + writel_relaxed(value, base + offset); + /* ensure register write goes through before next regiser operation */ + wmb(); +} + +static inline void ep_pcie_write_reg_field(void __iomem *base, u32 offset, + const u32 mask, u32 val) +{ + u32 shift = find_first_bit((void *)&mask, 32); + u32 tmp = readl_relaxed(base + offset); + + tmp &= ~mask; /* clear written bits */ + val = tmp | (val << shift); + writel_relaxed(val, base + offset); + /* ensure register write goes through before next regiser operation */ + wmb(); +} + +extern int ep_pcie_core_register_event(struct ep_pcie_register_event *reg); +extern int ep_pcie_get_debug_mask(void); +extern void ep_pcie_phy_init(struct ep_pcie_dev_t *dev); +extern bool ep_pcie_phy_is_ready(struct ep_pcie_dev_t *dev); +extern void ep_pcie_reg_dump(struct ep_pcie_dev_t *dev, u32 sel, bool linkdown); +extern void ep_pcie_debugfs_init(struct ep_pcie_dev_t *ep_dev); +extern void ep_pcie_debugfs_exit(void); + +#endif diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c new file mode 100644 index 
0000000000000000000000000000000000000000..a676f4e8b32f8510492b442bc8e26337e1b865aa --- /dev/null +++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c @@ -0,0 +1,2730 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * MSM PCIe endpoint core driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ep_pcie_com.h" + +/* debug mask sys interface */ +static int ep_pcie_debug_mask; +static int ep_pcie_debug_keep_resource; +static u32 ep_pcie_bar0_address; +module_param_named(debug_mask, ep_pcie_debug_mask, + int, 0664); +module_param_named(debug_keep_resource, ep_pcie_debug_keep_resource, + int, 0664); +module_param_named(bar0_address, ep_pcie_bar0_address, + int, 0664); + +struct ep_pcie_dev_t ep_pcie_dev = {0}; + +static struct ep_pcie_vreg_info_t ep_pcie_vreg_info[EP_PCIE_MAX_VREG] = { + {NULL, "vreg-1.8", 1800000, 1800000, 14000, true}, + {NULL, "vreg-0.9", 1000000, 1000000, 40000, true}, +}; + +static struct ep_pcie_gpio_info_t ep_pcie_gpio_info[EP_PCIE_MAX_GPIO] = { + {"perst-gpio", 0, 0, 0, 1}, + {"wake-gpio", 0, 1, 0, 1}, + {"clkreq-gpio", 0, 1, 0, 0}, + {"mdm2apstatus-gpio", 0, 1, 1, 0}, +}; + +static struct ep_pcie_clk_info_t + ep_pcie_clk_info[EP_PCIE_MAX_CLK] = { + {NULL, "pcie_0_cfg_ahb_clk", 0, true}, + {NULL, "pcie_0_mstr_axi_clk", 0, true}, + {NULL, "pcie_0_slv_axi_clk", 0, true}, + {NULL, 
"pcie_0_aux_clk", 1000000, true}, + {NULL, "pcie_0_ldo", 0, true}, + {NULL, "pcie_0_sleep_clk", 0, false}, + {NULL, "pcie_0_slv_q2a_axi_clk", 0, false}, +}; + +static struct ep_pcie_clk_info_t + ep_pcie_pipe_clk_info[EP_PCIE_MAX_PIPE_CLK] = { + {NULL, "pcie_0_pipe_clk", 62500000, true}, +}; + +static struct ep_pcie_reset_info_t + ep_pcie_reset_info[EP_PCIE_MAX_RESET] = { + {NULL, "pcie_0_core_reset", false}, + {NULL, "pcie_0_phy_reset", false}, +}; + +static const struct ep_pcie_res_info_t ep_pcie_res_info[EP_PCIE_MAX_RES] = { + {"parf", NULL, NULL}, + {"phy", NULL, NULL}, + {"mmio", NULL, NULL}, + {"msi", NULL, NULL}, + {"dm_core", NULL, NULL}, + {"elbi", NULL, NULL}, + {"iatu", NULL, NULL}, +}; + +static const struct ep_pcie_irq_info_t ep_pcie_irq_info[EP_PCIE_MAX_IRQ] = { + {"int_pm_turnoff", 0}, + {"int_dstate_change", 0}, + {"int_l1sub_timeout", 0}, + {"int_link_up", 0}, + {"int_link_down", 0}, + {"int_bridge_flush_n", 0}, + {"int_bme", 0}, + {"int_global", 0}, +}; + +int ep_pcie_get_debug_mask(void) +{ + return ep_pcie_debug_mask; +} + +static bool ep_pcie_confirm_linkup(struct ep_pcie_dev_t *dev, + bool check_sw_stts) +{ + u32 val; + + if (check_sw_stts && (dev->link_status != EP_PCIE_LINK_ENABLED)) { + EP_PCIE_DBG(dev, "PCIe V%d: The link is not enabled\n", + dev->rev); + return false; + } + + val = readl_relaxed(dev->dm_core); + EP_PCIE_DBG(dev, "PCIe V%d: device ID and vendor ID are 0x%x\n", + dev->rev, val); + if (val == EP_PCIE_LINK_DOWN) { + EP_PCIE_ERR(dev, + "PCIe V%d: The link is not really up; device ID and vendor ID are 0x%x\n", + dev->rev, val); + return false; + } + + return true; +} + +static int ep_pcie_gpio_init(struct ep_pcie_dev_t *dev) +{ + int i, rc = 0; + struct ep_pcie_gpio_info_t *info; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = 0; i < EP_PCIE_MAX_GPIO; i++) { + info = &dev->gpio[i]; + + if (!info->num) { + if (i == EP_PCIE_GPIO_MDM2AP) { + EP_PCIE_DBG(dev, + "PCIe V%d: gpio %s does not exist\n", + dev->rev, 
info->name); + continue; + } else { + EP_PCIE_ERR(dev, + "PCIe V%d: the number of gpio %s is invalid\n", + dev->rev, info->name); + rc = -EINVAL; + break; + } + } + + rc = gpio_request(info->num, info->name); + if (rc) { + EP_PCIE_ERR(dev, "PCIe V%d: can't get gpio %s; %d\n", + dev->rev, info->name, rc); + break; + } + + if (info->out) + rc = gpio_direction_output(info->num, info->init); + else + rc = gpio_direction_input(info->num); + if (rc) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't set direction for GPIO %s:%d\n", + dev->rev, info->name, rc); + gpio_free(info->num); + break; + } + } + + if (rc) + while (i--) + gpio_free(dev->gpio[i].num); + + return rc; +} + +static void ep_pcie_gpio_deinit(struct ep_pcie_dev_t *dev) +{ + int i; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = 0; i < EP_PCIE_MAX_GPIO; i++) + gpio_free(dev->gpio[i].num); +} + +static int ep_pcie_vreg_init(struct ep_pcie_dev_t *dev) +{ + int i, rc = 0; + struct regulator *vreg; + struct ep_pcie_vreg_info_t *info; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = 0; i < EP_PCIE_MAX_VREG; i++) { + info = &dev->vreg[i]; + vreg = info->hdl; + + if (!vreg) { + EP_PCIE_ERR(dev, + "PCIe V%d: handle of Vreg %s is NULL\n", + dev->rev, info->name); + rc = -EINVAL; + break; + } + + EP_PCIE_DBG(dev, "PCIe V%d: Vreg %s is being enabled\n", + dev->rev, info->name); + if (info->max_v) { + rc = regulator_set_voltage(vreg, + info->min_v, info->max_v); + if (rc) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't set voltage for %s: %d\n", + dev->rev, info->name, rc); + break; + } + } + + if (info->opt_mode) { + rc = regulator_set_load(vreg, info->opt_mode); + if (rc < 0) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't set mode for %s: %d\n", + dev->rev, info->name, rc); + break; + } + } + + rc = regulator_enable(vreg); + if (rc) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't enable regulator %s: %d\n", + dev->rev, info->name, rc); + break; + } + } + + if (rc) + while (i--) { + struct regulator *hdl = dev->vreg[i].hdl; + + 
if (hdl) + regulator_disable(hdl); + } + + return rc; +} + +static void ep_pcie_vreg_deinit(struct ep_pcie_dev_t *dev) +{ + int i; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = EP_PCIE_MAX_VREG - 1; i >= 0; i--) { + if (dev->vreg[i].hdl) { + EP_PCIE_DBG(dev, "Vreg %s is being disabled\n", + dev->vreg[i].name); + regulator_disable(dev->vreg[i].hdl); + } + } +} + +static int ep_pcie_clk_init(struct ep_pcie_dev_t *dev) +{ + int i, rc = 0; + struct ep_pcie_clk_info_t *info; + struct ep_pcie_reset_info_t *reset_info; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + rc = regulator_enable(dev->gdsc); + + if (rc) { + EP_PCIE_ERR(dev, "PCIe V%d: fail to enable GDSC for %s\n", + dev->rev, dev->pdev->name); + return rc; + } + + if (dev->bus_client) { + rc = msm_bus_scale_client_update_request(dev->bus_client, 1); + if (rc) { + EP_PCIE_ERR(dev, + "PCIe V%d: fail to set bus bandwidth:%d\n", + dev->rev, rc); + return rc; + } + EP_PCIE_DBG(dev, + "PCIe V%d: set bus bandwidth\n", + dev->rev); + } + + for (i = 0; i < EP_PCIE_MAX_CLK; i++) { + info = &dev->clk[i]; + + if (!info->hdl) { + EP_PCIE_DBG(dev, + "PCIe V%d: handle of Clock %s is NULL\n", + dev->rev, info->name); + continue; + } + + if (info->freq) { + rc = clk_set_rate(info->hdl, info->freq); + if (rc) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't set rate for clk %s: %d\n", + dev->rev, info->name, rc); + break; + } + EP_PCIE_DBG(dev, + "PCIe V%d: set rate %d for clk %s\n", + dev->rev, info->freq, info->name); + } + + rc = clk_prepare_enable(info->hdl); + + if (rc) + EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clk %s\n", + dev->rev, info->name); + else + EP_PCIE_DBG(dev, "PCIe V%d: enable clk %s\n", + dev->rev, info->name); + } + + if (rc) { + EP_PCIE_DBG(dev, + "PCIe V%d: disable clocks for error handling\n", + dev->rev); + while (i--) { + struct clk *hdl = dev->clk[i].hdl; + + if (hdl) + clk_disable_unprepare(hdl); + } + + regulator_disable(dev->gdsc); + } + + for (i = 0; i < EP_PCIE_MAX_RESET; i++) { + 
reset_info = &dev->reset[i]; + if (reset_info->hdl) { + rc = reset_control_assert(reset_info->hdl); + if (rc) + EP_PCIE_ERR(dev, + "PCIe V%d: failed to assert reset for %s\n", + dev->rev, reset_info->name); + else + EP_PCIE_DBG(dev, + "PCIe V%d: successfully asserted reset for %s\n", + dev->rev, reset_info->name); + + /* add a 1ms delay to ensure the reset is asserted */ + usleep_range(1000, 1005); + + rc = reset_control_deassert(reset_info->hdl); + if (rc) + EP_PCIE_ERR(dev, + "PCIe V%d: failed to deassert reset for %s\n", + dev->rev, reset_info->name); + else + EP_PCIE_DBG(dev, + "PCIe V%d: successfully deasserted reset for %s\n", + dev->rev, reset_info->name); + } + } + + return rc; +} + +static void ep_pcie_clk_deinit(struct ep_pcie_dev_t *dev) +{ + int i; + int rc; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = EP_PCIE_MAX_CLK - 1; i >= 0; i--) + if (dev->clk[i].hdl) + clk_disable_unprepare(dev->clk[i].hdl); + + if (dev->bus_client) { + rc = msm_bus_scale_client_update_request(dev->bus_client, 0); + if (rc) + EP_PCIE_ERR(dev, + "PCIe V%d: fail to relinquish bus bandwidth:%d\n", + dev->rev, rc); + else + EP_PCIE_DBG(dev, + "PCIe V%d: relinquish bus bandwidth\n", + dev->rev); + } + + regulator_disable(dev->gdsc); +} + +static int ep_pcie_pipe_clk_init(struct ep_pcie_dev_t *dev) +{ + int i, rc = 0; + struct ep_pcie_clk_info_t *info; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) { + info = &dev->pipeclk[i]; + + if (!info->hdl) { + EP_PCIE_ERR(dev, + "PCIe V%d: handle of Pipe Clock %s is NULL\n", + dev->rev, info->name); + rc = -EINVAL; + break; + } + + if (info->freq) { + rc = clk_set_rate(info->hdl, info->freq); + if (rc) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't set rate for clk %s: %d\n", + dev->rev, info->name, rc); + break; + } + EP_PCIE_DBG(dev, + "PCIe V%d: set rate for clk %s\n", + dev->rev, info->name); + } + + rc = clk_prepare_enable(info->hdl); + + if (rc) + EP_PCIE_ERR(dev, "PCIe V%d: failed to 
enable clk %s\n", + dev->rev, info->name); + else + EP_PCIE_DBG(dev, "PCIe V%d: enabled pipe clk %s\n", + dev->rev, info->name); + } + + if (rc) { + EP_PCIE_DBG(dev, + "PCIe V%d: disable pipe clocks for error handling\n", + dev->rev); + while (i--) + if (dev->pipeclk[i].hdl) + clk_disable_unprepare(dev->pipeclk[i].hdl); + } + + return rc; +} + +static void ep_pcie_pipe_clk_deinit(struct ep_pcie_dev_t *dev) +{ + int i; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) + if (dev->pipeclk[i].hdl) + clk_disable_unprepare( + dev->pipeclk[i].hdl); +} + +static void ep_pcie_bar_init(struct ep_pcie_dev_t *dev) +{ + struct resource *res = dev->res[EP_PCIE_RES_MMIO].resource; + u32 mask = res->end - res->start; + u32 properties = 0x4; + + EP_PCIE_DBG(dev, "PCIe V%d: BAR mask to program is 0x%x\n", + dev->rev, mask); + + /* Configure BAR mask via CS2 */ + ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_CS2_ENABLE, 0, BIT(0)); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0, mask); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x4, 0); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x8, mask); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0xc, 0); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x10, 0); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x14, 0); + ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_CS2_ENABLE, BIT(0), 0); + + /* Configure BAR properties via CS */ + ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, 0, BIT(0)); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0, properties); + ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x8, properties); + ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, BIT(0), 0); +} + +static void ep_pcie_config_mmio(struct ep_pcie_dev_t *dev) +{ + EP_PCIE_DBG(dev, + "Initial version of MMIO is:0x%x\n", + readl_relaxed(dev->mmio + PCIE20_MHIVER)); + + if (dev->config_mmio_init) { + EP_PCIE_DBG(dev, + "PCIe V%d: MMIO already initialized, return\n", + dev->rev); + return; + } + + 
ep_pcie_write_reg(dev->mmio, PCIE20_MHICFG, 0x02800880); + ep_pcie_write_reg(dev->mmio, PCIE20_BHI_EXECENV, 0x2); + ep_pcie_write_reg(dev->mmio, PCIE20_MHICTRL, 0x0); + ep_pcie_write_reg(dev->mmio, PCIE20_MHISTATUS, 0x0); + ep_pcie_write_reg(dev->mmio, PCIE20_MHIVER, 0x1000000); + ep_pcie_write_reg(dev->mmio, PCIE20_BHI_VERSION_LOWER, 0x2); + ep_pcie_write_reg(dev->mmio, PCIE20_BHI_VERSION_UPPER, 0x1); + ep_pcie_write_reg(dev->mmio, PCIE20_BHI_INTVEC, 0xffffffff); + + dev->config_mmio_init = true; +} + +static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured) +{ + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + /* enable debug IRQ */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_DEBUG_INT_EN, + 0, BIT(3) | BIT(2) | BIT(1)); + + if (!configured) { + /* Configure PCIe to endpoint mode */ + ep_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x0); + + /* adjust DBI base address */ + if (dev->phy_rev < 6) { + if (dev->dbi_base_reg) + writel_relaxed(0x3FFFE000, + dev->parf + dev->dbi_base_reg); + else + writel_relaxed(0x3FFFE000, + dev->parf + PCIE20_PARF_DBI_BASE_ADDR); + } + + /* Configure PCIe core to support 1GB aperture */ + if (dev->slv_space_reg) + ep_pcie_write_reg(dev->parf, dev->slv_space_reg, + 0x40000000); + else + ep_pcie_write_reg(dev->parf, + PCIE20_PARF_SLV_ADDR_SPACE_SIZE, 0x40000000); + + /* Configure link speed */ + ep_pcie_write_mask(dev->dm_core + + PCIE20_LINK_CONTROL2_LINK_STATUS2, + 0xf, dev->link_speed); + } + + if (dev->active_config) { + struct resource *dbi = dev->res[EP_PCIE_RES_DM_CORE].resource; + u32 dbi_lo = dbi->start; + + EP_PCIE_DBG2(dev, "PCIe V%d: Enable L1\n", dev->rev); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0); + + ep_pcie_write_mask(dev->parf + PCIE20_PARF_SLV_ADDR_MSB_CTRL, + 0, BIT(0)); + ep_pcie_write_reg(dev->parf, PCIE20_PARF_SLV_ADDR_SPACE_SIZE_HI, + 0x200); + ep_pcie_write_reg(dev->parf, PCIE20_PARF_SLV_ADDR_SPACE_SIZE, + 0x0); + ep_pcie_write_reg(dev->parf, 
PCIE20_PARF_DBI_BASE_ADDR_HI, + 0x100); + ep_pcie_write_reg(dev->parf, PCIE20_PARF_DBI_BASE_ADDR, + dbi_lo); + + EP_PCIE_DBG(dev, + "PCIe V%d: DBI base:0x%x\n", dev->rev, + readl_relaxed(dev->parf + PCIE20_PARF_DBI_BASE_ADDR)); + + if (dev->phy_rev >= 6) { + struct resource *atu = + dev->res[EP_PCIE_RES_IATU].resource; + u32 atu_lo = atu->start; + + EP_PCIE_DBG(dev, + "PCIe V%d: configure MSB of ATU base for flipping and LSB as 0x%x\n", + dev->rev, atu_lo); + ep_pcie_write_reg(dev->parf, + PCIE20_PARF_ATU_BASE_ADDR_HI, 0x100); + ep_pcie_write_reg(dev->parf, PCIE20_PARF_ATU_BASE_ADDR, + atu_lo); + EP_PCIE_DBG(dev, + "PCIe V%d: LSB of ATU base:0x%x\n", + dev->rev, readl_relaxed(dev->parf + + PCIE20_PARF_ATU_BASE_ADDR)); + } + } + + /* Read halts write */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_AXI_MSTR_RD_HALT_NO_WRITES, + 0, BIT(0)); + + /* Write after write halt */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT, + 0, BIT(31)); + + /* Q2A flush disable */ + writel_relaxed(0, dev->parf + PCIE20_PARF_Q2A_FLUSH); + + /* Disable the DBI Wakeup */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, BIT(11), 0); + + /* Disable the debouncers */ + ep_pcie_write_reg(dev->parf, PCIE20_PARF_DB_CTRL, 0x73); + + /* Disable core clock CGC */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, 0, BIT(6)); + + /* Set AUX power to be on */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, 0, BIT(4)); + + /* Request to exit from L1SS for MSI and LTR MSG */ + ep_pcie_write_mask(dev->parf + PCIE20_PARF_CFG_BITS, 0, BIT(1)); + + EP_PCIE_DBG(dev, + "Initial: CLASS_CODE_REVISION_ID:0x%x; HDR_TYPE:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_CLASS_CODE_REVISION_ID), + readl_relaxed(dev->dm_core + PCIE20_BIST_HDR_TYPE)); + + if (!configured) { + /* Enable CS for RO(CS) register writes */ + ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, 0, + BIT(0)); + + /* Set class code and revision ID */ + ep_pcie_write_reg(dev->dm_core, 
PCIE20_CLASS_CODE_REVISION_ID, + 0xff000000); + + /* Set header type */ + ep_pcie_write_reg(dev->dm_core, PCIE20_BIST_HDR_TYPE, 0x10); + + /* Set Subsystem ID and Subsystem Vendor ID */ + ep_pcie_write_reg(dev->dm_core, PCIE20_SUBSYSTEM, 0xa01f17cb); + + /* Set the PMC Register - to support PME in D0/D3hot/D3cold */ + ep_pcie_write_mask(dev->dm_core + PCIE20_CAP_ID_NXT_PTR, 0, + BIT(31)|BIT(30)|BIT(27)); + + /* Set the Endpoint L0s Acceptable Latency to 1us (max) */ + ep_pcie_write_reg_field(dev->dm_core, + PCIE20_DEVICE_CAPABILITIES, + PCIE20_MASK_EP_L0S_ACCPT_LATENCY, 0x7); + + /* Set the Endpoint L1 Acceptable Latency to 2 us (max) */ + ep_pcie_write_reg_field(dev->dm_core, + PCIE20_DEVICE_CAPABILITIES, + PCIE20_MASK_EP_L1_ACCPT_LATENCY, 0x7); + + /* Set the L0s Exit Latency to 2us-4us = 0x6 */ + ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, + PCIE20_MASK_L1_EXIT_LATENCY, 0x6); + + /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */ + ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, + PCIE20_MASK_L0S_EXIT_LATENCY, 0x6); + + /* L1ss is supported */ + ep_pcie_write_mask(dev->dm_core + PCIE20_L1SUB_CAPABILITY, 0, + 0x1f); + + /* Enable Clock Power Management */ + ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, + PCIE20_MASK_CLOCK_POWER_MAN, 0x1); + + /* Disable CS for RO(CS) register writes */ + ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, BIT(0), + 0); + + /* Set FTS value to match the PHY setting */ + ep_pcie_write_reg_field(dev->dm_core, + PCIE20_ACK_F_ASPM_CTRL_REG, + PCIE20_MASK_ACK_N_FTS, 0x80); + + EP_PCIE_DBG(dev, + "After program: CLASS_CODE_REVISION_ID:0x%x; HDR_TYPE:0x%x; L1SUB_CAPABILITY:0x%x; PARF_SYS_CTRL:0x%x\n", + readl_relaxed(dev->dm_core + + PCIE20_CLASS_CODE_REVISION_ID), + readl_relaxed(dev->dm_core + PCIE20_BIST_HDR_TYPE), + readl_relaxed(dev->dm_core + PCIE20_L1SUB_CAPABILITY), + readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL)); + + /* Configure BARs */ + ep_pcie_bar_init(dev); + } 
+ + /* Configure IRQ events */ + if (dev->aggregated_irq) { + ep_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0, + BIT(EP_PCIE_INT_EVT_LINK_DOWN) | + BIT(EP_PCIE_INT_EVT_BME) | + BIT(EP_PCIE_INT_EVT_PM_TURNOFF) | + BIT(EP_PCIE_INT_EVT_DSTATE_CHANGE) | + BIT(EP_PCIE_INT_EVT_LINK_UP)); + if (!dev->mhi_a7_irq) + ep_pcie_write_mask(dev->parf + + PCIE20_PARF_INT_ALL_MASK, 0, + BIT(EP_PCIE_INT_EVT_MHI_A7)); + + EP_PCIE_DBG(dev, "PCIe V%d: PCIE20_PARF_INT_ALL_MASK:0x%x\n", + dev->rev, + readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK)); + } + + if (dev->active_config) { + ep_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14); + + EP_PCIE_DBG2(dev, "PCIe V%d: Enable L1\n", dev->rev); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0); + } + + /* Configure MMIO */ + ep_pcie_config_mmio(dev); +} + +static void ep_pcie_config_inbound_iatu(struct ep_pcie_dev_t *dev) +{ + struct resource *mmio = dev->res[EP_PCIE_RES_MMIO].resource; + u32 lower, limit, bar; + + lower = mmio->start; + limit = mmio->end; + bar = readl_relaxed(dev->dm_core + PCIE20_BAR0); + + EP_PCIE_DBG(dev, + "PCIe V%d: BAR0 is 0x%x; MMIO[0x%x-0x%x]\n", + dev->rev, bar, lower, limit); + + ep_pcie_write_reg(dev->parf, PCIE20_PARF_MHI_BASE_ADDR_LOWER, lower); + ep_pcie_write_reg(dev->parf, PCIE20_PARF_MHI_BASE_ADDR_UPPER, 0x0); + + if (dev->phy_rev >= 6) { + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_I_CTRL1(0), 0x0); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_I_LTAR(0), lower); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_I_UTAR(0), 0x0); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_I_CTRL2(0), + 0xc0000000); + + EP_PCIE_DBG(dev, + "PCIe V%d: Inbound iATU configuration\n", dev->rev); + EP_PCIE_DBG(dev, "PCIE20_IATU_I_CTRL1(0):0x%x\n", + readl_relaxed(dev->iatu + PCIE20_IATU_I_CTRL1(0))); + EP_PCIE_DBG(dev, "PCIE20_IATU_I_LTAR(0):0x%x\n", + readl_relaxed(dev->iatu + PCIE20_IATU_I_LTAR(0))); + EP_PCIE_DBG(dev, 
"PCIE20_IATU_I_UTAR(0):0x%x\n", + readl_relaxed(dev->iatu + PCIE20_IATU_I_UTAR(0))); + EP_PCIE_DBG(dev, "PCIE20_IATU_I_CTRL2(0):0x%x\n", + readl_relaxed(dev->iatu + PCIE20_IATU_I_CTRL2(0))); + return; + } + + /* program inbound address translation using region 0 */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_VIEWPORT, 0x80000000); + /* set region to mem type */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_CTRL1, 0x0); + /* setup target address registers */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LTAR, lower); + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_UTAR, 0x0); + /* use BAR match mode for BAR0 and enable region 0 */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_CTRL2, 0xc0000000); + + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2)); +} + +static void ep_pcie_config_outbound_iatu_entry(struct ep_pcie_dev_t *dev, + u32 region, u32 lower, u32 upper, + u32 limit, u32 tgt_lower, u32 tgt_upper) +{ + EP_PCIE_DBG(dev, + "PCIe V%d: region:%d; lower:0x%x; limit:0x%x; target_lower:0x%x; target_upper:0x%x\n", + dev->rev, region, lower, limit, tgt_lower, tgt_upper); + + if (dev->phy_rev >= 6) { + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_O_CTRL1(region), + 0x0); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_O_LBAR(region), + lower); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_O_UBAR(region), + upper); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_O_LAR(region), + limit); + ep_pcie_write_reg(dev->iatu, PCIE20_IATU_O_LTAR(region), + tgt_lower); + ep_pcie_write_reg(dev->iatu, 
PCIE20_IATU_O_UTAR(region), + tgt_upper); + ep_pcie_write_mask(dev->iatu + PCIE20_IATU_O_CTRL2(region), + 0, BIT(31)); + + EP_PCIE_DBG(dev, + "PCIe V%d: Outbound iATU configuration\n", dev->rev); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_CTRL1:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_CTRL1(region))); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_LBAR:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_LBAR(region))); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_UBAR:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_UBAR(region))); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_LAR:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_LAR(region))); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_LTAR:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_LTAR(region))); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_UTAR:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_UTAR(region))); + EP_PCIE_DBG(dev, "PCIE20_IATU_O_CTRL2:0x%x\n", + readl_relaxed(dev->iatu + + PCIE20_IATU_O_CTRL2(region))); + + return; + } + + /* program outbound address translation using an input region */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_VIEWPORT, region); + /* set region to mem type */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_CTRL1, 0x0); + /* setup source address registers */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LBAR, lower); + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_UBAR, upper); + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LAR, limit); + /* setup target address registers */ + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LTAR, tgt_lower); + ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_UTAR, tgt_upper); + /* use DMA bypass mode and enable the region */ + ep_pcie_write_mask(dev->dm_core + PCIE20_PLR_IATU_CTRL2, 0, + BIT(31) | BIT(27)); + + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1)); + EP_PCIE_DBG(dev, 
"PCIE20_PLR_IATU_LBAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR)); + EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n", + readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2)); +} + +static void ep_pcie_notify_event(struct ep_pcie_dev_t *dev, + enum ep_pcie_event event) +{ + if (dev->event_reg && dev->event_reg->callback && + (dev->event_reg->events & event)) { + struct ep_pcie_notify *notify = &dev->event_reg->notify; + + notify->event = event; + notify->user = dev->event_reg->user; + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: Callback client for event %d\n", + dev->rev, event); + dev->event_reg->callback(notify); + } else { + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: Client does not register for event %d\n", + dev->rev, event); + } +} + +static int ep_pcie_get_resources(struct ep_pcie_dev_t *dev, + struct platform_device *pdev) +{ + int i, len, cnt, ret = 0, size = 0; + struct ep_pcie_vreg_info_t *vreg_info; + struct ep_pcie_gpio_info_t *gpio_info; + struct ep_pcie_clk_info_t *clk_info; + struct ep_pcie_reset_info_t *reset_info; + struct resource *res; + struct ep_pcie_res_info_t *res_info; + struct ep_pcie_irq_info_t *irq_info; + char prop_name[MAX_PROP_SIZE]; + const __be32 *prop; + u32 *clkfreq = NULL; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + of_get_property(pdev->dev.of_node, "qcom,phy-init", &size); + if (size) { + dev->phy_init = (struct ep_pcie_phy_info_t *) + devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + + if (dev->phy_init) { + dev->phy_init_len = + size / sizeof(*dev->phy_init); + EP_PCIE_DBG(dev, + "PCIe V%d: phy init length 
is 0x%x\n", + dev->rev, dev->phy_init_len); + + of_property_read_u32_array(pdev->dev.of_node, + "qcom,phy-init", + (unsigned int *)dev->phy_init, + size / sizeof(dev->phy_init->offset)); + } else { + EP_PCIE_ERR(dev, + "PCIe V%d: Could not allocate memory for phy init sequence\n", + dev->rev); + return -ENOMEM; + } + } else { + EP_PCIE_DBG(dev, + "PCIe V%d: PHY V%d: phy init sequence is not present in DT\n", + dev->rev, dev->phy_rev); + } + + cnt = of_property_count_strings((&pdev->dev)->of_node, + "clock-names"); + if (cnt > 0) { + size_t size = cnt * sizeof(*clkfreq); + + clkfreq = kzalloc(size, GFP_KERNEL); + if (!clkfreq) + return -ENOMEM; + ret = of_property_read_u32_array( + (&pdev->dev)->of_node, + "max-clock-frequency-hz", clkfreq, cnt); + if (ret) + EP_PCIE_DBG2(dev, + "PCIe V%d: cannot get max-clock-frequency-hz property from DT:%d\n", + dev->rev, ret); + } + + for (i = 0; i < EP_PCIE_MAX_VREG; i++) { + vreg_info = &dev->vreg[i]; + vreg_info->hdl = + devm_regulator_get(&pdev->dev, vreg_info->name); + + if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) { + EP_PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n", + vreg_info->name); + ret = PTR_ERR(vreg_info->hdl); + goto out; + } + + if (IS_ERR(vreg_info->hdl)) { + if (vreg_info->required) { + EP_PCIE_ERR(dev, "Vreg %s doesn't exist\n", + vreg_info->name); + ret = PTR_ERR(vreg_info->hdl); + goto out; + } else { + EP_PCIE_DBG(dev, + "Optional Vreg %s doesn't exist\n", + vreg_info->name); + vreg_info->hdl = NULL; + } + } else { + snprintf(prop_name, MAX_PROP_SIZE, + "qcom,%s-voltage-level", vreg_info->name); + prop = of_get_property((&pdev->dev)->of_node, + prop_name, &len); + if (!prop || (len != (3 * sizeof(__be32)))) { + EP_PCIE_DBG(dev, "%s %s property\n", + prop ? 
"invalid format" : + "no", prop_name); + } else { + vreg_info->max_v = be32_to_cpup(&prop[0]); + vreg_info->min_v = be32_to_cpup(&prop[1]); + vreg_info->opt_mode = + be32_to_cpup(&prop[2]); + } + } + } + + dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd"); + + if (IS_ERR(dev->gdsc)) { + EP_PCIE_ERR(dev, "PCIe V%d: Failed to get %s GDSC:%ld\n", + dev->rev, dev->pdev->name, PTR_ERR(dev->gdsc)); + if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER) + EP_PCIE_DBG(dev, "PCIe V%d: EPROBE_DEFER for %s GDSC\n", + dev->rev, dev->pdev->name); + ret = PTR_ERR(dev->gdsc); + goto out; + } + + for (i = 0; i < EP_PCIE_MAX_GPIO; i++) { + gpio_info = &dev->gpio[i]; + ret = of_get_named_gpio((&pdev->dev)->of_node, + gpio_info->name, 0); + if (ret >= 0) { + gpio_info->num = ret; + ret = 0; + EP_PCIE_DBG(dev, "GPIO num for %s is %d\n", + gpio_info->name, gpio_info->num); + } else { + EP_PCIE_DBG(dev, + "GPIO %s is not supported in this configuration\n", + gpio_info->name); + ret = 0; + } + } + + for (i = 0; i < EP_PCIE_MAX_CLK; i++) { + clk_info = &dev->clk[i]; + + clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name); + + if (IS_ERR(clk_info->hdl)) { + if (clk_info->required) { + EP_PCIE_ERR(dev, + "Clock %s isn't available:%ld\n", + clk_info->name, PTR_ERR(clk_info->hdl)); + ret = PTR_ERR(clk_info->hdl); + goto out; + } else { + EP_PCIE_DBG(dev, "Ignoring Clock %s\n", + clk_info->name); + clk_info->hdl = NULL; + } + } else { + if (clkfreq != NULL) { + clk_info->freq = clkfreq[i + + EP_PCIE_MAX_PIPE_CLK]; + EP_PCIE_DBG(dev, "Freq of Clock %s is:%d\n", + clk_info->name, clk_info->freq); + } + } + } + + for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) { + clk_info = &dev->pipeclk[i]; + + clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name); + + if (IS_ERR(clk_info->hdl)) { + if (clk_info->required) { + EP_PCIE_ERR(dev, + "Clock %s isn't available:%ld\n", + clk_info->name, PTR_ERR(clk_info->hdl)); + ret = PTR_ERR(clk_info->hdl); + goto out; + } else { + EP_PCIE_DBG(dev, "Ignoring Clock 
%s\n", + clk_info->name); + clk_info->hdl = NULL; + } + } else { + if (clkfreq != NULL) { + clk_info->freq = clkfreq[i]; + EP_PCIE_DBG(dev, "Freq of Clock %s is:%d\n", + clk_info->name, clk_info->freq); + } + } + } + + for (i = 0; i < EP_PCIE_MAX_RESET; i++) { + reset_info = &dev->reset[i]; + + reset_info->hdl = devm_reset_control_get(&pdev->dev, + reset_info->name); + + if (IS_ERR(reset_info->hdl)) { + if (reset_info->required) { + EP_PCIE_ERR(dev, + "Reset %s isn't available:%ld\n", + reset_info->name, + PTR_ERR(reset_info->hdl)); + + ret = PTR_ERR(reset_info->hdl); + reset_info->hdl = NULL; + goto out; + } else { + EP_PCIE_DBG(dev, "Ignoring Reset %s\n", + reset_info->name); + reset_info->hdl = NULL; + } + } + } + + dev->bus_scale_table = msm_bus_cl_get_pdata(pdev); + if (!dev->bus_scale_table) { + EP_PCIE_DBG(dev, "PCIe V%d: No bus scale table for %s\n", + dev->rev, dev->pdev->name); + dev->bus_client = 0; + } else { + dev->bus_client = + msm_bus_scale_register_client(dev->bus_scale_table); + if (!dev->bus_client) { + EP_PCIE_ERR(dev, + "PCIe V%d: Failed to register bus client for %s\n", + dev->rev, dev->pdev->name); + msm_bus_cl_clear_pdata(dev->bus_scale_table); + ret = -ENODEV; + goto out; + } + } + + for (i = 0; i < EP_PCIE_MAX_RES; i++) { + res_info = &dev->res[i]; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_info->name); + + if (!res) { + EP_PCIE_ERR(dev, + "PCIe V%d: can't get resource for %s\n", + dev->rev, res_info->name); + ret = -ENOMEM; + goto out; + } else { + EP_PCIE_DBG(dev, "start addr for %s is %pa\n", + res_info->name, &res->start); + } + + res_info->base = devm_ioremap(&pdev->dev, + res->start, resource_size(res)); + if (!res_info->base) { + EP_PCIE_ERR(dev, "PCIe V%d: can't remap %s\n", + dev->rev, res_info->name); + ret = -ENOMEM; + goto out; + } + res_info->resource = res; + } + + for (i = 0; i < EP_PCIE_MAX_IRQ; i++) { + irq_info = &dev->irq[i]; + + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + 
irq_info->name); + + if (!res) { + EP_PCIE_DBG2(dev, "PCIe V%d: can't find IRQ # for %s\n", + dev->rev, irq_info->name); + } else { + irq_info->num = res->start; + EP_PCIE_DBG2(dev, "IRQ # for %s is %d\n", + irq_info->name, irq_info->num); + } + } + + dev->parf = dev->res[EP_PCIE_RES_PARF].base; + dev->phy = dev->res[EP_PCIE_RES_PHY].base; + dev->mmio = dev->res[EP_PCIE_RES_MMIO].base; + dev->msi = dev->res[EP_PCIE_RES_MSI].base; + dev->dm_core = dev->res[EP_PCIE_RES_DM_CORE].base; + dev->elbi = dev->res[EP_PCIE_RES_ELBI].base; + dev->iatu = dev->res[EP_PCIE_RES_IATU].base; + +out: + kfree(clkfreq); + return ret; +} + +static void ep_pcie_release_resources(struct ep_pcie_dev_t *dev) +{ + dev->parf = NULL; + dev->elbi = NULL; + dev->dm_core = NULL; + dev->phy = NULL; + dev->mmio = NULL; + dev->msi = NULL; + dev->iatu = NULL; + + if (dev->bus_client) { + msm_bus_scale_unregister_client(dev->bus_client); + dev->bus_client = 0; + } +} + +static void ep_pcie_enumeration_complete(struct ep_pcie_dev_t *dev) +{ + unsigned long irqsave_flags; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + dev->enumerated = true; + dev->link_status = EP_PCIE_LINK_ENABLED; + + if (dev->gpio[EP_PCIE_GPIO_MDM2AP].num) { + /* assert MDM2AP Status GPIO */ + EP_PCIE_DBG2(dev, "PCIe V%d: assert MDM2AP Status\n", + dev->rev); + EP_PCIE_DBG(dev, + "PCIe V%d: MDM2APStatus GPIO initial:%d\n", + dev->rev, + gpio_get_value( + dev->gpio[EP_PCIE_GPIO_MDM2AP].num)); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_MDM2AP].num, + dev->gpio[EP_PCIE_GPIO_MDM2AP].on); + EP_PCIE_DBG(dev, + "PCIe V%d: MDM2APStatus GPIO after assertion:%d\n", + dev->rev, + gpio_get_value( + dev->gpio[EP_PCIE_GPIO_MDM2AP].num)); + } + + hw_drv.device_id = readl_relaxed(dev->dm_core); + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: register driver for device 0x%x\n", + ep_pcie_dev.rev, hw_drv.device_id); + ep_pcie_register_drv(&hw_drv); + if (!dev->no_notify) + ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKUP); + else + EP_PCIE_DBG(dev, + 
"PCIe V%d: do not notify client about linkup\n", + dev->rev); + + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); +} + +int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt) +{ + int ret = 0; + u32 val = 0; + u32 retries = 0; + u32 bme = 0; + bool ltssm_en = false; + struct ep_pcie_dev_t *dev = &ep_pcie_dev; + + EP_PCIE_DBG(dev, "PCIe V%d: options input are 0x%x\n", dev->rev, opt); + + mutex_lock(&dev->setup_mtx); + + if (dev->link_status == EP_PCIE_LINK_ENABLED) { + EP_PCIE_ERR(dev, + "PCIe V%d: link is already enabled\n", + dev->rev); + goto out; + } + + if (dev->link_status == EP_PCIE_LINK_UP) + EP_PCIE_DBG(dev, + "PCIe V%d: link is already up, let's proceed with the voting for the resources\n", + dev->rev); + + if (dev->power_on && (opt & EP_PCIE_OPT_POWER_ON)) { + EP_PCIE_ERR(dev, + "PCIe V%d: request to turn on the power when link is already powered on\n", + dev->rev); + goto out; + } + + if (opt & EP_PCIE_OPT_POWER_ON) { + /* enable power */ + ret = ep_pcie_vreg_init(dev); + if (ret) { + EP_PCIE_ERR(dev, "PCIe V%d: failed to enable Vreg\n", + dev->rev); + goto out; + } + + /* enable clocks */ + ret = ep_pcie_clk_init(dev); + if (ret) { + EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clocks\n", + dev->rev); + goto clk_fail; + } + + /* enable pipe clock */ + ret = ep_pcie_pipe_clk_init(dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: failed to enable pipe clock\n", + dev->rev); + goto pipe_clk_fail; + } + + dev->power_on = true; + } + + if (!(opt & EP_PCIE_OPT_ENUM)) + goto out; + + /* check link status during initial bootup */ + if (!dev->enumerated) { + val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS); + val = val & PARF_XMLH_LINK_UP; + EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x\n", dev->rev, + val); + if (val) { + EP_PCIE_INFO(dev, + "PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS\n", + dev->rev); + ep_pcie_core_init(dev, true); + dev->link_status = EP_PCIE_LINK_UP; + dev->l23_ready = false; + 
goto checkbme; + } else { + ltssm_en = readl_relaxed(dev->parf + + PCIE20_PARF_LTSSM) & BIT(8); + + if (ltssm_en) { + EP_PCIE_ERR(dev, + "PCIe V%d: link is not up when LTSSM has already enabled by bootloader\n", + dev->rev); + ret = EP_PCIE_ERROR; + goto link_fail; + } else { + EP_PCIE_DBG(dev, + "PCIe V%d: Proceed with regular link training\n", + dev->rev); + } + } + } + + if (opt & EP_PCIE_OPT_AST_WAKE) { + /* assert PCIe WAKE# */ + EP_PCIE_INFO(dev, "PCIe V%d: assert PCIe WAKE#\n", + dev->rev); + EP_PCIE_DBG(dev, "PCIe V%d: WAKE GPIO initial:%d\n", + dev->rev, + gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num)); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on); + EP_PCIE_DBG(dev, + "PCIe V%d: WAKE GPIO after deassertion:%d\n", + dev->rev, + gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num)); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + dev->gpio[EP_PCIE_GPIO_WAKE].on); + EP_PCIE_DBG(dev, + "PCIe V%d: WAKE GPIO after assertion:%d\n", + dev->rev, + gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num)); + } + + /* wait for host side to deassert PERST */ + retries = 0; + do { + if (gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num) == 1) + break; + retries++; + usleep_range(PERST_TIMEOUT_US_MIN, PERST_TIMEOUT_US_MAX); + } while (retries < PERST_CHECK_MAX_COUNT); + + EP_PCIE_DBG(dev, "PCIe V%d: number of PERST retries:%d\n", + dev->rev, retries); + + if (retries == PERST_CHECK_MAX_COUNT) { + EP_PCIE_ERR(dev, + "PCIe V%d: PERST is not de-asserted by host\n", + dev->rev); + ret = EP_PCIE_ERROR; + goto link_fail; + } else { + dev->perst_deast = true; + if (opt & EP_PCIE_OPT_AST_WAKE) { + /* deassert PCIe WAKE# */ + EP_PCIE_DBG(dev, + "PCIe V%d: deassert PCIe WAKE# after PERST# is deasserted\n", + dev->rev); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on); + } + } + + /* init PCIe PHY */ + ep_pcie_phy_init(dev); + + EP_PCIE_DBG(dev, "PCIe V%d: waiting for phy ready\n", dev->rev); + 
retries = 0; + do { + if (ep_pcie_phy_is_ready(dev)) + break; + retries++; + if (retries % 100 == 0) + EP_PCIE_DBG(dev, + "PCIe V%d: current number of PHY retries:%d\n", + dev->rev, retries); + usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN, + REFCLK_STABILIZATION_DELAY_US_MAX); + } while (retries < PHY_READY_TIMEOUT_COUNT); + + EP_PCIE_DBG(dev, "PCIe V%d: number of PHY retries:%d\n", + dev->rev, retries); + + if (retries == PHY_READY_TIMEOUT_COUNT) { + EP_PCIE_ERR(dev, "PCIe V%d: PCIe PHY failed to come up\n", + dev->rev); + ret = EP_PCIE_ERROR; + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_PHY), false); + goto link_fail; + } else { + EP_PCIE_INFO(dev, "PCIe V%d: PCIe PHY is ready\n", dev->rev); + } + + ep_pcie_core_init(dev, false); + ep_pcie_config_inbound_iatu(dev); + + /* enable link training */ + if (dev->phy_rev >= 3) + ep_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8)); + else + ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, BIT(0)); + + EP_PCIE_DBG(dev, "PCIe V%d: check if link is up\n", dev->rev); + + /* Wait for up to 100ms for the link to come up */ + retries = 0; + do { + usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX); + val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS); + retries++; + if (retries % 100 == 0) + EP_PCIE_DBG(dev, "PCIe V%d: LTSSM_STATE:0x%x\n", + dev->rev, (val >> 0xC) & 0x3f); + } while ((!(val & XMLH_LINK_UP) || + !ep_pcie_confirm_linkup(dev, false)) + && (retries < LINK_UP_CHECK_MAX_COUNT)); + + if (retries == LINK_UP_CHECK_MAX_COUNT) { + EP_PCIE_ERR(dev, "PCIe V%d: link initialization failed\n", + dev->rev); + ret = EP_PCIE_ERROR; + goto link_fail; + } else { + dev->link_status = EP_PCIE_LINK_UP; + dev->l23_ready = false; + EP_PCIE_DBG(dev, + "PCIe V%d: link is up after %d checkings (%d ms)\n", + dev->rev, retries, + LINK_UP_TIMEOUT_US_MIN * retries / 1000); + EP_PCIE_INFO(dev, + "PCIe V%d: link initialized for LE PCIe endpoint\n", + dev->rev); + } + +checkbme: + if (dev->active_config) + 
ep_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14); + + if (!(opt & EP_PCIE_OPT_ENUM_ASYNC)) { + /* Wait for up to 1000ms for BME to be set */ + retries = 0; + + bme = readl_relaxed(dev->dm_core + + PCIE20_COMMAND_STATUS) & BIT(2); + while (!bme && (retries < BME_CHECK_MAX_COUNT)) { + retries++; + usleep_range(BME_TIMEOUT_US_MIN, BME_TIMEOUT_US_MAX); + bme = readl_relaxed(dev->dm_core + + PCIE20_COMMAND_STATUS) & BIT(2); + } + } else { + EP_PCIE_DBG(dev, + "PCIe V%d: EP_PCIE_OPT_ENUM_ASYNC is true\n", + dev->rev); + bme = readl_relaxed(dev->dm_core + + PCIE20_COMMAND_STATUS) & BIT(2); + } + + if (bme) { + EP_PCIE_DBG(dev, + "PCIe V%d: PCIe link is up and BME is enabled after %d checkings (%d ms)\n", + dev->rev, retries, + BME_TIMEOUT_US_MIN * retries / 1000); + ep_pcie_enumeration_complete(dev); + /* expose BAR to user space to identify modem */ + ep_pcie_bar0_address = + readl_relaxed(dev->dm_core + PCIE20_BAR0); + } else { + if (!(opt & EP_PCIE_OPT_ENUM_ASYNC)) + EP_PCIE_ERR(dev, + "PCIe V%d: PCIe link is up but BME is still disabled after max waiting time\n", + dev->rev); + if (!ep_pcie_debug_keep_resource && + !(opt&EP_PCIE_OPT_ENUM_ASYNC)) { + ret = EP_PCIE_ERROR; + dev->link_status = EP_PCIE_LINK_DISABLED; + goto link_fail; + } + } + + dev->suspending = false; + goto out; + +link_fail: + dev->power_on = false; + if (!ep_pcie_debug_keep_resource) + ep_pcie_pipe_clk_deinit(dev); +pipe_clk_fail: + if (!ep_pcie_debug_keep_resource) + ep_pcie_clk_deinit(dev); +clk_fail: + if (!ep_pcie_debug_keep_resource) + ep_pcie_vreg_deinit(dev); + else + ret = 0; +out: + mutex_unlock(&dev->setup_mtx); + + return ret; +} + +int ep_pcie_core_disable_endpoint(void) +{ + int rc = 0; + struct ep_pcie_dev_t *dev = &ep_pcie_dev; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + mutex_lock(&dev->setup_mtx); + + if (!dev->power_on) { + EP_PCIE_DBG(dev, + "PCIe V%d: the link is already power down\n", + dev->rev); + goto out; + } + + dev->link_status = EP_PCIE_LINK_DISABLED; 
+ dev->power_on = false; + + EP_PCIE_DBG(dev, "PCIe V%d: shut down the link\n", + dev->rev); + + ep_pcie_pipe_clk_deinit(dev); + ep_pcie_clk_deinit(dev); + ep_pcie_vreg_deinit(dev); +out: + mutex_unlock(&dev->setup_mtx); + return rc; +} + +int ep_pcie_core_mask_irq_event(enum ep_pcie_irq_event event, + bool enable) +{ + int rc = 0; + struct ep_pcie_dev_t *dev = &ep_pcie_dev; + unsigned long irqsave_flags; + u32 mask = 0; + + EP_PCIE_DUMP(dev, + "PCIe V%d: Client askes to %s IRQ event 0x%x\n", + dev->rev, + enable ? "enable" : "disable", + event); + + spin_lock_irqsave(&dev->ext_lock, irqsave_flags); + + if (dev->aggregated_irq) { + mask = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK); + EP_PCIE_DUMP(dev, + "PCIe V%d: current PCIE20_PARF_INT_ALL_MASK:0x%x\n", + dev->rev, mask); + if (enable) + ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, + 0, BIT(event)); + else + ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, + BIT(event), 0); + EP_PCIE_DUMP(dev, + "PCIe V%d: new PCIE20_PARF_INT_ALL_MASK:0x%x\n", + dev->rev, + readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK)); + } else { + EP_PCIE_ERR(dev, + "PCIe V%d: Client askes to %s IRQ event 0x%x when aggregated IRQ is not supported\n", + dev->rev, + enable ? "enable" : "disable", + event); + rc = EP_PCIE_ERROR; + } + + spin_unlock_irqrestore(&dev->ext_lock, irqsave_flags); + return rc; +} + +static irqreturn_t ep_pcie_handle_bme_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + dev->bme_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld BME IRQ\n", dev->rev, dev->bme_counter); + + if (readl_relaxed(dev->dm_core + PCIE20_COMMAND_STATUS) & BIT(2)) { + /* BME has been enabled */ + if (!dev->enumerated) { + EP_PCIE_DBG(dev, + "PCIe V%d:BME is set. 
Enumeration is complete\n", + dev->rev); + schedule_work(&dev->handle_bme_work); + } else { + EP_PCIE_DBG(dev, + "PCIe V%d:BME is set again after the enumeration has completed; callback client for link ready\n", + dev->rev); + ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKUP); + } + } else { + EP_PCIE_DBG(dev, + "PCIe V%d:BME is still disabled\n", dev->rev); + } + + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + return IRQ_HANDLED; +} + +static irqreturn_t ep_pcie_handle_linkdown_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + dev->linkdown_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld linkdown IRQ\n", + dev->rev, dev->linkdown_counter); + + if (!dev->enumerated || dev->link_status == EP_PCIE_LINK_DISABLED) { + EP_PCIE_DBG(dev, + "PCIe V%d:Linkdown IRQ happened when the link is disabled\n", + dev->rev); + } else if (dev->suspending) { + EP_PCIE_DBG(dev, + "PCIe V%d:Linkdown IRQ happened when the link is suspending\n", + dev->rev); + } else { + dev->link_status = EP_PCIE_LINK_DISABLED; + EP_PCIE_ERR(dev, "PCIe V%d:PCIe link is down for %ld times\n", + dev->rev, dev->linkdown_counter); + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_PHY) | + BIT(EP_PCIE_RES_PARF), true); + ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKDOWN); + } + + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + + return IRQ_HANDLED; +} + +static irqreturn_t ep_pcie_handle_linkup_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + dev->linkup_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. 
%ld linkup IRQ\n", + dev->rev, dev->linkup_counter); + + dev->link_status = EP_PCIE_LINK_UP; + + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + + return IRQ_HANDLED; +} + +static irqreturn_t ep_pcie_handle_pm_turnoff_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + dev->pm_to_counter++; + EP_PCIE_DBG2(dev, + "PCIe V%d: No. %ld PM_TURNOFF is received\n", + dev->rev, dev->pm_to_counter); + EP_PCIE_DBG2(dev, "PCIe V%d: Put the link into L23\n", dev->rev); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(2)); + + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + + return IRQ_HANDLED; +} + +static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + u32 dstate; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + dstate = readl_relaxed(dev->dm_core + + PCIE20_CON_STATUS) & 0x3; + + if (dev->dump_conf) + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_DM_CORE), false); + + if (dstate == 3) { + dev->l23_ready = true; + dev->d3_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld change to D3 state\n", + dev->rev, dev->d3_counter); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(1)); + + if (dev->enumerated) + ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_HOT); + else + EP_PCIE_DBG(dev, + "PCIe V%d: do not notify client about this D3 hot event since enumeration by HLOS is not done yet\n", + dev->rev); + } else if (dstate == 0) { + dev->l23_ready = false; + dev->d0_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. 
%ld change to D0 state\n", + dev->rev, dev->d0_counter); + ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D0); + } else { + EP_PCIE_ERR(dev, + "PCIe V%d:invalid D state change to 0x%x\n", + dev->rev, dstate); + } + + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + + return IRQ_HANDLED; +} + +static int ep_pcie_enumeration(struct ep_pcie_dev_t *dev) +{ + int ret = 0; + + if (!dev) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: the input handler is NULL\n", + ep_pcie_dev.rev); + return EP_PCIE_ERROR; + } + + EP_PCIE_DBG(dev, + "PCIe V%d: start PCIe link enumeration per host side\n", + dev->rev); + + ret = ep_pcie_core_enable_endpoint(EP_PCIE_OPT_ALL); + + if (ret) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: PCIe link enumeration failed\n", + ep_pcie_dev.rev); + } else { + if (dev->link_status == EP_PCIE_LINK_ENABLED) { + EP_PCIE_INFO(&ep_pcie_dev, + "PCIe V%d: PCIe link enumeration is successful with host side\n", + ep_pcie_dev.rev); + } else if (dev->link_status == EP_PCIE_LINK_UP) { + EP_PCIE_INFO(&ep_pcie_dev, + "PCIe V%d: PCIe link training is successful with host side. 
Waiting for enumeration to complete\n", + ep_pcie_dev.rev); + } else { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: PCIe link is in the unexpected status: %d\n", + ep_pcie_dev.rev, dev->link_status); + } + } + + return ret; +} + +static void handle_perst_func(struct work_struct *work) +{ + struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t, + handle_perst_work); + + EP_PCIE_DBG(dev, + "PCIe V%d: Start enumeration due to PERST deassertion\n", + dev->rev); + + ep_pcie_enumeration(dev); +} + +static void handle_d3cold_func(struct work_struct *work) +{ + struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t, + handle_d3cold_work); + + EP_PCIE_DBG(dev, + "PCIe V%d: shutdown PCIe link due to PERST assertion before BME is set\n", + dev->rev); + ep_pcie_core_disable_endpoint(); + dev->no_notify = false; +} + +static void handle_bme_func(struct work_struct *work) +{ + struct ep_pcie_dev_t *dev = container_of(work, + struct ep_pcie_dev_t, handle_bme_work); + + ep_pcie_enumeration_complete(dev); +} + +static irqreturn_t ep_pcie_handle_perst_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + u32 perst; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + + perst = gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num); + + if (!dev->enumerated) { + EP_PCIE_DBG(dev, + "PCIe V%d: PCIe is not enumerated yet; PERST is %sasserted\n", + dev->rev, perst ? "de" : ""); + if (perst) { + /* start work for link enumeration with the host side */ + schedule_work(&dev->handle_perst_work); + } else { + dev->no_notify = true; + /* shutdown the link if the link is already on */ + schedule_work(&dev->handle_d3cold_work); + } + + goto out; + } + + if (perst) { + dev->perst_deast = true; + dev->perst_deast_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. 
%ld PERST deassertion\n", + dev->rev, dev->perst_deast_counter); + ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_RST_DEAST); + } else { + dev->perst_deast = false; + dev->perst_ast_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld PERST assertion\n", + dev->rev, dev->perst_ast_counter); + + if (dev->client_ready) { + ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_COLD); + } else { + dev->no_notify = true; + EP_PCIE_DBG(dev, + "PCIe V%d: Client driver is not ready when this PERST assertion happens; shutdown link now\n", + dev->rev); + schedule_work(&dev->handle_d3cold_work); + } + } + +out: + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + + return IRQ_HANDLED; +} + +static irqreturn_t ep_pcie_handle_global_irq(int irq, void *data) +{ + struct ep_pcie_dev_t *dev = data; + int i; + u32 status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS); + u32 mask = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK); + + ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status); + + dev->global_irq_counter++; + EP_PCIE_DUMP(dev, + "PCIe V%d: No. 
%ld Global IRQ %d received; status:0x%x; mask:0x%x\n", + dev->rev, dev->global_irq_counter, irq, status, mask); + status &= mask; + + for (i = 1; i <= EP_PCIE_INT_EVT_MAX; i++) { + if (status & BIT(i)) { + switch (i) { + case EP_PCIE_INT_EVT_LINK_DOWN: + EP_PCIE_DUMP(dev, + "PCIe V%d: handle linkdown event\n", + dev->rev); + ep_pcie_handle_linkdown_irq(irq, data); + break; + case EP_PCIE_INT_EVT_BME: + EP_PCIE_DUMP(dev, + "PCIe V%d: handle BME event\n", + dev->rev); + ep_pcie_handle_bme_irq(irq, data); + break; + case EP_PCIE_INT_EVT_PM_TURNOFF: + EP_PCIE_DUMP(dev, + "PCIe V%d: handle PM Turn-off event\n", + dev->rev); + ep_pcie_handle_pm_turnoff_irq(irq, data); + break; + case EP_PCIE_INT_EVT_MHI_A7: + EP_PCIE_DUMP(dev, + "PCIe V%d: handle MHI A7 event\n", + dev->rev); + ep_pcie_notify_event(dev, EP_PCIE_EVENT_MHI_A7); + break; + case EP_PCIE_INT_EVT_DSTATE_CHANGE: + EP_PCIE_DUMP(dev, + "PCIe V%d: handle D state change event\n", + dev->rev); + ep_pcie_handle_dstate_change_irq(irq, data); + break; + case EP_PCIE_INT_EVT_LINK_UP: + EP_PCIE_DUMP(dev, + "PCIe V%d: handle linkup event\n", + dev->rev); + ep_pcie_handle_linkup_irq(irq, data); + break; + default: + EP_PCIE_ERR(dev, + "PCIe V%d: Unexpected event %d is caught\n", + dev->rev, i); + } + } + } + + return IRQ_HANDLED; +} + +int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev) +{ + int ret; + struct device *pdev = &dev->pdev->dev; + u32 perst_irq; + + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + /* Initialize all works to be performed before registering for IRQs*/ + INIT_WORK(&dev->handle_perst_work, handle_perst_func); + INIT_WORK(&dev->handle_bme_work, handle_bme_func); + INIT_WORK(&dev->handle_d3cold_work, handle_d3cold_func); + + if (dev->aggregated_irq) { + ret = devm_request_irq(pdev, + dev->irq[EP_PCIE_INT_GLOBAL].num, + ep_pcie_handle_global_irq, + IRQF_TRIGGER_HIGH, dev->irq[EP_PCIE_INT_GLOBAL].name, + dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request global interrupt %d\n", + 
dev->rev, dev->irq[EP_PCIE_INT_GLOBAL].num); + return ret; + } + + ret = enable_irq_wake(dev->irq[EP_PCIE_INT_GLOBAL].num); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to enable wake for Global interrupt\n", + dev->rev); + return ret; + } + + EP_PCIE_DBG(dev, + "PCIe V%d: request global interrupt %d\n", + dev->rev, dev->irq[EP_PCIE_INT_GLOBAL].num); + goto perst_irq; + } + + /* register handler for BME interrupt */ + ret = devm_request_irq(pdev, + dev->irq[EP_PCIE_INT_BME].num, + ep_pcie_handle_bme_irq, + IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_BME].name, + dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request BME interrupt %d\n", + dev->rev, dev->irq[EP_PCIE_INT_BME].num); + return ret; + } + + ret = enable_irq_wake(dev->irq[EP_PCIE_INT_BME].num); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to enable wake for BME interrupt\n", + dev->rev); + return ret; + } + + /* register handler for linkdown interrupt */ + ret = devm_request_irq(pdev, + dev->irq[EP_PCIE_INT_LINK_DOWN].num, + ep_pcie_handle_linkdown_irq, + IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_LINK_DOWN].name, + dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request linkdown interrupt %d\n", + dev->rev, dev->irq[EP_PCIE_INT_LINK_DOWN].num); + return ret; + } + + /* register handler for linkup interrupt */ + ret = devm_request_irq(pdev, + dev->irq[EP_PCIE_INT_LINK_UP].num, ep_pcie_handle_linkup_irq, + IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_LINK_UP].name, + dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request linkup interrupt %d\n", + dev->rev, dev->irq[EP_PCIE_INT_LINK_UP].num); + return ret; + } + + /* register handler for PM_TURNOFF interrupt */ + ret = devm_request_irq(pdev, + dev->irq[EP_PCIE_INT_PM_TURNOFF].num, + ep_pcie_handle_pm_turnoff_irq, + IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_PM_TURNOFF].name, + dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request PM_TURNOFF interrupt %d\n", + dev->rev, 
dev->irq[EP_PCIE_INT_PM_TURNOFF].num); + return ret; + } + + /* register handler for D state change interrupt */ + ret = devm_request_irq(pdev, + dev->irq[EP_PCIE_INT_DSTATE_CHANGE].num, + ep_pcie_handle_dstate_change_irq, + IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_DSTATE_CHANGE].name, + dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request D state change interrupt %d\n", + dev->rev, dev->irq[EP_PCIE_INT_DSTATE_CHANGE].num); + return ret; + } + +perst_irq: + /* register handler for PERST interrupt */ + perst_irq = gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num); + ret = devm_request_irq(pdev, perst_irq, + ep_pcie_handle_perst_irq, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "ep_pcie_perst", dev); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to request PERST interrupt %d\n", + dev->rev, perst_irq); + return ret; + } + + ret = enable_irq_wake(perst_irq); + if (ret) { + EP_PCIE_ERR(dev, + "PCIe V%d: Unable to enable PERST interrupt %d\n", + dev->rev, perst_irq); + return ret; + } + + return 0; +} + +void ep_pcie_irq_deinit(struct ep_pcie_dev_t *dev) +{ + EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); + + disable_irq(gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num)); +} + +int ep_pcie_core_register_event(struct ep_pcie_register_event *reg) +{ + if (!reg) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: Event registration is NULL\n", + ep_pcie_dev.rev); + return -ENODEV; + } + + if (!reg->user) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: User of event registration is NULL\n", + ep_pcie_dev.rev); + return -ENODEV; + } + + ep_pcie_dev.event_reg = reg; + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: Event 0x%x is registered\n", + ep_pcie_dev.rev, reg->events); + + ep_pcie_dev.client_ready = true; + + return 0; +} + +int ep_pcie_core_deregister_event(void) +{ + if (ep_pcie_dev.event_reg) { + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: current registered events:0x%x; events are deregistered\n", + ep_pcie_dev.rev, ep_pcie_dev.event_reg->events); + ep_pcie_dev.event_reg = 
NULL; + } else { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: Event registration is NULL\n", + ep_pcie_dev.rev); + } + + return 0; +} + +enum ep_pcie_link_status ep_pcie_core_get_linkstatus(void) +{ + struct ep_pcie_dev_t *dev = &ep_pcie_dev; + u32 bme; + + if (!dev->power_on || (dev->link_status == EP_PCIE_LINK_DISABLED)) { + EP_PCIE_DBG(dev, + "PCIe V%d: PCIe endpoint is not powered on\n", + dev->rev); + return EP_PCIE_LINK_DISABLED; + } + + bme = readl_relaxed(dev->dm_core + + PCIE20_COMMAND_STATUS) & BIT(2); + if (bme) { + EP_PCIE_DBG(dev, + "PCIe V%d: PCIe link is up and BME is enabled; current SW link status:%d\n", + dev->rev, dev->link_status); + dev->link_status = EP_PCIE_LINK_ENABLED; + if (dev->no_notify) { + EP_PCIE_DBG(dev, + "PCIe V%d: BME is set now, but do not tell client about BME enable\n", + dev->rev); + return EP_PCIE_LINK_UP; + } + } else { + EP_PCIE_DBG(dev, + "PCIe V%d: PCIe link is up but BME is disabled; current SW link status:%d\n", + dev->rev, dev->link_status); + dev->link_status = EP_PCIE_LINK_UP; + } + return dev->link_status; +} + +int ep_pcie_core_config_outbound_iatu(struct ep_pcie_iatu entries[], + u32 num_entries) +{ + u32 data_start = 0; + u32 data_end = 0; + u32 data_tgt_lower = 0; + u32 data_tgt_upper = 0; + u32 ctrl_start = 0; + u32 ctrl_end = 0; + u32 ctrl_tgt_lower = 0; + u32 ctrl_tgt_upper = 0; + u32 upper = 0; + bool once = true; + + if (ep_pcie_dev.active_config) { + upper = EP_PCIE_OATU_UPPER; + if (once) { + once = false; + EP_PCIE_DBG2(&ep_pcie_dev, + "PCIe V%d: No outbound iATU config is needed since active config is enabled\n", + ep_pcie_dev.rev); + } + } + + if ((num_entries > MAX_IATU_ENTRY_NUM) || !num_entries) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: Wrong iATU entry number %d\n", + ep_pcie_dev.rev, num_entries); + return EP_PCIE_ERROR; + } + + data_start = entries[0].start; + data_end = entries[0].end; + data_tgt_lower = entries[0].tgt_lower; + data_tgt_upper = entries[0].tgt_upper; + + if (num_entries > 1) { + 
ctrl_start = entries[1].start; + ctrl_end = entries[1].end; + ctrl_tgt_lower = entries[1].tgt_lower; + ctrl_tgt_upper = entries[1].tgt_upper; + } + + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: data_start:0x%x; data_end:0x%x; data_tgt_lower:0x%x; data_tgt_upper:0x%x; ctrl_start:0x%x; ctrl_end:0x%x; ctrl_tgt_lower:0x%x; ctrl_tgt_upper:0x%x\n", + ep_pcie_dev.rev, data_start, data_end, data_tgt_lower, + data_tgt_upper, ctrl_start, ctrl_end, ctrl_tgt_lower, + ctrl_tgt_upper); + + + if ((ctrl_end < data_start) || (data_end < ctrl_start)) { + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: iATU configuration case No. 1: detached\n", + ep_pcie_dev.rev); + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_DATA, + data_start, upper, data_end, + data_tgt_lower, data_tgt_upper); + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_CTRL, + ctrl_start, upper, ctrl_end, + ctrl_tgt_lower, ctrl_tgt_upper); + } else if ((data_start <= ctrl_start) && (ctrl_end <= data_end)) { + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: iATU configuration case No. 2: included\n", + ep_pcie_dev.rev); + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_DATA, + data_start, upper, data_end, + data_tgt_lower, data_tgt_upper); + } else { + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: iATU configuration case No. 
3: overlap\n", + ep_pcie_dev.rev); + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_CTRL, + ctrl_start, upper, ctrl_end, + ctrl_tgt_lower, ctrl_tgt_upper); + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_DATA, + data_start, upper, data_end, + data_tgt_lower, data_tgt_upper); + } + + return 0; +} + +int ep_pcie_core_get_msi_config(struct ep_pcie_msi_config *cfg) +{ + u32 cap, lower, upper, data, ctrl_reg; + static u32 changes; + + if (ep_pcie_dev.link_status == EP_PCIE_LINK_DISABLED) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: PCIe link is currently disabled\n", + ep_pcie_dev.rev); + return EP_PCIE_ERROR; + } + + cap = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_CAP_ID_NEXT_CTRL); + EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: MSI CAP:0x%x\n", + ep_pcie_dev.rev, cap); + + if (!(cap & BIT(16))) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: MSI is not enabled yet\n", + ep_pcie_dev.rev); + return EP_PCIE_ERROR; + } + + lower = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_LOWER); + upper = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_UPPER); + data = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_DATA); + ctrl_reg = readl_relaxed(ep_pcie_dev.dm_core + + PCIE20_MSI_CAP_ID_NEXT_CTRL); + + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: MSI info: lower:0x%x; upper:0x%x; data:0x%x\n", + ep_pcie_dev.rev, lower, upper, data); + + if (ctrl_reg & BIT(16)) { + struct resource *msi = + ep_pcie_dev.res[EP_PCIE_RES_MSI].resource; + if (ep_pcie_dev.active_config) + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_MSI, + msi->start, EP_PCIE_OATU_UPPER, + msi->end, lower, upper); + else + ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev, + EP_PCIE_OATU_INDEX_MSI, + msi->start, 0, msi->end, + lower, upper); + + if (ep_pcie_dev.active_config) { + cfg->lower = lower; + cfg->upper = upper; + } else { + cfg->lower = msi->start + (lower & 0xfff); + cfg->upper = 0; + } + cfg->data = data; + cfg->msg_num = (cap >> 20) & 0x7; + if ((lower != 
ep_pcie_dev.msi_cfg.lower) + || (upper != ep_pcie_dev.msi_cfg.upper) + || (data != ep_pcie_dev.msi_cfg.data) + || (cfg->msg_num != ep_pcie_dev.msi_cfg.msg_num)) { + changes++; + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: MSI config has been changed by host side for %d time(s)\n", + ep_pcie_dev.rev, changes); + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: old MSI cfg: lower:0x%x; upper:0x%x; data:0x%x; msg_num:0x%x\n", + ep_pcie_dev.rev, ep_pcie_dev.msi_cfg.lower, + ep_pcie_dev.msi_cfg.upper, + ep_pcie_dev.msi_cfg.data, + ep_pcie_dev.msi_cfg.msg_num); + ep_pcie_dev.msi_cfg.lower = lower; + ep_pcie_dev.msi_cfg.upper = upper; + ep_pcie_dev.msi_cfg.data = data; + ep_pcie_dev.msi_cfg.msg_num = cfg->msg_num; + } + return 0; + } + + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: Wrong MSI info found when MSI is enabled: lower:0x%x; data:0x%x\n", + ep_pcie_dev.rev, lower, data); + return EP_PCIE_ERROR; +} + +int ep_pcie_core_trigger_msi(u32 idx) +{ + u32 addr, data, ctrl_reg; + int max_poll = MSI_EXIT_L1SS_WAIT_MAX_COUNT; + + if (ep_pcie_dev.link_status == EP_PCIE_LINK_DISABLED) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: PCIe link is currently disabled\n", + ep_pcie_dev.rev); + return EP_PCIE_ERROR; + } + + addr = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_LOWER); + data = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_DATA); + ctrl_reg = readl_relaxed(ep_pcie_dev.dm_core + + PCIE20_MSI_CAP_ID_NEXT_CTRL); + + if (ctrl_reg & BIT(16)) { + ep_pcie_dev.msi_counter++; + EP_PCIE_DUMP(&ep_pcie_dev, + "PCIe V%d: No. %ld MSI fired for IRQ %d; index from client:%d; active-config is %s enabled\n", + ep_pcie_dev.rev, ep_pcie_dev.msi_counter, + data + idx, idx, + ep_pcie_dev.active_config ? 
"" : "not"); + + if (ep_pcie_dev.active_config) { + u32 status; + + if (ep_pcie_dev.msi_counter % 2) { + EP_PCIE_DBG2(&ep_pcie_dev, + "PCIe V%d: try to trigger MSI by PARF_MSI_GEN\n", + ep_pcie_dev.rev); + ep_pcie_write_reg(ep_pcie_dev.parf, + PCIE20_PARF_MSI_GEN, idx); + status = readl_relaxed(ep_pcie_dev.parf + + PCIE20_PARF_LTR_MSI_EXIT_L1SS); + while ((status & BIT(1)) && (max_poll-- > 0)) { + udelay(MSI_EXIT_L1SS_WAIT); + status = readl_relaxed(ep_pcie_dev.parf + + + PCIE20_PARF_LTR_MSI_EXIT_L1SS); + } + if (max_poll == 0) + EP_PCIE_DBG2(&ep_pcie_dev, + "PCIe V%d: MSI_EXIT_L1SS is not cleared yet\n", + ep_pcie_dev.rev); + else + EP_PCIE_DBG2(&ep_pcie_dev, + "PCIe V%d: MSI_EXIT_L1SS has been cleared\n", + ep_pcie_dev.rev); + } else { + EP_PCIE_DBG2(&ep_pcie_dev, + "PCIe V%d: try to trigger MSI by direct address write as well\n", + ep_pcie_dev.rev); + ep_pcie_write_reg(ep_pcie_dev.msi, addr & 0xfff, + data + idx); + } + } else { + ep_pcie_write_reg(ep_pcie_dev.msi, addr & 0xfff, data + + idx); + } + return 0; + } + + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: MSI is not enabled yet. MSI addr:0x%x; data:0x%x; index from client:%d\n", + ep_pcie_dev.rev, addr, data, idx); + return EP_PCIE_ERROR; +} + +int ep_pcie_core_wakeup_host(void) +{ + struct ep_pcie_dev_t *dev = &ep_pcie_dev; + + if (dev->perst_deast && !dev->l23_ready) { + EP_PCIE_ERR(dev, + "PCIe V%d: request to assert WAKE# when PERST is de-asserted and D3hot is not received\n", + dev->rev); + return EP_PCIE_ERROR; + } + + dev->wake_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld to assert PCIe WAKE#; perst is %s de-asserted; D3hot is %s received\n", + dev->rev, dev->wake_counter, + dev->perst_deast ? "" : "not", + dev->l23_ready ? 
"" : "not"); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + dev->gpio[EP_PCIE_GPIO_WAKE].on); + return 0; +} + +int ep_pcie_core_config_db_routing(struct ep_pcie_db_config chdb_cfg, + struct ep_pcie_db_config erdb_cfg) +{ + u32 dbs = (erdb_cfg.end << 24) | (erdb_cfg.base << 16) | + (chdb_cfg.end << 8) | chdb_cfg.base; + + ep_pcie_write_reg(ep_pcie_dev.parf, PCIE20_PARF_MHI_IPA_DBS, dbs); + ep_pcie_write_reg(ep_pcie_dev.parf, + PCIE20_PARF_MHI_IPA_CDB_TARGET_LOWER, + chdb_cfg.tgt_addr); + ep_pcie_write_reg(ep_pcie_dev.parf, + PCIE20_PARF_MHI_IPA_EDB_TARGET_LOWER, + erdb_cfg.tgt_addr); + + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: DB routing info: chdb_cfg.base:0x%x; chdb_cfg.end:0x%x; erdb_cfg.base:0x%x; erdb_cfg.end:0x%x; chdb_cfg.tgt_addr:0x%x; erdb_cfg.tgt_addr:0x%x\n", + ep_pcie_dev.rev, chdb_cfg.base, chdb_cfg.end, erdb_cfg.base, + erdb_cfg.end, chdb_cfg.tgt_addr, erdb_cfg.tgt_addr); + + return 0; +} + +struct ep_pcie_hw hw_drv = { + .register_event = ep_pcie_core_register_event, + .deregister_event = ep_pcie_core_deregister_event, + .get_linkstatus = ep_pcie_core_get_linkstatus, + .config_outbound_iatu = ep_pcie_core_config_outbound_iatu, + .get_msi_config = ep_pcie_core_get_msi_config, + .trigger_msi = ep_pcie_core_trigger_msi, + .wakeup_host = ep_pcie_core_wakeup_host, + .config_db_routing = ep_pcie_core_config_db_routing, + .enable_endpoint = ep_pcie_core_enable_endpoint, + .disable_endpoint = ep_pcie_core_disable_endpoint, + .mask_irq_event = ep_pcie_core_mask_irq_event, +}; + +static int ep_pcie_probe(struct platform_device *pdev) +{ + int ret; + + pr_debug("%s\n", __func__); + + ep_pcie_dev.link_speed = 1; + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,pcie-link-speed", + &ep_pcie_dev.link_speed); + if (ret) + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: pcie-link-speed does not exist\n", + ep_pcie_dev.rev); + else + EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: 
pcie-link-speed:%d\n", + ep_pcie_dev.rev, ep_pcie_dev.link_speed); + + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,dbi-base-reg", + &ep_pcie_dev.dbi_base_reg); + if (ret) + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: dbi-base-reg does not exist\n", + ep_pcie_dev.rev); + else + EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: dbi-base-reg:0x%x\n", + ep_pcie_dev.rev, ep_pcie_dev.dbi_base_reg); + + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,slv-space-reg", + &ep_pcie_dev.slv_space_reg); + if (ret) + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: slv-space-reg does not exist\n", + ep_pcie_dev.rev); + else + EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: slv-space-reg:0x%x\n", + ep_pcie_dev.rev, ep_pcie_dev.slv_space_reg); + + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,phy-status-reg", + &ep_pcie_dev.phy_status_reg); + if (ret) + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: phy-status-reg does not exist\n", + ep_pcie_dev.rev); + else + EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: phy-status-reg:0x%x\n", + ep_pcie_dev.rev, ep_pcie_dev.phy_status_reg); + + ep_pcie_dev.phy_rev = 1; + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,pcie-phy-ver", + &ep_pcie_dev.phy_rev); + if (ret) + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: pcie-phy-ver does not exist\n", + ep_pcie_dev.rev); + else + EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: pcie-phy-ver:%d\n", + ep_pcie_dev.rev, ep_pcie_dev.phy_rev); + + ep_pcie_dev.active_config = of_property_read_bool((&pdev->dev)->of_node, + "qcom,pcie-active-config"); + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: active config is %s enabled\n", + ep_pcie_dev.rev, ep_pcie_dev.active_config ? "" : "not"); + + ep_pcie_dev.aggregated_irq = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,pcie-aggregated-irq"); + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: aggregated IRQ is %s enabled\n", + ep_pcie_dev.rev, ep_pcie_dev.aggregated_irq ? 
"" : "not"); + + ep_pcie_dev.mhi_a7_irq = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,pcie-mhi-a7-irq"); + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: Mhi a7 IRQ is %s enabled\n", + ep_pcie_dev.rev, ep_pcie_dev.mhi_a7_irq ? "" : "not"); + + ep_pcie_dev.perst_enum = of_property_read_bool((&pdev->dev)->of_node, + "qcom,pcie-perst-enum"); + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: enum by PERST is %s enabled\n", + ep_pcie_dev.rev, ep_pcie_dev.perst_enum ? "" : "not"); + + ep_pcie_dev.rev = 1711211; + ep_pcie_dev.pdev = pdev; + memcpy(ep_pcie_dev.vreg, ep_pcie_vreg_info, + sizeof(ep_pcie_vreg_info)); + memcpy(ep_pcie_dev.gpio, ep_pcie_gpio_info, + sizeof(ep_pcie_gpio_info)); + memcpy(ep_pcie_dev.clk, ep_pcie_clk_info, + sizeof(ep_pcie_clk_info)); + memcpy(ep_pcie_dev.pipeclk, ep_pcie_pipe_clk_info, + sizeof(ep_pcie_pipe_clk_info)); + memcpy(ep_pcie_dev.reset, ep_pcie_reset_info, + sizeof(ep_pcie_reset_info)); + memcpy(ep_pcie_dev.res, ep_pcie_res_info, + sizeof(ep_pcie_res_info)); + memcpy(ep_pcie_dev.irq, ep_pcie_irq_info, + sizeof(ep_pcie_irq_info)); + + ret = ep_pcie_get_resources(&ep_pcie_dev, + ep_pcie_dev.pdev); + if (ret) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: failed to get resources\n", + ep_pcie_dev.rev); + goto res_failure; + } + + ret = ep_pcie_gpio_init(&ep_pcie_dev); + if (ret) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: failed to init GPIO\n", + ep_pcie_dev.rev); + ep_pcie_release_resources(&ep_pcie_dev); + goto gpio_failure; + } + + ret = ep_pcie_irq_init(&ep_pcie_dev); + if (ret) { + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: failed to init IRQ\n", + ep_pcie_dev.rev); + ep_pcie_release_resources(&ep_pcie_dev); + ep_pcie_gpio_deinit(&ep_pcie_dev); + goto irq_failure; + } + + if (ep_pcie_dev.perst_enum && + !gpio_get_value(ep_pcie_dev.gpio[EP_PCIE_GPIO_PERST].num)) { + EP_PCIE_DBG2(&ep_pcie_dev, + "PCIe V%d: %s probe is done; link will be trained when PERST is deasserted\n", + ep_pcie_dev.rev, dev_name(&(pdev->dev))); + return 0; + } + + 
EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: %s got resources successfully; start turning on the link\n", + ep_pcie_dev.rev, dev_name(&(pdev->dev))); + + ret = ep_pcie_enumeration(&ep_pcie_dev); + + if (!ret || ep_pcie_debug_keep_resource) + return 0; + + ep_pcie_irq_deinit(&ep_pcie_dev); +irq_failure: + ep_pcie_gpio_deinit(&ep_pcie_dev); +gpio_failure: + ep_pcie_release_resources(&ep_pcie_dev); +res_failure: + EP_PCIE_ERR(&ep_pcie_dev, "PCIe V%d: Driver probe failed:%d\n", + ep_pcie_dev.rev, ret); + + return ret; +} + +static int __exit ep_pcie_remove(struct platform_device *pdev) +{ + pr_debug("%s\n", __func__); + + ep_pcie_irq_deinit(&ep_pcie_dev); + ep_pcie_vreg_deinit(&ep_pcie_dev); + ep_pcie_pipe_clk_deinit(&ep_pcie_dev); + ep_pcie_clk_deinit(&ep_pcie_dev); + ep_pcie_gpio_deinit(&ep_pcie_dev); + ep_pcie_release_resources(&ep_pcie_dev); + ep_pcie_deregister_drv(&hw_drv); + + return 0; +} + +static const struct of_device_id ep_pcie_match[] = { + { .compatible = "qcom,pcie-ep", + }, + {} +}; + +static struct platform_driver ep_pcie_driver = { + .probe = ep_pcie_probe, + .remove = ep_pcie_remove, + .driver = { + .name = "pcie-ep", + .owner = THIS_MODULE, + .of_match_table = ep_pcie_match, + }, +}; + +static int __init ep_pcie_init(void) +{ + int ret; + char logname[MAX_NAME_LEN]; + + pr_debug("%s\n", __func__); + + snprintf(logname, MAX_NAME_LEN, "ep-pcie-long"); + ep_pcie_dev.ipc_log_sel = + ipc_log_context_create(EP_PCIE_LOG_PAGES, logname, 0); + if (ep_pcie_dev.ipc_log_sel == NULL) + pr_err("%s: unable to create IPC selected log for %s\n", + __func__, logname); + else + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: IPC selected logging is enable for %s\n", + ep_pcie_dev.rev, logname); + + snprintf(logname, MAX_NAME_LEN, "ep-pcie-short"); + ep_pcie_dev.ipc_log_ful = + ipc_log_context_create(EP_PCIE_LOG_PAGES * 2, logname, 0); + if (ep_pcie_dev.ipc_log_ful == NULL) + pr_err("%s: unable to create IPC detailed log for %s\n", + __func__, logname); + else + 
EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: IPC detailed logging is enable for %s\n", + ep_pcie_dev.rev, logname); + + snprintf(logname, MAX_NAME_LEN, "ep-pcie-dump"); + ep_pcie_dev.ipc_log_dump = + ipc_log_context_create(EP_PCIE_LOG_PAGES, logname, 0); + if (ep_pcie_dev.ipc_log_dump == NULL) + pr_err("%s: unable to create IPC dump log for %s\n", + __func__, logname); + else + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: IPC dump logging is enable for %s\n", + ep_pcie_dev.rev, logname); + + mutex_init(&ep_pcie_dev.setup_mtx); + mutex_init(&ep_pcie_dev.ext_mtx); + spin_lock_init(&ep_pcie_dev.ext_lock); + spin_lock_init(&ep_pcie_dev.isr_lock); + + ep_pcie_debugfs_init(&ep_pcie_dev); + + ret = platform_driver_register(&ep_pcie_driver); + + if (ret) + EP_PCIE_ERR(&ep_pcie_dev, + "PCIe V%d: failed register platform driver:%d\n", + ep_pcie_dev.rev, ret); + else + EP_PCIE_DBG(&ep_pcie_dev, + "PCIe V%d: platform driver is registered\n", + ep_pcie_dev.rev); + + return ret; +} + +static void __exit ep_pcie_exit(void) +{ + pr_debug("%s\n", __func__); + + ipc_log_context_destroy(ep_pcie_dev.ipc_log_sel); + ipc_log_context_destroy(ep_pcie_dev.ipc_log_ful); + ipc_log_context_destroy(ep_pcie_dev.ipc_log_dump); + + ep_pcie_debugfs_exit(); + + platform_driver_unregister(&ep_pcie_driver); +} + +module_init(ep_pcie_init); +module_exit(ep_pcie_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM PCIe Endpoint Driver"); diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c new file mode 100644 index 0000000000000000000000000000000000000000..e6a2ef96cb42f1b46351a904f0b84841acfb6a90 --- /dev/null +++ b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c @@ -0,0 +1,459 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Debugging enhancement in MSM PCIe endpoint driver. + */ + +#include +#include +#include +#include +#include +#include "ep_pcie_com.h" +#include "ep_pcie_phy.h" + +static struct dentry *dent_ep_pcie; +static struct dentry *dfile_case; +static struct ep_pcie_dev_t *dev; + +static void ep_ep_pcie_phy_dump_pcs_debug_bus(struct ep_pcie_dev_t *dev, + u32 cntrl4, u32 cntrl5, + u32 cntrl6, u32 cntrl7) +{ + ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL4, cntrl4); + ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL5, cntrl5); + ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL6, cntrl6); + ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL7, cntrl7); + + if (!cntrl4 && !cntrl5 && !cntrl6 && !cntrl7) { + EP_PCIE_DUMP(dev, + "PCIe V%d: zero out test control registers\n\n", + dev->rev); + return; + } + + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_TEST_CONTROL4: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL4)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_TEST_CONTROL5: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL5)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_TEST_CONTROL6: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL6)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_TEST_CONTROL7: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL7)); + + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_0_STATUS)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_1_STATUS)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + 
PCIE_PHY_DEBUG_BUS_2_STATUS)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_3_STATUS)); +} + +static void ep_ep_pcie_phy_dump_pcs_misc_debug_bus(struct ep_pcie_dev_t *dev, + u32 b0, u32 b1, u32 b2, u32 b3) +{ + ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX, b0); + ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX, b1); + ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX, b2); + ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX, b3); + + if (!b0 && !b1 && !b2 && !b3) { + EP_PCIE_DUMP(dev, + "PCIe V%d: zero out misc debug bus byte index registers\n\n", + dev->rev); + return; + } + + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX)); + + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_0_STATUS)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_1_STATUS)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_2_STATUS)); + EP_PCIE_DUMP(dev, + "PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_3_STATUS: 0x%x\n\n", + dev->rev, + readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_3_STATUS)); +} + +static void 
ep_pcie_phy_dump(struct ep_pcie_dev_t *dev) +{ + int i; + u32 write_val; + + EP_PCIE_DUMP(dev, "PCIe V%d: Beginning of PHY debug dump\n\n", + dev->rev); + + EP_PCIE_DUMP(dev, "PCIe V%d: PCS Debug Signals\n\n", dev->rev); + + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x01, 0x02, 0x03, 0x0A); + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x0E, 0x0F, 0x12, 0x13); + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x18, 0x19, 0x1A, 0x1B); + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x1C, 0x1D, 0x1E, 0x1F); + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x20, 0x21, 0x22, 0x23); + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0, 0, 0, 0); + + EP_PCIE_DUMP(dev, "PCIe V%d: PCS Misc Debug Signals\n\n", dev->rev); + + ep_ep_pcie_phy_dump_pcs_misc_debug_bus(dev, 0x1, 0x2, 0x3, 0x4); + ep_ep_pcie_phy_dump_pcs_misc_debug_bus(dev, 0x5, 0x6, 0x7, 0x8); + ep_ep_pcie_phy_dump_pcs_misc_debug_bus(dev, 0, 0, 0, 0); + + EP_PCIE_DUMP(dev, "PCIe V%d: QSERDES COM Debug Signals\n\n", dev->rev); + + for (i = 0; i < 2; i++) { + write_val = 0x2 + i; + + ep_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, + write_val); + + EP_PCIE_DUMP(dev, + "PCIe V%d: to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL)); + EP_PCIE_DUMP(dev, + "PCIe V%d: QSERDES_COM_DEBUG_BUS0: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0)); + EP_PCIE_DUMP(dev, + "PCIe V%d: QSERDES_COM_DEBUG_BUS1: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1)); + EP_PCIE_DUMP(dev, + "PCIe V%d: QSERDES_COM_DEBUG_BUS2: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2)); + EP_PCIE_DUMP(dev, + "PCIe V%d: QSERDES_COM_DEBUG_BUS3: 0x%x\n\n", + dev->rev, + readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3)); + } + + ep_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0); + + EP_PCIE_DUMP(dev, "PCIe V%d: QSERDES LANE Debug Signals\n\n", + dev->rev); + + for (i = 0; i < 3; i++) { + write_val = 0x1 + i; + ep_pcie_write_reg(dev->phy, + 
QSERDES_TX_DEBUG_BUS_SEL, write_val); + EP_PCIE_DUMP(dev, + "PCIe V%d: QSERDES_TX_DEBUG_BUS_SEL: 0x%x\n", + dev->rev, + readl_relaxed(dev->phy + QSERDES_TX_DEBUG_BUS_SEL)); + + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x30, 0x31, 0x32, 0x33); + } + + ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0, 0, 0, 0); + + EP_PCIE_DUMP(dev, "PCIe V%d: End of PHY debug dump\n\n", dev->rev); + +} + +void ep_pcie_reg_dump(struct ep_pcie_dev_t *dev, u32 sel, bool linkdown) +{ + int r, i; + u32 original; + u32 size; + + EP_PCIE_DBG(dev, + "PCIe V%d: Dump PCIe reg for 0x%x %s linkdown\n", + dev->rev, sel, linkdown ? "with" : "without"); + + if (!dev->power_on) { + EP_PCIE_ERR(dev, + "PCIe V%d: the power is already down; can't dump registers\n", + dev->rev); + return; + } + + if (linkdown) { + EP_PCIE_DUMP(dev, + "PCIe V%d: dump PARF registers for linkdown case\n", + dev->rev); + + original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL); + for (i = 1; i <= 0x1A; i++) { + ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, + 0xFF0000, i << 16); + EP_PCIE_DUMP(dev, + "PCIe V%d: PARF_SYS_CTRL:0x%x PARF_TEST_BUS:0x%x\n", + dev->rev, + readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL), + readl_relaxed(dev->parf + + PCIE20_PARF_TEST_BUS)); + } + ep_pcie_write_reg(dev->parf, PCIE20_PARF_SYS_CTRL, original); + } + + for (r = 0; r < EP_PCIE_MAX_RES; r++) { + if (!(sel & BIT(r))) + continue; + + if ((r == EP_PCIE_RES_PHY) && (dev->phy_rev > 3)) + ep_pcie_phy_dump(dev); + + size = resource_size(dev->res[r].resource); + EP_PCIE_DUMP(dev, + "\nPCIe V%d: dump registers of %s\n\n", + dev->rev, dev->res[r].name); + + for (i = 0; i < size; i += 32) { + EP_PCIE_DUMP(dev, + "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n", + i, readl_relaxed(dev->res[r].base + i), + readl_relaxed(dev->res[r].base + (i + 4)), + readl_relaxed(dev->res[r].base + (i + 8)), + readl_relaxed(dev->res[r].base + (i + 12)), + readl_relaxed(dev->res[r].base + (i + 16)), + readl_relaxed(dev->res[r].base + (i + 20)), + 
readl_relaxed(dev->res[r].base + (i + 24)), + readl_relaxed(dev->res[r].base + (i + 28))); + } + } +} + +static void ep_pcie_show_status(struct ep_pcie_dev_t *dev) +{ + EP_PCIE_DBG_FS("PCIe: is %s enumerated\n", + dev->enumerated ? "" : "not"); + EP_PCIE_DBG_FS("PCIe: link is %s\n", + (dev->link_status == EP_PCIE_LINK_ENABLED) + ? "enabled" : "disabled"); + EP_PCIE_DBG_FS("the link is %s suspending\n", + dev->suspending ? "" : "not"); + EP_PCIE_DBG_FS("the power is %s on\n", + dev->power_on ? "" : "not"); + EP_PCIE_DBG_FS("bus_client: %d\n", + dev->bus_client); + EP_PCIE_DBG_FS("linkdown_counter: %lu\n", + dev->linkdown_counter); + EP_PCIE_DBG_FS("linkup_counter: %lu\n", + dev->linkup_counter); + EP_PCIE_DBG_FS("wake_counter: %lu\n", + dev->wake_counter); + EP_PCIE_DBG_FS("d0_counter: %lu\n", + dev->d0_counter); + EP_PCIE_DBG_FS("d3_counter: %lu\n", + dev->d3_counter); + EP_PCIE_DBG_FS("perst_ast_counter: %lu\n", + dev->perst_ast_counter); + EP_PCIE_DBG_FS("perst_deast_counter: %lu\n", + dev->perst_deast_counter); +} + +static ssize_t ep_pcie_cmd_debug(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long ret; + char str[MAX_MSG_LEN]; + unsigned int testcase = 0; + struct ep_pcie_msi_config msi_cfg; + int i; + struct ep_pcie_hw *phandle = NULL; + struct ep_pcie_iatu entries[2] = { + {0x80000000, 0xbe7fffff, 0, 0}, + {0xb1440000, 0xb144ae1e, 0x31440000, 0} + }; + struct ep_pcie_db_config chdb_cfg = {0x64, 0x6b, 0xfd4fa000}; + struct ep_pcie_db_config erdb_cfg = {0x64, 0x6b, 0xfd4fa080}; + + phandle = ep_pcie_get_phandle(hw_drv.device_id); + + memset(str, 0, sizeof(str)); + ret = copy_from_user(str, buf, sizeof(str)); + if (ret) + return -EFAULT; + + for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i) + testcase = (testcase * 10) + (str[i] - '0'); + + EP_PCIE_DBG_FS("PCIe: TEST: %d\n", testcase); + + + switch (testcase) { + case 0: /* output status */ + ep_pcie_show_status(dev); + break; + case 1: /* output 
PHY and PARF registers */ + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_PHY) | + BIT(EP_PCIE_RES_PARF), true); + break; + case 2: /* output core registers */ + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_DM_CORE), false); + break; + case 3: /* output MMIO registers */ + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_MMIO), false); + break; + case 4: /* output ELBI registers */ + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_ELBI), false); + break; + case 5: /* output MSI registers */ + ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_MSI), false); + break; + case 6: /* turn on link */ + ep_pcie_enable_endpoint(phandle, EP_PCIE_OPT_ALL); + break; + case 7: /* enumeration */ + ep_pcie_enable_endpoint(phandle, EP_PCIE_OPT_ENUM); + break; + case 8: /* turn off link */ + ep_pcie_disable_endpoint(phandle); + break; + case 9: /* check MSI */ + ep_pcie_get_msi_config(phandle, &msi_cfg); + break; + case 10: /* trigger MSI */ + ep_pcie_trigger_msi(phandle, 0); + break; + case 11: /* indicate the status of PCIe link */ + EP_PCIE_DBG_FS("\nPCIe: link status is %d\n\n", + ep_pcie_get_linkstatus(phandle)); + break; + case 12: /* configure outbound iATU */ + ep_pcie_config_outbound_iatu(phandle, entries, 2); + break; + case 13: /* wake up the host */ + ep_pcie_wakeup_host(phandle); + break; + case 14: /* Configure routing of doorbells */ + ep_pcie_config_db_routing(phandle, chdb_cfg, erdb_cfg); + break; + case 21: /* write D3 */ + EP_PCIE_DBG_FS("\nPCIe Testcase %d: write D3 to EP\n\n", + testcase); + EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x before change\n\n", + readl_relaxed(dev->dm_core + 0x44)); + ep_pcie_write_mask(dev->dm_core + 0x44, 0, 0x3); + EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x now\n\n", + readl_relaxed(dev->dm_core + 0x44)); + break; + case 22: /* write D0 */ + EP_PCIE_DBG_FS("\nPCIe Testcase %d: write D0 to EP\n\n", + testcase); + EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x before change\n\n", + readl_relaxed(dev->dm_core + 0x44)); + ep_pcie_write_mask(dev->dm_core + 0x44, 0x3, 0); + 
EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x now\n\n", + readl_relaxed(dev->dm_core + 0x44)); + break; + case 23: /* assert wake */ + EP_PCIE_DBG_FS("\nPCIe Testcase %d: assert wake\n\n", + testcase); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + dev->gpio[EP_PCIE_GPIO_WAKE].on); + break; + case 24: /* deassert wake */ + EP_PCIE_DBG_FS("\nPCIe Testcase %d: deassert wake\n\n", + testcase); + gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num, + 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on); + break; + case 25: /* output PERST# status */ + EP_PCIE_DBG_FS("\nPCIe: PERST# is %d\n\n", + gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num)); + break; + case 26: /* output WAKE# status */ + EP_PCIE_DBG_FS("\nPCIe: WAKE# is %d\n\n", + gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num)); + break; + case 31: /* output core registers when D3 hot is set by host*/ + dev->dump_conf = true; + break; + case 32: /* do not output core registers when D3 hot is set by host*/ + dev->dump_conf = false; + break; + default: + EP_PCIE_DBG_FS("PCIe: Invalid testcase: %d\n", testcase); + break; + } + + if (ret == 0) + return count; + else + return -EFAULT; +} + +const struct file_operations ep_pcie_cmd_debug_ops = { + .write = ep_pcie_cmd_debug, +}; + +void ep_pcie_debugfs_init(struct ep_pcie_dev_t *ep_dev) +{ + dev = ep_dev; + dent_ep_pcie = debugfs_create_dir("pcie-ep", 0); + if (IS_ERR(dent_ep_pcie)) { + EP_PCIE_ERR(dev, + "PCIe V%d: fail to create the folder for debug_fs\n", + dev->rev); + return; + } + + dfile_case = debugfs_create_file("case", 0664, + dent_ep_pcie, 0, + &ep_pcie_cmd_debug_ops); + if (!dfile_case || IS_ERR(dfile_case)) { + EP_PCIE_ERR(dev, + "PCIe V%d: fail to create the file for case\n", + dev->rev); + goto case_error; + } + + EP_PCIE_DBG2(dev, + "PCIe V%d: debugfs is enabled\n", + dev->rev); + + return; + +case_error: + debugfs_remove(dent_ep_pcie); +} + +void ep_pcie_debugfs_exit(void) +{ + debugfs_remove(dfile_case); + debugfs_remove(dent_ep_pcie); +} diff --git 
a/drivers/platform/msm/ep_pcie/ep_pcie_phy.c b/drivers/platform/msm/ep_pcie/ep_pcie_phy.c new file mode 100644 index 0000000000000000000000000000000000000000..89562ef33cea08a6b83975d2334afec6e872cf53 --- /dev/null +++ b/drivers/platform/msm/ep_pcie/ep_pcie_phy.c @@ -0,0 +1,165 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * MSM PCIe PHY endpoint mode + */ + +#include "ep_pcie_com.h" +#include "ep_pcie_phy.h" + +void ep_pcie_phy_init(struct ep_pcie_dev_t *dev) +{ + switch (dev->phy_rev) { + case 3: + EP_PCIE_DBG(dev, + "PCIe V%d: PHY V%d: Initializing 20nm QMP phy - 100MHz\n", + dev->rev, dev->phy_rev); + break; + case 4: + EP_PCIE_DBG(dev, + "PCIe V%d: PHY V%d: Initializing 14nm QMP phy - 100MHz\n", + dev->rev, dev->phy_rev); + break; + case 5: + EP_PCIE_DBG(dev, + "PCIe V%d: PHY V%d: Initializing 10nm QMP phy - 100MHz\n", + dev->rev, dev->phy_rev); + break; + case 6: + EP_PCIE_DBG(dev, + "PCIe V%d: PHY V%d: Initializing 7nm QMP phy - 100MHz\n", + dev->rev, dev->phy_rev); + break; + default: + EP_PCIE_ERR(dev, + "PCIe V%d: Unexpected phy version %d is caught\n", + dev->rev, dev->phy_rev); + } + + if (dev->phy_init_len && dev->phy_init) { + int i; + struct ep_pcie_phy_info_t *phy_init; + + EP_PCIE_DBG(dev, + "PCIe V%d: PHY V%d: process the sequence specified by DT\n", + dev->rev, dev->phy_rev); + + i = dev->phy_init_len; + phy_init = dev->phy_init; + while (i--) { + ep_pcie_write_reg(dev->phy, + phy_init->offset, + phy_init->val); + if (phy_init->delay) + 
usleep_range(phy_init->delay, + phy_init->delay + 1); + phy_init++; + } + return; + } + + ep_pcie_write_reg(dev->phy, PCIE_PHY_SW_RESET, 0x01); + ep_pcie_write_reg(dev->phy, PCIE_PHY_POWER_DOWN_CONTROL, 0x01); + + /* Common block settings */ + ep_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18); + ep_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F); + ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01); + ep_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF); + ep_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F); + ep_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06); + ep_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F); + ep_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01); + ep_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20); + ep_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A); + ep_pcie_write_reg(dev->phy, QSERDES_COM_RESETSM_CNTRL, 0x20); + ep_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x01); + + /* PLL Config Settings */ + ep_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x19); + ep_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x02); + ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x7F); + ep_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x30); + ep_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x06); + ep_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E); + ep_pcie_write_reg(dev->phy, 
QSERDES_COM_CP_CTRL_MODE0, 0x3F); + ep_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x1A); + ep_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x03); + ep_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0xFF); + + /* TX settings */ + ep_pcie_write_reg(dev->phy, QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, + 0x45); + ep_pcie_write_reg(dev->phy, QSERDES_TX_LANE_MODE, 0x06); + ep_pcie_write_reg(dev->phy, QSERDES_TX_RES_CODE_LANE_OFFSET, 0x02); + ep_pcie_write_reg(dev->phy, QSERDES_TX_RCV_DETECT_LVL_2, 0x12); + + /* RX settings */ + ep_pcie_write_reg(dev->phy, QSERDES_RX_SIGDET_ENABLES, 0x1C); + ep_pcie_write_reg(dev->phy, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14); + ep_pcie_write_reg(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01); + ep_pcie_write_reg(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00); + ep_pcie_write_reg(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xDB); + ep_pcie_write_reg(dev->phy, QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, + 0x4B); + ep_pcie_write_reg(dev->phy, QSERDES_RX_UCDR_SO_GAIN, 0x04); + ep_pcie_write_reg(dev->phy, QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04); + + /* EP_REF_CLK settings */ + ep_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19); + ep_pcie_write_reg(dev->phy, PCIE_PHY_ENDPOINT_REFCLK_DRIVE, 0x00); + + /* PCIE L1SS settings */ + ep_pcie_write_reg(dev->phy, PCIE_PHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40); + ep_pcie_write_reg(dev->phy, PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, + 0x00); + ep_pcie_write_reg(dev->phy, PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, + 0x40); + ep_pcie_write_reg(dev->phy, PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, + 0x00); + ep_pcie_write_reg(dev->phy, PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40); + ep_pcie_write_reg(dev->phy, PCIE_PHY_PLL_LOCK_CHK_DLY_TIME, 0x73); + + /* PCS settings */ + ep_pcie_write_reg(dev->phy, PCIE_PHY_SIGDET_CNTRL, 0x07); + ep_pcie_write_reg(dev->phy, PCIE_PHY_RX_SIGDET_LVL, 0x99); + 
ep_pcie_write_reg(dev->phy, PCIE_PHY_TXDEEMPH_M6DB_V0, 0x15); + ep_pcie_write_reg(dev->phy, PCIE_PHY_TXDEEMPH_M3P5DB_V0, 0x0E); + + ep_pcie_write_reg(dev->phy, PCIE_PHY_SW_RESET, 0x00); + ep_pcie_write_reg(dev->phy, PCIE_PHY_START_CONTROL, 0x03); +} + +bool ep_pcie_phy_is_ready(struct ep_pcie_dev_t *dev) +{ + u32 offset; + + if (dev->phy_status_reg) + offset = dev->phy_status_reg; + else + offset = PCIE_PHY_PCS_STATUS; + + if (readl_relaxed(dev->phy + offset) & BIT(6)) + return false; + else + return true; +} diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_phy.h b/drivers/platform/msm/ep_pcie/ep_pcie_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..c8f01de0be2b75faff3ef94288c47dcd39529410 --- /dev/null +++ b/drivers/platform/msm/ep_pcie/ep_pcie_phy.h @@ -0,0 +1,463 @@ +/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __EP_PCIE_PHY_H +#define __EP_PCIE_PHY_H + +#define QSERDES_COM_ATB_SEL1 0x000 +#define QSERDES_COM_ATB_SEL2 0x004 +#define QSERDES_COM_FREQ_UPDATE 0x008 +#define QSERDES_COM_BG_TIMER 0x00C +#define QSERDES_COM_SSC_EN_CENTER 0x010 +#define QSERDES_COM_SSC_ADJ_PER1 0x014 +#define QSERDES_COM_SSC_ADJ_PER2 0x018 +#define QSERDES_COM_SSC_PER1 0x01C +#define QSERDES_COM_SSC_PER2 0x020 +#define QSERDES_COM_SSC_STEP_SIZE1 0x024 +#define QSERDES_COM_SSC_STEP_SIZE2 0x028 +#define QSERDES_COM_POST_DIV 0x02C +#define QSERDES_COM_POST_DIV_MUX 0x030 +#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034 +#define QSERDES_COM_CLK_ENABLE1 0x038 +#define QSERDES_COM_SYS_CLK_CTRL 0x03C +#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040 +#define QSERDES_COM_PLL_EN 0x044 +#define QSERDES_COM_PLL_IVCO 0x048 +#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C +#define QSERDES_COM_LOCK_CMP2_MODE0 0x050 +#define QSERDES_COM_LOCK_CMP3_MODE0 0x054 +#define QSERDES_COM_LOCK_CMP1_MODE1 0x058 +#define QSERDES_COM_LOCK_CMP2_MODE1 0x05C +#define QSERDES_COM_LOCK_CMP3_MODE1 0x060 +#define QSERDES_COM_CMN_RSVD0 0x064 +#define QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x068 +#define QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x06C +#define QSERDES_COM_BG_TRIM 0x070 +#define QSERDES_COM_CLK_EP_DIV 0x074 +#define QSERDES_COM_CP_CTRL_MODE0 0x078 +#define QSERDES_COM_CP_CTRL_MODE1 0x07C +#define QSERDES_COM_CMN_RSVD1 0x080 +#define QSERDES_COM_PLL_RCTRL_MODE0 0x084 +#define QSERDES_COM_PLL_RCTRL_MODE1 0x088 +#define QSERDES_COM_CMN_RSVD2 0x08C +#define QSERDES_COM_PLL_CCTRL_MODE0 0x090 +#define QSERDES_COM_PLL_CCTRL_MODE1 0x094 +#define QSERDES_COM_CMN_RSVD3 0x098 +#define QSERDES_COM_PLL_CNTRL 0x09C +#define QSERDES_COM_PHASE_SEL_CTRL 0x0A0 +#define QSERDES_COM_PHASE_SEL_DC 0x0A4 +#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x0A8 +#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC +#define QSERDES_COM_CML_SYSCLK_SEL 0x0B0 +#define QSERDES_COM_RESETSM_CNTRL 0x0B4 +#define QSERDES_COM_RESETSM_CNTRL2 0x0B8 +#define QSERDES_COM_RESTRIM_CTRL 
0x0BC +#define QSERDES_COM_RESTRIM_CTRL2 0x0C0 +#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4 +#define QSERDES_COM_LOCK_CMP_EN 0x0C8 +#define QSERDES_COM_LOCK_CMP_CFG 0x0CC +#define QSERDES_COM_DEC_START_MODE0 0x0D0 +#define QSERDES_COM_DEC_START_MODE1 0x0D4 +#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x0D8 +#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC +#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0 +#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4 +#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x0E8 +#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x0EC +#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x0F0 +#define QSERDES_COM_VCO_TUNE_MINVAL1 0x0F4 +#define QSERDES_COM_VCO_TUNE_MINVAL2 0x0F8 +#define QSERDES_COM_CMN_RSVD4 0x0FC +#define QSERDES_COM_INTEGLOOP_INITVAL 0x100 +#define QSERDES_COM_INTEGLOOP_EN 0x104 +#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108 +#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C +#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x110 +#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x114 +#define QSERDES_COM_VCO_TUNE_MAXVAL1 0x118 +#define QSERDES_COM_VCO_TUNE_MAXVAL2 0x11C +#define QSERDES_COM_RES_TRIM_CONTROL2 0x120 +#define QSERDES_COM_VCO_TUNE_CTRL 0x124 +#define QSERDES_COM_VCO_TUNE_MAP 0x128 +#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C +#define QSERDES_COM_VCO_TUNE2_MODE0 0x130 +#define QSERDES_COM_VCO_TUNE1_MODE1 0x134 +#define QSERDES_COM_VCO_TUNE2_MODE1 0x138 +#define QSERDES_COM_VCO_TUNE_INITVAL1 0x13C +#define QSERDES_COM_VCO_TUNE_INITVAL2 0x140 +#define QSERDES_COM_VCO_TUNE_TIMER1 0x144 +#define QSERDES_COM_VCO_TUNE_TIMER2 0x148 +#define QSERDES_COM_SAR 0x14C +#define QSERDES_COM_SAR_CLK 0x150 +#define QSERDES_COM_SAR_CODE_OUT_STATUS 0x154 +#define QSERDES_COM_SAR_CODE_READY_STATUS 0x158 +#define QSERDES_COM_CMN_STATUS 0x15C +#define QSERDES_COM_RESET_SM_STATUS 0x160 +#define QSERDES_COM_RESTRIM_CODE_STATUS 0x164 +#define QSERDES_COM_PLLCAL_CODE1_STATUS 0x168 +#define QSERDES_COM_PLLCAL_CODE2_STATUS 0x16C +#define QSERDES_COM_BG_CTRL 0x170 +#define 
QSERDES_COM_CLK_SELECT 0x174 +#define QSERDES_COM_HSCLK_SEL 0x178 +#define QSERDES_COM_PLL_ANALOG 0x180 +#define QSERDES_COM_CORECLK_DIV 0x184 +#define QSERDES_COM_SW_RESET 0x188 +#define QSERDES_COM_CORE_CLK_EN 0x18C +#define QSERDES_COM_C_READY_STATUS 0x190 +#define QSERDES_COM_CMN_CONFIG 0x194 +#define QSERDES_COM_CMN_RATE_OVERRIDE 0x198 +#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C +#define QSERDES_COM_DEBUG_BUS0 0x1A0 +#define QSERDES_COM_DEBUG_BUS1 0x1A4 +#define QSERDES_COM_DEBUG_BUS2 0x1A8 +#define QSERDES_COM_DEBUG_BUS3 0x1AC +#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0 +#define QSERDES_COM_CMN_MISC1 0x1B4 +#define QSERDES_COM_CMN_MISC2 0x1B8 +#define QSERDES_COM_CORECLK_DIV_MODE1 0x1BC +#define QSERDES_COM_CMN_RSVD5 0x1C0 +#define QSERDES_TX_BIST_MODE_LANENO 0x200 +#define QSERDES_TX_BIST_INVERT 0x204 +#define QSERDES_TX_CLKBUF_ENABLE 0x208 +#define QSERDES_TX_CMN_CONTROL_ONE 0x20C +#define QSERDES_TX_CMN_CONTROL_TWO 0x210 +#define QSERDES_TX_CMN_CONTROL_THREE 0x214 +#define QSERDES_TX_TX_EMP_POST1_LVL 0x218 +#define QSERDES_TX_TX_POST2_EMPH 0x21C +#define QSERDES_TX_TX_BOOST_LVL_UP_DN 0x220 +#define QSERDES_TX_HP_PD_ENABLES 0x224 +#define QSERDES_TX_TX_IDLE_LVL_LARGE_AMP 0x228 +#define QSERDES_TX_TX_DRV_LVL 0x22C +#define QSERDES_TX_TX_DRV_LVL_OFFSET 0x230 +#define QSERDES_TX_RESET_TSYNC_EN 0x234 +#define QSERDES_TX_PRE_STALL_LDO_BOOST_EN 0x238 +#define QSERDES_TX_TX_BAND 0x23C +#define QSERDES_TX_SLEW_CNTL 0x240 +#define QSERDES_TX_INTERFACE_SELECT 0x244 +#define QSERDES_TX_LPB_EN 0x248 +#define QSERDES_TX_RES_CODE_LANE_TX 0x24C +#define QSERDES_TX_RES_CODE_LANE_RX 0x250 +#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x254 +#define QSERDES_TX_PERL_LENGTH1 0x258 +#define QSERDES_TX_PERL_LENGTH2 0x25C +#define QSERDES_TX_SERDES_BYP_EN_OUT 0x260 +#define QSERDES_TX_DEBUG_BUS_SEL 0x264 +#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x268 +#define QSERDES_TX_TX_POL_INV 0x26C +#define QSERDES_TX_PARRATE_REC_DETECT_IDLE_EN 0x270 +#define QSERDES_TX_BIST_PATTERN1 
0x274 +#define QSERDES_TX_BIST_PATTERN2 0x278 +#define QSERDES_TX_BIST_PATTERN3 0x27C +#define QSERDES_TX_BIST_PATTERN4 0x280 +#define QSERDES_TX_BIST_PATTERN5 0x284 +#define QSERDES_TX_BIST_PATTERN6 0x288 +#define QSERDES_TX_BIST_PATTERN7 0x28C +#define QSERDES_TX_BIST_PATTERN8 0x290 +#define QSERDES_TX_LANE_MODE 0x294 +#define QSERDES_TX_IDAC_CAL_LANE_MODE 0x298 +#define QSERDES_TX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x29C +#define QSERDES_TX_ATB_SEL1 0x2A0 +#define QSERDES_TX_ATB_SEL2 0x2A4 +#define QSERDES_TX_RCV_DETECT_LVL 0x2A8 +#define QSERDES_TX_RCV_DETECT_LVL_2 0x2AC +#define QSERDES_TX_PRBS_SEED1 0x2B0 +#define QSERDES_TX_PRBS_SEED2 0x2B4 +#define QSERDES_TX_PRBS_SEED3 0x2B8 +#define QSERDES_TX_PRBS_SEED4 0x2BC +#define QSERDES_TX_RESET_GEN 0x2C0 +#define QSERDES_TX_RESET_GEN_MUXES 0x2C4 +#define QSERDES_TX_TRAN_DRVR_EMP_EN 0x2C8 +#define QSERDES_TX_TX_INTERFACE_MODE 0x2CC +#define QSERDES_TX_PWM_CTRL 0x2D0 +#define QSERDES_TX_PWM_ENCODED_OR_DATA 0x2D4 +#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND2 0x2D8 +#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND2 0x2DC +#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND2 0x2E0 +#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND2 0x2E4 +#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x2E8 +#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x2EC +#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x2F0 +#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x2F4 +#define QSERDES_TX_VMODE_CTRL1 0x2F8 +#define QSERDES_TX_VMODE_CTRL2 0x2FC +#define QSERDES_TX_TX_ALOG_INTF_OBSV_CNTL 0x300 +#define QSERDES_TX_BIST_STATUS 0x304 +#define QSERDES_TX_BIST_ERROR_COUNT1 0x308 +#define QSERDES_TX_BIST_ERROR_COUNT2 0x30C +#define QSERDES_TX_TX_ALOG_INTF_OBSV 0x310 +#define QSERDES_RX_UCDR_FO_GAIN_HALF 0x400 +#define QSERDES_RX_UCDR_FO_GAIN_QUARTER 0x404 +#define QSERDES_RX_UCDR_FO_GAIN_EIGHTH 0x408 +#define QSERDES_RX_UCDR_FO_GAIN 0x40C +#define QSERDES_RX_UCDR_SO_GAIN_HALF 0x410 +#define QSERDES_RX_UCDR_SO_GAIN_QUARTER 0x414 +#define QSERDES_RX_UCDR_SO_GAIN_EIGHTH 0x418 
+#define QSERDES_RX_UCDR_SO_GAIN 0x41C +#define QSERDES_RX_UCDR_SVS_FO_GAIN_HALF 0x420 +#define QSERDES_RX_UCDR_SVS_FO_GAIN_QUARTER 0x424 +#define QSERDES_RX_UCDR_SVS_FO_GAIN_EIGHTH 0x428 +#define QSERDES_RX_UCDR_SVS_FO_GAIN 0x42C +#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF 0x430 +#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER 0x434 +#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH 0x438 +#define QSERDES_RX_UCDR_SVS_SO_GAIN 0x43C +#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x440 +#define QSERDES_RX_UCDR_FD_GAIN 0x444 +#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x448 +#define QSERDES_RX_UCDR_FO_TO_SO_DELAY 0x44C +#define QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0x450 +#define QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x454 +#define QSERDES_RX_UCDR_MODULATE 0x458 +#define QSERDES_RX_UCDR_PI_CONTROLS 0x45C +#define QSERDES_RX_RBIST_CONTROL 0x460 +#define QSERDES_RX_AUX_CONTROL 0x464 +#define QSERDES_RX_AUX_DATA_TCOARSE 0x468 +#define QSERDES_RX_AUX_DATA_TFINE_LSB 0x46C +#define QSERDES_RX_AUX_DATA_TFINE_MSB 0x470 +#define QSERDES_RX_RCLK_AUXDATA_SEL 0x474 +#define QSERDES_RX_AC_JTAG_ENABLE 0x478 +#define QSERDES_RX_AC_JTAG_INITP 0x47C +#define QSERDES_RX_AC_JTAG_INITN 0x480 +#define QSERDES_RX_AC_JTAG_LVL 0x484 +#define QSERDES_RX_AC_JTAG_MODE 0x488 +#define QSERDES_RX_AC_JTAG_RESET 0x48C +#define QSERDES_RX_RX_TERM_BW 0x490 +#define QSERDES_RX_RX_RCVR_IQ_EN 0x494 +#define QSERDES_RX_RX_IDAC_I_DC_OFFSETS 0x498 +#define QSERDES_RX_RX_IDAC_IBAR_DC_OFFSETS 0x49C +#define QSERDES_RX_RX_IDAC_Q_DC_OFFSETS 0x4A0 +#define QSERDES_RX_RX_IDAC_QBAR_DC_OFFSETS 0x4A4 +#define QSERDES_RX_RX_IDAC_A_DC_OFFSETS 0x4A8 +#define QSERDES_RX_RX_IDAC_ABAR_DC_OFFSETS 0x4AC +#define QSERDES_RX_RX_IDAC_EN 0x4B0 +#define QSERDES_RX_RX_IDAC_ENABLES 0x4B4 +#define QSERDES_RX_RX_IDAC_SIGN 0x4B8 +#define QSERDES_RX_RX_HIGHZ_HIGHRATE 0x4BC +#define QSERDES_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x4C0 +#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x4C4 +#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x4C8 +#define QSERDES_RX_RX_EQ_GAIN2_LSB 
0x4CC +#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x4D0 +#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 0x4D4 +#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x4D8 +#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x4DC +#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x4E0 +#define QSERDES_RX_RX_IDAC_CAL_CONFIGURATION 0x4E4 +#define QSERDES_RX_RX_IDAC_TSETTLE_LOW 0x4E8 +#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x4EC +#define QSERDES_RX_RX_IDAC_ENDSAMP_LOW 0x4F0 +#define QSERDES_RX_RX_IDAC_ENDSAMP_HIGH 0x4F4 +#define QSERDES_RX_RX_IDAC_MIDPOINT_LOW 0x4F8 +#define QSERDES_RX_RX_IDAC_MIDPOINT_HIGH 0x4FC +#define QSERDES_RX_RX_EQ_OFFSET_LSB 0x500 +#define QSERDES_RX_RX_EQ_OFFSET_MSB 0x504 +#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x508 +#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x50C +#define QSERDES_RX_SIGDET_ENABLES 0x510 +#define QSERDES_RX_SIGDET_CNTRL 0x514 +#define QSERDES_RX_SIGDET_LVL 0x518 +#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x51C +#define QSERDES_RX_RX_BAND 0x520 +#define QSERDES_RX_CDR_FREEZE_UP_DN 0x524 +#define QSERDES_RX_CDR_RESET_OVERRIDE 0x528 +#define QSERDES_RX_RX_INTERFACE_MODE 0x52C +#define QSERDES_RX_JITTER_GEN_MODE 0x530 +#define QSERDES_RX_BUJ_AMP 0x534 +#define QSERDES_RX_SJ_AMP1 0x538 +#define QSERDES_RX_SJ_AMP2 0x53C +#define QSERDES_RX_SJ_PER1 0x540 +#define QSERDES_RX_SJ_PER2 0x544 +#define QSERDES_RX_BUJ_STEP_FREQ1 0x548 +#define QSERDES_RX_BUJ_STEP_FREQ2 0x54C +#define QSERDES_RX_PPM_OFFSET1 0x550 +#define QSERDES_RX_PPM_OFFSET2 0x554 +#define QSERDES_RX_SIGN_PPM_PERIOD1 0x558 +#define QSERDES_RX_SIGN_PPM_PERIOD2 0x55C +#define QSERDES_RX_SSC_CTRL 0x560 +#define QSERDES_RX_SSC_COUNT1 0x564 +#define QSERDES_RX_SSC_COUNT2 0x568 +#define QSERDES_RX_RX_ALOG_INTF_OBSV_CNTL 0x56C +#define QSERDES_RX_RX_PWM_ENABLE_AND_DATA 0x570 +#define QSERDES_RX_RX_PWM_GEAR1_TIMEOUT_COUNT 0x574 +#define QSERDES_RX_RX_PWM_GEAR2_TIMEOUT_COUNT 0x578 +#define QSERDES_RX_RX_PWM_GEAR3_TIMEOUT_COUNT 0x57C +#define QSERDES_RX_RX_PWM_GEAR4_TIMEOUT_COUNT 0x580 +#define QSERDES_RX_PI_CTRL1 
0x584 +#define QSERDES_RX_PI_CTRL2 0x588 +#define QSERDES_RX_PI_QUAD 0x58C +#define QSERDES_RX_IDATA1 0x590 +#define QSERDES_RX_IDATA2 0x594 +#define QSERDES_RX_AUX_DATA1 0x598 +#define QSERDES_RX_AUX_DATA2 0x59C +#define QSERDES_RX_AC_JTAG_OUTP 0x5A0 +#define QSERDES_RX_AC_JTAG_OUTN 0x5A4 +#define QSERDES_RX_RX_SIGDET 0x5A8 +#define QSERDES_RX_RX_VDCOFF 0x5AC +#define QSERDES_RX_IDAC_CAL_ON 0x5B0 +#define QSERDES_RX_IDAC_STATUS_I 0x5B4 +#define QSERDES_RX_IDAC_STATUS_IBAR 0x5B8 +#define QSERDES_RX_IDAC_STATUS_Q 0x5BC +#define QSERDES_RX_IDAC_STATUS_QBAR 0x5C0 +#define QSERDES_RX_IDAC_STATUS_A 0x5C4 +#define QSERDES_RX_IDAC_STATUS_ABAR 0x5C8 +#define QSERDES_RX_CALST_STATUS_I 0x5CC +#define QSERDES_RX_CALST_STATUS_Q 0x5D0 +#define QSERDES_RX_CALST_STATUS_A 0x5D4 +#define QSERDES_RX_RX_ALOG_INTF_OBSV 0x5D8 +#define QSERDES_RX_READ_EQCODE 0x5DC +#define QSERDES_RX_READ_OFFSETCODE 0x5E0 +#define QSERDES_RX_IA_ERROR_COUNTER_LOW 0x5E4 +#define QSERDES_RX_IA_ERROR_COUNTER_HIGH 0x5E8 +#define PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX 0x600 +#define PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX 0x604 +#define PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX 0x608 +#define PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX 0x60C +#define PCIE_PHY_MISC_PLACEHOLDER_STATUS 0x610 +#define PCIE_PHY_MISC_DEBUG_BUS_0_STATUS 0x614 +#define PCIE_PHY_MISC_DEBUG_BUS_1_STATUS 0x618 +#define PCIE_PHY_MISC_DEBUG_BUS_2_STATUS 0x61C +#define PCIE_PHY_MISC_DEBUG_BUS_3_STATUS 0x620 +#define PCIE_PHY_MISC_OSC_DTCT_STATUS 0x624 +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG1 0x628 +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG2 0x62C +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG3 0x630 +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG4 0x634 +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG5 0x638 +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG6 0x63C +#define PCIE_PHY_MISC_OSC_DTCT_CONFIG7 0x640 +#define PCIE_PHY_SW_RESET 0x800 +#define PCIE_PHY_POWER_DOWN_CONTROL 0x804 +#define PCIE_PHY_START_CONTROL 0x808 +#define PCIE_PHY_TXMGN_V0 0x80C +#define PCIE_PHY_TXMGN_V1 0x810 +#define 
PCIE_PHY_TXMGN_V2 0x814 +#define PCIE_PHY_TXMGN_V3 0x818 +#define PCIE_PHY_TXMGN_V4 0x81C +#define PCIE_PHY_TXMGN_LS 0x820 +#define PCIE_PHY_TXDEEMPH_M6DB_V0 0x824 +#define PCIE_PHY_TXDEEMPH_M3P5DB_V0 0x828 +#define PCIE_PHY_TXDEEMPH_M6DB_V1 0x82C +#define PCIE_PHY_TXDEEMPH_M3P5DB_V1 0x830 +#define PCIE_PHY_TXDEEMPH_M6DB_V2 0x834 +#define PCIE_PHY_TXDEEMPH_M3P5DB_V2 0x838 +#define PCIE_PHY_TXDEEMPH_M6DB_V3 0x83C +#define PCIE_PHY_TXDEEMPH_M3P5DB_V3 0x840 +#define PCIE_PHY_TXDEEMPH_M6DB_V4 0x844 +#define PCIE_PHY_TXDEEMPH_M3P5DB_V4 0x848 +#define PCIE_PHY_TXDEEMPH_M6DB_LS 0x84C +#define PCIE_PHY_TXDEEMPH_M3P5DB_LS 0x850 +#define PCIE_PHY_ENDPOINT_REFCLK_DRIVE 0x854 +#define PCIE_PHY_RX_IDLE_DTCT_CNTRL 0x858 +#define PCIE_PHY_RATE_SLEW_CNTRL 0x85C +#define PCIE_PHY_POWER_STATE_CONFIG1 0x860 +#define PCIE_PHY_POWER_STATE_CONFIG2 0x864 +#define PCIE_PHY_POWER_STATE_CONFIG3 0x868 +#define PCIE_PHY_POWER_STATE_CONFIG4 0x86C +#define PCIE_PHY_RCVR_DTCT_DLY_P1U2_L 0x870 +#define PCIE_PHY_RCVR_DTCT_DLY_P1U2_H 0x874 +#define PCIE_PHY_RCVR_DTCT_DLY_U3_L 0x878 +#define PCIE_PHY_RCVR_DTCT_DLY_U3_H 0x87C +#define PCIE_PHY_LOCK_DETECT_CONFIG1 0x880 +#define PCIE_PHY_LOCK_DETECT_CONFIG2 0x884 +#define PCIE_PHY_LOCK_DETECT_CONFIG3 0x888 +#define PCIE_PHY_TSYNC_RSYNC_TIME 0x88C +#define PCIE_PHY_SIGDET_LOW_2_IDLE_TIME 0x890 +#define PCIE_PHY_BEACON_2_IDLE_TIME_L 0x894 +#define PCIE_PHY_BEACON_2_IDLE_TIME_H 0x898 +#define PCIE_PHY_PWRUP_RESET_DLY_TIME_SYSCLK 0x89C +#define PCIE_PHY_PWRUP_RESET_DLY_TIME_AUXCLK 0x8A0 +#define PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK 0x8A4 +#define PCIE_PHY_PLL_LOCK_CHK_DLY_TIME 0x8A8 +#define PCIE_PHY_LFPS_DET_HIGH_COUNT_VAL 0x8AC +#define PCIE_PHY_LFPS_TX_ECSTART_EQTLOCK 0x8B0 +#define PCIE_PHY_LFPS_TX_END_CNT_P2U3_START 0x8B4 +#define PCIE_PHY_RXEQTRAINING_WAIT_TIME 0x8B8 +#define PCIE_PHY_RXEQTRAINING_RUN_TIME 0x8BC +#define PCIE_PHY_TXONESZEROS_RUN_LENGTH 0x8C0 +#define PCIE_PHY_FLL_CNTRL1 0x8C4 +#define PCIE_PHY_FLL_CNTRL2 0x8C8 +#define 
PCIE_PHY_FLL_CNT_VAL_L 0x8CC +#define PCIE_PHY_FLL_CNT_VAL_H_TOL 0x8D0 +#define PCIE_PHY_FLL_MAN_CODE 0x8D4 +#define PCIE_PHY_AUTONOMOUS_MODE_CTRL 0x8D8 +#define PCIE_PHY_LFPS_RXTERM_IRQ_CLEAR 0x8DC +#define PCIE_PHY_ARCVR_DTCT_EN_PERIOD 0x8E0 +#define PCIE_PHY_ARCVR_DTCT_CM_DLY 0x8E4 +#define PCIE_PHY_ALFPS_DEGLITCH_VAL 0x8E8 +#define PCIE_PHY_INSIG_SW_CTRL1 0x8EC +#define PCIE_PHY_INSIG_SW_CTRL2 0x8F0 +#define PCIE_PHY_INSIG_SW_CTRL3 0x8F4 +#define PCIE_PHY_INSIG_MX_CTRL1 0x8F8 +#define PCIE_PHY_INSIG_MX_CTRL2 0x8FC +#define PCIE_PHY_INSIG_MX_CTRL3 0x900 +#define PCIE_PHY_OUTSIG_SW_CTRL1 0x904 +#define PCIE_PHY_OUTSIG_MX_CTRL1 0x908 +#define PCIE_PHY_CLK_DEBUG_BYPASS_CTRL 0x90C +#define PCIE_PHY_TEST_CONTROL 0x910 +#define PCIE_PHY_TEST_CONTROL2 0x914 +#define PCIE_PHY_TEST_CONTROL3 0x918 +#define PCIE_PHY_TEST_CONTROL4 0x91C +#define PCIE_PHY_TEST_CONTROL5 0x920 +#define PCIE_PHY_TEST_CONTROL6 0x924 +#define PCIE_PHY_TEST_CONTROL7 0x928 +#define PCIE_PHY_COM_RESET_CONTROL 0x92C +#define PCIE_PHY_BIST_CTRL 0x930 +#define PCIE_PHY_PRBS_POLY0 0x934 +#define PCIE_PHY_PRBS_POLY1 0x938 +#define PCIE_PHY_PRBS_SEED0 0x93C +#define PCIE_PHY_PRBS_SEED1 0x940 +#define PCIE_PHY_FIXED_PAT_CTRL 0x944 +#define PCIE_PHY_FIXED_PAT0 0x948 +#define PCIE_PHY_FIXED_PAT1 0x94C +#define PCIE_PHY_FIXED_PAT2 0x950 +#define PCIE_PHY_FIXED_PAT3 0x954 +#define PCIE_PHY_COM_CLK_SWITCH_CTRL 0x958 +#define PCIE_PHY_ELECIDLE_DLY_SEL 0x95C +#define PCIE_PHY_SPARE1 0x960 +#define PCIE_PHY_BIST_CHK_ERR_CNT_L_STATUS 0x964 +#define PCIE_PHY_BIST_CHK_ERR_CNT_H_STATUS 0x968 +#define PCIE_PHY_BIST_CHK_STATUS 0x96C +#define PCIE_PHY_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x970 +#define PCIE_PHY_PCS_STATUS 0x974 +#define PCIE_PHY_PCS_STATUS2 0x978 +#define PCIE_PHY_PCS_STATUS3 0x97C +#define PCIE_PHY_COM_RESET_STATUS 0x980 +#define PCIE_PHY_OSC_DTCT_STATUS 0x984 +#define PCIE_PHY_REVISION_ID0 0x988 +#define PCIE_PHY_REVISION_ID1 0x98C +#define PCIE_PHY_REVISION_ID2 0x990 +#define PCIE_PHY_REVISION_ID3 0x994 
+#define PCIE_PHY_DEBUG_BUS_0_STATUS 0x998 +#define PCIE_PHY_DEBUG_BUS_1_STATUS 0x99C +#define PCIE_PHY_DEBUG_BUS_2_STATUS 0x9A0 +#define PCIE_PHY_DEBUG_BUS_3_STATUS 0x9A4 +#define PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK_MSB 0x9A8 +#define PCIE_PHY_OSC_DTCT_ACTIONS 0x9AC +#define PCIE_PHY_SIGDET_CNTRL 0x9B0 +#define PCIE_PHY_IDAC_CAL_CNTRL 0x9B4 +#define PCIE_PHY_CMN_ACK_OUT_SEL 0x9B8 +#define PCIE_PHY_PLL_LOCK_CHK_DLY_TIME_SYSCLK 0x9BC +#define PCIE_PHY_AUTONOMOUS_MODE_STATUS 0x9C0 +#define PCIE_PHY_ENDPOINT_REFCLK_CNTRL 0x9C4 +#define PCIE_PHY_EPCLK_PRE_PLL_LOCK_DLY_SYSCLK 0x9C8 +#define PCIE_PHY_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK 0x9CC +#define PCIE_PHY_EPCLK_DLY_COUNT_VAL_L 0x9D0 +#define PCIE_PHY_EPCLK_DLY_COUNT_VAL_H 0x9D4 +#define PCIE_PHY_RX_SIGDET_LVL 0x9D8 +#define PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x9DC +#define PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x9E0 +#define PCIE_PHY_AUTONOMOUS_MODE_CTRL2 0x9E4 +#define PCIE_PHY_RXTERMINATION_DLY_SEL 0x9E8 +#define PCIE_PHY_LFPS_PER_TIMER_VAL 0x9EC +#define PCIE_PHY_SIGDET_STARTUP_TIMER_VAL 0x9F0 +#define PCIE_PHY_LOCK_DETECT_CONFIG4 0x9F4 +#endif diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index fc3bf76dba28a32f8746c51624b8fa9674f46d24..2ddaea9fb802552e9afe506c6deabad34e832e97 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -116,8 +116,7 @@ static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val) static void gsi_channel_state_change_wait(unsigned long chan_hdl, struct gsi_chan_ctx *ctx, - uint32_t tm, - enum gsi_chan_state next_state) + uint32_t tm) { int poll_cnt; int gsi_pending_intr; @@ -166,9 +165,6 @@ static void gsi_channel_state_change_wait(unsigned long chan_hdl, ch, ctx->state, gsi_pending_intr); - - if (ctx->state == next_state) - break; } } @@ -1956,6 +1952,7 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee, case GSI_CHAN_PROT_XHCI: case GSI_CHAN_PROT_GPI: case GSI_CHAN_PROT_XDCI: + case 
GSI_CHAN_PROT_WDI: prot = props->prot; prot_msb = 0; break; @@ -2474,8 +2471,7 @@ int gsi_start_channel(unsigned long chan_hdl) GSIDBG("GSI Channel Start, waiting for completion\n"); gsi_channel_state_change_wait(chan_hdl, ctx, - GSI_START_CMD_TIMEOUT_MS, - GSI_CHAN_STATE_STARTED); + GSI_START_CMD_TIMEOUT_MS); if (ctx->state != GSI_CHAN_STATE_STARTED) { /* @@ -2548,8 +2544,7 @@ int gsi_stop_channel(unsigned long chan_hdl) GSIDBG("GSI Channel Stop, waiting for completion\n"); gsi_channel_state_change_wait(chan_hdl, ctx, - GSI_STOP_CMD_TIMEOUT_MS, - GSI_CHAN_STATE_STOPPED); + GSI_STOP_CMD_TIMEOUT_MS); if (ctx->state != GSI_CHAN_STATE_STOPPED && ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { @@ -3234,6 +3229,8 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode) if (curr == GSI_CHAN_MODE_CALLBACK && mode == GSI_CHAN_MODE_POLL) { __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0); + gsi_writel(1 << ctx->evtr->id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee)); atomic_set(&ctx->poll_mode, mode); ctx->stats.callback_to_poll++; } diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h index 8a5719282335b9ccad1ce6a612557ef4294e0bce..cd702ee1a0b823386d2d4345b6ccc54f8d1244a1 100644 --- a/drivers/platform/msm/gsi/gsi_reg.h +++ b/drivers/platform/msm/gsi/gsi_reg.h @@ -18,10 +18,17 @@ enum gsi_register_ver { GSI_REGISTER_MAX, }; +#ifdef GSI_REGISTER_VER_CURRENT +#error GSI_REGISTER_VER_CURRENT already defined +#endif + #ifdef CONFIG_GSI_REGISTER_VERSION_2 #include "gsi_reg_v2.h" #define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_2 -#else +#endif + +/* The default is V1 */ +#ifndef GSI_REGISTER_VER_CURRENT #include "gsi_reg_v1.h" #define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_1 #endif diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 221f06832a37a96c6ee01ff2abfcf82290dee533..e2df1739c7513de4f43e7f2d01309256b2d7cdcd 100644 --- 
a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -196,6 +196,8 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = { __stringify(IPA_CLIENT_MHI_DPL_CONS), __stringify(RESERVERD_PROD_82), __stringify(IPA_CLIENT_ODL_DPL_CONS), + __stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD), + __stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS), }; /** diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile index 738d88f23ddddecb1b08bf5f8f5a60fa423c30c4..a21313085c5679a0dde1cfa3548341bc3f2172a7 100644 --- a/drivers/platform/msm/ipa/ipa_clients/Makefile +++ b/drivers/platform/msm/ipa/ipa_clients/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o +obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o obj-$(CONFIG_ECM_IPA) += ecm_ipa.o obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c index f7c4dc43dacfbd8572641f04efb3b61e36009758..d1378d8f750140685d511e1054b3bba1b1422fe8 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -32,17 +32,49 @@ #define DEBUGFS_TEMP_BUF_SIZE 4 #define TX_TIMEOUT (5 * HZ) +#define IPA_ECM_IPC_LOG_PAGES 50 + +#define IPA_ECM_IPC_LOGGING(buf, fmt, args...) \ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +static void *ipa_ecm_logbuf; + #define ECM_IPA_DEBUG(fmt, args...) \ - pr_debug("ctx:%s: "\ - fmt, current->comm, ## args) + do { \ + pr_debug(DRIVER_NAME " %s:%d "\ + fmt, __func__, __LINE__, ## args);\ + if (ipa_ecm_logbuf) { \ + IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ + DRIVER_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define ECM_IPA_DEBUG_XMIT(fmt, args...) 
\ + pr_debug(DRIVER_NAME " %s:%d " fmt, __func__, __LINE__, ## args) #define ECM_IPA_INFO(fmt, args...) \ - pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ - fmt, __func__, __LINE__, current->comm, ## args) + do { \ + pr_info(DRIVER_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args);\ + if (ipa_ecm_logbuf) { \ + IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ + DRIVER_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) #define ECM_IPA_ERROR(fmt, args...) \ - pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ - fmt, __func__, __LINE__, current->comm, ## args) + do { \ + pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args);\ + if (ipa_ecm_logbuf) { \ + IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ + DRIVER_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) #define NULL_CHECK(ptr) \ do { \ @@ -585,7 +617,7 @@ static netdev_tx_t ecm_ipa_start_xmit netif_trans_update(net); - ECM_IPA_DEBUG + ECM_IPA_DEBUG_XMIT ("Tx, len=%d, skb->protocol=%d, outstanding=%d\n", skb->len, skb->protocol, atomic_read(&ecm_ipa_ctx->outstanding_pkts)); @@ -1286,7 +1318,9 @@ static void ecm_ipa_tx_complete_notify ecm_ipa_ctx->net->stats.tx_packets++; ecm_ipa_ctx->net->stats.tx_bytes += skb->len; - atomic_dec(&ecm_ipa_ctx->outstanding_pkts); + if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) > 0) + atomic_dec(&ecm_ipa_ctx->outstanding_pkts); + if (netif_queue_stopped(ecm_ipa_ctx->net) && netif_carrier_ok(ecm_ipa_ctx->net) && @@ -1574,6 +1608,10 @@ static const char *ecm_ipa_state_string(enum ecm_ipa_state state) static int ecm_ipa_init_module(void) { ECM_IPA_LOG_ENTRY(); + ipa_ecm_logbuf = ipc_log_context_create(IPA_ECM_IPC_LOG_PAGES, + "ipa_ecm", 0); + if (ipa_ecm_logbuf == NULL) + ECM_IPA_DEBUG("failed to create IPC log, continue...\n"); ECM_IPA_LOG_EXIT(); return 0; } @@ -1585,6 +1623,9 @@ static int ecm_ipa_init_module(void) static void ecm_ipa_cleanup_module(void) { ECM_IPA_LOG_ENTRY(); + if (ipa_ecm_logbuf) + ipc_log_context_destroy(ipa_ecm_logbuf); + ipa_ecm_logbuf = 
NULL; ECM_IPA_LOG_EXIT(); } diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c new file mode 100644 index 0000000000000000000000000000000000000000..833ab6bff7edaf19658723c2594dcaa742dd45dd --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c @@ -0,0 +1,1192 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#ifdef CONFIG_IPA3 +#include "../ipa_v3/ipa_pm.h" +#endif + +#define IPA_GSB_DRV_NAME "ipa_gsb" + +#define MAX_SUPPORTED_IFACE 5 + +#define IPA_GSB_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_GSB_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_GSB_ERR(fmt, args...) 
\ + do { \ + pr_err(IPA_GSB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_GSB_MAX_MSG_LEN 512 +static char dbg_buff[IPA_GSB_MAX_MSG_LEN]; + +#define IPA_GSB_SKB_HEADROOM 256 +#define IPA_GSB_SKB_DUMMY_HEADER 42 +#define IPA_GSB_AGGR_BYTE_LIMIT 14 +#define IPA_GSB_AGGR_TIME_LIMIT 1 + +static struct dentry *dent; +static struct dentry *dfile_stats; + +/** + * struct stats - driver statistics, + * @num_ul_packets: number of uplink packets + * @num_dl_packets: number of downlink packets + * @num_insufficient_headroom_packets: number of + packets with insufficient headroom + */ +struct stats { + u64 num_ul_packets; + u64 num_dl_packets; + u64 num_insufficient_headroom_packets; +}; + +/** + * struct ipa_gsb_mux_hdr - ipa gsb mux header, + * @iface_hdl: interface handle + * @qmap_id: qmap id + * @pkt_size: packet size + */ +struct ipa_gsb_mux_hdr { + u8 iface_hdl; + u8 qmap_id; + u16 pkt_size; +}; + +/** + * struct ipa_gsb_iface_info - GSB interface information + * @netdev_name: network interface name + * @device_ethaddr: network interface ethernet address + * @priv: client's private data. to be used in client's callbacks + * @tx_dp_notify: client callback for handling IPA ODU_PROD callback + * @send_dl_skb: client callback for sending skb in downlink direction + * @iface_stats: statistics, how many packets were transmitted + * using the SW bridge. + * @partial_hdr_hdl: handle for partial header + * @wakeup_request: client callback to wakeup + * @is_connected: is interface connected ? + * @is_resumed: is interface resumed ? 
+ * @iface_hdl: interface handle + */ +struct ipa_gsb_iface_info { + char netdev_name[IPA_RESOURCE_NAME_MAX]; + u8 device_ethaddr[ETH_ALEN]; + void *priv; + ipa_notify_cb tx_dp_notify; + int (*send_dl_skb)(void *priv, struct sk_buff *skb); + struct stats iface_stats; + uint32_t partial_hdr_hdl[IPA_IP_MAX]; + void (*wakeup_request)(void *); + bool is_connected; + bool is_resumed; + u8 iface_hdl; +}; + +/** + * struct ipa_gsb_context - GSB driver context information + * @logbuf: buffer of ipc logging + * @logbuf_low: buffer of ipc logging (low priority) + * @lock: global mutex lock for global variables + * @prod_hdl: handle for prod pipe + * @cons_hdl: handle for cons pipe + * @ipa_sys_desc_size: sys pipe desc size + * @num_iface: number of interface + * @iface_hdl: interface handles + * @num_connected_iface: number of connected interface + * @num_resumed_iface: number of resumed interface + * @iface: interface information + * @iface_lock: interface mutex lock for control path + * @iface_spinlock: interface spinlock for data path + * @pm_hdl: IPA PM handle + */ +struct ipa_gsb_context { + void *logbuf; + void *logbuf_low; + struct mutex lock; + u32 prod_hdl; + u32 cons_hdl; + u32 ipa_sys_desc_size; + int num_iface; + bool iface_hdl[MAX_SUPPORTED_IFACE]; + int num_connected_iface; + int num_resumed_iface; + struct ipa_gsb_iface_info *iface[MAX_SUPPORTED_IFACE]; + struct mutex iface_lock[MAX_SUPPORTED_IFACE]; + spinlock_t iface_spinlock[MAX_SUPPORTED_IFACE]; + u32 pm_hdl; +}; + +static struct ipa_gsb_context *ipa_gsb_ctx; + +#ifdef CONFIG_DEBUG_FS +static ssize_t ipa_gsb_debugfs_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int i, nbytes = 0; + struct ipa_gsb_iface_info *iface = NULL; + struct stats iface_stats; + + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) { + iface = ipa_gsb_ctx->iface[i]; + if (iface != NULL) { + iface_stats = iface->iface_stats; + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "netdev: 
%s\n", + iface->netdev_name); + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "UL packets: %lld\n", + iface_stats.num_ul_packets); + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "DL packets: %lld\n", + iface_stats.num_dl_packets); + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "packets with insufficient headroom: %lld\n", + iface_stats.num_insufficient_headroom_packets); + } + } + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static const struct file_operations ipa_gsb_stats_ops = { + .read = ipa_gsb_debugfs_stats, +}; + +static void ipa_gsb_debugfs_init(void) +{ + const mode_t read_only_mode = 00444; + + dent = debugfs_create_dir("ipa_gsb", NULL); + if (IS_ERR(dent)) { + IPA_GSB_ERR("fail to create folder ipa_gsb\n"); + return; + } + + dfile_stats = + debugfs_create_file("stats", read_only_mode, dent, + NULL, &ipa_gsb_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + IPA_GSB_ERR("fail to create file stats\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(dent); +} + +static void ipa_gsb_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} +#else +static void ipa_gsb_debugfs_init(void) +{ +} + +static void ipa_gsb_debugfs_destroy(void) +{ +} +#endif + +static int ipa_gsb_driver_init(struct odu_bridge_params *params) +{ + int i; + if (!ipa_is_ready()) { + IPA_GSB_ERR("IPA is not ready\n"); + return -EFAULT; + } + + ipa_gsb_ctx = kzalloc(sizeof(*ipa_gsb_ctx), + GFP_KERNEL); + + if (!ipa_gsb_ctx) + return -ENOMEM; + + mutex_init(&ipa_gsb_ctx->lock); + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) { + mutex_init(&ipa_gsb_ctx->iface_lock[i]); + spin_lock_init(&ipa_gsb_ctx->iface_spinlock[i]); + } + ipa_gsb_debugfs_init(); + + return 0; +} + +static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info) +{ + int i; + struct ipa_ioc_add_hdr *hdr; + + if (!iface_info) { + IPA_GSB_ERR("invalid input\n"); + 
return -EINVAL; + } + + hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + + 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->commit = 1; + hdr->num_hdrs = 2; + + snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name), + "%s_ipv4", iface_info->netdev_name); + snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name), + "%s_ipv6", iface_info->netdev_name); + /* + * partial header: + * [hdl][QMAP ID][pkt size][Dummy Header][ETH header] + */ + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) { + /* + * Optimization: add dummy header to reserve space + * for rndis header, so we can do the skb_clone + * instead of deep copy. + */ + hdr->hdr[i].hdr_len = ETH_HLEN + + sizeof(struct ipa_gsb_mux_hdr) + + IPA_GSB_SKB_DUMMY_HEADER; + hdr->hdr[i].type = IPA_HDR_L2_ETHERNET_II; + hdr->hdr[i].is_partial = 1; + hdr->hdr[i].is_eth2_ofst_valid = 1; + hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr) + + IPA_GSB_SKB_DUMMY_HEADER; + /* populate iface handle */ + hdr->hdr[i].hdr[0] = iface_info->iface_hdl; + /* populate src ETH address */ + memcpy(&hdr->hdr[i].hdr[10 + IPA_GSB_SKB_DUMMY_HEADER], + iface_info->device_ethaddr, 6); + /* populate Ethertype */ + if (i == IPA_IP_v4) + *(u16 *)(hdr->hdr[i].hdr + 16 + + IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IP); + else + *(u16 *)(hdr->hdr[i].hdr + 16 + + IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6); + } + + if (ipa_add_hdr(hdr)) { + IPA_GSB_ERR("fail to add partial headers\n"); + kfree(hdr); + return -EFAULT; + } + + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) + iface_info->partial_hdr_hdl[i] = + hdr->hdr[i].hdr_hdl; + + IPA_GSB_DBG("added partial hdr hdl for ipv4: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v4]); + IPA_GSB_DBG("added partial hdr hdl for ipv6: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v6]); + + kfree(hdr); + return 0; +} + +static void ipa_gsb_delete_partial_hdr(struct ipa_gsb_iface_info *iface_info) +{ + struct ipa_ioc_del_hdr *del_hdr; + + del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) + + 2 * 
sizeof(struct ipa_hdr_del), GFP_KERNEL); + if (!del_hdr) + return; + + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + del_hdr->hdl[IPA_IP_v4].hdl = iface_info->partial_hdr_hdl[IPA_IP_v4]; + del_hdr->hdl[IPA_IP_v6].hdl = iface_info->partial_hdr_hdl[IPA_IP_v6]; + + if (ipa_del_hdr(del_hdr) != 0) + IPA_GSB_ERR("failed to delete partial hdr\n"); + + IPA_GSB_DBG("deleted partial hdr hdl for ipv4: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v4]); + IPA_GSB_DBG("deleted partial hdr hdl for ipv6: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v6]); + + kfree(del_hdr); +} + +static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info) +{ + struct ipa_tx_intf tx; + struct ipa_rx_intf rx; + struct ipa_ioc_tx_intf_prop tx_prop[2]; + struct ipa_ioc_rx_intf_prop rx_prop[2]; + + /* populate tx prop */ + tx.num_props = 2; + tx.prop = tx_prop; + + memset(tx_prop, 0, sizeof(tx_prop)); + tx_prop[0].ip = IPA_IP_v4; + tx_prop[0].dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + tx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + snprintf(tx_prop[0].hdr_name, sizeof(tx_prop[0].hdr_name), + "%s_ipv4", iface_info->netdev_name); + + tx_prop[1].ip = IPA_IP_v6; + tx_prop[1].dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + tx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + snprintf(tx_prop[1].hdr_name, sizeof(tx_prop[1].hdr_name), + "%s_ipv6", iface_info->netdev_name); + + /* populate rx prop */ + rx.num_props = 2; + rx.prop = rx_prop; + + memset(rx_prop, 0, sizeof(rx_prop)); + rx_prop[0].ip = IPA_IP_v4; + rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD; + rx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[0].attrib.meta_data = iface_info->iface_hdl; + rx_prop[0].attrib.meta_data_mask = 0xFF; + + rx_prop[1].ip = IPA_IP_v6; + rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD; + rx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[1].attrib.meta_data = iface_info->iface_hdl; + 
rx_prop[1].attrib.meta_data_mask = 0xFF; + + if (ipa_register_intf(iface_info->netdev_name, &tx, &rx)) { + IPA_GSB_ERR("fail to add interface prop\n"); + return -EFAULT; + } + + return 0; +} + +static void ipa_gsb_dereg_intf_props(struct ipa_gsb_iface_info *iface_info) +{ + if (ipa_deregister_intf(iface_info->netdev_name) != 0) + IPA_GSB_ERR("fail to dereg intf props\n"); + + IPA_GSB_DBG("deregistered iface props for %s\n", + iface_info->netdev_name); +} + +static void ipa_gsb_pm_cb(void *user_data, enum ipa_pm_cb_event event) +{ + int i; + + if (event != IPA_PM_REQUEST_WAKEUP) { + IPA_GSB_ERR("Unexpected event %d\n", event); + WARN_ON(1); + return; + } + + IPA_GSB_DBG_LOW("wake up clients\n"); + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + if (ipa_gsb_ctx->iface[i] != NULL) + ipa_gsb_ctx->iface[i]->wakeup_request( + ipa_gsb_ctx->iface[i]->priv); +} + +static int ipa_gsb_register_pm(void) +{ + struct ipa_pm_register_params reg_params; + int ret; + + memset(®_params, 0, sizeof(reg_params)); + reg_params.name = "ipa_gsb"; + reg_params.callback = ipa_gsb_pm_cb; + reg_params.user_data = NULL; + reg_params.group = IPA_PM_GROUP_DEFAULT; + + ret = ipa_pm_register(®_params, + &ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("fail to register with PM %d\n", ret); + goto fail_pm_reg; + } + IPA_GSB_DBG("ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl); + + ret = ipa_pm_associate_ipa_cons_to_client(ipa_gsb_ctx->pm_hdl, + IPA_CLIENT_ODU_EMB_CONS); + if (ret) { + IPA_GSB_ERR("fail to associate cons with PM %d\n", ret); + goto fail_pm_cons; + } + + return 0; + +fail_pm_cons: + ipa_pm_deregister(ipa_gsb_ctx->pm_hdl); + ipa_gsb_ctx->pm_hdl = ~0; +fail_pm_reg: + return ret; +} + +int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl) +{ + int i, ret; + struct ipa_gsb_iface_info *new_intf; + + if (!params || !params->wakeup_request || !hdl || + !params->info.netdev_name || !params->info.tx_dp_notify || + !params->info.send_dl_skb) { + IPA_GSB_ERR("Invalid parameters\n"); + 
return -EINVAL; + } + + IPA_GSB_DBG("netdev_name: %s\n", params->info.netdev_name); + + if (ipa_gsb_ctx == NULL) { + ret = ipa_gsb_driver_init(¶ms->info); + if (ret) { + IPA_GSB_ERR("fail to init ipa gsb driver\n"); + return -EFAULT; + } + ipa_gsb_ctx->ipa_sys_desc_size = + params->info.ipa_desc_size; + IPA_GSB_DBG("desc size: %d\n", ipa_gsb_ctx->ipa_sys_desc_size); + } + + mutex_lock(&ipa_gsb_ctx->lock); + + if (params->info.ipa_desc_size != ipa_gsb_ctx->ipa_sys_desc_size) { + IPA_GSB_ERR("unmatch: orig desc size %d, new desc size %d\n", + ipa_gsb_ctx->ipa_sys_desc_size, + params->info.ipa_desc_size); + mutex_unlock(&ipa_gsb_ctx->lock); + return -EFAULT; + } + + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + if (ipa_gsb_ctx->iface[i] != NULL && + strnlen(ipa_gsb_ctx->iface[i]->netdev_name, + IPA_RESOURCE_NAME_MAX) == + strnlen(params->info.netdev_name, + IPA_RESOURCE_NAME_MAX) && + strcmp(ipa_gsb_ctx->iface[i]->netdev_name, + params->info.netdev_name) == 0) { + IPA_GSB_ERR("intf was added before.\n"); + mutex_unlock(&ipa_gsb_ctx->lock); + return -EFAULT; + } + + if (ipa_gsb_ctx->num_iface == MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("reached maximum supported interfaces"); + mutex_unlock(&ipa_gsb_ctx->lock); + return -EFAULT; + } + + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + if (ipa_gsb_ctx->iface_hdl[i] == false) { + ipa_gsb_ctx->iface_hdl[i] = true; + *hdl = i; + IPA_GSB_DBG("iface hdl: %d\n", *hdl); + break; + } + + IPA_GSB_DBG("intf was not added before, proceed.\n"); + new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL); + if (new_intf == NULL) { + ret = -ENOMEM; + goto fail_alloc_mem; + } + + strlcpy(new_intf->netdev_name, params->info.netdev_name, + sizeof(new_intf->netdev_name)); + new_intf->wakeup_request = params->wakeup_request; + new_intf->priv = params->info.priv; + new_intf->tx_dp_notify = params->info.tx_dp_notify; + new_intf->send_dl_skb = params->info.send_dl_skb; + new_intf->iface_hdl = *hdl; + memcpy(new_intf->device_ethaddr, 
params->info.device_ethaddr, + sizeof(new_intf->device_ethaddr)); + + if (ipa_gsb_commit_partial_hdr(new_intf) != 0) { + IPA_GSB_ERR("fail to commit partial hdrs\n"); + ret = -EFAULT; + goto fail_partial_hdr; + } + + if (ipa_gsb_reg_intf_props(new_intf) != 0) { + IPA_GSB_ERR("fail to register interface props\n"); + ret = -EFAULT; + goto fail_reg_intf_props; + } + + if (ipa_gsb_ctx->num_iface == 0) { + ret = ipa_gsb_register_pm(); + if (ret) { + IPA_GSB_ERR("fail to register with IPA PM %d\n", ret); + ret = -EFAULT; + goto fail_register_pm; + } + } + + ipa_gsb_ctx->iface[*hdl] = new_intf; + ipa_gsb_ctx->num_iface++; + IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface); + mutex_unlock(&ipa_gsb_ctx->lock); + return 0; + +fail_register_pm: + ipa_gsb_dereg_intf_props(new_intf); +fail_reg_intf_props: + ipa_gsb_delete_partial_hdr(new_intf); +fail_partial_hdr: + kfree(new_intf); +fail_alloc_mem: + ipa_gsb_ctx->iface_hdl[*hdl] = false; + mutex_unlock(&ipa_gsb_ctx->lock); + return ret; +} +EXPORT_SYMBOL(ipa_bridge_init); + +static void ipa_gsb_deregister_pm(void) +{ + IPA_GSB_DBG("deregister ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl); + ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); + ipa_pm_deregister(ipa_gsb_ctx->pm_hdl); + ipa_gsb_ctx->pm_hdl = ~0; +} + +int ipa_bridge_cleanup(u32 hdl) +{ + int i; + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + IPA_GSB_DBG("client hdl: %d\n", hdl); + + if (ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_ERR("cannot cleanup when iface is connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + ipa_gsb_dereg_intf_props(ipa_gsb_ctx->iface[hdl]); + 
ipa_gsb_delete_partial_hdr(ipa_gsb_ctx->iface[hdl]); + spin_lock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + kfree(ipa_gsb_ctx->iface[hdl]); + ipa_gsb_ctx->iface[hdl] = NULL; + ipa_gsb_ctx->iface_hdl[hdl] = false; + spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + mutex_lock(&ipa_gsb_ctx->lock); + ipa_gsb_ctx->num_iface--; + IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface); + if (ipa_gsb_ctx->num_iface == 0) { + ipa_gsb_deregister_pm(); + ipa_gsb_debugfs_destroy(); + ipc_log_context_destroy(ipa_gsb_ctx->logbuf); + ipc_log_context_destroy(ipa_gsb_ctx->logbuf_low); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_destroy(&ipa_gsb_ctx->lock); + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + mutex_destroy(&ipa_gsb_ctx->iface_lock[i]); + kfree(ipa_gsb_ctx); + ipa_gsb_ctx = NULL; + return 0; + } + mutex_unlock(&ipa_gsb_ctx->lock); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_cleanup); + +static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb; + struct sk_buff *skb2; + struct ipa_gsb_mux_hdr *mux_hdr; + u16 pkt_size, pad_byte; + u8 hdl; + + if (evt != IPA_RECEIVE) { + IPA_GSB_ERR("unexpected event\n"); + WARN_ON(1); + return; + } + + skb = (struct sk_buff *)data; + + if (skb == NULL) { + IPA_GSB_ERR("unexpected NULL data\n"); + WARN_ON(1); + return; + } + + while (skb->len) { + mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data; + pkt_size = mux_hdr->pkt_size; + /* 4-byte padding */ + pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN + + 3 + IPA_GSB_SKB_DUMMY_HEADER) & ~3) - + (pkt_size + sizeof(*mux_hdr) + + ETH_HLEN + IPA_GSB_SKB_DUMMY_HEADER); + hdl = mux_hdr->iface_hdl; + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + break; + } + IPA_GSB_DBG_LOW("pkt_size: %d, pad_byte: %d, hdl: %d\n", + pkt_size, pad_byte, hdl); + + /* remove 4 byte mux header AND dummy header*/ + skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER); + + skb2 = 
skb_clone(skb, GFP_KERNEL); + if (!skb2) { + IPA_GSB_ERR("skb_clone failed\n"); + WARN_ON(1); + break; + } + skb_trim(skb2, pkt_size + ETH_HLEN); + spin_lock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + if (ipa_gsb_ctx->iface[hdl] != NULL) { + ipa_gsb_ctx->iface[hdl]->send_dl_skb( + ipa_gsb_ctx->iface[hdl]->priv, skb2); + ipa_gsb_ctx->iface[hdl]->iface_stats.num_dl_packets++; + spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + skb_pull(skb, pkt_size + ETH_HLEN + pad_byte); + } else { + IPA_GSB_ERR("Invalid hdl: %d, drop the skb\n", hdl); + spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + dev_kfree_skb_any(skb2); + break; + } + } + + if (skb) { + dev_kfree_skb_any(skb); + skb = NULL; + } +} + +static void ipa_gsb_tx_dp_notify(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb; + struct ipa_gsb_mux_hdr *mux_hdr; + u8 hdl; + + skb = (struct sk_buff *)data; + + if (skb == NULL) { + IPA_GSB_ERR("unexpected NULL data\n"); + WARN_ON(1); + return; + } + + if (evt != IPA_WRITE_DONE && evt != IPA_RECEIVE) { + IPA_GSB_ERR("unexpected event: %d\n", evt); + dev_kfree_skb_any(skb); + return; + } + + /* fetch iface handle from header */ + mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data; + /* change to host order */ + *(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr); + hdl = mux_hdr->iface_hdl; + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("invalid hdl: %d and cb, drop the skb\n", hdl); + dev_kfree_skb_any(skb); + return; + } + IPA_GSB_DBG_LOW("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl); + + /* remove 4 byte mux header */ + skb_pull(skb, sizeof(struct ipa_gsb_mux_hdr)); + ipa_gsb_ctx->iface[hdl]->tx_dp_notify( + ipa_gsb_ctx->iface[hdl]->priv, evt, + (unsigned long)skb); +} + +static int ipa_gsb_connect_sys_pipe(void) +{ + struct ipa_sys_connect_params prod_params; + struct ipa_sys_connect_params cons_params; + int res; + + memset(&prod_params, 0, sizeof(prod_params)); + memset(&cons_params, 0, sizeof(cons_params)); + + /* configure RX EP */ + 
prod_params.client = IPA_CLIENT_ODU_PROD; + prod_params.ipa_ep_cfg.hdr.hdr_len = + ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr); + prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT; + prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0; + prod_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size; + prod_params.priv = NULL; + prod_params.notify = ipa_gsb_tx_dp_notify; + res = ipa_setup_sys_pipe(&prod_params, + &ipa_gsb_ctx->prod_hdl); + if (res) { + IPA_GSB_ERR("fail to setup prod sys pipe %d\n", res); + goto fail_prod; + } + + /* configure TX EP */ + cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + cons_params.ipa_ep_cfg.hdr.hdr_len = + ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr) + + IPA_GSB_SKB_DUMMY_HEADER; + cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + cons_params.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2; + cons_params.ipa_ep_cfg.hdr_ext.hdr_little_endian = true; + cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + /* setup aggregation */ + cons_params.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + cons_params.ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + cons_params.ipa_ep_cfg.aggr.aggr_time_limit = + IPA_GSB_AGGR_TIME_LIMIT; + cons_params.ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GSB_AGGR_BYTE_LIMIT; + cons_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size; + cons_params.priv = NULL; + cons_params.notify = ipa_gsb_cons_cb; + res = ipa_setup_sys_pipe(&cons_params, + &ipa_gsb_ctx->cons_hdl); + if (res) { + IPA_GSB_ERR("fail to setup cons sys pipe %d\n", res); + goto fail_cons; + } + + IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n", + ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl); + + return 0; + +fail_cons: + ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl); + ipa_gsb_ctx->prod_hdl = 0; +fail_prod: + return res; +} + +int ipa_bridge_connect(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + 
+ if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_DBG("iface was already connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + if (ipa_gsb_ctx->num_connected_iface == 0) { + ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("failed to activate ipa pm\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + ret = ipa_gsb_connect_sys_pipe(); + if (ret) { + IPA_GSB_ERR("fail to connect pipe\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + } + + /* connect = connect + resume */ + ipa_gsb_ctx->iface[hdl]->is_connected = true; + ipa_gsb_ctx->iface[hdl]->is_resumed = true; + + ipa_gsb_ctx->num_connected_iface++; + IPA_GSB_DBG("connected iface: %d\n", + ipa_gsb_ctx->num_connected_iface); + ipa_gsb_ctx->num_resumed_iface++; + IPA_GSB_DBG("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_connect); + +static int ipa_gsb_disconnect_sys_pipe(void) +{ + int ret; + + IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n", + ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl); + + ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl); + if (ret) { + IPA_GSB_ERR("failed to tear down prod pipe\n"); + return -EFAULT; + } + ipa_gsb_ctx->prod_hdl = 0; + + ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->cons_hdl); + if (ret) { + IPA_GSB_ERR("failed to tear down cons pipe\n"); + return -EFAULT; + } + ipa_gsb_ctx->cons_hdl = 0; + + return 0; +} + +int ipa_bridge_disconnect(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + 
return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_DBG("iface was not connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + if (ipa_gsb_ctx->num_connected_iface == 1) { + ret = ipa_gsb_disconnect_sys_pipe(); + if (ret) { + IPA_GSB_ERR("fail to discon pipes\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("failed to deactivate ipa pm\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + } + + /* disconnect = suspend + disconnect */ + ipa_gsb_ctx->iface[hdl]->is_connected = false; + ipa_gsb_ctx->num_connected_iface--; + IPA_GSB_DBG("connected iface: %d\n", + ipa_gsb_ctx->num_connected_iface); + + if (ipa_gsb_ctx->iface[hdl]->is_resumed) { + ipa_gsb_ctx->iface[hdl]->is_resumed = false; + ipa_gsb_ctx->num_resumed_iface--; + IPA_GSB_DBG("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + } + + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_disconnect); + +int ipa_bridge_resume(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG_LOW("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_connected) { + 
IPA_GSB_ERR("iface is not connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (ipa_gsb_ctx->iface[hdl]->is_resumed) { + IPA_GSB_DBG_LOW("iface was already resumed\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + if (ipa_gsb_ctx->num_resumed_iface == 0) { + ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("fail to activate ipa pm\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + + ret = ipa_start_gsi_channel( + ipa_gsb_ctx->cons_hdl); + if (ret) { + IPA_GSB_ERR( + "fail to start con ep %d\n", + ret); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + } + + ipa_gsb_ctx->iface[hdl]->is_resumed = true; + ipa_gsb_ctx->num_resumed_iface++; + IPA_GSB_DBG_LOW("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_resume); + +int ipa_bridge_suspend(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG_LOW("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_ERR("iface is not connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_resumed) { + IPA_GSB_DBG_LOW("iface was already suspended\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + if (ipa_gsb_ctx->num_resumed_iface == 1) { + ret = ipa_stop_gsi_channel( + ipa_gsb_ctx->cons_hdl); + if (ret) { + IPA_GSB_ERR( + "fail to stop cons ep %d\n", + ret); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 
ret; + } + + ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("fail to deactivate ipa pm\n"); + ipa_start_gsi_channel(ipa_gsb_ctx->cons_hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + } + + ipa_gsb_ctx->iface[hdl]->is_resumed = false; + ipa_gsb_ctx->num_resumed_iface--; + IPA_GSB_DBG_LOW("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_suspend); + +int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG("client hdl: %d, BW: %d\n", hdl, bandwidth); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + + ret = ipa_pm_set_perf_profile(ipa_gsb_ctx->pm_hdl, + bandwidth); + if (ret) + IPA_GSB_ERR("fail to set perf profile\n"); + + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; +} +EXPORT_SYMBOL(ipa_bridge_set_perf_profile); + +int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + struct ipa_gsb_mux_hdr *mux_hdr; + struct sk_buff *skb2; + struct stats iface_stats; + int ret; + + IPA_GSB_DBG_LOW("client hdl: %d\n", hdl); + + iface_stats = ipa_gsb_ctx->iface[hdl]->iface_stats; + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + return -EFAULT; + } + + /* make sure skb has enough headroom */ + if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) { + IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n"); + skb2 = skb_copy_expand(skb, sizeof(struct ipa_gsb_mux_hdr), + 0, GFP_ATOMIC); + if (!skb2) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + dev_kfree_skb_any(skb); + skb = skb2; + iface_stats.num_insufficient_headroom_packets++; + } + + /* add 4 byte header for mux */ + mux_hdr = (struct 
ipa_gsb_mux_hdr *)skb_push(skb, + sizeof(struct ipa_gsb_mux_hdr)); + mux_hdr->iface_hdl = (u8)hdl; + /* change to network order */ + *(u32 *)mux_hdr = htonl(*(u32 *)mux_hdr); + + ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (ret) { + IPA_GSB_ERR("tx dp failed %d\n", ret); + return -EFAULT; + } + ipa_gsb_ctx->iface[hdl]->iface_stats.num_ul_packets++; + + return 0; +} +EXPORT_SYMBOL(ipa_bridge_tx_dp); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ipa gsb driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c index a4dceb6ca176a5f48cb7bfdda21e15af418b9278..ca0ca72b2e81e362c7647dbbc0163b43e9efbadb 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -2027,6 +2027,8 @@ static void ipa_mhi_update_host_ch_state(bool update_rp) ipa_assert(); return; } + IPA_MHI_DBG("Updated UL CH=%d state to %s on host\n", + i, MHI_CH_STATE_STR(channel->state)); } for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { @@ -2062,7 +2064,10 @@ static void ipa_mhi_update_host_ch_state(bool update_rp) if (res) { IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); ipa_assert(); + return; } + IPA_MHI_DBG("Updated DL CH=%d state to %s on host\n", + i, MHI_CH_STATE_STR(channel->state)); } } @@ -2096,9 +2101,6 @@ static int ipa_mhi_suspend_dl(bool force) } } - if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) - ipa_mhi_update_host_ch_state(true); - return 0; fail_stop_event_update_dl_channel: @@ -2151,6 +2153,9 @@ int ipa_mhi_suspend(bool force) goto fail_suspend_ul_channel; } + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + ipa_mhi_update_host_ch_state(true); + /* * hold IPA clocks and release them after all * IPA RM resource are released to make sure tag process will not start diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c index 
a688cefcaa789994bb4f30225cb58633e724ff28..865ca7411bfac15b200b52bb5f2facf78939fa67 100644 --- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c +++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c @@ -1259,385 +1259,5 @@ int odu_bridge_cleanup(void) } EXPORT_SYMBOL(odu_bridge_cleanup); -/* IPA Bridge implementation */ -#ifdef CONFIG_IPA3 - -static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event, - unsigned long data) -{ - if (event == IPA_RM_RESOURCE_GRANTED) - complete(&odu_bridge_ctx->rm_comp); -} - -static int ipa_br_request_prod(void) -{ - int res; - - ODU_BRIDGE_FUNC_ENTRY(); - - reinit_completion(&odu_bridge_ctx->rm_comp); - ODU_BRIDGE_DBG("requesting odu prod\n"); - res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); - if (res) { - if (res != -EINPROGRESS) { - ODU_BRIDGE_ERR("failed to request prod %d\n", res); - return res; - } - wait_for_completion(&odu_bridge_ctx->rm_comp); - } - - ODU_BRIDGE_FUNC_EXIT(); - return 0; - -} - -static int ipa_br_release_prod(void) -{ - int res; - - ODU_BRIDGE_FUNC_ENTRY(); - - reinit_completion(&odu_bridge_ctx->rm_comp); - ODU_BRIDGE_DBG("requesting odu prod\n"); - res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); - if (res) { - ODU_BRIDGE_ERR("failed to release prod %d\n", res); - return res; - } - - ODU_BRIDGE_FUNC_EXIT(); - return 0; - -} - -static int ipa_br_cons_request(void) -{ - ODU_BRIDGE_FUNC_ENTRY(); - if (odu_bridge_ctx->is_suspended) - odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv); - ODU_BRIDGE_FUNC_EXIT(); - return 0; -} - -static int ipa_br_cons_release(void) -{ - ODU_BRIDGE_FUNC_ENTRY(); - ODU_BRIDGE_FUNC_EXIT(); - return 0; -} - -static void ipa_br_pm_cb(void *p, enum ipa_pm_cb_event event) -{ - ODU_BRIDGE_FUNC_ENTRY(); - if (event != IPA_PM_REQUEST_WAKEUP) { - ODU_BRIDGE_ERR("Unexpected event %d\n", event); - WARN_ON(1); - return; - } - - if (odu_bridge_ctx->is_suspended) - odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv); - ODU_BRIDGE_FUNC_EXIT(); -} - 
-static int ipa_br_register_pm(void) -{ - struct ipa_pm_register_params reg_params; - int ret; - - memset(®_params, 0, sizeof(reg_params)); - reg_params.name = "ODU Bridge"; - reg_params.callback = ipa_br_pm_cb; - reg_params.group = IPA_PM_GROUP_DEFAULT; - - ret = ipa_pm_register(®_params, - &odu_bridge_ctx->pm_hdl); - if (ret) { - ODU_BRIDGE_ERR("fail to register with PM %d\n", ret); - goto fail_pm_reg; - } - - ret = ipa_pm_associate_ipa_cons_to_client(odu_bridge_ctx->pm_hdl, - IPA_CLIENT_ODU_EMB_CONS); - if (ret) { - ODU_BRIDGE_ERR("fail to associate cons with PM %d\n", ret); - goto fail_pm_cons; - } - - return 0; - -fail_pm_cons: - ipa_pm_deregister(odu_bridge_ctx->pm_hdl); - odu_bridge_ctx->pm_hdl = ~0; -fail_pm_reg: - return ret; -} - -static int ipa_br_create_rm_resources(void) -{ - int ret; - struct ipa_rm_create_params create_params; - - /* create IPA RM resources for power management */ - init_completion(&odu_bridge_ctx->rm_comp); - memset(&create_params, 0, sizeof(create_params)); - create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD; - create_params.reg_params.user_data = odu_bridge_ctx; - create_params.reg_params.notify_cb = ipa_br_rm_notify; - create_params.floor_voltage = IPA_VOLTAGE_SVS; - ret = ipa_rm_create_resource(&create_params); - if (ret) { - ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret); - goto fail_rm_prod; - } - - ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD, - IPA_RM_RESOURCE_APPS_CONS); - if (ret) { - ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret); - goto fail_add_dep; - } - - memset(&create_params, 0, sizeof(create_params)); - create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS; - create_params.request_resource = ipa_br_cons_request; - create_params.release_resource = ipa_br_cons_release; - create_params.floor_voltage = IPA_VOLTAGE_SVS; - ret = ipa_rm_create_resource(&create_params); - if (ret) { - ODU_BRIDGE_ERR("failed to create RM cons %d\n", ret); - goto fail_rm_cons; - } - - return 0; - 
-fail_rm_cons: - ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, - IPA_RM_RESOURCE_APPS_CONS); -fail_add_dep: - ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); -fail_rm_prod: - return ret; -} - -/* IPA Bridge API is the new API which will replaces old odu_bridge API */ -int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl) -{ - int ret; - - if (!params || !params->wakeup_request || !hdl) { - ODU_BRIDGE_ERR("NULL arg\n"); - return -EINVAL; - } - - - ret = odu_bridge_init(¶ms->info); - if (ret) - return ret; - - odu_bridge_ctx->wakeup_request = params->wakeup_request; - - if (ipa_pm_is_used()) - ret = ipa_br_register_pm(); - else - ret = ipa_br_create_rm_resources(); - if (ret) { - ODU_BRIDGE_ERR("fail to register woth RM/PM %d\n", ret); - goto fail_pm; - } - - /* handle is ignored for now */ - *hdl = 0; - - return 0; - -fail_pm: - odu_bridge_cleanup(); - return ret; -} -EXPORT_SYMBOL(ipa_bridge_init); - -int ipa_bridge_connect(u32 hdl) -{ - int ret; - - if (!odu_bridge_ctx) { - ODU_BRIDGE_ERR("Not initialized\n"); - return -EFAULT; - } - - if (odu_bridge_ctx->is_connected) { - ODU_BRIDGE_ERR("already connected\n"); - return -EFAULT; - } - - if (ipa_pm_is_used()) - ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl); - else - ret = ipa_br_request_prod(); - if (ret) - return ret; - - return odu_bridge_connect(); -} -EXPORT_SYMBOL(ipa_bridge_connect); - -int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth) -{ - struct ipa_rm_perf_profile profile = {0}; - int ret; - - if (ipa_pm_is_used()) - return ipa_pm_set_perf_profile(odu_bridge_ctx->pm_hdl, - bandwidth); - - profile.max_supported_bandwidth_mbps = bandwidth; - ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile); - if (ret) { - ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret); - return ret; - } - - ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile); - if (ret) { - ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret); - 
return ret; - } - - return 0; -} -EXPORT_SYMBOL(ipa_bridge_set_perf_profile); - -int ipa_bridge_disconnect(u32 hdl) -{ - int ret; - - ret = odu_bridge_disconnect(); - if (ret) - return ret; - - if (ipa_pm_is_used()) - ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl); - else - ret = ipa_br_release_prod(); - if (ret) - return ret; - - return 0; -} -EXPORT_SYMBOL(ipa_bridge_disconnect); - -int ipa_bridge_suspend(u32 hdl) -{ - int ret; - - if (!odu_bridge_ctx) { - ODU_BRIDGE_ERR("Not initialized\n"); - return -EFAULT; - } - - if (!odu_bridge_ctx->is_connected) { - ODU_BRIDGE_ERR("bridge is disconnected\n"); - return -EFAULT; - } - - if (odu_bridge_ctx->is_suspended) { - ODU_BRIDGE_ERR("bridge is already suspended\n"); - return -EFAULT; - } - - /* stop cons channel to prevent downlink data during suspend */ - ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl); - if (ret) { - ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret); - return ret; - } - - if (ipa_pm_is_used()) - ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl); - else - ret = ipa_br_release_prod(); - if (ret) { - ODU_BRIDGE_ERR("failed to release prod %d\n", ret); - ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl); - return ret; - } - odu_bridge_ctx->is_suspended = true; - - return 0; -} -EXPORT_SYMBOL(ipa_bridge_suspend); - -int ipa_bridge_resume(u32 hdl) -{ - int ret; - - if (!odu_bridge_ctx) { - ODU_BRIDGE_ERR("Not initialized\n"); - return -EFAULT; - } - - if (!odu_bridge_ctx->is_connected) { - ODU_BRIDGE_ERR("bridge is disconnected\n"); - return -EFAULT; - } - - if (!odu_bridge_ctx->is_suspended) { - ODU_BRIDGE_ERR("bridge is not suspended\n"); - return -EFAULT; - } - - if (ipa_pm_is_used()) - ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl); - else - ret = ipa_br_request_prod(); - if (ret) - return ret; - - ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl); - if (ret) { - ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret); - return ret; - } - 
odu_bridge_ctx->is_suspended = false; - - return 0; -} -EXPORT_SYMBOL(ipa_bridge_resume); - -int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, - struct ipa_tx_meta *metadata) -{ - return odu_bridge_tx_dp(skb, metadata); -} -EXPORT_SYMBOL(ipa_bridge_tx_dp); - -static void ipa_br_delete_rm_resources(void) -{ - ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, - IPA_RM_RESOURCE_APPS_CONS); - ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); - ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS); -} - -static void ipa_br_deregister_pm(void) -{ - ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl); - ipa_pm_deregister(odu_bridge_ctx->pm_hdl); - odu_bridge_ctx->pm_hdl = ~0; -} - -int ipa_bridge_cleanup(u32 hdl) -{ - if (ipa_pm_is_used()) - ipa_br_deregister_pm(); - else - ipa_br_delete_rm_resources(); - return odu_bridge_cleanup(); -} -EXPORT_SYMBOL(ipa_bridge_cleanup); - -#endif /* CONFIG_IPA3 */ - MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("ODU bridge driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 21bba6e8e9d025ca513c4253dca55c5891e74372..07f405df72d6b8d3deed1b5fca6c1939f1085fb3 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -60,11 +60,38 @@ #define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */ #define DEFAULT_AGGR_PKT_LIMIT 0 +#define IPA_RNDIS_IPC_LOG_PAGES 50 + +#define IPA_RNDIS_IPC_LOGGING(buf, fmt, args...) \ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +static void *ipa_rndis_logbuf; + +#define RNDIS_IPA_DEBUG(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_rndis_logbuf) { \ + IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define RNDIS_IPA_DEBUG_XMIT(fmt, args...) 
\ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + #define RNDIS_IPA_ERROR(fmt, args...) \ + do { \ pr_err(DRV_NAME "@%s@%d@ctx:%s: "\ - fmt, __func__, __LINE__, current->comm, ## args) -#define RNDIS_IPA_DEBUG(fmt, args...) \ - pr_debug("ctx: %s, "fmt, current->comm, ## args) + fmt, __func__, __LINE__, current->comm, ## args);\ + if (ipa_rndis_logbuf) { \ + IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) #define NULL_CHECK_RETVAL(ptr) \ do { \ @@ -910,7 +937,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, netif_trans_update(net); - RNDIS_IPA_DEBUG + RNDIS_IPA_DEBUG_XMIT ("Tx, len=%d, skb->protocol=%d, outstanding=%d\n", skb->len, skb->protocol, atomic_read(&rndis_ipa_ctx->outstanding_pkts)); @@ -1028,7 +1055,9 @@ static void rndis_ipa_tx_complete_notify( rndis_ipa_ctx->net->stats.tx_packets++; rndis_ipa_ctx->net->stats.tx_bytes += skb->len; - atomic_dec(&rndis_ipa_ctx->outstanding_pkts); + if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) > 0) + atomic_dec(&rndis_ipa_ctx->outstanding_pkts); + if (netif_queue_stopped(rndis_ipa_ctx->net) && netif_carrier_ok(rndis_ipa_ctx->net) && @@ -2658,12 +2687,21 @@ static ssize_t rndis_ipa_debugfs_atomic_read static int rndis_ipa_init_module(void) { + ipa_rndis_logbuf = ipc_log_context_create(IPA_RNDIS_IPC_LOG_PAGES, + "ipa_rndis", 0); + if (ipa_rndis_logbuf == NULL) + RNDIS_IPA_DEBUG("failed to create IPC log, continue...\n"); + pr_info("RNDIS_IPA module is loaded."); return 0; } static void rndis_ipa_cleanup_module(void) { + if (ipa_rndis_logbuf) + ipc_log_context_destroy(ipa_rndis_logbuf); + ipa_rndis_logbuf = NULL; + pr_info("RNDIS_IPA module is unloaded."); } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index d142de21ef071a82b1ed19929e48d125715c61ba..fc851b4010218d6e0a315d2c8bf354640a3579c9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ 
-2597,11 +2597,6 @@ void ipa3_q6_post_shutdown_cleanup(void) IPADBG_LOW("ENTER\n"); - if (!ipa3_ctx->uc_ctx.uc_loaded) { - IPAERR("uC is not loaded. Skipping\n"); - return; - } - IPA_ACTIVE_CLIENTS_INC_SIMPLE(); /* Handle the issue where SUSPEND was removed for some reason */ @@ -2618,6 +2613,11 @@ void ipa3_q6_post_shutdown_cleanup(void) ipa3_halt_q6_gsi_channels(prod); + if (!ipa3_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded. Skipping\n"); + return; + } + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) if (IPA_CLIENT_IS_Q6_PROD(client_idx)) { ep_idx = ipa3_get_ep_mapping(client_idx); @@ -3586,7 +3586,6 @@ void ipa3_enable_clks(void) */ void _ipa_disable_clks_v3_0(void) { - ipa3_suspend_apps_pipes(true); ipa3_uc_notify_clk_state(false); if (ipa3_clk) { IPADBG_LOW("disabling gcc_ipa_clk\n"); @@ -3831,6 +3830,7 @@ static void __ipa3_dec_client_disable_clks(void) ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt); if (ret > 0) goto unlock_mutex; + ipa3_suspend_apps_pipes(true); ipa3_disable_clks(); unlock_mutex: @@ -4268,7 +4268,7 @@ int ipa3_init_interrupts(void) return 0; fail_add_interrupt_handler: - free_irq(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev); + ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev); return result; } @@ -4318,8 +4318,10 @@ static void ipa3_freeze_clock_vote_and_notify_modem(void) ipa3_ctx->smp2p_info.ipa_clk_on = true; qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, - BIT(IPA_SMP2P_SMEM_STATE_MASK), - BIT(ipa3_ctx->smp2p_info.ipa_clk_on | (1 << 1))); + IPA_SMP2P_SMEM_STATE_MASK, + ((ipa3_ctx->smp2p_info.ipa_clk_on << + IPA_SMP2P_OUT_CLK_VOTE_IDX) | + (1 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX))); ipa3_ctx->smp2p_info.res_sent = true; IPADBG("IPA clocks are %s\n", @@ -4335,8 +4337,10 @@ void ipa3_reset_freeze_vote(void) IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE"); qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, - BIT(IPA_SMP2P_SMEM_STATE_MASK), - 
BIT(ipa3_ctx->smp2p_info.ipa_clk_on | (1 << 1))); + IPA_SMP2P_SMEM_STATE_MASK, + ((ipa3_ctx->smp2p_info.ipa_clk_on << + IPA_SMP2P_OUT_CLK_VOTE_IDX) | + (1 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX))); ipa3_ctx->smp2p_info.res_sent = false; ipa3_ctx->smp2p_info.ipa_clk_on = false; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 206e53d67b36ac99740617cd0c1129249be78a24..0729d370d527908ebbd0fef1612e6fb05251ff45 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -1152,7 +1152,7 @@ static ssize_t ipa3_read_odlstats(struct file *file, char __user *ubuf, ipa3_odl_ctx->stats.odl_rx_pkt, ipa3_odl_ctx->stats.odl_tx_diag_pkt, ipa3_odl_ctx->stats.odl_drop_pkt, - ipa3_odl_ctx->stats.numer_in_queue); + atomic_read(&ipa3_odl_ctx->stats.numer_in_queue)); cnt += nbytes; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index f06fec9788a6539ecfa1576545f4bb5fd0319339..56b51e13187a7ac452e5e8517ee098671422933f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -36,6 +36,9 @@ #define IPA_GENERIC_AGGR_TIME_LIMIT 500 /* 0.5msec */ #define IPA_GENERIC_AGGR_PKT_LIMIT 0 +#define IPA_GSB_AGGR_BYTE_LIMIT 14 +#define IPA_GSB_RX_BUFF_BASE_SZ 16384 + #define IPA_GENERIC_RX_BUFF_BASE_SZ 8192 #define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\ (X) + NET_SKB_PAD) +\ @@ -3039,7 +3042,6 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in, INIT_DELAYED_WORK(&sys->replenish_rx_work, ipa3_replenish_rx_work_func); atomic_set(&sys->curr_polling_state, 0); - sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ; sys->rx_pool_sz = in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE - 1; if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ) @@ -3047,8 +3049,23 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in, sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr; sys->get_skb = ipa3_get_skb_ipa_rx; 
sys->free_skb = ipa3_free_skb_rx; - sys->free_rx_wrapper = ipa3_free_rx_wrapper; - sys->repl_hdlr = ipa3_replenish_rx_cache; + /* recycle skb for GSB use case */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + sys->free_rx_wrapper = + ipa3_free_rx_wrapper; + sys->repl_hdlr = + ipa3_replenish_rx_cache; + /* Overwrite buffer size & aggr limit for GSB */ + sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ( + IPA_GSB_RX_BUFF_BASE_SZ); + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GSB_AGGR_BYTE_LIMIT; + } else { + sys->free_rx_wrapper = + ipa3_free_rx_wrapper; + sys->repl_hdlr = ipa3_replenish_rx_cache; + sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ; + } } else if (in->client == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) { IPADBG("assigning policy to client:%d", @@ -3323,6 +3340,13 @@ int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in, result = -EFAULT; goto fail_and_disable_clocks; } + if (ipa3_cfg_ep_hdr_ext(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr_ext)) { + IPAERR("fail config hdr_ext prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } if (ipa3_cfg_ep_cfg(ipa_ep_idx, &sys_in->ipa_ep_cfg.cfg)) { IPAERR("fail to configure cfg prop of EP %d\n", @@ -3553,10 +3577,8 @@ static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify) IPADBG_LOW("event %d notified\n", notify->evt_id); sys = (struct ipa3_sys_context *)notify->chan_user_data; - spin_lock_bh(&sys->spinlock); rx_pkt_expected = list_first_entry(&sys->head_desc_list, struct ipa3_rx_pkt_wrapper, link); - spin_unlock_bh(&sys->spinlock); rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data; if (rx_pkt_expected != rx_pkt_rcvd) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 18ba305a555a514611941af034ffd1408b5af111..ad18e4cd642663b6b16991b904dc40a6f27e9c47 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -229,6 +229,8 @@ enum { # define __cpuc_flush_dcache_area 
__flush_dcache_area #endif +#define IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX 0 +#define IPA_SMP2P_OUT_CLK_VOTE_IDX 1 #define IPA_SMP2P_SMEM_STATE_MASK 3 @@ -2398,6 +2400,7 @@ int ipa3_active_clients_log_print_buffer(char *buf, int size); int ipa3_active_clients_log_print_table(char *buf, int size); void ipa3_active_clients_log_clear(void); int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev); +void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev); int __ipa3_del_rt_rule(u32 rule_hdl); int __ipa3_del_hdr(u32 hdr_hdl, bool by_user); int __ipa3_release_hdr(u32 hdr_hdl); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c index e949670762bd4e93eb354f67dff1f9010417e9e5..8ff1a4c3f8a6666a9f5e27a1e005896134990fb8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c @@ -43,10 +43,7 @@ static spinlock_t suspend_wa_lock; static void ipa3_process_interrupts(bool isr_context); static int ipa3_irq_mapping[IPA_IRQ_MAX] = { - [IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = -1, - [IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = -1, [IPA_BAD_SNOC_ACCESS_IRQ] = 0, - [IPA_EOT_COAL_IRQ] = -1, [IPA_UC_IRQ_0] = 2, [IPA_UC_IRQ_1] = 3, [IPA_UC_IRQ_2] = 4, @@ -61,7 +58,17 @@ static int ipa3_irq_mapping[IPA_IRQ_MAX] = { [IPA_PROC_ERR_IRQ] = 13, [IPA_TX_SUSPEND_IRQ] = 14, [IPA_TX_HOLB_DROP_IRQ] = 15, - [IPA_GSI_IDLE_IRQ] = 16, + [IPA_BAM_GSI_IDLE_IRQ] = 16, + [IPA_PIPE_YELLOW_MARKER_BELOW_IRQ] = 17, + [IPA_PIPE_RED_MARKER_BELOW_IRQ] = 18, + [IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ] = 19, + [IPA_PIPE_RED_MARKER_ABOVE_IRQ] = 20, + [IPA_UCP_IRQ] = 21, + [IPA_DCMP_IRQ] = 22, + [IPA_GSI_EE_IRQ] = 23, + [IPA_GSI_IPA_IF_TLV_RCVD_IRQ] = 24, + [IPA_GSI_UC_IRQ] = 25, + [IPA_TLV_LEN_MIN_DSM_IRQ] = 26, }; static void ipa3_interrupt_defer(struct work_struct *work); @@ -73,7 +80,8 @@ static void ipa3_deferred_interrupt_work(struct work_struct *work) container_of(work, struct 
ipa3_interrupt_work_wrap, interrupt_work); - IPADBG("call handler from workq...\n"); + IPADBG("call handler from workq for interrupt %d...\n", + work_data->interrupt); work_data->handler(work_data->interrupt, work_data->private_data, work_data->interrupt_data); kfree(work_data->interrupt_data); @@ -111,9 +119,9 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context) switch (interrupt_info.interrupt) { case IPA_TX_SUSPEND_IRQ: - IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n"); + IPADBG_LOW("processing TX_SUSPEND interrupt\n"); ipa3_tx_suspend_interrupt_wa(); - suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n, + suspend_data = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n, ipa_ee); IPADBG_LOW("get interrupt %d\n", suspend_data); @@ -154,6 +162,8 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context) /* Force defer processing if in ISR context. */ if (interrupt_info.deferred_flag || isr_context) { + IPADBG_LOW("Defer handling interrupt %d\n", + interrupt_info.interrupt); work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap), GFP_ATOMIC); if (!work_data) { @@ -170,6 +180,7 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context) queue_work(ipa_interrupt_wq, &work_data->interrupt_work); } else { + IPADBG_LOW("Handle interrupt %d\n", interrupt_info.interrupt); interrupt_info.handler(interrupt_info.interrupt, interrupt_info.private_data, interrupt_data); @@ -219,6 +230,7 @@ static void ipa3_tx_suspend_interrupt_wa(void) u32 val; u32 suspend_bmask; int irq_num; + int wa_delay; IPADBG_LOW("Enter\n"); irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; @@ -237,8 +249,17 @@ static void ipa3_tx_suspend_interrupt_wa(void) ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n"); + + wa_delay = DIS_SUSPEND_INTERRUPT_TIMEOUT; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + wa_delay *= 400; + } + + 
IPADBG_LOW("Delay period %d msec\n", wa_delay); + queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int, - msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT)); + msecs_to_jiffies(wa_delay)); IPADBG_LOW("Exit\n"); } @@ -261,15 +282,18 @@ static void ipa3_process_interrupts(bool isr_context) unsigned long flags; bool uc_irq; - IPADBG_LOW("Enter\n"); + IPADBG_LOW("Enter isr_context=%d\n", isr_context); spin_lock_irqsave(&suspend_wa_lock, flags); en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee); while (en & reg) { + IPADBG_LOW("en=0x%x reg=0x%x\n", en, reg); bmsk = 1; for (i = 0; i < IPA_IRQ_NUM_MAX; i++) { + IPADBG_LOW("Check irq number %d\n", i); if (en & reg & bmsk) { + IPADBG_LOW("Irq number %d asserted\n", i); uc_irq = is_uc_irq(i); /* @@ -518,6 +542,8 @@ int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) IPAERR( "fail to register IPA IRQ handler irq=%d\n", ipa_irq); + destroy_workqueue(ipa_interrupt_wq); + ipa_interrupt_wq = NULL; return -ENODEV; } IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); @@ -533,6 +559,26 @@ int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) return 0; } +/** + * ipa3_interrupts_destroy() - Destroy the IPA interrupts framework + * @ipa_irq: The interrupt number to allocate + * @ee: Execution environment + * @ipa_dev: The basic device structure representing the IPA driver + * + * - Disable apps processor wakeup by IPA interrupts + * - Unregister the ipa interrupt handler - ipa3_isr + * - Destroy the interrupt workqueue + */ +void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev) +{ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { + disable_irq_wake(ipa_irq); + free_irq(ipa_irq, ipa_dev); + } + destroy_workqueue(ipa_interrupt_wq); + ipa_interrupt_wq = NULL; +} + /** * ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ * @clnt_hndl: suspended client handle, IRQ is emulated for this pipe diff --git 
a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c index 87920f2818c56f24a4c17f4be1ca09f4b1135651..c524db5f00db57ccfd8bbf6338025f7309886d96 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c @@ -215,7 +215,8 @@ static void delete_first_node(void) kfree(msg->buff); kfree(msg); ipa3_odl_ctx->stats.odl_drop_pkt++; - IPA_STATS_DEC_CNT(ipa3_odl_ctx->stats.numer_in_queue); + if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue)) + atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue); } } else { IPADBG("List Empty\n"); @@ -244,10 +245,11 @@ int ipa3_send_adpl_msg(unsigned long skb_data) msg->buff = data; msg->len = skb->len; mutex_lock(&ipa3_odl_ctx->adpl_msg_lock); - if (ipa3_odl_ctx->stats.numer_in_queue >= MAX_QUEUE_TO_ODL) + if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue) >= + MAX_QUEUE_TO_ODL) delete_first_node(); list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list); - IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.numer_in_queue); + atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue); mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock); IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_rx_pkt); @@ -342,7 +344,7 @@ int ipa3_odl_pipe_open(void) IPADBG("Setup endpoint config success\n"); ipa3_odl_ctx->stats.odl_drop_pkt = 0; - ipa3_odl_ctx->stats.numer_in_queue = 0; + atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0); ipa3_odl_ctx->stats.odl_rx_pkt = 0; ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0; /* @@ -422,7 +424,7 @@ void ipa3_odl_pipe_cleanup(bool is_ssr) */ ipa3_odl_ctx->odl_ctl_msg_wq_flag = true; ipa3_odl_ctx->stats.odl_drop_pkt = 0; - ipa3_odl_ctx->stats.numer_in_queue = 0; + atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0); ipa3_odl_ctx->stats.odl_rx_pkt = 0; ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0; IPADBG("Wake up odl ctl\n"); @@ -465,7 +467,8 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count, msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list, struct 
ipa3_push_msg_odl, link); list_del(&msg->link); - IPA_STATS_DEC_CNT(ipa3_odl_ctx->stats.numer_in_queue); + if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue)) + atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue); } mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h index 5f522505aa1ed76cce8632d045ee752201c6fdb0..82582669041e7194b41a33904512c7636b58c4f9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h @@ -24,7 +24,7 @@ struct ipa3_odlstats { u32 odl_rx_pkt; u32 odl_tx_diag_pkt; u32 odl_drop_pkt; - u32 numer_in_queue; + atomic_t numer_in_queue; }; struct odl_state_bit_mask { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c index 34065cf8c2ed1f3cba54c37d38c419577077f288..8b40feb6ec911a833bfca568769058678756b4b4 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c @@ -453,6 +453,7 @@ static void delayed_deferred_deactivate_work_func(struct work_struct *work) struct delayed_work *dwork; struct ipa_pm_client *client; unsigned long flags; + unsigned long delay; dwork = container_of(work, struct delayed_work, work); client = container_of(dwork, struct ipa_pm_client, deactivate_work); @@ -464,8 +465,13 @@ static void delayed_deferred_deactivate_work_func(struct work_struct *work) client->state = IPA_PM_ACTIVATED; goto bail; case IPA_PM_ACTIVATED_PENDING_RESCHEDULE: + delay = IPA_PM_DEFERRED_TIMEOUT; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) + delay *= 5; + queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work, - msecs_to_jiffies(IPA_PM_DEFERRED_TIMEOUT)); + msecs_to_jiffies(delay)); client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION; goto bail; case IPA_PM_ACTIVATED_PENDING_DEACTIVATION: @@ -1006,6 +1012,7 @@ int ipa_pm_deferred_deactivate(u32 hdl) { struct 
ipa_pm_client *client; unsigned long flags; + unsigned long delay; if (ipa_pm_ctx == NULL) { IPA_PM_ERR("PM_ctx is null\n"); @@ -1029,9 +1036,14 @@ int ipa_pm_deferred_deactivate(u32 hdl) spin_unlock_irqrestore(&client->state_lock, flags); return 0; case IPA_PM_ACTIVATED: + delay = IPA_PM_DEFERRED_TIMEOUT; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) + delay *= 5; + client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION; queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work, - msecs_to_jiffies(IPA_PM_DEFERRED_TIMEOUT)); + msecs_to_jiffies(delay)); break; case IPA_PM_ACTIVATED_TIMER_SET: case IPA_PM_ACTIVATED_PENDING_DEACTIVATION: diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 54640734157381d50d3f54f69b3d6b34d7e0873c..3c3f35ee9e3d81a296c0d089eedbc229027f7039 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -301,9 +301,9 @@ static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle, struct ipa_mhi_clk_vote_resp_msg_v01 resp; int rc; - IPAWANDBG_LOW("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01\n"); vote_req = (struct ipa_mhi_clk_vote_req_msg_v01 *)decoded_msg; - + IPAWANDBG("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01(%d)\n", + vote_req->mhi_vote); rc = imp_handle_vote_req(vote_req->mhi_vote); if (rc) { resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; @@ -311,7 +311,7 @@ static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle, } else { resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; } - + IPAWANDBG("start sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n"); rc = qmi_send_response(qmi_handle, sq, txn, QMI_IPA_MHI_CLK_VOTE_RESP_V01, IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN, @@ -321,7 +321,7 @@ static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle, if (rc < 0) IPAWANERR("QMI_IPA_MHI_CLK_VOTE_RESP_V01 failed\n"); else - IPAWANDBG("Sent 
QMI_IPA_MHI_CLK_VOTE_RESP_V01\n"); + IPAWANDBG("Finished senting QMI_IPA_MHI_CLK_VOTE_RESP_V01\n"); } static void ipa3_a5_svc_disconnect_cb(struct qmi_handle *qmi, @@ -849,6 +849,12 @@ int ipa3_qmi_enable_force_clear_datapath_send( return -EINVAL; } + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + IPAWANDBG("Simulating success on emu/virt mode\n"); + return 0; + } + req_desc.max_msg_len = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01; @@ -898,6 +904,12 @@ int ipa3_qmi_disable_force_clear_datapath_send( return -EINVAL; } + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + IPAWANDBG("Simulating success on emu/virt mode\n"); + return 0; + } + req_desc.max_msg_len = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c index cd703e200915d4301c786016120feee96b1be3c7..21820b0c4f96fd2f454ef2c355ba9d3bfea4d152 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -34,6 +34,8 @@ #define IPA_WDI_RESUMED BIT(2) #define IPA_UC_POLL_SLEEP_USEC 100 +#define GSI_STOP_MAX_RETRY_CNT 10 + struct ipa_wdi_res { struct ipa_wdi_buffer_info *res; unsigned int nents; @@ -882,6 +884,8 @@ static int ipa3_wdi2_gsi_alloc_evt_ring( evt_scratch.wdi.update_ri_mod_timer_running = 0; evt_scratch.wdi.evt_comp_count = 0; evt_scratch.wdi.last_update_ri = 0; + evt_scratch.wdi.resvd1 = 0; + evt_scratch.wdi.resvd2 = 0; result = gsi_write_evt_ring_scratch(*evt_ring_hdl, evt_scratch); if (result != GSI_STATUS_SUCCESS) { IPAERR("Error writing WDI event ring scratch: %d\n", result); @@ -1114,6 +1118,7 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, result = -ENOMEM; goto 
gsi_timeout; } + gsi_evt_ring_props.rp_update_addr = va; } else { len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : in->u.ul.rdy_ring_size; @@ -1190,13 +1195,13 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, result = -ENOMEM; goto gsi_timeout; } - + gsi_evt_ring_props.rp_update_addr = va; gsi_scratch.wdi.wdi_rx_vdev_id = 0xff; gsi_scratch.wdi.wdi_rx_fw_desc = 0xff; gsi_scratch.wdi.endp_metadatareg_offset = ipahal_get_reg_mn_ofst( IPA_ENDP_INIT_HDR_METADATA_n, 0, - ipa_ep_idx); + ipa_ep_idx)/4; gsi_scratch.wdi.qmap_id = 0; } @@ -1259,7 +1264,7 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele); gsi_scratch.wdi.update_ri_moderation_counter = 0; gsi_scratch.wdi.wdi_rx_tre_proc_in_progress = 0; - + gsi_scratch.wdi.resv1 = 0; result = gsi_write_channel_scratch(ep->gsi_chan_hdl, gsi_scratch); if (result != GSI_STATUS_SUCCESS) { @@ -1268,13 +1273,6 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, goto fail_write_channel_scratch; } - result = gsi_start_channel(ep->gsi_chan_hdl); - if (result != GSI_STATUS_SUCCESS) { - IPAERR("gsi_start_channel failed %d\n", result); - goto fail_start_channel; - } - IPADBG("GSI channel started\n"); - /* for AP+STA stats update */ if (in->wdi_notify) ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify; @@ -1302,7 +1300,6 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, ipa_cfg_ep_fail: memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); -fail_start_channel: fail_write_channel_scratch: gsi_dealloc_channel(ep->gsi_chan_hdl); gsi_timeout: @@ -1860,18 +1857,6 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl) if (!ep->keep_ipa_awake) IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); - result = ipa3_stop_gsi_channel(clnt_hdl); - if (result != GSI_STATUS_SUCCESS) { - IPAERR("GSI stop chan err: %d.\n", result); - ipa_assert(); - return result; - } - result = ipa3_reset_gsi_channel(clnt_hdl); - if (result != 
GSI_STATUS_SUCCESS) { - IPAERR("Failed to reset chan: %d.\n", result); - ipa_assert(); - return result; - } result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); if (result != GSI_STATUS_SUCCESS) { IPAERR("Failed to reset evt ring: %d.\n", @@ -2336,6 +2321,10 @@ int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl) int ipa_ep_idx; struct ipa3_ep_context *ep; int res = 0; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + int retry_cnt = 0; ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl)); if (ipa_ep_idx < 0) { @@ -2351,15 +2340,56 @@ int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl) } if (ep->valid) { IPADBG("suspended pipe %d\n", ipa_ep_idx); - res = ipa3_stop_gsi_channel(ipa_ep_idx); + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + res = ipa3_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); if (res) { - IPAERR("failed to stop LAN channel\n"); - ipa_assert(); + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", res); + IPAERR("remove delay from SCND reg\n"); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } else { + disable_force_clear = true; + } +retry_gsi_stop: + res = ipa3_stop_gsi_channel(ipa_ep_idx); + if (res != 0 && res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPAERR("failed to stop channel res = %d\n", res); + goto fail_stop_channel; + } else if (res == -GSI_STATUS_AGAIN) { + IPADBG("GSI stop channel failed retry cnt = %d\n", + retry_cnt); + retry_cnt++; + if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT) + goto fail_stop_channel; + goto retry_gsi_stop; + } else { + IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl); } + + res = ipa3_reset_gsi_channel(clnt_hdl); + if (res != GSI_STATUS_SUCCESS) { + IPAERR("Failed to reset chan: %d.\n", res); + goto fail_stop_channel; + } + } + if (disable_force_clear) + 
ipa3_disable_force_clear(clnt_hdl); IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); ep->gsi_offload_state &= ~IPA_WDI_RESUMED; return res; +fail_stop_channel: + ipa_assert(); + return res; } /** @@ -2511,6 +2541,7 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) int result = 0; struct ipa3_ep_context *ep; union __packed gsi_channel_scratch gsi_scratch; + int retry_cnt = 0; memset(&gsi_scratch, 0, sizeof(gsi_scratch)); ep = &ipa3_ctx->ep[clnt_hdl]; @@ -2522,18 +2553,25 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) result); goto fail_read_channel_scratch; } - result = ipa3_stop_gsi_channel(clnt_hdl); - if (result != 0) { + if (ep->gsi_offload_state == (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { +retry_gsi_stop: + result = ipa3_stop_gsi_channel(clnt_hdl); if (result != 0 && result != -GSI_STATUS_AGAIN && - result != -GSI_STATUS_TIMED_OUT) { + result != -GSI_STATUS_TIMED_OUT) { IPAERR("GSI stop channel failed %d\n", - result); + result); + goto fail_stop_channel; + } else if (result == -GSI_STATUS_AGAIN) { + IPADBG("GSI stop channel failed retry cnt = %d\n", + retry_cnt); + retry_cnt++; + if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT) + goto fail_stop_channel; + goto retry_gsi_stop; + } else { + IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl); } - goto fail_stop_channel; - } - if (result == 0) { - IPAERR("GSI channel %ld STOP\n", - ep->gsi_chan_hdl); } gsi_scratch.wdi.qmap_id = qmap_id; result = gsi_write_channel_scratch(ep->gsi_chan_hdl, @@ -2543,10 +2581,13 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) result); goto fail_write_channel_scratch; } - result = gsi_start_channel(ep->gsi_chan_hdl); - if (result != GSI_STATUS_SUCCESS) { - IPAERR("gsi_start_channel failed %d\n", result); - goto fail_start_channel; + if (ep->gsi_offload_state == (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { + result = gsi_start_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) { + 
IPAERR("gsi_start_channel failed %d\n", result); + goto fail_start_channel; + } } return 0; fail_start_channel: diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 357a4e5c3cb9a6cb32d1f1d51906f47a7f6a58e6..bde01ad928ee3ac95a0fd15e3a25a008f118687c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -174,7 +174,6 @@ #define IPA_v4_2_DST_GROUP_MAX (1) #define IPA_v4_5_MHI_GROUP_PCIE (0) -#define IPA_v4_5_ETHERNET (0) #define IPA_v4_5_GROUP_UL_DL (1) #define IPA_v4_5_MHI_GROUP_DDR (1) #define IPA_v4_5_MHI_GROUP_DMA (2) @@ -462,14 +461,14 @@ static const struct rsrc_min_max ipa3_rsrc_dst_grp_config {1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, }, [IPA_4_5] = { - /* ETH UL/DL/DPL not used not used uC other are invalid */ + /* not-used UL/DL/DPL not-used not-used uC other are invalid */ [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { - {16, 16}, {5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + {0, 0}, {5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { - {2, 63}, {1, 63}, {0, 0}, {0, 0}, {0, 2}, {0, 0} }, + {0, 0}, {1, 63}, {0, 0}, {0, 0}, {0, 2}, {0, 0} }, }, [IPA_4_5_MHI] = { - /* PCIE/DPL DDR DMA QDSS uC other are invalid */ + /* PCIE/DPL DDR DMA/CV2X QDSS uC other are invalid */ [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { {16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} }, [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { @@ -2054,7 +2053,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping true, IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, - { 0, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 0, 11, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5][IPA_CLIENT_APPS_LAN_PROD] = { true, IPA_v4_5_GROUP_UL_DL, false, @@ -2066,13 +2065,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping true, IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, 
QMB_MASTER_SELECT_DDR, - { 2, 11, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + { 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, [IPA_4_5][IPA_CLIENT_APPS_CMD_PROD] = { true, IPA_v4_5_GROUP_UL_DL, false, IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, QMB_MASTER_SELECT_DDR, - { 7, 8, 20, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + { 7, 9, 20, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, [IPA_4_5][IPA_CLIENT_ODU_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, @@ -2080,7 +2079,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping QMB_MASTER_SELECT_DDR, { 1, 0, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5][IPA_CLIENT_ETHERNET_PROD] = { - true, IPA_v4_5_ETHERNET, + true, IPA_v4_5_GROUP_UL_DL, true, IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, @@ -2109,13 +2108,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping true, IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, QMB_MASTER_SELECT_DDR, - { 0, 10, 8, 16, IPA_EE_AP } }, + { 0, 11, 8, 16, IPA_EE_AP } }, [IPA_4_5][IPA_CLIENT_TEST1_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, QMB_MASTER_SELECT_DDR, - { 0, 10, 8, 16, IPA_EE_AP } }, + { 0, 11, 8, 16, IPA_EE_AP } }, [IPA_4_5][IPA_CLIENT_TEST2_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, @@ -2141,24 +2140,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, - [IPA_4_5][IPA_CLIENT_WLAN2_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 26, 18, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, - [IPA_4_5][IPA_CLIENT_WLAN3_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 27, 19, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5][IPA_CLIENT_USB_CONS] = { true, IPA_v4_5_GROUP_UL_DL, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, 
- { 25, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 25, 16, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5][IPA_CLIENT_USB_DPL_CONS] = { true, IPA_v4_5_GROUP_UL_DL, false, @@ -2170,7 +2157,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, - { 16, 9, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5][IPA_CLIENT_APPS_WAN_CONS] = { true, IPA_v4_5_GROUP_UL_DL, false, @@ -2182,9 +2169,9 @@ static const struct ipa_ep_configuration ipa3_ep_mapping false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, - { 23, 16, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + { 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5][IPA_CLIENT_ETHERNET_CONS] = { - true, IPA_v4_5_ETHERNET, + true, IPA_v4_5_GROUP_UL_DL, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, @@ -2244,13 +2231,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, - { 25, 17, 9, 9, IPA_EE_AP } }, + { 25, 16, 9, 9, IPA_EE_AP } }, [IPA_4_5][IPA_CLIENT_TEST4_CONS] = { true, IPA_v4_5_GROUP_UL_DL, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, - { 27, 19, 9, 9, IPA_EE_AP } }, + { 27, 18, 9, 9, IPA_EE_AP } }, /* Dummy consumer (pipe 31) is used in L2TP rt rule */ [IPA_4_5][IPA_CLIENT_DUMMY_CONS] = { true, IPA_v4_5_GROUP_UL_DL, @@ -2260,18 +2247,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping { 31, 31, 8, 8, IPA_EE_AP } }, /* IPA_4_5_MHI */ - [IPA_4_5_MHI][IPA_CLIENT_APPS_WAN_PROD] = { - true, IPA_v4_5_MHI_GROUP_DDR, - true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, - QMB_MASTER_SELECT_DDR, - { 2, 11, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, [IPA_4_5_MHI][IPA_CLIENT_APPS_CMD_PROD] = { true, IPA_v4_5_MHI_GROUP_DDR, false, IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, QMB_MASTER_SELECT_DDR, - { 7, 8, 20, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + { 7, 9, 
20, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, [IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_PROD] = { true, IPA_v4_5_MHI_GROUP_DDR, true, @@ -2290,6 +2271,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, QMB_MASTER_SELECT_DDR, { 8, 2, 24, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 5 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 6, 8, 16, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5_MHI][IPA_CLIENT_MHI_PROD] = { true, IPA_v4_5_MHI_GROUP_PCIE, true, @@ -2314,38 +2301,14 @@ static const struct ipa_ep_configuration ipa3_ep_mapping true, IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, QMB_MASTER_SELECT_DDR, - { 0, 10, 8, 16, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST1_PROD] = { - true, IPA_v4_5_GROUP_UL_DL, - true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, - QMB_MASTER_SELECT_DDR, - { 0, 10, 8, 16, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST2_PROD] = { - true, IPA_v4_5_GROUP_UL_DL, - true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, - QMB_MASTER_SELECT_DDR, - { 1, 0, 8, 16, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST3_PROD] = { - true, IPA_v4_5_GROUP_UL_DL, - true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, - QMB_MASTER_SELECT_DDR, - { 9, 12, 8, 16, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST4_PROD] = { - true, IPA_v4_5_GROUP_UL_DL, - true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, - QMB_MASTER_SELECT_DDR, - { 11, 13, 8, 16, IPA_EE_AP } }, + { 0, 11, 8, 16, IPA_EE_AP } }, [IPA_4_5_MHI][IPA_CLIENT_APPS_LAN_CONS] = { true, IPA_v4_5_MHI_GROUP_DDR, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, - { 16, 9, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5_MHI][IPA_CLIENT_Q6_LAN_CONS] = { true, IPA_v4_5_MHI_GROUP_DDR, false, @@ -2376,24 +2339,30 @@ 
static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 29, 9, 9, 9, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { true, IPA_v4_5_MHI_GROUP_DMA, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, - { 26, 18, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { true, IPA_v4_5_MHI_GROUP_DMA, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, - { 27, 19, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 27, 18, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5_MHI][IPA_CLIENT_MHI_CONS] = { true, IPA_v4_5_MHI_GROUP_PCIE, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, - { 14, 1, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 14, 1, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5_MHI][IPA_CLIENT_MHI_DPL_CONS] = { true, IPA_v4_5_MHI_GROUP_PCIE, false, @@ -2401,38 +2370,6 @@ static const struct ipa_ep_configuration ipa3_ep_mapping QMB_MASTER_SELECT_PCIE, { 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, - /* Only for test purpose */ - /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ - [IPA_4_5_MHI][IPA_CLIENT_TEST_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 14, 1, 9, 9, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST1_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 14, 1, 9, 9, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST2_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 24, 3, 8, 14, IPA_EE_AP } 
}, - [IPA_4_5_MHI][IPA_CLIENT_TEST3_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 25, 17, 9, 9, IPA_EE_AP } }, - [IPA_4_5_MHI][IPA_CLIENT_TEST4_CONS] = { - true, IPA_v4_5_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 27, 18, 9, 9, IPA_EE_AP } }, /* Dummy consumer (pipe 31) is used in L2TP rt rule */ [IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS] = { true, IPA_v4_5_GROUP_UL_DL, @@ -4136,6 +4073,7 @@ int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl, /* copy over EP cfg */ ep->cfg.hdr_ext = *ep_hdr_ext; + ep->cfg.hdr_ext.hdr = &ep->cfg.hdr; IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); @@ -4729,9 +4667,11 @@ int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md) ep_md_reg_wrt.qmap_id = qmap_id; ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl, &ep_md_reg_wrt); - ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; - ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, - &ipa3_ctx->ep[clnt_hdl].cfg.hdr); + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) { + ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, + &ipa3_ctx->ep[clnt_hdl].cfg.hdr); + } IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h index 1c825c55e8c5a846b623d55dc59a487d0046a706..eefdee527fe5456b4c44f2b08fec2e4a1bdadbcf 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h @@ -535,6 +535,7 @@ enum ipahal_pkt_status_nat_type { * the global flt tbl? (if not, then the per endp tables) * @flt_ret_hdr: Retain header in filter rule flag: Does matching flt rule * specifies to retain header? + * Starting IPA4.5, this will be true only if packet has L2 header. * @flt_miss: Filtering miss flag: Was their a filtering rule miss? 
* In case of miss, all flt info to be ignored * @rt_local: Route table location flag: Does matching rt rule belongs to diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h index 6010942de13713a8b3d6ffca941e078a7fce9ae6..214f6532e338bba5a40382bb1525f91fa63f4832 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h @@ -545,6 +545,7 @@ struct ipa_imm_cmd_hw_dma_task_32b_addr { * the global flt tbl? (if not, then the per endp tables) * @flt_ret_hdr: Retain header in filter rule flag: Does matching flt rule * specifies to retain header? + * Starting IPA4.5, this will be true only if packet has L2 header. * @flt_rule_id: The ID of the matching filter rule. This info can be combined * with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify * flt miss. In case of miss, all flt info to be ignored diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c index 0153b0694844ba7f9ec0df657f0ad1dfe2197b38..e35d62bc53d7bf5726a6dd932f56efb5a34d5539 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -25,7 +25,7 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = { __stringify(IPA_IRQ_STTS_EE_n), __stringify(IPA_IRQ_EN_EE_n), __stringify(IPA_IRQ_CLR_EE_n), - __stringify(IPA_IRQ_SUSPEND_INFO_EE_n), + __stringify(IPA_SUSPEND_IRQ_INFO_EE_n), __stringify(IPA_SUSPEND_IRQ_EN_EE_n), __stringify(IPA_SUSPEND_IRQ_CLR_EE_n), __stringify(IPA_HOLB_DROP_IRQ_INFO_EE_n), @@ -143,11 +143,7 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = { __stringify(IPA_FEC_ADDR_EE_n), __stringify(IPA_FEC_ADDR_MSB_EE_n), __stringify(IPA_FEC_ATTR_EE_n), - __stringify(IPA_MBIM_DEAGGR_FEC_ATTR_EE_n), - __stringify(IPA_GEN_DEAGGR_FEC_ATTR_EE_n), - __stringify(IPA_GSI_CONF), __stringify(IPA_ENDP_GSI_CFG1_n), - 
__stringify(IPA_ENDP_GSI_CFG2_n), __stringify(IPA_ENDP_GSI_CFG_AOS_n), __stringify(IPA_ENDP_GSI_CFG_TLV_n), }; @@ -1709,6 +1705,10 @@ static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg, { struct ipa_ep_cfg_aggr *ep_aggr = (struct ipa_ep_cfg_aggr *)fields; + u32 byte_limit; + u32 pkt_limit; + u32 max_byte_limit; + u32 max_pkt_limit; IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en, IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT, @@ -1718,7 +1718,12 @@ static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg, IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT, IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK); - IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit, + /* make sure aggregation byte limit does not cross HW boundaries */ + max_byte_limit = IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT; + byte_limit = (ep_aggr->aggr_byte_limit > max_byte_limit) ? + max_byte_limit : ep_aggr->aggr_byte_limit; + IPA_SETFIELD_IN_REG(*val, byte_limit, IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK); @@ -1727,7 +1732,12 @@ static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg, IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK); - IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit, + /* make sure aggregation pkt limit does not cross HW boundaries */ + max_pkt_limit = IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT; + pkt_limit = (ep_aggr->aggr_pkt_limit > max_pkt_limit) ? 
+ max_pkt_limit : ep_aggr->aggr_pkt_limit; + IPA_SETFIELD_IN_REG(*val, pkt_limit, IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK); @@ -1746,6 +1756,10 @@ static void ipareg_construct_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg, { struct ipa_ep_cfg_aggr *ep_aggr = (struct ipa_ep_cfg_aggr *)fields; + u32 byte_limit; + u32 pkt_limit; + u32 max_byte_limit; + u32 max_pkt_limit; IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en, IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5, @@ -1755,7 +1769,12 @@ static void ipareg_construct_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg, IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5, IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5); - IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit, + /* make sure aggregation byte limit does not cross HW boundaries */ + max_byte_limit = IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5 >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5; + byte_limit = (ep_aggr->aggr_byte_limit > max_byte_limit) ? + max_byte_limit : ep_aggr->aggr_byte_limit; + IPA_SETFIELD_IN_REG(*val, byte_limit, IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5, IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5); @@ -1763,6 +1782,11 @@ static void ipareg_construct_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg, IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5, IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5); + /* make sure aggregation pkt limit does not cross HW boundaries */ + max_pkt_limit = IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5 >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5; + pkt_limit = (ep_aggr->aggr_pkt_limit > max_pkt_limit) ? 
+ max_pkt_limit : ep_aggr->aggr_pkt_limit; IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit, IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5, IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5); @@ -1786,19 +1810,15 @@ static void ipareg_construct_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg, IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5); } - -static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg, - const void *fields, u32 *val) +static void ipareg_construct_endp_init_hdr_ext_n_common( + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 *val) { - struct ipa_ep_cfg_hdr_ext *ep_hdr_ext; u8 hdr_endianness; - ep_hdr_ext = (struct ipa_ep_cfg_hdr_ext *)fields; hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1; - IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, - IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0); + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK); IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT, @@ -1821,6 +1841,48 @@ static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg, IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK); } +static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + ipareg_construct_endp_init_hdr_ext_n_common(fields, val); +} + +static void ipareg_construct_endp_init_hdr_ext_n_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext = + (const struct ipa_ep_cfg_hdr_ext *)fields; + u32 msb; + + ipareg_construct_endp_init_hdr_ext_n_common(ep_hdr_ext, val); + + msb = ep_hdr_ext->hdr_total_len_or_pad_offset >> + hweight_long( + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK_v4_5); + + 
if (!ep_hdr_ext->hdr) { + IPAHAL_ERR("No header info, skipping it.\n"); + return; + } + + msb = ep_hdr_ext->hdr->hdr_ofst_pkt_size >> + hweight_long(IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK_v4_5); + + msb = ep_hdr_ext->hdr->hdr_additional_const_len >> + hweight_long( + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK_v4_5 + ); +} + static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg, const void *fields, u32 *val) { @@ -1829,12 +1891,12 @@ static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg, ep_hdr = (struct ipa_ep_cfg_hdr *)fields; IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid, - IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2, - IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2); + IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK); IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional, - IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2, - IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2); + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK); IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux, IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT, @@ -1865,6 +1927,59 @@ static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg, IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK); } +static void ipareg_construct_endp_init_hdr_n_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_hdr *ep_hdr; + u32 msb; + + ep_hdr = (struct ipa_ep_cfg_hdr *)fields; + + msb = ep_hdr->hdr_ofst_metadata >> + hweight_long(IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + 
IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK_v4_5); + + msb = ep_hdr->hdr_len >> + hweight_long(IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5); +} + static void ipareg_construct_route(enum ipahal_reg_name reg, const void *fields, u32 *val) { @@ -2229,7 +2344,7 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00003010, 0x1000, 0, 0, 0}, - [IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = { + 
[IPA_HW_v3_0][IPA_SUSPEND_IRQ_INFO_EE_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00003098, 0x1000, 0, 0, 0}, [IPA_HW_v3_0][IPA_BCR] = { @@ -2408,7 +2523,7 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { /* IPAv3.1 */ - [IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = { + [IPA_HW_v3_1][IPA_SUSPEND_IRQ_INFO_EE_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00003030, 0x1000, 0, 0, 0}, [IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = { @@ -2474,15 +2589,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v3_5][IPA_COUNTER_CFG] = { ipareg_construct_counter_cfg, ipareg_parse_counter_cfg, 0x000001F0, 0, 0, 0, 0}, - [IPA_HW_v3_5][IPA_GSI_CONF] = { - ipareg_construct_dummy, ipareg_parse_dummy, - 0x00002790, 0x0, 0, 0, 0 }, [IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00002794, 0x4, 0, 0, 0 }, - [IPA_HW_v3_5][IPA_ENDP_GSI_CFG2_n] = { - ipareg_construct_dummy, ipareg_parse_dummy, - 0x00002A2C, 0x4, 0, 0, 0 }, [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x000029A8, 0x4, 0, 0, 0 }, @@ -2494,7 +2603,7 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { 0x0001e1fc, 0, 0, 0, 0}, /* IPAv4.0 */ - [IPA_HW_v4_0][IPA_IRQ_SUSPEND_INFO_EE_n] = { + [IPA_HW_v4_0][IPA_SUSPEND_IRQ_INFO_EE_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00003030, 0x1000, 0, 1, 1}, [IPA_HW_v4_0][IPA_SUSPEND_IRQ_EN_EE_n] = { @@ -2705,12 +2814,6 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v4_0][IPA_FEC_ATTR_EE_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00003028, 0x1000, 0, 0, 1}, - [IPA_HW_v4_0][IPA_MBIM_DEAGGR_FEC_ATTR_EE_n] = { - ipareg_construct_dummy, ipareg_parse_dummy, - 0x00003028, 0x1000, 0, 0, 1}, - [IPA_HW_v4_0][IPA_GEN_DEAGGR_FEC_ATTR_EE_n] = { - ipareg_construct_dummy, ipareg_parse_dummy, - 0x00003028, 0x1000, 0, 0, 1}, [IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_INFO_EE_n] = { 
ipareg_construct_dummy, ipareg_parse_dummy, 0x0000303C, 0x1000, 0, 0, 1}, @@ -2931,6 +3034,23 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v4_5][IPA_SW_AREA_RAM_DIRECT_ACCESS_n] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x000010000, 0x4, 0, 0, 0}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_n] = { + ipareg_construct_endp_init_hdr_n_v4_5, ipareg_parse_dummy, + 0x00000810, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_EXT_n] = { + ipareg_construct_endp_init_hdr_ext_n_v4_5, ipareg_parse_dummy, + 0x00000814, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_METADATA_n] = { + ipareg_construct_endp_init_hdr_metadata_n, + ipareg_parse_dummy, + 0x0000081c, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = { + ipareg_construct_endp_init_hdr_metadata_mask_n, + ipareg_parse_dummy, + 0x00000818, 0x70, 13, 31, 1}, + [IPA_HW_v4_5][IPA_UC_MAILBOX_m_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00082000, 0x4, 0, 0, 0}, }; /* diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h index 68c82478b7f3d07a43c4bec4ef4f050f33edccef..ac7835ddafe37f2cb44fb77909cda750dd152594 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h @@ -26,7 +26,7 @@ enum ipahal_reg_name { IPA_IRQ_STTS_EE_n, IPA_IRQ_EN_EE_n, IPA_IRQ_CLR_EE_n, - IPA_IRQ_SUSPEND_INFO_EE_n, + IPA_SUSPEND_IRQ_INFO_EE_n, IPA_SUSPEND_IRQ_EN_EE_n, IPA_SUSPEND_IRQ_CLR_EE_n, IPA_HOLB_DROP_IRQ_INFO_EE_n, @@ -144,11 +144,7 @@ enum ipahal_reg_name { IPA_FEC_ADDR_EE_n, IPA_FEC_ADDR_MSB_EE_n, IPA_FEC_ATTR_EE_n, - IPA_MBIM_DEAGGR_FEC_ATTR_EE_n, - IPA_GEN_DEAGGR_FEC_ATTR_EE_n, - IPA_GSI_CONF, IPA_ENDP_GSI_CFG1_n, - IPA_ENDP_GSI_CFG2_n, IPA_ENDP_GSI_CFG_AOS_n, IPA_ENDP_GSI_CFG_TLV_n, IPA_REG_MAX, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h index 
8184c41f81ae93120ca2269aaba36972fade0db6..96203877662b7cd6c40d872de38ace369c3ff2bf 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h @@ -51,10 +51,31 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type); #define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14 #define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000 #define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a -#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000 -#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b -#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000 -#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK 0x8000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT 0x1b +#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK 0x10000000 +#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT 0x1c + +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5 0x3f +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT_v4_5 0x0 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK_v4_5 0x40 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT_v4_5 0x6 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT_v4_5 0x7 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5 0x1f80 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5 0x7e000 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT_v4_5 0xd +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK_v4_5 0x80000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT_v4_5 0x13 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5 0x3f00000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT_v4_5 0x14 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK_v4_5 0x4000000 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT_v4_5 0x1a +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v4_5 0x8000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v4_5 0x1b +#define 
IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK_v4_5 0x30000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT_v4_5 0x1c +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK_v4_5 0xc0000000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT_v4_5 0x1e /* IPA_ENDP_INIT_HDR_EXT_n register */ #define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1 @@ -68,7 +89,14 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type); #define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0 #define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4 #define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa -#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK 0x3c00 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT_v4_5 0x10 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK_v4_5 \ + 0x30000 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT_v4_5 0x12 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK_v4_5 0xC0000 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT_v4_5 0x14 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK_v4_5 0x300000 /* IPA_ENDP_INIT_AGGR_n register */ #define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000 diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index f8ae387a4c3ee1860388160b41f3cf40990ed65e..8b362cc525944e9b67730bc84d6ac09928c39c95 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1377,8 +1377,16 @@ static int handle3_egress_format(struct net_device *dev, { int rc; struct ipa_sys_connect_params *ipa_wan_ep_cfg; + int ep_idx; IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n"); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (ep_idx == IPA_EP_NOT_ALLOCATED) { + IPAWANDBG("Embedded datapath not supported\n"); + return -EFAULT; 
+ } + ipa_wan_ep_cfg = &rmnet_ipa3_ctx->apps_to_ipa_ep_cfg; if ((e->u.data) & RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) { ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8; @@ -1492,7 +1500,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) uint32_t mux_id; int8_t *v_name; struct mutex *mux_mutex_ptr; - int wan_cons_ep; + int wan_ep; IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd); switch (cmd) { @@ -1618,17 +1626,23 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Endpoint pair */ case RMNET_IOCTL_GET_EP_PAIR: IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n"); - wan_cons_ep = - ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); - if (wan_cons_ep == IPA_EP_NOT_ALLOCATED) { + wan_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (wan_ep == IPA_EP_NOT_ALLOCATED) { IPAWANERR("Embedded datapath not supported\n"); rc = -EFAULT; break; } - ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num = - ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num = - wan_cons_ep; + wan_ep; + + wan_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (wan_ep == IPA_EP_NOT_ALLOCATED) { + IPAWANERR("Embedded datapath not supported\n"); + rc = -EFAULT; + break; + } + ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num = + wan_ep; if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, &ext_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) @@ -1736,7 +1750,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) GFP_KERNEL); if (!wan_msg) return -ENOMEM; - + ext_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0'; len = sizeof(wan_msg->upstream_ifname) > sizeof(ext_ioctl_data.u.if_name) ? 
sizeof(ext_ioctl_data.u.if_name) : @@ -3113,7 +3127,8 @@ static int rmnet_ipa3_query_tethering_stats_wifi( IPAWANERR("can't get ipa3_get_wlan_stats\n"); kfree(sap_stats); return rc; - } else if (reset) { + } else if (data == NULL) { + IPAWANDBG("only reset wlan stats\n"); kfree(sap_stats); return 0; } @@ -3180,6 +3195,7 @@ static int rmnet_ipa3_query_tethering_stats_modem( kfree(resp); return rc; } else if (data == NULL) { + IPAWANDBG("only reset modem stats\n"); kfree(req); kfree(resp); return 0; @@ -3485,11 +3501,8 @@ int rmnet_ipa3_query_tethering_stats_all( int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) { enum ipa_upstream_type upstream_type; - struct wan_ioctl_query_tether_stats tether_stats; int rc = 0; - memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); - /* prevent string buffer overflows */ data->upstreamIface[IFNAMSIZ-1] = '\0'; @@ -3510,7 +3523,7 @@ int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) } else { IPAWANERR(" reset modem-backhaul stats\n"); rc = rmnet_ipa3_query_tethering_stats_modem( - &tether_stats, true); + NULL, true); if (rc) { IPAWANERR("reset MODEM stats failed\n"); return rc; diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c index 8fdb5687be54abd1e13b516389b07030556ab7d1..929242a5e26863e356f5023446c257f028deac68 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -253,7 +253,7 @@ static long ipa3_wan_ioctl(struct file *filp, (struct wan_ioctl_set_data_quota *)param); if (rc != 0) { IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n"); - if (retval == -ENODEV) + if (rc == -ENODEV) retval = -ENODEV; else retval = -EFAULT; diff --git a/drivers/platform/msm/ipa/test/ipa_pm_ut.c b/drivers/platform/msm/ipa/test/ipa_pm_ut.c index 348655749c7bc1b6ae1c55cd43ccf5cc0cc171be..305d199dfc25f8e1a9d309c5a910b2c6d1c4f707 100644 --- 
a/drivers/platform/msm/ipa/test/ipa_pm_ut.c +++ b/drivers/platform/msm/ipa/test/ipa_pm_ut.c @@ -24,12 +24,21 @@ struct callback_param { static int ipa_pm_ut_setup(void **ppriv) { int i; + int vote; IPA_UT_DBG("Start Setup\n"); /* decrement UT vote */ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT"); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote) { + IPA_UT_ERR("clock vote is not zero %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("clock is voted"); + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT"); + return -EINVAL; + } + /*decouple PM from RPM */ ipa3_ctx->enable_clock_scaling = false; @@ -112,7 +121,7 @@ static int clean_up(int n, ...) } -/* test 1.1 */ +/* test 1 */ static int ipa_pm_ut_single_registration(void *priv) { int rc = 0; @@ -156,7 +165,8 @@ static int ipa_pm_ut_single_registration(void *priv) return -EFAULT; } - if (!wait_for_completion_timeout(&user_data.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; @@ -177,8 +187,8 @@ static int ipa_pm_ut_single_registration(void *priv) rc = ipa_pm_deregister(hdl); if (rc == 0) { - IPA_UT_ERR("deregister was not unsuccesful - rc = %d\n", rc); - IPA_UT_TEST_FAIL_REPORT("deregister was not unsuccesful"); + IPA_UT_ERR("deregister succeeded while it should not\n"); + IPA_UT_TEST_FAIL_REPORT("deregister should not succeed"); return -EFAULT; } @@ -197,7 +207,7 @@ static int ipa_pm_ut_single_registration(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote != 0) { @@ -215,8 +225,8 @@ static int ipa_pm_ut_single_registration(void *priv) rc = ipa_pm_activate(hdl); if (rc == 0) { - IPA_UT_ERR("activate was not unsuccesful- rc = %d\n", rc); - IPA_UT_TEST_FAIL_REPORT("activate was not unsuccesful"); + IPA_UT_ERR("activate succeeded while it should not\n"); + 
IPA_UT_TEST_FAIL_REPORT("activate should not succeed"); return -EFAULT; } @@ -229,7 +239,7 @@ static int ipa_pm_ut_single_registration(void *priv) return 0; } -/* test 1.1 */ +/* test 2 */ static int ipa_pm_ut_double_register_activate(void *priv) { int rc = 0; @@ -280,14 +290,16 @@ static int ipa_pm_ut_double_register_activate(void *priv) return -EFAULT; } + /* It is possible that previous activation already completed. */ rc = ipa_pm_activate(hdl); - if (rc != -EINPROGRESS) { - IPA_UT_ERR("fail to do nothing - rc = %d\n", rc); - IPA_UT_TEST_FAIL_REPORT("do nothing failed"); + if (rc != -EINPROGRESS && rc != 0) { + IPA_UT_ERR("second time activation failed - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("second time activation failed"); return -EFAULT; } - if (!wait_for_completion_timeout(&user_data.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; @@ -306,7 +318,7 @@ static int ipa_pm_ut_double_register_activate(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote != 1) { @@ -333,7 +345,7 @@ static int ipa_pm_ut_double_register_activate(void *priv) return rc; } -/* test 2 */ +/* test 3 */ static int ipa_pm_ut_deferred_deactivate(void *priv) { int rc = 0; @@ -377,7 +389,8 @@ static int ipa_pm_ut_deferred_deactivate(void *priv) return -EFAULT; } - if (!wait_for_completion_timeout(&user_data.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; @@ -411,7 +424,7 @@ static int ipa_pm_ut_deferred_deactivate(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote != 1) { @@ -439,7 +452,7 
@@ static int ipa_pm_ut_deferred_deactivate(void *priv) } -/*test 3*/ +/* test 4 */ static int ipa_pm_ut_two_clients_activate(void *priv) { int rc = 0; @@ -447,7 +460,7 @@ static int ipa_pm_ut_two_clients_activate(void *priv) u32 pipes; struct callback_param user_data_USB; struct callback_param user_data_WLAN; - + bool wait_for_completion; struct ipa_pm_init_params init_params = { .threshold_size = 2, @@ -512,7 +525,7 @@ static int ipa_pm_ut_two_clients_activate(void *priv) } rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN, - IPA_CLIENT_WLAN2_CONS); + IPA_CLIENT_USB_DPL_CONS); if (rc) { IPA_UT_ERR("fail to map client 2 to multiplt pipes rc = %d\n", rc); @@ -527,14 +540,19 @@ static int ipa_pm_ut_two_clients_activate(void *priv) return -EFAULT; } + /* It could be that USB enabled clocks so WLAN will be activated + * without delay. + */ rc = ipa_pm_activate(hdl_WLAN); - if (rc != -EINPROGRESS) { - IPA_UT_ERR("fail to queue work for client 2 - rc = %d\n", rc); - IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + if (rc != -EINPROGRESS && rc != 0) { + IPA_UT_ERR("failed to activate WLAN - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("failed to activate WLAN"); return -EFAULT; } + wait_for_completion = !rc ? 
false : true; - if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data_USB.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback 1\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; @@ -546,13 +564,17 @@ static int ipa_pm_ut_two_clients_activate(void *priv) return -EFAULT; } - if (!wait_for_completion_timeout(&user_data_WLAN.complete, HZ)) { + if (wait_for_completion && + !wait_for_completion_timeout(&user_data_WLAN.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback 2\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; } - if (user_data_WLAN.evt != IPA_PM_CLIENT_ACTIVATED) { + /* In case WLAN activated immediately, there will be no event */ + if (wait_for_completion && + user_data_WLAN.evt != IPA_PM_CLIENT_ACTIVATED) { IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt); IPA_UT_TEST_FAIL_REPORT("wrong callback called"); return -EFAULT; @@ -576,7 +598,7 @@ static int ipa_pm_ut_two_clients_activate(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); rc = ipa_pm_activate(hdl_USB); if (rc) { @@ -587,13 +609,14 @@ static int ipa_pm_ut_two_clients_activate(void *priv) pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS); pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); - pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS); + pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS); IPA_UT_DBG("pipes = %d\n", pipes); rc = ipa_pm_handle_suspend(pipes); - if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data_USB.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for wakeup_callback 1\n"); IPA_UT_TEST_FAIL_REPORT("wakeup callback not called"); return -ETIME; @@ -605,7 +628,8 @@ static int ipa_pm_ut_two_clients_activate(void *priv) return -EFAULT; } - if 
(!wait_for_completion_timeout(&user_data_WLAN.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data_WLAN.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for wakeup_callback 2\n"); IPA_UT_TEST_FAIL_REPORT("wakeup callback not called"); return -ETIME; @@ -637,7 +661,8 @@ static int ipa_pm_ut_two_clients_activate(void *priv) rc = ipa_pm_handle_suspend(pipes); - if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data_USB.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for wakeup_callback 1\n"); IPA_UT_TEST_FAIL_REPORT("wakeup callback not called"); return -ETIME; @@ -653,7 +678,7 @@ static int ipa_pm_ut_two_clients_activate(void *priv) return rc; } -/* test 4 */ +/* test 5 */ static int ipa_pm_ut_deactivate_all_deferred(void *priv) { @@ -726,7 +751,8 @@ static int ipa_pm_ut_deactivate_all_deferred(void *priv) return -EFAULT; } - if (!wait_for_completion_timeout(&user_data.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback 1\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; @@ -790,7 +816,7 @@ static int ipa_pm_ut_deactivate_all_deferred(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote != 1) { IPA_UT_ERR("clock vote is at %d\n", vote); @@ -854,7 +880,7 @@ static int ipa_pm_ut_deactivate_after_activate(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote) { IPA_UT_ERR("clock vote is at %d\n", vote); @@ -877,7 +903,7 @@ static int ipa_pm_ut_deactivate_after_activate(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote) { IPA_UT_ERR("clock vote is at %d\n", vote); @@ -941,7 +967,8 @@ static int ipa_pm_ut_atomic_activate(void 
*priv) } spin_unlock_irqrestore(&lock, flags); - if (!wait_for_completion_timeout(&user_data.complete, HZ)) { + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { IPA_UT_ERR("timeout waiting for activate_callback\n"); IPA_UT_TEST_FAIL_REPORT("activate callback not called"); return -ETIME; @@ -1046,7 +1073,7 @@ static int ipa_pm_ut_deactivate_loop(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote != 2) { IPA_UT_ERR("clock vote is at %d\n", vote); @@ -1089,7 +1116,7 @@ static int ipa_pm_ut_deactivate_loop(void *priv) } } - msleep(200); + msleep(2000); vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); if (vote != 1) { IPA_UT_ERR("clock vote is at %d\n", vote); @@ -1184,7 +1211,7 @@ static int ipa_pm_ut_set_perf_profile(void *priv) return -EFAULT; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 2) { IPA_UT_ERR("clock plan is at %d\n", idx); @@ -1313,7 +1340,7 @@ static int ipa_pm_ut_group_tput(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 1) { IPA_UT_ERR("clock plan is at %d\n", idx); @@ -1350,7 +1377,7 @@ static int ipa_pm_ut_group_tput(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 2) { IPA_UT_ERR("clock plan is at %d\n", idx); @@ -1480,7 +1507,7 @@ static int ipa_pm_ut_skip_clk_vote_tput(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 2) { IPA_UT_ERR("clock plan is at %d\n", idx); @@ -1517,7 +1544,7 @@ static int ipa_pm_ut_skip_clk_vote_tput(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 3) { IPA_UT_ERR("clock plan is at %d\n", idx); @@ -1647,7 +1674,7 @@ static int ipa_pm_ut_simple_exception(void *priv) return 
-EINVAL; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 2) { IPA_UT_ERR("clock plan is at %d\n", idx); @@ -1684,7 +1711,7 @@ static int ipa_pm_ut_simple_exception(void *priv) return -EINVAL; } - msleep(200); + msleep(2000); idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; if (idx != 3) { IPA_UT_ERR("clock plan is at %d\n", idx); diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c index f0109e39aa570c9482528e29e15b38f0006eb36c..6483fb6924e7602c4bcfeb4df153a08a2c9c99c2 100644 --- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c +++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c @@ -1571,7 +1571,7 @@ static int ipa_mhi_test_suspend(bool force, bool should_success) if (should_success) { if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_SUSPEND) { - IPA_UT_LOG("chstate is not suspend. ch %d chstate %s\n", + IPA_UT_LOG("chstate is not suspend! ch %d chstate %s\n", IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); IPA_UT_TEST_FAIL_REPORT("channel state not suspend"); @@ -1601,7 +1601,7 @@ static int ipa_mhi_test_suspend(bool force, bool should_success) if (should_success) { if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_SUSPEND) { - IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_UT_LOG("chstate is not suspend! 
ch %d chstate %s\n", IPA_MHI_TEST_FIRST_CHANNEL_ID, ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); IPA_UT_TEST_FAIL_REPORT("channel state not suspend"); @@ -2016,7 +2016,8 @@ static int ipa_mhi_test_suspend_host_wakeup(void) return rc; } - if (wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ) == 0) { + if (wait_for_completion_timeout(&mhi_test_wakeup_comp, + msecs_to_jiffies(3500)) == 0) { IPA_UT_LOG("timeout waiting for wakeup event\n"); IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event"); return -ETIME; diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c index 14c88122c80880f2bcfcbd0e57807d1f0663a2f1..0d148b58249b29eade251c96e2759d5aeb930fe6 100644 --- a/drivers/platform/msm/msm_11ad/msm_11ad.c +++ b/drivers/platform/msm/msm_11ad/msm_11ad.c @@ -464,6 +464,7 @@ static int msm_11ad_init_clocks(struct msm11ad_ctx *ctx) { int rc; struct device *dev = ctx->dev; + int rf_clk3_pin_idx; if (!of_property_read_bool(dev->of_node, "qcom,use-ext-clocks")) return 0; @@ -472,9 +473,14 @@ static int msm_11ad_init_clocks(struct msm11ad_ctx *ctx) if (rc) return rc; - rc = msm_11ad_init_clk(dev, &ctx->rf_clk3_pin, "rf_clk3_pin_clk"); - if (rc) - msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3); + rf_clk3_pin_idx = of_property_match_string(dev->of_node, "clock-names", + "rf_clk3_pin_clk"); + if (rf_clk3_pin_idx >= 0) { + rc = msm_11ad_init_clk(dev, &ctx->rf_clk3_pin, + "rf_clk3_pin_clk"); + if (rc) + msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3); + } return rc; } @@ -780,7 +786,7 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx) static int msm_11ad_ssr_shutdown(const struct subsys_desc *subsys, bool force_stop) { - pr_info("%s(%p,%d)\n", __func__, subsys, force_stop); + pr_info("%s(%pK,%d)\n", __func__, subsys, force_stop); /* nothing is done in shutdown. 
We do full recovery in powerup */ return 0; } @@ -791,7 +797,7 @@ static int msm_11ad_ssr_powerup(const struct subsys_desc *subsys) struct platform_device *pdev; struct msm11ad_ctx *ctx; - pr_info("%s(%p)\n", __func__, subsys); + pr_info("%s(%pK)\n", __func__, subsys); pdev = to_platform_device(subsys->dev); ctx = platform_get_drvdata(pdev); @@ -1185,12 +1191,12 @@ static int msm_11ad_probe(struct platform_device *pdev) msm_11ad_init_cpu_boost(ctx); /* report */ - dev_info(ctx->dev, "msm_11ad discovered. %p {\n" + dev_info(ctx->dev, "msm_11ad discovered. %pK {\n" " gpio_en = %d\n" " sleep_clk_en = %d\n" " rc_index = %d\n" " use_smmu = %d\n" - " pcidev = %p\n" + " pcidev = %pK\n" "}\n", ctx, ctx->gpio_en, ctx->sleep_clk_en, ctx->rc_index, ctx->use_smmu, ctx->pcidev); @@ -1227,7 +1233,7 @@ static int msm_11ad_remove(struct platform_device *pdev) msm_11ad_ssr_deinit(ctx); list_del(&ctx->list); - dev_info(ctx->dev, "%s: pdev %p pcidev %p\n", __func__, pdev, + dev_info(ctx->dev, "%s: pdev %pK pcidev %pK\n", __func__, pdev, ctx->pcidev); kfree(ctx->pristine_state); @@ -1490,7 +1496,7 @@ void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops, struct msm11ad_ctx *ctx = pcidev2ctx(pcidev); if (!ctx) { - pr_err("Context not found for pcidev %p\n", pcidev); + pr_err("Context not found for pcidev %pK\n", pcidev); return NULL; } diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c index 8eebc013ae6fd300cdcef7f5d982e385176950f5..7677a17c5d92c2f039696cff3b9146e7a468f37b 100644 --- a/drivers/platform/msm/seemp_core/seemp_logk.c +++ b/drivers/platform/msm/seemp_core/seemp_logk.c @@ -608,6 +608,9 @@ static int seemp_logk_rtic_thread(void *data) { struct el2_report_header_t *header; int i; + int last_incident_number; + + last_incident_number = 0; header = (struct el2_report_header_t *) el2_shared_mem; if (header->report_version < KP_EL2_REPORT_REVISION) @@ -618,26 +621,25 @@ static int seemp_logk_rtic_thread(void 
*data) report = el2_shared_mem + sizeof(struct el2_report_header_t); - - for (i = 0; i < report->actor_count; i++) { - struct el2_actor_data_t *actor; - - actor = el2_shared_mem + - sizeof(struct el2_report_header_t) + - sizeof(struct el2_actor_report_t) + - i * (sizeof(struct el2_actor_data_t)); - - seemp_logk_rtic(report->report_type, - actor->pid, - /* - * leave this empty until - * asset id is provided - */ - "", - report->asset_category, - report->response); + if (last_incident_number < report->num_incidents) { + for (i = 0; i < report->actor_count; i++) { + struct el2_actor_data_t *actor; + + actor = el2_shared_mem + + sizeof(struct el2_report_header_t) + + sizeof(struct el2_actor_report_t) + + i * (sizeof(struct el2_actor_data_t)); + + seemp_logk_rtic(report->report_type, + actor->pid, + report->asset_name, + report->asset_category, + report->response, + actor->name); + } } + last_incident_number = report->num_incidents; /* periodically check el2 report every second */ ssleep(1); } diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c index 88c8dc6e07768d2fd8429f1f961b2514fdf74078..66044ac593b71f292e1596e685183be828277b4c 100644 --- a/drivers/power/supply/qcom/battery.c +++ b/drivers/power/supply/qcom/battery.c @@ -45,6 +45,7 @@ #define USBIN_I_VOTER "USBIN_I_VOTER" #define PL_FCC_LOW_VOTER "PL_FCC_LOW_VOTER" #define ICL_LIMIT_VOTER "ICL_LIMIT_VOTER" +#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER" struct pl_data { int pl_mode; @@ -52,6 +53,7 @@ struct pl_data { int pl_min_icl_ua; int slave_pct; int slave_fcc_ua; + int main_fcc_ua; int restricted_current; bool restricted_charging_enabled; struct votable *fcc_votable; @@ -65,15 +67,25 @@ struct pl_data { struct work_struct pl_disable_forever_work; struct work_struct pl_taper_work; struct delayed_work pl_awake_work; + struct delayed_work fcc_stepper_work; bool taper_work_running; struct power_supply *main_psy; struct power_supply *pl_psy; struct power_supply *batt_psy; struct 
power_supply *usb_psy; + struct power_supply *dc_psy; int charge_type; int total_settled_ua; int pl_settled_ua; int pl_fcc_max; + int fcc_stepper_enable; + int main_step_fcc_dir; + int main_step_fcc_count; + int main_step_fcc_residual; + int parallel_step_fcc_dir; + int parallel_step_fcc_count; + int parallel_step_fcc_residual; + int step_fcc; u32 wa_flags; struct class qcom_batt_class; struct wakeup_source *pl_ws; @@ -111,6 +123,7 @@ enum { SLAVE_PCT, RESTRICT_CHG_ENABLE, RESTRICT_CHG_CURRENT, + FCC_STEPPING_IN_PROGRESS, }; /******* @@ -258,7 +271,6 @@ static void split_settled(struct pl_data *chip) chip->total_settled_ua = total_settled_ua; chip->pl_settled_ua = slave_ua; - } static ssize_t version_show(struct class *c, struct class_attribute *attr, @@ -378,11 +390,26 @@ static ssize_t restrict_cur_store(struct class *c, struct class_attribute *attr, } static CLASS_ATTR_RW(restrict_cur); +/**************************** + * FCC STEPPING IN PROGRESS * + ****************************/ +static ssize_t fcc_stepping_in_progress_show(struct class *c, + struct class_attribute *attr, char *ubuf) +{ + struct pl_data *chip = container_of(c, struct pl_data, + qcom_batt_class); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->step_fcc); +} +static CLASS_ATTR_RO(fcc_stepping_in_progress); + static struct attribute *batt_class_attrs[] = { [VER] = &class_attr_version.attr, [SLAVE_PCT] = &class_attr_slave_pct.attr, [RESTRICT_CHG_ENABLE] = &class_attr_restrict_chg.attr, [RESTRICT_CHG_CURRENT] = &class_attr_restrict_cur.attr, + [FCC_STEPPING_IN_PROGRESS] + = &class_attr_fcc_stepping_in_progress.attr, NULL, }; ATTRIBUTE_GROUPS(batt_class); @@ -391,6 +418,10 @@ ATTRIBUTE_GROUPS(batt_class); * FCC * **********/ #define EFFICIENCY_PCT 80 +#define FCC_STEP_SIZE_UA 100000 +#define FCC_STEP_UPDATE_DELAY_MS 1000 +#define STEP_UP 1 +#define STEP_DOWN -1 static void get_fcc_split(struct pl_data *chip, int total_ua, int *master_ua, int *slave_ua) { @@ -443,6 +474,47 @@ static void 
get_fcc_split(struct pl_data *chip, int total_ua, *master_ua = max(0, total_ua - *slave_ua); } +static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua, + int parallel_fcc_ua) +{ + union power_supply_propval pval = {0, }; + int rc; + + /* Read current FCC of main charger */ + rc = power_supply_get_property(chip->main_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); + if (rc < 0) { + pr_err("Couldn't get main charger current fcc, rc=%d\n", rc); + return; + } + chip->main_fcc_ua = pval.intval; + + chip->main_step_fcc_dir = (main_fcc_ua > pval.intval) ? + STEP_UP : STEP_DOWN; + chip->main_step_fcc_count = abs((main_fcc_ua - pval.intval) / + FCC_STEP_SIZE_UA); + chip->main_step_fcc_residual = (main_fcc_ua - pval.intval) % + FCC_STEP_SIZE_UA; + + chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ? + STEP_UP : STEP_DOWN; + chip->parallel_step_fcc_count = abs((parallel_fcc_ua - + chip->slave_fcc_ua) / FCC_STEP_SIZE_UA); + chip->parallel_step_fcc_residual = (parallel_fcc_ua - + chip->slave_fcc_ua) % FCC_STEP_SIZE_UA; + + if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual + || chip->main_step_fcc_count || chip->main_step_fcc_residual) + chip->step_fcc = 1; + + pr_debug("Main FCC Stepper parameters: main_step_direction: %d, main_step_count: %d, main_residual_fcc: %d\n", + chip->main_step_fcc_dir, chip->main_step_fcc_count, + chip->main_step_fcc_residual); + pr_debug("Parallel FCC Stepper parameters: parallel_step_direction: %d, parallel_step_count: %d, parallel_residual_fcc: %d\n", + chip->parallel_step_fcc_dir, chip->parallel_step_fcc_count, + chip->parallel_step_fcc_residual); +} + #define MINIMUM_PARALLEL_FCC_UA 500000 #define PL_TAPER_WORK_DELAY_MS 500 #define TAPER_RESIDUAL_PCT 90 @@ -562,6 +634,195 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data, return 0; } +static void fcc_stepper_work(struct work_struct *work) +{ + struct pl_data *chip = container_of(work, struct pl_data, + 
fcc_stepper_work.work); + union power_supply_propval pval = {0, }; + int reschedule_ms = 0, rc = 0, charger_present = 0; + int main_fcc = chip->main_fcc_ua; + int parallel_fcc = chip->slave_fcc_ua; + + /* Check whether USB is present or not */ + rc = power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + if (rc < 0) + pr_err("Couldn't get USB Present status, rc=%d\n", rc); + + charger_present = pval.intval; + + /*Check whether DC charger is present or not */ + if (!chip->dc_psy) + chip->dc_psy = power_supply_get_by_name("dc"); + if (chip->dc_psy) { + rc = power_supply_get_property(chip->dc_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + if (rc < 0) + pr_err("Couldn't get DC Present status, rc=%d\n", rc); + + charger_present |= pval.intval; + } + + /* + * If USB is not present, then set parallel FCC to min value and + * main FCC to the effective value of FCC votable and exit. + */ + if (!charger_present) { + /* Disable parallel */ + parallel_fcc = 0; + + if (chip->pl_psy) { + pval.intval = 1; + rc = power_supply_set_property(chip->pl_psy, + POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval); + if (rc < 0) { + pr_err("Couldn't change slave suspend state rc=%d\n", + rc); + goto out; + } + + chip->pl_disable = true; + power_supply_changed(chip->pl_psy); + } + + main_fcc = get_effective_result_locked(chip->fcc_votable); + pval.intval = main_fcc; + rc = power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); + if (rc < 0) { + pr_err("Couldn't set main charger fcc, rc=%d\n", rc); + goto out; + } + + goto stepper_exit; + } + + if (chip->main_step_fcc_count) { + main_fcc += (FCC_STEP_SIZE_UA * chip->main_step_fcc_dir); + chip->main_step_fcc_count--; + reschedule_ms = FCC_STEP_UPDATE_DELAY_MS; + } else if (chip->main_step_fcc_residual) { + main_fcc += chip->main_step_fcc_residual; + chip->main_step_fcc_residual = 0; + } + + if (chip->parallel_step_fcc_count) { + parallel_fcc += (FCC_STEP_SIZE_UA * + 
chip->parallel_step_fcc_dir); + chip->parallel_step_fcc_count--; + reschedule_ms = FCC_STEP_UPDATE_DELAY_MS; + } else if (chip->parallel_step_fcc_residual) { + parallel_fcc += chip->parallel_step_fcc_residual; + chip->parallel_step_fcc_residual = 0; + } + + if (parallel_fcc < chip->slave_fcc_ua) { + /* Set parallel FCC */ + if (chip->pl_psy && !chip->pl_disable) { + if (parallel_fcc < MINIMUM_PARALLEL_FCC_UA) { + pval.intval = 1; + rc = power_supply_set_property(chip->pl_psy, + POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval); + if (rc < 0) { + pr_err("Couldn't change slave suspend state rc=%d\n", + rc); + goto out; + } + + if (IS_USBIN(chip->pl_mode)) + split_settled(chip); + + parallel_fcc = 0; + chip->parallel_step_fcc_count = 0; + chip->parallel_step_fcc_residual = 0; + chip->total_settled_ua = 0; + chip->pl_settled_ua = 0; + chip->pl_disable = true; + power_supply_changed(chip->pl_psy); + } else { + /* Set Parallel FCC */ + pval.intval = parallel_fcc; + rc = power_supply_set_property(chip->pl_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + &pval); + if (rc < 0) { + pr_err("Couldn't set parallel charger fcc, rc=%d\n", + rc); + goto out; + } + } + } + + /* Set main FCC */ + pval.intval = main_fcc; + rc = power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); + if (rc < 0) { + pr_err("Couldn't set main charger fcc, rc=%d\n", rc); + goto out; + } + } else { + /* Set main FCC */ + pval.intval = main_fcc; + rc = power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); + if (rc < 0) { + pr_err("Couldn't set main charger fcc, rc=%d\n", rc); + goto out; + } + + /* Set parallel FCC */ + if (chip->pl_psy) { + pval.intval = parallel_fcc; + rc = power_supply_set_property(chip->pl_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + &pval); + if (rc < 0) { + pr_err("Couldn't set parallel charger fcc, rc=%d\n", + rc); + goto out; + } + + /* + * Enable parallel charger only if it was 
disabled + * earlier and configured slave fcc is greater than or + * equal to minimum parallel FCC value. + */ + if (chip->pl_disable && parallel_fcc + >= MINIMUM_PARALLEL_FCC_UA) { + pval.intval = 0; + rc = power_supply_set_property(chip->pl_psy, + POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval); + if (rc < 0) { + pr_err("Couldn't change slave suspend state rc=%d\n", + rc); + goto out; + } + + if (IS_USBIN(chip->pl_mode)) + split_settled(chip); + + chip->pl_disable = false; + power_supply_changed(chip->pl_psy); + } + } + } + +stepper_exit: + chip->main_fcc_ua = main_fcc; + chip->slave_fcc_ua = parallel_fcc; + + if (reschedule_ms) { + schedule_delayed_work(&chip->fcc_stepper_work, + msecs_to_jiffies(reschedule_ms)); + pr_debug("Rescheduling FCC_STEPPER work\n"); + return; + } +out: + chip->step_fcc = 0; + vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0); +} + #define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000 static int pl_fv_vote_callback(struct votable *votable, void *data, int fv_uv, const char *client) @@ -700,6 +961,17 @@ static bool is_main_available(struct pl_data *chip) return !!chip->main_psy; } +static bool is_batt_available(struct pl_data *chip) +{ + if (!chip->batt_psy) + chip->batt_psy = power_supply_get_by_name("battery"); + + if (!chip->batt_psy) + return false; + + return true; +} + static int pl_disable_vote_callback(struct votable *votable, void *data, int pl_disable, const char *client) { @@ -712,6 +984,30 @@ static int pl_disable_vote_callback(struct votable *votable, if (!is_main_available(chip)) return -ENODEV; + if (!is_batt_available(chip)) + return -ENODEV; + + if (!chip->usb_psy) + chip->usb_psy = power_supply_get_by_name("usb"); + if (!chip->usb_psy) { + pr_err("Couldn't get usb psy\n"); + return -ENODEV; + } + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, &pval); + if (rc < 0) { + pr_err("Couldn't read FCC step update status, rc=%d\n", rc); + return rc; + } + chip->fcc_stepper_enable = pval.intval; 
+ pr_debug("FCC Stepper %s\n", pval.intval ? "enabled" : "disabled"); + + if (chip->fcc_stepper_enable) { + cancel_delayed_work_sync(&chip->fcc_stepper_work); + vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0); + } + total_fcc_ua = get_effective_result_locked(chip->fcc_votable); if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) { @@ -747,72 +1043,86 @@ static int pl_disable_vote_callback(struct votable *votable, get_fcc_split(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua); - /* - * If there is an increase in slave share - * (Also handles parallel enable case) - * Set Main ICL then slave FCC - * else - * (Also handles parallel disable case) - * Set slave ICL then main FCC. - */ - if (slave_fcc_ua > chip->slave_fcc_ua) { - pval.intval = master_fcc_ua; - rc = power_supply_set_property(chip->main_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, - &pval); - if (rc < 0) { - pr_err("Could not set main fcc, rc=%d\n", rc); - return rc; + if (chip->fcc_stepper_enable) { + get_fcc_stepper_params(chip, master_fcc_ua, + slave_fcc_ua); + if (chip->step_fcc) { + vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, + true, 0); + schedule_delayed_work(&chip->fcc_stepper_work, + 0); } + } else { + /* + * If there is an increase in slave share + * (Also handles parallel enable case) + * Set Main ICL then slave FCC + * else + * (Also handles parallel disable case) + * Set slave ICL then main FCC. 
+ */ + if (slave_fcc_ua > chip->slave_fcc_ua) { + pval.intval = master_fcc_ua; + rc = power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + &pval); + if (rc < 0) { + pr_err("Could not set main fcc, rc=%d\n", + rc); + return rc; + } - pval.intval = slave_fcc_ua; - rc = power_supply_set_property(chip->pl_psy, + pval.intval = slave_fcc_ua; + rc = power_supply_set_property(chip->pl_psy, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, - &pval); - if (rc < 0) { - pr_err("Couldn't set parallel fcc, rc=%d\n", + &pval); + if (rc < 0) { + pr_err("Couldn't set parallel fcc, rc=%d\n", rc); - return rc; - } + return rc; + } - chip->slave_fcc_ua = slave_fcc_ua; - } else { - pval.intval = slave_fcc_ua; - rc = power_supply_set_property(chip->pl_psy, + chip->slave_fcc_ua = slave_fcc_ua; + } else { + pval.intval = slave_fcc_ua; + rc = power_supply_set_property(chip->pl_psy, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, - &pval); - if (rc < 0) { - pr_err("Couldn't set parallel fcc, rc=%d\n", + &pval); + if (rc < 0) { + pr_err("Couldn't set parallel fcc, rc=%d\n", rc); - return rc; - } + return rc; + } - chip->slave_fcc_ua = slave_fcc_ua; + chip->slave_fcc_ua = slave_fcc_ua; - pval.intval = master_fcc_ua; - rc = power_supply_set_property(chip->main_psy, + pval.intval = master_fcc_ua; + rc = power_supply_set_property(chip->main_psy, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, - &pval); - if (rc < 0) { - pr_err("Could not set main fcc, rc=%d\n", rc); - return rc; + &pval); + if (rc < 0) { + pr_err("Could not set main fcc, rc=%d\n", + rc); + return rc; + } } - } - /* - * Enable will be called with a valid pl_psy always. The - * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy - * is seen. - */ - pval.intval = 0; - rc = power_supply_set_property(chip->pl_psy, + /* + * Enable will be called with a valid pl_psy always. The + * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy + * is seen. 
+ */ + pval.intval = 0; + rc = power_supply_set_property(chip->pl_psy, POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval); - if (rc < 0) - pr_err("Couldn't change slave suspend state rc=%d\n", - rc); + if (rc < 0) + pr_err("Couldn't change slave suspend state rc=%d\n", + rc); + + if (IS_USBIN(chip->pl_mode)) + split_settled(chip); + } - if (IS_USBIN(chip->pl_mode)) - split_settled(chip); /* * we could have been enabled while in taper mode, * start the taper work if so @@ -838,43 +1148,54 @@ static int pl_disable_vote_callback(struct votable *votable, (master_fcc_ua * 100) / total_fcc_ua, (slave_fcc_ua * 100) / total_fcc_ua); } else { - if (IS_USBIN(chip->pl_mode)) - split_settled(chip); + if (!chip->fcc_stepper_enable) { + if (IS_USBIN(chip->pl_mode)) + split_settled(chip); - /* pl_psy may be NULL while in the disable branch */ - if (chip->pl_psy) { - pval.intval = 1; - rc = power_supply_set_property(chip->pl_psy, + /* pl_psy may be NULL while in the disable branch */ + if (chip->pl_psy) { + pval.intval = 1; + rc = power_supply_set_property(chip->pl_psy, POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval); - if (rc < 0) - pr_err("Couldn't change slave suspend state rc=%d\n", - rc); - } + if (rc < 0) + pr_err("Couldn't change slave suspend state rc=%d\n", + rc); + } - /* main psy gets all share */ - pval.intval = total_fcc_ua; - rc = power_supply_set_property(chip->main_psy, + /* main psy gets all share */ + pval.intval = total_fcc_ua; + rc = power_supply_set_property(chip->main_psy, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - if (rc < 0) { - pr_err("Could not set main fcc, rc=%d\n", rc); - return rc; + if (rc < 0) { + pr_err("Could not set main fcc, rc=%d\n", rc); + return rc; + } + + /* reset parallel FCC */ + chip->slave_fcc_ua = 0; + chip->total_settled_ua = 0; + chip->pl_settled_ua = 0; + } else { + get_fcc_stepper_params(chip, total_fcc_ua, 0); + if (chip->step_fcc) { + vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, + true, 0); + 
schedule_delayed_work(&chip->fcc_stepper_work, + 0); + } } - /* reset parallel FCC */ - chip->slave_fcc_ua = 0; rerun_election(chip->fv_votable); cancel_delayed_work_sync(&chip->pl_awake_work); schedule_delayed_work(&chip->pl_awake_work, msecs_to_jiffies(5000)); - - chip->total_settled_ua = 0; - chip->pl_settled_ua = 0; } /* notify parallel state change */ - if (chip->pl_psy && (chip->pl_disable != pl_disable)) { + if (chip->pl_psy && (chip->pl_disable != pl_disable) + && !chip->fcc_stepper_enable) { power_supply_changed(chip->pl_psy); chip->pl_disable = (bool)pl_disable; } @@ -909,17 +1230,6 @@ static int pl_awake_vote_callback(struct votable *votable, return 0; } -static bool is_batt_available(struct pl_data *chip) -{ - if (!chip->batt_psy) - chip->batt_psy = power_supply_get_by_name("battery"); - - if (!chip->batt_psy) - return false; - - return true; -} - static bool is_parallel_available(struct pl_data *chip) { union power_supply_propval pval = {0, }; @@ -1083,6 +1393,7 @@ static void handle_settled_icl_change(struct pl_data *chip) else vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0); + rerun_election(chip->fcc_votable); if (IS_USBIN(chip->pl_mode)) { /* @@ -1331,6 +1642,7 @@ int qcom_batt_init(int smb_version) INIT_WORK(&chip->pl_taper_work, pl_taper_work); INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work); INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work); + INIT_DELAYED_WORK(&chip->fcc_stepper_work, fcc_stepper_work); rc = pl_register_notifier(chip); if (rc < 0) { @@ -1386,6 +1698,7 @@ void qcom_batt_deinit(void) cancel_work_sync(&chip->pl_taper_work); cancel_work_sync(&chip->pl_disable_forever_work); cancel_delayed_work_sync(&chip->pl_awake_work); + cancel_delayed_work_sync(&chip->fcc_stepper_work); power_supply_unreg_notifier(&chip->nb); destroy_votable(chip->pl_enable_votable_indirect); diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 
400c00aa2377cb921080a6c7da6816900c50fe6d..00d49ffa193a38e94c08be878b68814ebc10067b 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -76,6 +76,8 @@ #define ESR_FCC_VOTER "fg_esr_fcc" +#define FG_PARALLEL_EN_VOTER "fg_parallel_en" + #define BUCKET_COUNT 8 #define BUCKET_SOC_PCT (256 / BUCKET_COUNT) @@ -148,6 +150,7 @@ enum fg_irq_index { IMA_RDY_IRQ, FG_GEN3_IRQ_MAX, /* GEN4 FG_MEM_IF */ + MEM_ATTN_IRQ, DMA_XCP_IRQ, /* GEN4 FG_ADC_RR */ BATT_TEMP_COLD_IRQ, diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c index 08e803818daec826ca495c56641ff1e3d06b4553..42de6f5bf71f27e80519c0bf4fff8318fd286227 100644 --- a/drivers/power/supply/qcom/fg-memif.c +++ b/drivers/power/supply/qcom/fg-memif.c @@ -1257,15 +1257,16 @@ static int fg_dma_init(struct fg_dev *fg) return rc; } - /* Configure PEEK_MUX only for PM8150B v1.0 */ - if (fg->wa_flags & PM8150B_V1_DMA_WA) { - val = ALG_ACTIVE_PEEK_CFG; - rc = fg_write(fg, BATT_INFO_PEEK_MUX4(fg), &val, 1); - if (rc < 0) { - pr_err("failed to configure batt_info_peek_mux4 rc:%d\n", - rc); - return rc; - } + /* + * Configure PEEK_MUX for ALG active signal always for PM8150B. + * For v1.0, it is used for DMA workaround. For v2.0 onwards, it is + * used for ADC lockup workaround. 
+ */ + val = ALG_ACTIVE_PEEK_CFG; + rc = fg_write(fg, BATT_INFO_PEEK_MUX4(fg), &val, 1); + if (rc < 0) { + pr_err("failed to configure batt_info_peek_mux4 rc:%d\n", rc); + return rc; } return 0; @@ -1273,6 +1274,9 @@ static int fg_dma_init(struct fg_dev *fg) int fg_memif_init(struct fg_dev *fg) { + if (fg->battery_missing) + return 0; + if (fg->use_dma) return fg_dma_init(fg); diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index 9b34ab20de730d18beed6334e12883f126aaf4ba..4d7894c94267c715d716a38660ed74c2f2166a46 100644 --- a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -153,7 +153,7 @@ int fg_get_sram_prop(struct fg_dev *fg, enum fg_sram_param_id id, return -EINVAL; if (fg->battery_missing) - return -ENODATA; + return 0; rc = fg_sram_read(fg, fg->sp[id].addr_word, fg->sp[id].addr_byte, buf, fg->sp[id].len, FG_IMA_DEFAULT); @@ -353,25 +353,28 @@ void fg_notify_charger(struct fg_dev *fg) if (!fg->profile_available) return; - prop.intval = fg->bp.float_volt_uv; - rc = power_supply_set_property(fg->batt_psy, - POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop); - if (rc < 0) { - pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n", - rc); - return; + if (fg->bp.float_volt_uv > 0) { + prop.intval = fg->bp.float_volt_uv; + rc = power_supply_set_property(fg->batt_psy, + POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop); + if (rc < 0) { + pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n", + rc); + return; + } } - prop.intval = fg->bp.fastchg_curr_ma * 1000; - rc = power_supply_set_property(fg->batt_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop); - if (rc < 0) { - pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n", - rc); - return; + if (fg->bp.fastchg_curr_ma > 0) { + prop.intval = fg->bp.fastchg_curr_ma * 1000; + rc = power_supply_set_property(fg->batt_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + &prop); + if (rc < 0) { + 
pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n", + rc); + return; + } } - - fg_dbg(fg, FG_STATUS, "Notified charger on float voltage and FCC\n"); } bool batt_psy_initialized(struct fg_dev *fg) @@ -512,7 +515,7 @@ int fg_sram_write(struct fg_dev *fg, u16 address, u8 offset, return -ENXIO; if (fg->battery_missing) - return -ENODATA; + return 0; if (!fg_sram_address_valid(fg, address, len)) return -EFAULT; @@ -595,7 +598,7 @@ int fg_sram_read(struct fg_dev *fg, u16 address, u8 offset, return -ENXIO; if (fg->battery_missing) - return -ENODATA; + return 0; if (!fg_sram_address_valid(fg, address, len)) return -EFAULT; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 2cc7d5d1f12596b625b28b23a56102c37354d099..7eda9f50ebe494b158f2857e2b97712aa01c188e 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -78,7 +78,10 @@ #define SYS_TERM_CURR_OFFSET 0 #define VBATT_FULL_WORD 23 #define VBATT_FULL_OFFSET 0 +#define KI_COEFF_FULL_SOC_NORM_WORD 24 +#define KI_COEFF_FULL_SOC_NORM_OFFSET 1 #define KI_COEFF_LOW_DISCHG_WORD 25 +#define KI_COEFF_FULL_SOC_LOW_OFFSET 0 #define KI_COEFF_LOW_DISCHG_OFFSET 1 #define KI_COEFF_MED_DISCHG_WORD 26 #define KI_COEFF_MED_DISCHG_OFFSET 0 @@ -202,6 +205,7 @@ struct fg_dt_props { int ki_coeff_low_chg; int ki_coeff_med_chg; int ki_coeff_hi_chg; + int ki_coeff_full_soc_dischg[2]; int ki_coeff_soc[KI_COEFF_SOC_LEVELS]; int ki_coeff_low_dischg[KI_COEFF_SOC_LEVELS]; int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS]; @@ -218,11 +222,14 @@ struct fg_gen4_chip { struct votable *delta_esr_irq_en_votable; struct votable *pl_disable_votable; struct votable *cp_disable_votable; + struct votable *parallel_current_en_votable; struct work_struct esr_calib_work; struct alarm esr_fast_cal_timer; struct delayed_work pl_enable_work; + struct completion mem_attn; char batt_profile[PROFILE_LEN]; enum slope_limit_status slope_limit_sts; 
+ int ki_coeff_full_soc[2]; int delta_esr_count; int recharge_soc_thr; int esr_actual; @@ -233,6 +240,7 @@ struct fg_gen4_chip { bool esr_fast_calib; bool esr_fast_calib_done; bool esr_fast_cal_timer_expired; + bool esr_fast_calib_retry; bool esr_fcc_ctrl_en; bool rslow_low; bool rapid_soc_dec_en; @@ -317,6 +325,9 @@ static struct fg_sram_param pm8150b_v1_sram_params[] = { 1, 1000, 15625, 0, fg_encode_default, NULL), PARAM(DELTA_ESR_THR, DELTA_ESR_THR_WORD, DELTA_ESR_THR_OFFSET, 2, 1000, 61036, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_NORM_WORD, + KI_COEFF_FULL_SOC_NORM_OFFSET, 1, 1000, 61035, 0, + fg_encode_default, NULL), PARAM(KI_COEFF_LOW_DISCHG, KI_COEFF_LOW_DISCHG_WORD, KI_COEFF_LOW_DISCHG_OFFSET, 1, 1000, 61035, 0, fg_encode_default, NULL), @@ -351,8 +362,8 @@ static struct fg_sram_param pm8150b_v1_sram_params[] = { static struct fg_sram_param pm8150b_v2_sram_params[] = { PARAM(BATT_SOC, BATT_SOC_v2_WORD, BATT_SOC_v2_OFFSET, 4, 1, 1, 0, NULL, fg_decode_default), - PARAM(FULL_SOC, FULL_SOC_v2_WORD, FULL_SOC_v2_OFFSET, 2, 1, 1, 0, NULL, - fg_decode_default), + PARAM(FULL_SOC, FULL_SOC_v2_WORD, FULL_SOC_v2_OFFSET, 2, 1, 1, 0, + fg_encode_default, fg_decode_default), PARAM(MONOTONIC_SOC, MONOTONIC_SOC_v2_WORD, MONOTONIC_SOC_v2_OFFSET, 2, 1, 1, 0, NULL, fg_decode_default), PARAM(VOLTAGE_PRED, VOLTAGE_PRED_v2_WORD, VOLTAGE_PRED_v2_OFFSET, 2, @@ -402,6 +413,9 @@ static struct fg_sram_param pm8150b_v2_sram_params[] = { 1, 1000, 15625, 0, fg_encode_default, NULL), PARAM(DELTA_ESR_THR, DELTA_ESR_THR_WORD, DELTA_ESR_THR_OFFSET, 2, 1000, 61036, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_NORM_WORD, + KI_COEFF_FULL_SOC_NORM_OFFSET, 1, 1000, 61035, 0, + fg_encode_default, NULL), PARAM(KI_COEFF_LOW_DISCHG, KI_COEFF_LOW_DISCHG_WORD, KI_COEFF_LOW_DISCHG_OFFSET, 1, 1000, 61035, 0, fg_encode_default, NULL), @@ -999,6 +1013,55 @@ static void fg_gen4_update_rslow_coeff(struct fg_dev *fg, int batt_temp) } } +#define 
KI_COEFF_FULL_SOC_NORM_DEFAULT 733 +#define KI_COEFF_FULL_SOC_LOW_DEFAULT 184 +static int fg_gen4_adjust_ki_coeff_full_soc(struct fg_gen4_chip *chip, + int batt_temp) +{ + struct fg_dev *fg = &chip->fg; + int rc, ki_coeff_full_soc_norm, ki_coeff_full_soc_low; + u8 val; + + if (batt_temp < 0) { + ki_coeff_full_soc_norm = 0; + ki_coeff_full_soc_low = 0; + } else if (fg->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) { + ki_coeff_full_soc_norm = chip->dt.ki_coeff_full_soc_dischg[0]; + ki_coeff_full_soc_low = chip->dt.ki_coeff_full_soc_dischg[1]; + } else { + ki_coeff_full_soc_norm = KI_COEFF_FULL_SOC_NORM_DEFAULT; + ki_coeff_full_soc_low = KI_COEFF_FULL_SOC_LOW_DEFAULT; + } + + if (chip->ki_coeff_full_soc[0] == ki_coeff_full_soc_norm && + chip->ki_coeff_full_soc[1] == ki_coeff_full_soc_low) + return 0; + + fg_encode(fg->sp, FG_SRAM_KI_COEFF_FULL_SOC, ki_coeff_full_soc_norm, + &val); + rc = fg_sram_write(fg, KI_COEFF_FULL_SOC_NORM_WORD, + KI_COEFF_FULL_SOC_NORM_OFFSET, &val, 1, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_full_soc_norm, rc=%d\n", rc); + return rc; + } + + fg_encode(fg->sp, FG_SRAM_KI_COEFF_FULL_SOC, ki_coeff_full_soc_low, + &val); + rc = fg_sram_write(fg, KI_COEFF_LOW_DISCHG_WORD, + KI_COEFF_FULL_SOC_LOW_OFFSET, &val, 1, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_full_soc_low, rc=%d\n", rc); + return rc; + } + + chip->ki_coeff_full_soc[0] = ki_coeff_full_soc_norm; + chip->ki_coeff_full_soc[1] = ki_coeff_full_soc_low; + fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_full_soc [%d %d]\n", + ki_coeff_full_soc_norm, ki_coeff_full_soc_low); + return 0; +} + #define KI_COEFF_LOW_DISCHG_DEFAULT 428 #define KI_COEFF_MED_DISCHG_DEFAULT 245 #define KI_COEFF_HI_DISCHG_DEFAULT 123 @@ -2070,6 +2133,16 @@ static int fg_gen4_charge_full_update(struct fg_dev *fg) return rc; } +static void fg_gen4_parallel_current_config(struct fg_gen4_chip *chip) +{ + struct fg_dev *fg = &chip->fg; + bool input_present = is_input_present(fg), 
en; + + en = fg->charge_done ? false : input_present; + + vote(chip->parallel_current_en_votable, FG_PARALLEL_EN_VOTER, en, 0); +} + static int fg_gen4_esr_fcc_config(struct fg_gen4_chip *chip) { struct fg_dev *fg = &chip->fg; @@ -2151,8 +2224,185 @@ static int fg_gen4_esr_fcc_config(struct fg_gen4_chip *chip) return 0; } +static int fg_gen4_configure_esr_cal_soc(struct fg_dev *fg, int soc_min, + int soc_max) +{ + int rc; + u8 buf[2]; + + fg_encode(fg->sp, FG_SRAM_ESR_CAL_SOC_MIN, soc_min, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].addr_word, + fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_CAL_SOC_MIN, rc=%d\n", rc); + return rc; + } + + fg_encode(fg->sp, FG_SRAM_ESR_CAL_SOC_MAX, soc_max, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].addr_word, + fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_CAL_SOC_MAX, rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int fg_gen4_configure_esr_cal_temp(struct fg_dev *fg, int temp_min, + int temp_max) +{ + int rc; + u8 buf[2]; + + fg_encode(fg->sp, FG_SRAM_ESR_CAL_TEMP_MIN, temp_min, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].addr_word, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_CAL_TEMP_MIN, rc=%d\n", rc); + return rc; + } + + fg_encode(fg->sp, FG_SRAM_ESR_CAL_TEMP_MAX, temp_max, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].addr_word, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_CAL_TEMP_MAX, rc=%d\n", rc); + return rc; + } + + return 0; +} + +#define ESR_CAL_TEMP_MIN -127 +#define ESR_CAL_TEMP_MAX 127 +static int 
fg_gen4_esr_fast_calib_config(struct fg_gen4_chip *chip, bool en) +{ + struct fg_dev *fg = &chip->fg; + int rc, esr_timer_chg_init, esr_timer_chg_max, esr_timer_dischg_init, + esr_timer_dischg_max, esr_fast_cal_ms, esr_cal_soc_min, + esr_cal_soc_max, esr_cal_temp_min, esr_cal_temp_max; + u8 val, mask; + + esr_timer_chg_init = esr_timer_chg_max = -EINVAL; + esr_timer_dischg_init = esr_timer_dischg_max = -EINVAL; + if (en) { + esr_timer_chg_init = chip->dt.esr_timer_chg_fast[TIMER_RETRY]; + esr_timer_chg_max = chip->dt.esr_timer_chg_fast[TIMER_MAX]; + esr_timer_dischg_init = + chip->dt.esr_timer_dischg_fast[TIMER_RETRY]; + esr_timer_dischg_max = + chip->dt.esr_timer_dischg_fast[TIMER_MAX]; + + esr_cal_soc_min = 0; + esr_cal_soc_max = FULL_SOC_RAW; + esr_cal_temp_min = ESR_CAL_TEMP_MIN; + esr_cal_temp_max = ESR_CAL_TEMP_MAX; + + vote(chip->delta_esr_irq_en_votable, DELTA_ESR_IRQ_VOTER, + true, 0); + chip->esr_fast_calib_done = false; + } else { + chip->esr_fast_calib_done = true; + + esr_timer_chg_init = chip->dt.esr_timer_chg_slow[TIMER_RETRY]; + esr_timer_chg_max = chip->dt.esr_timer_chg_slow[TIMER_MAX]; + esr_timer_dischg_init = + chip->dt.esr_timer_dischg_slow[TIMER_RETRY]; + esr_timer_dischg_max = + chip->dt.esr_timer_dischg_slow[TIMER_MAX]; + + esr_cal_soc_min = chip->dt.esr_cal_soc_thresh[0]; + esr_cal_soc_max = chip->dt.esr_cal_soc_thresh[1]; + esr_cal_temp_min = chip->dt.esr_cal_temp_thresh[0]; + esr_cal_temp_max = chip->dt.esr_cal_temp_thresh[1]; + + vote(chip->delta_esr_irq_en_votable, DELTA_ESR_IRQ_VOTER, + false, 0); + } + + rc = fg_set_esr_timer(fg, esr_timer_chg_init, esr_timer_chg_max, true, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in setting ESR charge timer, rc=%d\n", + rc); + return rc; + } + + rc = fg_set_esr_timer(fg, esr_timer_dischg_init, esr_timer_dischg_max, + false, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in setting ESR discharge timer, rc=%d\n", + rc); + return rc; + } + + rc = fg_gen4_configure_esr_cal_soc(fg, 
esr_cal_soc_min, + esr_cal_soc_max); + if (rc < 0) { + pr_err("Error in configuring SOC thresholds, rc=%d\n", + rc); + return rc; + } + + rc = fg_gen4_configure_esr_cal_temp(fg, esr_cal_temp_min, + esr_cal_temp_max); + if (rc < 0) { + pr_err("Error in configuring temperature thresholds, rc=%d\n", + rc); + return rc; + } + + /* + * Disable ESR discharging timer and ESR pulsing during + * discharging when ESR fast calibration is disabled. Otherwise, keep + * it enabled so that ESR pulses can happen during discharging. + */ + val = en ? BIT(6) | BIT(7) : 0; + mask = BIT(6) | BIT(7); + rc = fg_sram_masked_write(fg, SYS_CONFIG_WORD, + SYS_CONFIG_OFFSET, mask, val, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing SYS_CONFIG_WORD, rc=%d\n", rc); + return rc; + } + + if (en) { + /* Set ESR fast calibration timer to 50 seconds as default */ + esr_fast_cal_ms = 50000; + if (chip->dt.esr_timer_chg_fast > 0 && + chip->dt.delta_esr_disable_count > 0) + esr_fast_cal_ms = 3 * chip->dt.delta_esr_disable_count * + chip->dt.esr_timer_chg_fast[TIMER_MAX] * 1000; + + alarm_start_relative(&chip->esr_fast_cal_timer, + ms_to_ktime(esr_fast_cal_ms)); + } else { + alarm_cancel(&chip->esr_fast_cal_timer); + } + + fg_dbg(fg, FG_STATUS, "%sabling ESR fast calibration\n", + en ? 
"En" : "Dis"); + return 0; +} + /* All irq handlers below this */ +static irqreturn_t fg_mem_attn_irq_handler(int irq, void *data) +{ + struct fg_dev *fg = data; + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); + + fg_dbg(fg, FG_IRQ, "irq %d triggered\n", irq); + complete_all(&chip->mem_attn); + + return IRQ_HANDLED; +} + static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data) { struct fg_dev *fg = data; @@ -2315,6 +2565,10 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in configuring slope limiter rc:%d\n", rc); + rc = fg_gen4_adjust_ki_coeff_full_soc(chip, batt_temp); + if (rc < 0) + pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", rc); + if (abs(fg->last_batt_temp - batt_temp) > 30) pr_warn("Battery temperature last:%d current: %d\n", fg->last_batt_temp, batt_temp); @@ -2410,6 +2664,22 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); + /* + * If ESR fast calibration is done without a delta ESR interrupt, then + * it is possibly a failed attempt. In such cases, retry ESR fast + * calibration once again. This will get restored to normal config once + * a delta ESR interrupt fires or the timer expires. 
+ */ + if (chip->esr_fast_calib && chip->esr_fast_calib_done && + !chip->delta_esr_count && !chip->esr_fast_calib_retry) { + rc = fg_gen4_esr_fast_calib_config(chip, true); + if (rc < 0) + pr_err("Error in configuring esr_fast_calib, rc=%d\n", + rc); + else + chip->esr_fast_calib_retry = true; + } + if (batt_psy_initialized(fg)) power_supply_changed(fg->batt_psy); @@ -2497,6 +2767,10 @@ static struct fg_irq_info fg_irqs[FG_GEN4_IRQ_MAX] = { .handler = fg_dummy_irq_handler, }, /* MEM_IF irqs */ + [MEM_ATTN_IRQ] = { + .name = "mem-attn", + .handler = fg_mem_attn_irq_handler, + }, [DMA_GRANT_IRQ] = { .name = "dma-grant", .handler = fg_dummy_irq_handler, @@ -2573,173 +2847,6 @@ static bool is_batt_empty(struct fg_dev *fg) return ((vbatt_uv < chip->dt.cutoff_volt_mv * 1000) ? true : false); } -static int fg_gen4_configure_esr_cal_soc(struct fg_dev *fg, int soc_min, - int soc_max) -{ - int rc; - u8 buf[2]; - - fg_encode(fg->sp, FG_SRAM_ESR_CAL_SOC_MIN, soc_min, buf); - rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].addr_word, - fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].addr_byte, buf, - fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing ESR_CAL_SOC_MIN, rc=%d\n", rc); - return rc; - } - - fg_encode(fg->sp, FG_SRAM_ESR_CAL_SOC_MAX, soc_max, buf); - rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].addr_word, - fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].addr_byte, buf, - fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing ESR_CAL_SOC_MAX, rc=%d\n", rc); - return rc; - } - - return 0; -} - -static int fg_gen4_configure_esr_cal_temp(struct fg_dev *fg, int temp_min, - int temp_max) -{ - int rc; - u8 buf[2]; - - fg_encode(fg->sp, FG_SRAM_ESR_CAL_TEMP_MIN, temp_min, buf); - rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].addr_word, - fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].addr_byte, buf, - fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing 
ESR_CAL_TEMP_MIN, rc=%d\n", rc); - return rc; - } - - fg_encode(fg->sp, FG_SRAM_ESR_CAL_TEMP_MAX, temp_max, buf); - rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].addr_word, - fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].addr_byte, buf, - fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing ESR_CAL_TEMP_MAX, rc=%d\n", rc); - return rc; - } - - return 0; -} - -#define ESR_CAL_TEMP_MIN -127 -#define ESR_CAL_TEMP_MAX 127 -static int fg_gen4_esr_fast_calib_config(struct fg_gen4_chip *chip, bool en) -{ - struct fg_dev *fg = &chip->fg; - int rc, esr_timer_chg_init, esr_timer_chg_max, esr_timer_dischg_init, - esr_timer_dischg_max, esr_fast_cal_ms, esr_cal_soc_min, - esr_cal_soc_max, esr_cal_temp_min, esr_cal_temp_max; - u8 val, mask; - - esr_timer_chg_init = esr_timer_chg_max = -EINVAL; - esr_timer_dischg_init = esr_timer_dischg_max = -EINVAL; - if (en) { - esr_timer_chg_init = chip->dt.esr_timer_chg_fast[TIMER_RETRY]; - esr_timer_chg_max = chip->dt.esr_timer_chg_fast[TIMER_MAX]; - esr_timer_dischg_init = - chip->dt.esr_timer_dischg_fast[TIMER_RETRY]; - esr_timer_dischg_max = - chip->dt.esr_timer_dischg_fast[TIMER_MAX]; - - esr_cal_soc_min = 0; - esr_cal_soc_max = FULL_SOC_RAW; - esr_cal_temp_min = ESR_CAL_TEMP_MIN; - esr_cal_temp_max = ESR_CAL_TEMP_MAX; - - vote(chip->delta_esr_irq_en_votable, DELTA_ESR_IRQ_VOTER, - true, 0); - chip->delta_esr_count = 0; - chip->esr_fast_calib_done = false; - } else { - chip->esr_fast_calib_done = true; - - esr_timer_chg_init = chip->dt.esr_timer_chg_slow[TIMER_RETRY]; - esr_timer_chg_max = chip->dt.esr_timer_chg_slow[TIMER_MAX]; - esr_timer_dischg_init = - chip->dt.esr_timer_dischg_slow[TIMER_RETRY]; - esr_timer_dischg_max = - chip->dt.esr_timer_dischg_slow[TIMER_MAX]; - - esr_cal_soc_min = chip->dt.esr_cal_soc_thresh[0]; - esr_cal_soc_max = chip->dt.esr_cal_soc_thresh[1]; - esr_cal_temp_min = chip->dt.esr_cal_temp_thresh[0]; - esr_cal_temp_max = chip->dt.esr_cal_temp_thresh[1]; - - 
vote(chip->delta_esr_irq_en_votable, DELTA_ESR_IRQ_VOTER, - false, 0); - } - - rc = fg_set_esr_timer(fg, esr_timer_chg_init, esr_timer_chg_max, true, - FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in setting ESR charge timer, rc=%d\n", - rc); - return rc; - } - - rc = fg_set_esr_timer(fg, esr_timer_dischg_init, esr_timer_dischg_max, - false, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in setting ESR discharge timer, rc=%d\n", - rc); - return rc; - } - - rc = fg_gen4_configure_esr_cal_soc(fg, esr_cal_soc_min, - esr_cal_soc_max); - if (rc < 0) { - pr_err("Error in configuring SOC thresholds, rc=%d\n", - rc); - return rc; - } - - rc = fg_gen4_configure_esr_cal_temp(fg, esr_cal_temp_min, - esr_cal_temp_max); - if (rc < 0) { - pr_err("Error in configuring temperature thresholds, rc=%d\n", - rc); - return rc; - } - - /* - * Disable ESR discharging timer and ESR pulsing during - * discharging when ESR fast calibration is disabled. Otherwise, keep - * it enabled so that ESR pulses can happen during discharging. - */ - val = en ? BIT(6) | BIT(7) : 0; - mask = BIT(6) | BIT(7); - rc = fg_sram_masked_write(fg, SYS_CONFIG_WORD, - SYS_CONFIG_OFFSET, mask, val, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing SYS_CONFIG_WORD, rc=%d\n", rc); - return rc; - } - - if (en) { - /* Set ESR fast calibration timer to 50 seconds as default */ - esr_fast_cal_ms = 50000; - if (chip->dt.esr_timer_chg_fast > 0 && - chip->dt.delta_esr_disable_count > 0) - esr_fast_cal_ms = 3 * chip->dt.delta_esr_disable_count * - chip->dt.esr_timer_chg_fast[TIMER_MAX] * 1000; - - alarm_start_relative(&chip->esr_fast_cal_timer, - ms_to_ktime(esr_fast_cal_ms)); - } else { - alarm_cancel(&chip->esr_fast_cal_timer); - } - - fg_dbg(fg, FG_STATUS, "%sabling ESR fast calibration\n", - en ? 
"En" : "Dis"); - return 0; -} - static enum alarmtimer_restart fg_esr_fast_cal_timer(struct alarm *alarm, ktime_t time) { @@ -2780,11 +2887,12 @@ static void esr_calib_work(struct work_struct *work) /* * If the number of delta ESR interrupts fired is more than the count - * to disable the interrupt OR ESR fast calibration timer is expired, - * disable ESR fast calibration. + * to disable the interrupt OR ESR fast calibration timer is expired + * OR after one retry, disable ESR fast calibration. */ - if (chip->delta_esr_count >= chip->dt.delta_esr_disable_count || - chip->esr_fast_cal_timer_expired) { + if ((chip->delta_esr_count >= chip->dt.delta_esr_disable_count) || + chip->esr_fast_cal_timer_expired || + (chip->esr_fast_calib_retry && chip->delta_esr_count > 0)) { rc = fg_gen4_esr_fast_calib_config(chip, false); if (rc < 0) pr_err("Error in configuring esr_fast_calib, rc=%d\n", @@ -2795,6 +2903,9 @@ static void esr_calib_work(struct work_struct *work) chip->esr_fast_cal_timer_expired = false; } + if (chip->esr_fast_calib_retry) + chip->esr_fast_calib_retry = false; + goto out; } @@ -2883,6 +2994,11 @@ static void status_change_work(struct work_struct *work) int rc, batt_soc, batt_temp; bool input_present, qnovo_en; + if (fg->battery_missing) { + pm_relax(fg->dev); + return; + } + if (!chip->pl_disable_votable) chip->pl_disable_votable = find_votable("PL_DISABLE"); @@ -2935,6 +3051,10 @@ static void status_change_work(struct work_struct *work) if (rc < 0) pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); + rc = fg_gen4_adjust_ki_coeff_full_soc(chip, batt_temp); + if (rc < 0) + pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", rc); + rc = fg_gen4_adjust_recharge_soc(chip); if (rc < 0) pr_err("Error in adjusting recharge SOC, rc=%d\n", rc); @@ -2943,6 +3063,8 @@ static void status_change_work(struct work_struct *work) if (rc < 0) pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc); + fg_gen4_parallel_current_config(chip); + ttf_update(chip->ttf, 
input_present); fg->prev_charge_status = fg->charge_status; out: @@ -3005,6 +3127,13 @@ static int fg_sram_dump_sysfs(const char *val, const struct kernel_param *kp) chip = power_supply_get_drvdata(bms_psy); fg = &chip->fg; + + power_supply_put(bms_psy); + if (fg->battery_missing) { + pr_warn("Battery is missing\n"); + return 0; + } + if (fg_sram_dump) schedule_delayed_work(&fg->sram_dump_work, msecs_to_jiffies(fg_sram_dump_period_ms)); @@ -3047,6 +3176,7 @@ static int fg_restart_sysfs(const char *val, const struct kernel_param *kp) chip = power_supply_get_drvdata(bms_psy); fg = &chip->fg; + power_supply_put(bms_psy); rc = fg_restart(fg, SOC_READY_WAIT_TIME_MS); if (rc < 0) { pr_err("Error in restarting FG, rc=%d\n", rc); @@ -3087,6 +3217,8 @@ static int fg_esr_fast_cal_sysfs(const char *val, const struct kernel_param *kp) } chip = power_supply_get_drvdata(bms_psy); + power_supply_put(bms_psy); + if (!chip) return -ENODEV; @@ -3400,6 +3532,60 @@ static int fg_gen4_ttf_awake_voter(void *data, bool val) return 0; } +static int fg_wait_for_mem_attn(struct fg_gen4_chip *chip) +{ + struct fg_dev *fg = &chip->fg; + int rc, retries = 2; + ktime_t now; + s64 time_us; + + reinit_completion(&chip->mem_attn); + now = ktime_get(); + + while (retries--) { + /* Wait for MEM_ATTN completion */ + rc = wait_for_completion_interruptible_timeout( + &chip->mem_attn, msecs_to_jiffies(1000)); + if (rc > 0) { + rc = 0; + break; + } else if (!rc) { + rc = -ETIMEDOUT; + } + } + + time_us = ktime_us_delta(ktime_get(), now); + if (rc < 0) + pr_err("wait for mem_attn timed out rc=%d\n", rc); + + fg_dbg(fg, FG_STATUS, "mem_attn wait time: %lld us\n", time_us); + return rc; +} + +static int fg_parallel_current_en_cb(struct votable *votable, void *data, + int enable, const char *client) +{ + struct fg_dev *fg = data; + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); + int rc; + u8 val, mask; + + /* Wait for MEM_ATTN interrupt */ + rc = fg_wait_for_mem_attn(chip); + if (rc 
< 0) + return rc; + + val = enable ? SMB_MEASURE_EN_BIT : 0; + mask = SMB_MEASURE_EN_BIT; + rc = fg_masked_write(fg, BATT_INFO_FG_CNV_CHAR_CFG(fg), mask, val); + if (rc < 0) + pr_err("Error in writing to 0x%04x, rc=%d\n", + BATT_INFO_FG_CNV_CHAR_CFG(fg), rc); + + fg_dbg(fg, FG_STATUS, "Parallel current summing: %d\n", enable); + return rc; +} + static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data, int enable, const char *client) { @@ -3525,14 +3711,18 @@ static int fg_gen4_hw_init(struct fg_gen4_chip *chip) int rc; u8 buf[4], val, mask; - /* Enable measurement of parallel charging current */ - val = mask = SMB_MEASURE_EN_BIT; - rc = fg_masked_write(fg, BATT_INFO_FG_CNV_CHAR_CFG(fg), mask, val); + rc = fg_read(fg, ADC_RR_INT_RT_STS(fg), &val, 1); if (rc < 0) { - pr_err("Error in writing to 0x%04x, rc=%d\n", - BATT_INFO_FG_CNV_CHAR_CFG(fg), rc); + pr_err("failed to read addr=0x%04x, rc=%d\n", + ADC_RR_INT_RT_STS(fg), rc); return rc; } + fg->battery_missing = (val & ADC_RR_BT_MISS_BIT); + + if (fg->battery_missing) { + pr_warn("Not initializing FG because of battery missing\n"); + return 0; + } fg_encode(fg->sp, FG_SRAM_CUTOFF_VOLT, chip->dt.cutoff_volt_mv, buf); rc = fg_sram_write(fg, fg->sp[FG_SRAM_CUTOFF_VOLT].addr_word, @@ -3860,6 +4050,22 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg) struct device_node *node = fg->dev->of_node; int rc, i; + if (of_find_property(node, "qcom,ki-coeff-full-dischg", NULL)) { + rc = fg_parse_dt_property_u32_array(node, + "qcom,ki-coeff-full-dischg", + chip->dt.ki_coeff_full_soc_dischg, 2); + if (rc < 0) + return rc; + + if (chip->dt.ki_coeff_full_soc_dischg[0] < 62 || + chip->dt.ki_coeff_full_soc_dischg[0] > 15564 || + chip->dt.ki_coeff_full_soc_dischg[1] < 62 || + chip->dt.ki_coeff_full_soc_dischg[1] > 15564) { + pr_err("Error in ki_coeff_full_soc_dischg values\n"); + return -EINVAL; + } + } + chip->dt.ki_coeff_low_chg = -EINVAL; of_property_read_u32(node, "qcom,ki-coeff-low-chg", 
&chip->dt.ki_coeff_low_chg); @@ -4293,6 +4499,9 @@ static void fg_gen4_cleanup(struct fg_gen4_chip *chip) if (chip->delta_esr_irq_en_votable) destroy_votable(chip->delta_esr_irq_en_votable); + if (chip->parallel_current_en_votable) + destroy_votable(chip->parallel_current_en_votable); + dev_set_drvdata(fg->dev, NULL); } @@ -4315,6 +4524,8 @@ static int fg_gen4_probe(struct platform_device *pdev) fg->prev_charge_status = -EINVAL; fg->online_status = -EINVAL; fg->batt_id_ohms = -EINVAL; + chip->ki_coeff_full_soc[0] = -EINVAL; + chip->ki_coeff_full_soc[1] = -EINVAL; fg->regmap = dev_get_regmap(fg->dev->parent, NULL); if (!fg->regmap) { dev_err(fg->dev, "Parent regmap is unavailable\n"); @@ -4326,6 +4537,7 @@ static int fg_gen4_probe(struct platform_device *pdev) mutex_init(&fg->charge_full_lock); init_completion(&fg->soc_update); init_completion(&fg->soc_ready); + init_completion(&chip->mem_attn); INIT_WORK(&fg->status_change_work, status_change_work); INIT_WORK(&chip->esr_calib_work, esr_calib_work); INIT_DELAYED_WORK(&fg->profile_load_work, profile_load_work); @@ -4359,6 +4571,15 @@ static int fg_gen4_probe(struct platform_device *pdev) goto exit; } + chip->parallel_current_en_votable = create_votable("FG_SMB_MEAS_EN", + VOTE_SET_ANY, + fg_parallel_current_en_cb, fg); + if (IS_ERR(chip->parallel_current_en_votable)) { + rc = PTR_ERR(chip->parallel_current_en_votable); + chip->parallel_current_en_votable = NULL; + goto exit; + } + rc = fg_alg_init(chip); if (rc < 0) { dev_err(fg->dev, "Error in alg_init, rc:%d\n", @@ -4457,7 +4678,8 @@ static int fg_gen4_probe(struct platform_device *pdev) } device_init_wakeup(fg->dev, true); - schedule_delayed_work(&fg->profile_load_work, 0); + if (!fg->battery_missing) + schedule_delayed_work(&fg->profile_load_work, 0); pr_debug("FG GEN4 driver probed successfully\n"); return 0; @@ -4519,6 +4741,9 @@ static int fg_gen4_suspend(struct device *dev) struct fg_gen4_chip *chip = dev_get_drvdata(dev); struct fg_dev *fg = &chip->fg; + if 
(fg->irqs[MEM_ATTN_IRQ].irq) + disable_irq_nosync(fg->irqs[MEM_ATTN_IRQ].irq); + cancel_delayed_work_sync(&chip->ttf->ttf_work); if (fg_sram_dump) cancel_delayed_work_sync(&fg->sram_dump_work); @@ -4530,6 +4755,9 @@ static int fg_gen4_resume(struct device *dev) struct fg_gen4_chip *chip = dev_get_drvdata(dev); struct fg_dev *fg = &chip->fg; + if (fg->irqs[MEM_ATTN_IRQ].irq) + enable_irq(fg->irqs[MEM_ATTN_IRQ].irq); + schedule_delayed_work(&chip->ttf->ttf_work, 0); if (fg_sram_dump) schedule_delayed_work(&fg->sram_dump_work, diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c index 47818ca011feb994d2b0942cb75b32fe808ec165..d0e9d05e76b585d7447bffa41127e2a2c14e8ef6 100644 --- a/drivers/power/supply/qcom/qpnp-qg.c +++ b/drivers/power/supply/qcom/qpnp-qg.c @@ -2890,6 +2890,8 @@ static int qg_hw_init(struct qpnp_qg *chip) static int qg_post_init(struct qpnp_qg *chip) { + u8 status = 0; + /* disable all IRQs if profile is not loaded */ if (!chip->profile_loaded) { vote(chip->vbatt_irq_disable_votable, @@ -2904,6 +2906,9 @@ static int qg_post_init(struct qpnp_qg *chip) if (!chip->dt.esr_disable) qg_retrieve_esr_params(chip); + /* read STATUS2 register to clear its last state */ + qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1); + return 0; } @@ -3392,6 +3397,9 @@ static int process_suspend(struct qpnp_qg *chip) chip->suspend_data = false; + /* read STATUS2 register to clear its last state */ + qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1); + /* ignore any suspend processing if we are charging */ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) { qg_dbg(chip, QG_DEBUG_PM, "Charging @ suspend - ignore processing\n"); @@ -3441,9 +3449,6 @@ static int process_suspend(struct qpnp_qg *chip) chip->suspend_data = true; } - /* read STATUS2 register to clear its last state */ - qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1); - qg_dbg(chip, QG_DEBUG_PM, "FIFO rt_length=%d sleep_fifo_length=%d 
default_s2_count=%d suspend_data=%d\n", fifo_rt_length, sleep_fifo_length, chip->dt.s2_fifo_length, chip->suspend_data); diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index 4df9147d49845be198b623f5247c69cf48e90c64..aa491b16c7599c22860155c58e967cc12280e8e9 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -289,6 +289,7 @@ static int smb5_chg_config_init(struct smb5 *chip) #define MICRO_1P5A 1500000 #define MICRO_P1A 100000 +#define MICRO_1PA 1000000 #define OTG_DEFAULT_DEGLITCH_TIME_MS 50 #define MIN_WD_BARK_TIME 16 #define DEFAULT_WD_BARK_TIME 64 @@ -348,7 +349,8 @@ static int smb5_parse_dt(struct smb5 *chip) rc = of_property_read_u32(node, "qcom,otg-cl-ua", &chg->otg_cl_ua); if (rc < 0) - chg->otg_cl_ua = MICRO_1P5A; + chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ? + MICRO_1PA : MICRO_1P5A; rc = of_property_read_u32(node, "qcom,chg-term-src", &chip->dt.term_current_src); @@ -438,6 +440,9 @@ static int smb5_parse_dt(struct smb5 *chip) if (rc < 0) chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS; + chg->fcc_stepper_enable = of_property_read_bool(node, + "qcom,fcc-stepping-enable"); + rc = of_property_match_string(node, "io-channel-names", "usb_in_voltage"); if (rc >= 0) { @@ -529,6 +534,7 @@ static enum power_supply_property smb5_usb_props[] = { POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, POWER_SUPPLY_PROP_TYPEC_SRC_RP, + POWER_SUPPLY_PROP_LOW_POWER, POWER_SUPPLY_PROP_PD_ACTIVE, POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, POWER_SUPPLY_PROP_INPUT_CURRENT_NOW, @@ -616,6 +622,12 @@ static int smb5_usb_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_TYPEC_SRC_RP: rc = smblib_get_prop_typec_select_rp(chg, val); break; + case POWER_SUPPLY_PROP_LOW_POWER: + if (chg->sink_src_mode == SRC_MODE) + rc = smblib_get_prop_low_power(chg, val); + else + rc = -ENODATA; + break; case POWER_SUPPLY_PROP_PD_ACTIVE: val->intval = 
chg->pd_active; break; @@ -1174,6 +1186,7 @@ static enum power_supply_property smb5_batt_props[] = { POWER_SUPPLY_PROP_RECHARGE_SOC, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_FORCE_RECHARGE, + POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, }; static int smb5_batt_get_prop(struct power_supply *psy, @@ -1261,7 +1274,7 @@ static int smb5_batt_get_prop(struct power_supply *psy, break; case POWER_SUPPLY_PROP_DIE_HEALTH: if (chg->die_health == -EINVAL) - rc = smblib_get_prop_die_health(chg, val); + val->intval = smblib_get_prop_die_health(chg); else val->intval = chg->die_health; break; @@ -1291,6 +1304,8 @@ static int smb5_batt_get_prop(struct power_supply *psy, break; case POWER_SUPPLY_PROP_FORCE_RECHARGE: val->intval = 0; + case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE: + val->intval = chg->fcc_stepper_enable; break; default: pr_err("batt power supply prop %d not supported\n", psp); @@ -1549,8 +1564,11 @@ static int smb5_configure_typec(struct smb_charger *chg) return rc; } - rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, - TYPEC_WATER_DETECTION_INT_EN_BIT); + rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, + TYPEC_SRC_BATT_HPWR_INT_EN_BIT | + TYPEC_WATER_DETECTION_INT_EN_BIT, + TYPEC_SRC_BATT_HPWR_INT_EN_BIT + | TYPEC_WATER_DETECTION_INT_EN_BIT); if (rc < 0) { dev_err(chg->dev, "Couldn't configure Type-C interrupts rc=%d\n", rc); @@ -1591,6 +1609,8 @@ static int smb5_configure_micro_usb(struct smb_charger *chg) { int rc; + /* For micro USB connector, use extcon by default */ + chg->use_extcon = true; chg->pd_not_supported = true; rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, @@ -1797,6 +1817,13 @@ static int smb5_init_hw(struct smb5 *chip) return rc; } + /* set OTG current limit */ + rc = smblib_set_charge_param(chg, &chg->param.otg_cl, chg->otg_cl_ua); + if (rc < 0) { + pr_err("Couldn't set otg current limit rc=%d\n", rc); + return rc; + } + /* vote 0mA on usb_icl for non battery platforms */ vote(chg->usb_icl_votable, DEFAULT_VOTER, 
chip->dt.no_battery, 0); @@ -1832,8 +1859,9 @@ static int smb5_init_hw(struct smb5 *chip) if (chg->smb_version != PMI632_SUBTYPE) { rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG, USBIN_AICL_PERIODIC_RERUN_EN_BIT - | USBIN_AICL_ADC_EN_BIT, - USBIN_AICL_PERIODIC_RERUN_EN_BIT); + | USBIN_AICL_ADC_EN_BIT | USBIN_AICL_EN_BIT, + USBIN_AICL_PERIODIC_RERUN_EN_BIT + | USBIN_AICL_EN_BIT); if (rc < 0) { dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc); return rc; @@ -1965,6 +1993,14 @@ static int smb5_init_hw(struct smb5 *chip) return rc; } + rc = smblib_write(chg, CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG, + FAST_CHARGE_SAFETY_TIMER_768_MIN); + if (rc < 0) { + dev_err(chg->dev, "Couldn't set CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG rc=%d\n", + rc); + return rc; + } + rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK, (chip->dt.auto_recharge_vbat_mv != -EINVAL) ? VBAT_BASED_RECHG_BIT : 0); @@ -2043,6 +2079,14 @@ static int smb5_init_hw(struct smb5 *chip) } } + rc = smblib_masked_write(chg, DCDC_ENG_SDCDC_CFG5_REG, + ENG_SDCDC_BAT_HPWR_MASK, BOOST_MODE_THRESH_3P6_V); + if (rc < 0) { + dev_err(chg->dev, "Couldn't configure DCDC_ENG_SDCDC_CFG5 rc=%d\n", + rc); + return rc; + } + return rc; } @@ -2597,19 +2641,25 @@ static int smb5_probe(struct platform_device *pdev) return -EINVAL; } - rc = smb5_parse_dt(chip); + rc = smb5_chg_config_init(chip); if (rc < 0) { - pr_err("Couldn't parse device tree rc=%d\n", rc); + if (rc != -EPROBE_DEFER) + pr_err("Couldn't setup chg_config rc=%d\n", rc); return rc; } - rc = smb5_chg_config_init(chip); + rc = smb5_parse_dt(chip); if (rc < 0) { - if (rc != -EPROBE_DEFER) - pr_err("Couldn't setup chg_config rc=%d\n", rc); + pr_err("Couldn't parse device tree rc=%d\n", rc); return rc; } + if (alarmtimer_get_rtcdev()) + alarm_init(&chg->lpd_recheck_timer, ALARM_REALTIME, + smblib_lpd_recheck_timer); + else + return -EPROBE_DEFER; + rc = smblib_init(chg); if (rc < 0) { pr_err("Smblib_init failed rc=%d\n", rc); @@ -2619,20 +2669,6 @@ 
static int smb5_probe(struct platform_device *pdev) /* set driver data before resources request it */ platform_set_drvdata(pdev, chip); - rc = smb5_init_vbus_regulator(chip); - if (rc < 0) { - pr_err("Couldn't initialize vbus regulator rc=%d\n", - rc); - goto cleanup; - } - - rc = smb5_init_vconn_regulator(chip); - if (rc < 0) { - pr_err("Couldn't initialize vconn regulator rc=%d\n", - rc); - goto cleanup; - } - /* extcon registration */ chg->extcon = devm_extcon_dev_allocate(chg->dev, smblib_extcon_cable); if (IS_ERR(chg->extcon)) { @@ -2655,6 +2691,30 @@ static int smb5_probe(struct platform_device *pdev) goto cleanup; } + /* + * VBUS regulator enablement/disablement for host mode is handled + * by USB-PD driver only. For micro-USB and non-PD typeC designs, + * the VBUS regulator is enabled/disabled by the smb driver itself + * before sending extcon notifications. + * Hence, register vbus and vconn regulators for PD supported designs + * only. + */ + if (!chg->pd_not_supported) { + rc = smb5_init_vbus_regulator(chip); + if (rc < 0) { + pr_err("Couldn't initialize vbus regulator rc=%d\n", + rc); + goto cleanup; + } + + rc = smb5_init_vconn_regulator(chip); + if (rc < 0) { + pr_err("Couldn't initialize vconn regulator rc=%d\n", + rc); + goto cleanup; + } + } + switch (chg->smb_version) { case PM8150B_SUBTYPE: case PM6150_SUBTYPE: diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c index 92e7fa6ec35036e757fd9cb2e3eeb895e7acc61f..df22dec9972b1aaa1783595e011bfa56448f5c1d 100644 --- a/drivers/power/supply/qcom/smb1390-charger.c +++ b/drivers/power/supply/qcom/smb1390-charger.c @@ -219,6 +219,32 @@ static irqreturn_t default_irq_handler(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t irev_irq_handler(int irq, void *data) +{ + struct smb1390 *chip = data; + int rc; + + pr_debug("IREV IRQ triggered\n"); + + rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, + CMD_EN_SWITCHER_BIT, 0); + if (rc < 0) { + 
pr_err("Couldn't disable switcher by command mode\n"); + goto out; + } + + rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, + CMD_EN_SWITCHER_BIT, 1); + if (rc < 0) { + pr_err("Couldn't enable switcher by command mode\n"); + goto out; + } + +out: + kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); + return IRQ_HANDLED; +} + static const struct smb_irq smb_irqs[] = { [SWITCHER_OFF_WINDOW_IRQ] = { .name = "switcher-off-window", @@ -237,7 +263,7 @@ static const struct smb_irq smb_irqs[] = { }, [IREV_IRQ] = { .name = "irev-fault", - .handler = default_irq_handler, + .handler = irev_irq_handler, .wake = true, }, [VPH_OV_HARD_IRQ] = { diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 4382b83dd6faa14282d324c368d6c1c7cbf2c611..943839c7afdba9d58dbdc231d9b74e4fca83af9a 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -224,8 +224,30 @@ static void smblib_notify_device_mode(struct smb_charger *chg, bool enable) static void smblib_notify_usb_host(struct smb_charger *chg, bool enable) { - if (enable) + int rc = 0; + + if (enable) { + smblib_dbg(chg, PR_OTG, "enabling VBUS in OTG mode\n"); + rc = smblib_masked_write(chg, DCDC_CMD_OTG_REG, + OTG_EN_BIT, OTG_EN_BIT); + if (rc < 0) { + smblib_err(chg, + "Couldn't enable VBUS in OTG mode rc=%d\n", rc); + return; + } + smblib_notify_extcon_props(chg, EXTCON_USB_HOST); + } else { + smblib_dbg(chg, PR_OTG, "disabling VBUS in OTG mode\n"); + rc = smblib_masked_write(chg, DCDC_CMD_OTG_REG, + OTG_EN_BIT, 0); + if (rc < 0) { + smblib_err(chg, + "Couldn't disable VBUS in OTG mode rc=%d\n", + rc); + return; + } + } extcon_set_state_sync(chg->extcon, EXTCON_USB_HOST, enable); } @@ -589,6 +611,28 @@ static int smblib_set_adapter_allowance(struct smb_charger *chg, #define MICRO_5V 5000000 #define MICRO_9V 9000000 #define MICRO_12V 12000000 +static int smblib_set_usb_pd_fsw(struct smb_charger *chg, int voltage) +{ + int rc = 0; + + if (voltage == MICRO_5V) 
+ rc = smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_5V); + else if (voltage > MICRO_5V && voltage < MICRO_9V) + rc = smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_6V_8V); + else if (voltage >= MICRO_9V && voltage < MICRO_12V) + rc = smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_9V); + else if (voltage == MICRO_12V) + rc = smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_12V); + else { + smblib_err(chg, "Couldn't set Fsw: invalid voltage %d\n", + voltage); + return -EINVAL; + } + + return rc; +} + static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg, int min_allowed_uv, int max_allowed_uv) { @@ -597,13 +641,10 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg, if (min_allowed_uv == MICRO_5V && max_allowed_uv == MICRO_5V) { allowed_voltage = USBIN_ADAPTER_ALLOW_5V; - smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_5V); } else if (min_allowed_uv == MICRO_9V && max_allowed_uv == MICRO_9V) { allowed_voltage = USBIN_ADAPTER_ALLOW_9V; - smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_9V); } else if (min_allowed_uv == MICRO_12V && max_allowed_uv == MICRO_12V) { allowed_voltage = USBIN_ADAPTER_ALLOW_12V; - smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_12V); } else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_9V) { allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V; } else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_12V) { @@ -1795,16 +1836,111 @@ static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val) return rc; } +static void smblib_hvdcp_set_fsw(struct smb_charger *chg, int bit) +{ + switch (bit) { + case QC_5V_BIT: + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_5V); + break; + case QC_9V_BIT: + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_9V); + break; + case QC_12V_BIT: + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_12V); + break; + default: + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_removal); + break; + } +} + 
+#define QC3_PULSES_FOR_6V 5 +#define QC3_PULSES_FOR_9V 20 +#define QC3_PULSES_FOR_12V 35 +static int smblib_hvdcp3_set_fsw(struct smb_charger *chg) +{ + int pulse_count, rc; + + rc = smblib_get_pulse_cnt(chg, &pulse_count); + if (rc < 0) { + smblib_err(chg, "Couldn't read QC_PULSE_COUNT rc=%d\n", rc); + return rc; + } + + if (pulse_count < QC3_PULSES_FOR_6V) + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_5V); + else if (pulse_count < QC3_PULSES_FOR_9V) + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_6V_8V); + else if (pulse_count < QC3_PULSES_FOR_12V) + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_9V); + else + smblib_set_opt_switcher_freq(chg, + chg->chg_freq.freq_12V); + + return 0; +} + +static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg) +{ + int rc; + u8 stat; + + if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) { + rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, + "Couldn't read QC_CHANGE_STATUS rc=%d\n", rc); + return; + } + + smblib_hvdcp_set_fsw(chg, stat & QC_2P0_STATUS_MASK); + } + + if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) { + rc = smblib_hvdcp3_set_fsw(chg); + if (rc < 0) + smblib_err(chg, "Couldn't set QC3.0 Fsw rc=%d\n", rc); + } + + power_supply_changed(chg->usb_main_psy); +} + int smblib_dp_dm(struct smb_charger *chg, int val) { int target_icl_ua, rc = 0; union power_supply_propval pval; + u8 stat; switch (val) { case POWER_SUPPLY_DP_DM_DP_PULSE: + /* + * Pre-emptively increment pulse count to enable the setting + * of FSW prior to increasing voltage. + */ + chg->pulse_cnt++; + + rc = smblib_hvdcp3_set_fsw(chg); + if (rc < 0) + smblib_err(chg, "Couldn't set QC3.0 Fsw rc=%d\n", rc); + rc = smblib_dp_pulse(chg); - if (!rc) - chg->pulse_cnt++; + if (rc < 0) { + smblib_err(chg, "Couldn't increase pulse count rc=%d\n", + rc); + /* + * Increment pulse count failed; + * reset to former value. 
+ */ + chg->pulse_cnt--; + } + smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n", rc, chg->pulse_cnt); break; @@ -1856,6 +1992,17 @@ int smblib_dp_dm(struct smb_charger *chg, int val) return -EINVAL; } + /* If we are increasing voltage to get to 9V, set FSW first */ + rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n", + rc); + break; + } + + if (stat & QC_5V_BIT) + smblib_hvdcp_set_fsw(chg, QC_9V_BIT); + rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT); if (rc < 0) pr_err("Failed to force 9V\n"); @@ -1866,6 +2013,17 @@ int smblib_dp_dm(struct smb_charger *chg, int val) return -EINVAL; } + /* If we are increasing voltage to get to 12V, set FSW first */ + rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n", + rc); + break; + } + + if ((stat & QC_9V_BIT) || (stat & QC_5V_BIT)) + smblib_hvdcp_set_fsw(chg, QC_12V_BIT); + rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT); if (rc < 0) pr_err("Failed to force 12V\n"); @@ -1971,10 +2129,10 @@ int smblib_get_prop_dc_voltage_now(struct smb_charger *chg, } rc = power_supply_get_property(chg->wls_psy, - POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, val); if (rc < 0) - dev_err(chg->dev, "Couldn't get POWER_SUPPLY_PROP_VOLTAGE_MAX, rc=%d\n", + dev_err(chg->dev, "Couldn't get POWER_SUPPLY_PROP_VOLTAGE_REGULATION, rc=%d\n", rc); return rc; } @@ -2299,6 +2457,9 @@ static int smblib_get_prop_dfp_mode(struct smb_charger *chg) int rc; u8 stat; + if (chg->lpd_stage == LPD_STAGE_COMMIT) + return POWER_SUPPLY_TYPEC_NONE; + rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat); if (rc < 0) { smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n", @@ -2454,6 +2615,24 @@ int smblib_get_prop_usb_current_now(struct smb_charger *chg, return rc; } +int smblib_get_prop_low_power(struct smb_charger *chg, + union power_supply_propval *val) +{ 
+ int rc; + u8 stat; + + rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n", + rc); + return rc; + } + + val->intval = !(stat & SRC_HIGH_BATT_BIT); + + return 0; +} + int smblib_get_prop_input_current_settled(struct smb_charger *chg, union power_supply_propval *val) { @@ -2501,38 +2680,28 @@ int smblib_get_pe_start(struct smb_charger *chg, return 0; } -int smblib_get_prop_die_health(struct smb_charger *chg, - union power_supply_propval *val) +int smblib_get_prop_die_health(struct smb_charger *chg) { int rc; u8 stat; - rc = smblib_read(chg, TEMP_RANGE_STATUS_REG, &stat); + rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &stat); if (rc < 0) { - smblib_err(chg, "Couldn't read TEMP_RANGE_STATUS_REG rc=%d\n", - rc); - return rc; + smblib_err(chg, "Couldn't read DIE_TEMP_STATUS_REG, rc=%d\n", + rc); + return POWER_SUPPLY_HEALTH_UNKNOWN; } - /* TEMP_RANGE bits are mutually exclusive */ - switch (stat & TEMP_RANGE_MASK) { - case TEMP_BELOW_RANGE_BIT: - val->intval = POWER_SUPPLY_HEALTH_COOL; - break; - case TEMP_WITHIN_RANGE_BIT: - val->intval = POWER_SUPPLY_HEALTH_WARM; - break; - case TEMP_ABOVE_RANGE_BIT: - val->intval = POWER_SUPPLY_HEALTH_HOT; - break; - case ALERT_LEVEL_BIT: - val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; - break; - default: - val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; - } + if (stat & DIE_TEMP_RST_BIT) + return POWER_SUPPLY_HEALTH_OVERHEAT; - return 0; + if (stat & DIE_TEMP_UB_BIT) + return POWER_SUPPLY_HEALTH_HOT; + + if (stat & DIE_TEMP_LB_BIT) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_COOL; } int smblib_get_prop_connector_health(struct smb_charger *chg) @@ -2794,7 +2963,7 @@ int smblib_set_prop_pd_voltage_min(struct smb_charger *chg, rc = smblib_set_usb_pd_allowed_voltage(chg, min_uv, chg->voltage_max_uv); if (rc < 0) { - smblib_err(chg, "invalid max voltage %duV rc=%d\n", + smblib_err(chg, "invalid min voltage %duV rc=%d\n", val->intval, rc); 
return rc; } @@ -2811,10 +2980,18 @@ int smblib_set_prop_pd_voltage_max(struct smb_charger *chg, int rc, max_uv; max_uv = max(val->intval, chg->voltage_min_uv); + + rc = smblib_set_usb_pd_fsw(chg, max_uv); + if (rc < 0) { + smblib_err(chg, "Couldn't set FSW for voltage %duV rc=%d\n", + val->intval, rc); + return rc; + } + rc = smblib_set_usb_pd_allowed_voltage(chg, chg->voltage_min_uv, max_uv); if (rc < 0) { - smblib_err(chg, "invalid min voltage %duV rc=%d\n", + smblib_err(chg, "invalid max voltage %duV rc=%d\n", val->intval, rc); return rc; } @@ -3276,7 +3453,11 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg) vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT); - if (!vbus_rising) { + if (vbus_rising) { + /* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */ + if (chg->fcc_stepper_enable) + vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0); + } else { if (chg->wa_flags & BOOST_BACK_WA) { data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data; if (data) { @@ -3289,6 +3470,11 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg) false, 0); } } + + /* Force 1500mA FCC on USB removal if fcc stepper is enabled */ + if (chg->fcc_stepper_enable) + vote(chg->fcc_votable, FCC_STEPPER_VOTER, + true, 1500000); } power_supply_changed(chg->usb_psy); @@ -3320,6 +3506,10 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) if (rc < 0) smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc); + /* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */ + if (chg->fcc_stepper_enable) + vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0); + /* Schedule work to enable parallel charger */ vote(chg->awake_votable, PL_DELAY_VOTER, true, 0); schedule_delayed_work(&chg->pl_enable_work, @@ -3338,6 +3528,11 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) } } + /* Force 1500mA FCC on removal if fcc stepper is enabled */ + if (chg->fcc_stepper_enable) + vote(chg->fcc_votable, FCC_STEPPER_VOTER, + true, 1500000); + rc = 
smblib_request_dpdm(chg, false); if (rc < 0) smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc); @@ -3380,67 +3575,6 @@ static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg, rising ? "rising" : "falling"); } -#define QC3_PULSES_FOR_6V 5 -#define QC3_PULSES_FOR_9V 20 -#define QC3_PULSES_FOR_12V 35 -static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg) -{ - int rc; - u8 stat; - int pulses; - - power_supply_changed(chg->usb_main_psy); - if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) { - rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat); - if (rc < 0) { - smblib_err(chg, - "Couldn't read QC_CHANGE_STATUS rc=%d\n", rc); - return; - } - - switch (stat & QC_2P0_STATUS_MASK) { - case QC_5V_BIT: - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_5V); - break; - case QC_9V_BIT: - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_9V); - break; - case QC_12V_BIT: - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_12V); - break; - default: - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_removal); - break; - } - } - - if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) { - rc = smblib_get_pulse_cnt(chg, &pulses); - if (rc < 0) { - smblib_err(chg, - "Couldn't read QC_PULSE_COUNT rc=%d\n", rc); - return; - } - - if (pulses < QC3_PULSES_FOR_6V) - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_5V); - else if (pulses < QC3_PULSES_FOR_9V) - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_6V_8V); - else if (pulses < QC3_PULSES_FOR_12V) - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_9V); - else - smblib_set_opt_switcher_freq(chg, - chg->chg_freq.freq_12V); - } -} - /* triggers when HVDCP 3.0 authentication has finished */ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg, bool rising) @@ -3579,8 +3713,7 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising) case SDP_CHARGER_BIT: case CDP_CHARGER_BIT: case FLOAT_CHARGER_BIT: - if 
((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) - || chg->use_extcon) + if (chg->use_extcon) smblib_notify_device_mode(chg, true); break; case OCP_CHARGER_BIT: @@ -3661,6 +3794,84 @@ irqreturn_t usb_source_change_irq_handler(int irq, void *data) return IRQ_HANDLED; } +enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm, + ktime_t time) +{ + union power_supply_propval pval; + struct smb_charger *chg = container_of(alarm, struct smb_charger, + lpd_recheck_timer); + int rc; + + if (chg->lpd_reason == LPD_MOISTURE_DETECTED) { + pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL; + rc = smblib_set_prop_typec_power_role(chg, &pval); + if (rc < 0) { + smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", + pval.intval, rc); + return ALARMTIMER_NORESTART; + } + } else { + rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, + TYPEC_WATER_DETECTION_INT_EN_BIT, + TYPEC_WATER_DETECTION_INT_EN_BIT); + if (rc < 0) { + smblib_err(chg, "Couldn't set TYPE_C_INTERRUPT_EN_CFG_2_REG rc=%d\n", + rc); + return ALARMTIMER_NORESTART; + } + } + + chg->lpd_stage = LPD_STAGE_NONE; + chg->lpd_reason = LPD_NONE; + + return ALARMTIMER_NORESTART; +} + +#define RSBU_K_300K_UV 3000000 +static bool smblib_src_lpd(struct smb_charger *chg) +{ + union power_supply_propval pval; + bool lpd_flag = false; + u8 stat; + int rc; + + rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n", + rc); + return false; + } + + switch (stat & DETECTED_SNK_TYPE_MASK) { + case SRC_DEBUG_ACCESS_BIT: + if (smblib_rsbux_low(chg, RSBU_K_300K_UV)) + lpd_flag = true; + break; + case SRC_RD_RA_VCONN_BIT: + case SRC_RD_OPEN_BIT: + case AUDIO_ACCESS_RA_RA_BIT: + default: + break; + } + + if (lpd_flag) { + chg->lpd_stage = LPD_STAGE_COMMIT; + pval.intval = POWER_SUPPLY_TYPEC_PR_SINK; + rc = smblib_set_prop_typec_power_role(chg, &pval); + if (rc < 0) + smblib_err(chg, "Couldn't write 0x%02x to 
TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", + pval.intval, rc); + chg->lpd_reason = LPD_MOISTURE_DETECTED; + alarm_start_relative(&chg->lpd_recheck_timer, + ms_to_ktime(60000)); + } else { + chg->lpd_reason = LPD_NONE; + chg->typec_mode = smblib_get_prop_typec_mode(chg); + } + + return lpd_flag; +} + static void typec_sink_insertion(struct smb_charger *chg) { vote(chg->usb_icl_votable, OTG_VOTER, true, 0); @@ -3845,6 +4056,10 @@ irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data) { struct smb_irq_data *irq_data = data; struct smb_charger *chg = irq_data->parent_data; + u8 stat; + int rc; + + smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name); if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) { cancel_delayed_work_sync(&chg->uusb_otg_work); @@ -3852,9 +4067,33 @@ irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data) smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n"); schedule_delayed_work(&chg->uusb_otg_work, msecs_to_jiffies(chg->otg_delay_ms)); - return IRQ_HANDLED; + goto out; } + if (chg->pr_swap_in_progress || chg->pd_hard_reset) + goto out; + + rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n", + rc); + goto out; + } + + /* liquid presence detected, to check further */ + if ((stat & TYPEC_WATER_DETECTION_STATUS_BIT) + && chg->lpd_stage == LPD_STAGE_NONE) { + chg->lpd_stage = LPD_STAGE_FLOAT; + cancel_delayed_work_sync(&chg->lpd_ra_open_work); + vote(chg->awake_votable, LPD_VOTER, true, 0); + schedule_delayed_work(&chg->lpd_ra_open_work, + msecs_to_jiffies(300)); + } + + if (chg->usb_psy) + power_supply_changed(chg->usb_psy); + +out: return IRQ_HANDLED; } @@ -3902,6 +4141,10 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data) } if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) { + chg->lpd_stage = LPD_STAGE_ATTACHED; + cancel_delayed_work_sync(&chg->lpd_ra_open_work); + vote(chg->awake_votable, LPD_VOTER, 
false, 0); + rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat); if (rc < 0) { smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n", @@ -3910,6 +4153,8 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data) } if (stat & SNK_SRC_MODE_BIT) { + if (smblib_src_lpd(chg)) + return IRQ_HANDLED; chg->sink_src_mode = SRC_MODE; typec_sink_insertion(chg); } else { @@ -3934,6 +4179,10 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data) chg->sink_src_mode = UNATTACHED_MODE; chg->early_usb_attach = false; } + + chg->lpd_stage = LPD_STAGE_DETACHED; + schedule_delayed_work(&chg->lpd_detach_work, + msecs_to_jiffies(100)); } power_supply_changed(chg->usb_psy); @@ -4405,6 +4654,106 @@ static void jeita_update_work(struct work_struct *work) chg->jeita_configured = true; } +static void smblib_lpd_ra_open_work(struct work_struct *work) +{ + struct smb_charger *chg = container_of(work, struct smb_charger, + lpd_ra_open_work.work); + union power_supply_propval pval; + u8 stat; + int rc; + + if (chg->pr_swap_in_progress || chg->pd_hard_reset) { + chg->lpd_stage = LPD_STAGE_NONE; + goto out; + } + + if (chg->lpd_stage != LPD_STAGE_FLOAT) + goto out; + + rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n", + rc); + goto out; + } + + /* double check water detection status bit */ + if (!(stat & TYPEC_WATER_DETECTION_STATUS_BIT)) { + chg->lpd_stage = LPD_STAGE_NONE; + goto out; + } + + chg->lpd_stage = LPD_STAGE_COMMIT; + + /* Enable source only mode */ + pval.intval = POWER_SUPPLY_TYPEC_PR_SOURCE; + rc = smblib_set_prop_typec_power_role(chg, &pval); + if (rc < 0) { + smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", + pval.intval, rc); + goto out; + } + + /* Wait 1.5ms to read src status */ + usleep_range(1500, 1510); + + rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read 
TYPE_C_SRC_STATUS_REG rc=%d\n", + rc); + goto out; + } + + /* Emark cable */ + if ((stat & SRC_RA_OPEN_BIT) && + !smblib_rsbux_low(chg, RSBU_K_300K_UV)) { + /* Floating cable, disable water detection irq temporarily */ + rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, + TYPEC_WATER_DETECTION_INT_EN_BIT, 0); + if (rc < 0) { + smblib_err(chg, "Couldn't set TYPE_C_INTERRUPT_EN_CFG_2_REG rc=%d\n", + rc); + goto out; + } + + /* restore DRP mode */ + pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL; + rc = smblib_set_prop_typec_power_role(chg, &pval); + if (rc < 0) { + smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", + pval.intval, rc); + goto out; + } + + chg->lpd_reason = LPD_FLOATING_CABLE; + } else { + /* Moisture detected, enable sink only mode */ + pval.intval = POWER_SUPPLY_TYPEC_PR_SINK; + rc = smblib_set_prop_typec_power_role(chg, &pval); + if (rc < 0) { + smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", + pval.intval, rc); + goto out; + } + + chg->lpd_reason = LPD_MOISTURE_DETECTED; + } + + /* recheck in 60 seconds */ + alarm_start_relative(&chg->lpd_recheck_timer, ms_to_ktime(60000)); +out: + vote(chg->awake_votable, LPD_VOTER, false, 0); +} + +static void smblib_lpd_detach_work(struct work_struct *work) +{ + struct smb_charger *chg = container_of(work, struct smb_charger, + lpd_detach_work.work); + + if (chg->lpd_stage == LPD_STAGE_DETACHED) + chg->lpd_stage = LPD_STAGE_NONE; +} + static int smblib_create_votables(struct smb_charger *chg) { int rc = 0; @@ -4528,6 +4877,8 @@ int smblib_init(struct smb_charger *chg) INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work); INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work); INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work); + INIT_DELAYED_WORK(&chg->lpd_ra_open_work, smblib_lpd_ra_open_work); + INIT_DELAYED_WORK(&chg->lpd_detach_work, smblib_lpd_detach_work); chg->fake_capacity = -EINVAL; 
chg->fake_input_current_limited = -EINVAL; chg->fake_batt_status = -EINVAL; @@ -4628,6 +4979,8 @@ int smblib_deinit(struct smb_charger *chg) cancel_delayed_work_sync(&chg->pl_enable_work); cancel_delayed_work_sync(&chg->uusb_otg_work); cancel_delayed_work_sync(&chg->bb_removal_work); + cancel_delayed_work_sync(&chg->lpd_ra_open_work); + cancel_delayed_work_sync(&chg->lpd_detach_work); power_supply_unreg_notifier(&chg->nb); smblib_destroy_votables(chg); qcom_step_chg_deinit(); diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 62781c76f9d9920da97d72e0c456270689d0d8e6..380a3a7d5c9ddb9a36c97334bb13d79a9468a0d6 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -12,6 +12,8 @@ #ifndef __SMB5_CHARGER_H #define __SMB5_CHARGER_H +#include +#include #include #include #include @@ -60,6 +62,8 @@ enum print_reason { #define HW_LIMIT_VOTER "HW_LIMIT_VOTER" #define PL_SMB_EN_VOTER "PL_SMB_EN_VOTER" #define FORCE_RECHARGE_VOTER "FORCE_RECHARGE_VOTER" +#define LPD_VOTER "LPD_VOTER" +#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER" #define BOOST_BACK_STORM_COUNT 3 #define WEAK_CHG_STORM_COUNT 8 @@ -194,6 +198,20 @@ static const unsigned int smblib_extcon_cable[] = { EXTCON_NONE, }; +enum lpd_reason { + LPD_NONE, + LPD_MOISTURE_DETECTED, + LPD_FLOATING_CABLE, +}; + +enum lpd_stage { + LPD_STAGE_NONE, + LPD_STAGE_FLOAT, + LPD_STAGE_ATTACHED, + LPD_STAGE_DETACHED, + LPD_STAGE_COMMIT, +}; + /* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */ static const u32 smblib_extcon_exclusive[] = {0x3, 0}; @@ -325,6 +343,10 @@ struct smb_charger { struct delayed_work pl_enable_work; struct delayed_work uusb_otg_work; struct delayed_work bb_removal_work; + struct delayed_work lpd_ra_open_work; + struct delayed_work lpd_detach_work; + + struct alarm lpd_recheck_timer; /* secondary charger config */ bool sec_pl_present; @@ -376,6 +398,9 @@ struct smb_charger { int charger_temp_max; int smb_temp_max; u8 
typec_try_mode; + enum lpd_stage lpd_stage; + enum lpd_reason lpd_reason; + bool fcc_stepper_enable; /* workaround flag */ u32 wa_flags; @@ -514,6 +539,8 @@ int smblib_get_prop_usb_voltage_max(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_usb_voltage_now(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_prop_low_power(struct smb_charger *chg, + union power_supply_propval *val); int smblib_get_prop_usb_current_now(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg, @@ -532,8 +559,7 @@ int smblib_get_pe_start(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_charger_temp(struct smb_charger *chg, union power_supply_propval *val); -int smblib_get_prop_die_health(struct smb_charger *chg, - union power_supply_propval *val); +int smblib_get_prop_die_health(struct smb_charger *chg); int smblib_get_prop_connector_health(struct smb_charger *chg); int smblib_set_prop_pd_current_max(struct smb_charger *chg, const union power_supply_propval *val); @@ -575,6 +601,8 @@ int smblib_get_prop_from_bms(struct smb_charger *chg, union power_supply_propval *val); int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable); int smblib_icl_override(struct smb_charger *chg, bool override); +enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm, + ktime_t time); int smblib_init(struct smb_charger *chg); int smblib_deinit(struct smb_charger *chg); diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h index 8218b9ec40172c3f9e80fc6beba12b6a6fc71488..fd0385c507700c2016197884461501837d74d307 100644 --- a/drivers/power/supply/qcom/smb5-reg.h +++ b/drivers/power/supply/qcom/smb5-reg.h @@ -108,6 +108,12 @@ enum { #define CHGR_JEITA_THRESHOLD_BASE_REG(i) (CHGR_BASE + 0x94 + (i * 4)) +#define CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG (CHGR_BASE + 0xA2) +#define 
FAST_CHARGE_SAFETY_TIMER_192_MIN 0x0 +#define FAST_CHARGE_SAFETY_TIMER_384_MIN 0x1 +#define FAST_CHARGE_SAFETY_TIMER_768_MIN 0x2 +#define FAST_CHARGE_SAFETY_TIMER_1536_MIN 0x3 + #define CHGR_ENG_CHARGING_CFG_REG (CHGR_BASE + 0xC0) #define CHGR_ITERM_USE_ANALOG_BIT BIT(3) @@ -149,6 +155,15 @@ enum { #define DCDC_CFG_REF_MAX_PSNS_REG (DCDC_BASE + 0x8C) +#define DCDC_ENG_SDCDC_CFG5_REG (DCDC_BASE + 0xC4) +#define ENG_SDCDC_BAT_HPWR_MASK GENMASK(7, 6) +enum { + BOOST_MODE_THRESH_3P3_V, + BOOST_MODE_THRESH_3P4_V = 0x40, + BOOST_MODE_THRESH_3P5_V = 0x80, + BOOST_MODE_THRESH_3P6_V = 0xC0 +}; + /******************************** * BATIF Peripheral Registers * ********************************/ @@ -272,6 +287,7 @@ enum { #define SUSPEND_ON_COLLAPSE_USBIN_BIT BIT(7) #define USBIN_AICL_PERIODIC_RERUN_EN_BIT BIT(4) #define USBIN_AICL_ADC_EN_BIT BIT(3) +#define USBIN_AICL_EN_BIT BIT(2) #define USB_ENG_SSUPPLY_USB2_REG (USBIN_BASE + 0xC0) #define ENG_SSUPPLY_12V_OV_OPT_BIT BIT(1) @@ -298,6 +314,7 @@ enum { #define TYPE_C_SRC_STATUS_REG (TYPEC_BASE + 0x08) #define DETECTED_SNK_TYPE_MASK GENMASK(4, 0) +#define SRC_HIGH_BATT_BIT BIT(5) #define SRC_DEBUG_ACCESS_BIT BIT(4) #define SRC_RD_OPEN_BIT BIT(3) #define SRC_RD_RA_VCONN_BIT BIT(2) @@ -308,6 +325,7 @@ enum { #define TYPEC_ATTACH_DETACH_STATE_BIT BIT(5) #define TYPE_C_MISC_STATUS_REG (TYPEC_BASE + 0x0B) +#define TYPEC_WATER_DETECTION_STATUS_BIT BIT(7) #define SNK_SRC_MODE_BIT BIT(6) #define TYPEC_VBUS_ERROR_STATUS_BIT BIT(4) #define CC_ORIENTATION_BIT BIT(1) @@ -402,6 +420,12 @@ enum { #define TEMP_BELOW_RANGE_BIT BIT(1) #define THERMREG_DISABLED_BIT BIT(0) +#define DIE_TEMP_STATUS_REG (MISC_BASE + 0x07) +#define DIE_TEMP_SHDN_BIT BIT(3) +#define DIE_TEMP_RST_BIT BIT(2) +#define DIE_TEMP_UB_BIT BIT(1) +#define DIE_TEMP_LB_BIT BIT(0) + #define CONNECTOR_TEMP_STATUS_REG (MISC_BASE + 0x09) #define CONNECTOR_TEMP_SHDN_BIT BIT(3) #define CONNECTOR_TEMP_RST_BIT BIT(2) diff --git a/drivers/power/supply/qcom/step-chg-jeita.c 
b/drivers/power/supply/qcom/step-chg-jeita.c index b87e19ca11e86f8878650d54d4ec8bd3ff79ceea..c0c679eaff8f02de9281b631b501a98a88388d6d 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.c +++ b/drivers/power/supply/qcom/step-chg-jeita.c @@ -72,9 +72,11 @@ struct step_chg_info { struct votable *fcc_votable; struct votable *fv_votable; + struct votable *usb_icl_votable; struct wakeup_source *step_chg_ws; struct power_supply *batt_psy; struct power_supply *bms_psy; + struct power_supply *usb_psy; struct delayed_work status_change_work; struct delayed_work get_config_work; struct notifier_block nb; @@ -111,6 +113,17 @@ static bool is_bms_available(struct step_chg_info *chip) return true; } +static bool is_usb_available(struct step_chg_info *chip) +{ + if (!chip->usb_psy) + chip->usb_psy = power_supply_get_by_name("usb"); + + if (!chip->usb_psy) + return false; + + return true; +} + int read_range_data_from_node(struct device_node *node, const char *prop_str, struct range_data *ranges, u32 max_threshold, u32 max_value) @@ -484,6 +497,7 @@ static int handle_step_chg_config(struct step_chg_info *chip) return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000); } +#define JEITA_SUSPEND_HYST_UV 50000 static int handle_jeita(struct step_chg_info *chip) { union power_supply_propval pval = {0, }; @@ -502,6 +516,8 @@ static int handle_jeita(struct step_chg_info *chip) vote(chip->fcc_votable, JEITA_VOTER, false, 0); if (chip->fv_votable) vote(chip->fv_votable, JEITA_VOTER, false, 0); + if (chip->usb_icl_votable) + vote(chip->usb_icl_votable, JEITA_VOTER, false, 0); return 0; } @@ -523,12 +539,8 @@ static int handle_jeita(struct step_chg_info *chip) pval.intval, &chip->jeita_fcc_index, &fcc_ua); - if (rc < 0) { - /* remove the vote if no step-based fcc is found */ - if (chip->fcc_votable) - vote(chip->fcc_votable, JEITA_VOTER, false, 0); - goto update_time; - } + if (rc < 0) + fcc_ua = 0; if (!chip->fcc_votable) chip->fcc_votable = find_votable("FCC"); @@ -536,7 +548,7 @@ static 
int handle_jeita(struct step_chg_info *chip) /* changing FCC is a must */ return -EINVAL; - vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua); + vote(chip->fcc_votable, JEITA_VOTER, fcc_ua ? true : false, fcc_ua); rc = get_val(chip->jeita_fv_config->fv_cfg, chip->jeita_fv_config->hysteresis, @@ -544,21 +556,45 @@ static int handle_jeita(struct step_chg_info *chip) pval.intval, &chip->jeita_fv_index, &fv_uv); - if (rc < 0) { - /* remove the vote if no step-based fcc is found */ - if (chip->fv_votable) - vote(chip->fv_votable, JEITA_VOTER, false, 0); - goto update_time; - } + if (rc < 0) + fv_uv = 0; chip->fv_votable = find_votable("FV"); if (!chip->fv_votable) goto update_time; - vote(chip->fv_votable, JEITA_VOTER, true, fv_uv); + if (!chip->usb_icl_votable) + chip->usb_icl_votable = find_votable("USB_ICL"); + + if (!chip->usb_icl_votable) + goto set_jeita_fv; + + /* + * If JEITA float voltage is same as max-vfloat of battery then + * skip any further VBAT specific checks. + */ + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval); + if (rc || (pval.intval == fv_uv)) { + vote(chip->usb_icl_votable, JEITA_VOTER, false, 0); + goto set_jeita_fv; + } - pr_debug("%s = %d FCC = %duA FV = %duV\n", - chip->jeita_fcc_config->prop_name, pval.intval, fcc_ua, fv_uv); + /* + * Suspend USB input path if battery voltage is above + * JEITA VFLOAT threshold. + */ + if (fv_uv > 0) { + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval); + if (!rc && (pval.intval > fv_uv)) + vote(chip->usb_icl_votable, JEITA_VOTER, true, 0); + else if (pval.intval < (fv_uv - JEITA_SUSPEND_HYST_UV)) + vote(chip->usb_icl_votable, JEITA_VOTER, false, 0); + } + +set_jeita_fv: + vote(chip->fv_votable, JEITA_VOTER, fv_uv ? 
true : false, fv_uv); update_time: chip->jeita_last_update_time = ktime_get(); @@ -610,11 +646,13 @@ static void status_change_work(struct work_struct *work) int reschedule_us; int reschedule_jeita_work_us = 0; int reschedule_step_work_us = 0; + union power_supply_propval prop = {0, }; if (!is_batt_available(chip)) - return; + goto exit_work; handle_battery_insertion(chip); + /* skip elapsed_us debounce for handling battery temperature */ rc = handle_jeita(chip); if (rc > 0) @@ -628,12 +666,28 @@ static void status_change_work(struct work_struct *work) if (rc < 0) pr_err("Couldn't handle step rc = %d\n", rc); + /* Remove stale votes on USB removal */ + if (is_usb_available(chip)) { + prop.intval = 0; + power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &prop); + if (!prop.intval) { + if (chip->usb_icl_votable) + vote(chip->usb_icl_votable, JEITA_VOTER, + false, 0); + } + } + reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us); if (reschedule_us == 0) - __pm_relax(chip->step_chg_ws); + goto exit_work; else schedule_delayed_work(&chip->status_change_work, usecs_to_jiffies(reschedule_us)); + return; + +exit_work: + __pm_relax(chip->step_chg_ws); } static int step_chg_notifier_call(struct notifier_block *nb, @@ -645,7 +699,8 @@ static int step_chg_notifier_call(struct notifier_block *nb, if (ev != PSY_EVENT_PROP_CHANGED) return NOTIFY_OK; - if ((strcmp(psy->desc->name, "battery") == 0)) { + if ((strcmp(psy->desc->name, "battery") == 0) + || (strcmp(psy->desc->name, "usb") == 0)) { __pm_stay_awake(chip->step_chg_ws); schedule_delayed_work(&chip->status_change_work, 0); } diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 58a97d4205723fff7e25f454480169b6f658b0a4..51364621f77ce737a65b8fd7e1c8479c76324f3d 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, case PTP_PF_PHYSYNC: if (chan != 0) return -EINVAL; + break; 
default: return -EINVAL; } diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c index f541b80f1b540ae12d47ece8de124b74839b1acf..bd910fe123d98e65794990a14d7c26986369b6cb 100644 --- a/drivers/regulator/cpcap-regulator.c +++ b/drivers/regulator/cpcap-regulator.c @@ -222,7 +222,7 @@ static unsigned int cpcap_map_mode(unsigned int mode) case CPCAP_BIT_AUDIO_LOW_PWR: return REGULATOR_MODE_STANDBY; default: - return -EINVAL; + return REGULATOR_MODE_INVALID; } } diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index c9875355905d159827ea565affd0b26b7bc18b01..a3bf7c993723acdaab9160b22be58a7e5d52b37a 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -31,6 +31,7 @@ static void of_get_regulation_constraints(struct device_node *np, struct regulation_constraints *constraints = &(*init_data)->constraints; struct regulator_state *suspend_state; struct device_node *suspend_np; + unsigned int mode; int ret, i; u32 pval; @@ -124,11 +125,11 @@ static void of_get_regulation_constraints(struct device_node *np, if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) { if (desc && desc->of_map_mode) { - ret = desc->of_map_mode(pval); - if (ret == -EINVAL) + mode = desc->of_map_mode(pval); + if (mode == REGULATOR_MODE_INVALID) pr_err("%s: invalid mode %u\n", np->name, pval); else - constraints->initial_mode = ret; + constraints->initial_mode = mode; } else { pr_warn("%s: mapping for mode %d not defined\n", np->name, pval); @@ -163,12 +164,12 @@ static void of_get_regulation_constraints(struct device_node *np, if (!of_property_read_u32(suspend_np, "regulator-mode", &pval)) { if (desc && desc->of_map_mode) { - ret = desc->of_map_mode(pval); - if (ret == -EINVAL) + mode = desc->of_map_mode(pval); + if (mode == REGULATOR_MODE_INVALID) pr_err("%s: invalid mode %u\n", np->name, pval); else - suspend_state->mode = ret; + suspend_state->mode = mode; } else { pr_warn("%s: mapping for mode 
%d not defined\n", np->name, pval); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 63922a2167e55a75fb5e3c3f2526bc9f06bcabf2..659e516455bee1f14f50a1f60ad13b590375e5a7 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -158,6 +158,7 @@ static const struct regulator_ops pfuze100_sw_regulator_ops = { static const struct regulator_ops pfuze100_swb_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = regulator_set_voltage_sel_regmap, diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c index fe543aecdcd6189430b8153d402447129821b1c3..45c2b7f0e46ffbea75c67d39e236c5bbc53ba143 100644 --- a/drivers/regulator/qpnp-lcdb-regulator.c +++ b/drivers/regulator/qpnp-lcdb-regulator.c @@ -1148,6 +1148,7 @@ static int qpnp_lcdb_get_voltage(struct qpnp_lcdb *lcdb, return rc; } + val &= SET_OUTPUT_VOLTAGE_MASK; if (val < VOLTAGE_STEP_50MV_OFFSET) { *voltage_mv = VOLTAGE_MIN_STEP_100_MV + (val * VOLTAGE_STEP_100_MV); @@ -1823,7 +1824,7 @@ static int qpnp_lcdb_init_bst(struct qpnp_lcdb *lcdb) if (lcdb->bst.pd_strength != -EINVAL) { rc = qpnp_lcdb_masked_write(lcdb, lcdb->base + - LCDB_NCP_PD_CTL_REG, BOOST_PD_STRENGTH_BIT, + LCDB_BST_PD_CTL_REG, BOOST_PD_STRENGTH_BIT, lcdb->bst.pd_strength ? 
BOOST_PD_STRENGTH_BIT : 0); if (rc < 0) { diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index a4456db5849d06282b7eb2884d20e8da29093753..884c7505ed91c493b127e680b7406773952fc1d7 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -274,7 +274,7 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode) case RES_STATE_SLEEP: return REGULATOR_MODE_STANDBY; default: - return -EINVAL; + return REGULATOR_MODE_INVALID; } } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index a7e74f9204d1f52149bf8717b1207dbb39d61c12..a6558609209dae2b6c437352b767082c55b4bedc 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -114,6 +115,8 @@ struct glink_core_rx_intent { * @rx_pipe: pipe object for receive FIFO * @tx_pipe: pipe object for transmit FIFO * @irq: IRQ for signaling incoming events + * @kworker: kworker to handle rx_done work + * @task: kthread running @kworker * @rx_work: worker for handling received control messages * @rx_lock: protects the @rx_queue * @rx_queue: queue of received control messages to be processed in @rx_work @@ -121,6 +124,7 @@ struct glink_core_rx_intent { * @idr_lock: synchronizes @lcids and @rcids modifications * @lcids: idr of all channels with a known local channel id * @rcids: idr of all channels with a known remote channel id + * @in_reset: reset status of this edge * @ilc: ipc logging context reference */ struct qcom_glink { @@ -136,6 +140,9 @@ struct qcom_glink { int irq; + struct kthread_worker kworker; + struct task_struct *task; + struct work_struct rx_work; spinlock_t rx_lock; struct list_head rx_queue; @@ -145,6 +152,8 @@ struct qcom_glink { spinlock_t idr_lock; struct idr lcids; struct idr rcids; + + atomic_t in_reset; unsigned long features; bool intentless; @@ -181,7 +190,8 @@ enum { * @open_req: completed once 
open-request has been received * @intent_req_lock: Synchronises multiple intent requests * @intent_req_result: Result of intent request - * @intent_req_comp: Completion for intent_req signalling + * @intent_req_comp: Status of intent request completion + * @intent_req_event: Waitqueue for @intent_req_comp */ struct glink_channel { struct rpmsg_endpoint ept; @@ -200,7 +210,7 @@ struct glink_channel { spinlock_t intent_lock; struct idr liids; struct idr riids; - struct work_struct intent_work; + struct kthread_work intent_work; struct list_head done_intents; struct glink_core_rx_intent *buf; @@ -215,7 +225,8 @@ struct glink_channel { struct mutex intent_req_lock; bool intent_req_result; - struct completion intent_req_comp; + atomic_t intent_req_comp; + wait_queue_head_t intent_req_event; }; #define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept) @@ -240,7 +251,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops; #define GLINK_FEATURE_INTENTLESS BIT(1) -static void qcom_glink_rx_done_work(struct work_struct *work); +static void qcom_glink_rx_done_work(struct kthread_work *work); static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, const char *name) @@ -261,10 +272,11 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, init_completion(&channel->open_req); init_completion(&channel->open_ack); - init_completion(&channel->intent_req_comp); + atomic_set(&channel->intent_req_comp, 0); + init_waitqueue_head(&channel->intent_req_event); INIT_LIST_HEAD(&channel->done_intents); - INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); + kthread_init_work(&channel->intent_work, qcom_glink_rx_done_work); idr_init(&channel->liids); idr_init(&channel->riids); @@ -280,6 +292,8 @@ static void qcom_glink_channel_release(struct kref *ref) unsigned long flags; CH_INFO(channel, "\n"); + wake_up(&channel->intent_req_event); + spin_lock_irqsave(&channel->intent_lock, flags); 
idr_destroy(&channel->liids); idr_destroy(&channel->riids); @@ -346,6 +360,11 @@ static int qcom_glink_tx(struct qcom_glink *glink, goto out; } + if (atomic_read(&glink->in_reset)) { + ret = -ECONNRESET; + goto out; + } + /* Wait without holding the tx_lock */ spin_unlock_irqrestore(&glink->tx_lock, flags); @@ -417,7 +436,8 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, } channel->intent_req_result = granted; - complete(&channel->intent_req_comp); + atomic_inc(&channel->intent_req_comp); + wake_up(&channel->intent_req_event); CH_INFO(channel, "\n"); } @@ -504,40 +524,54 @@ static void qcom_glink_send_close_ack(struct qcom_glink *glink, qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); } -static void qcom_glink_rx_done_work(struct work_struct *work) + +static int __qcom_glink_rx_done(struct qcom_glink *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent, + bool wait) { - struct glink_channel *channel = container_of(work, struct glink_channel, - intent_work); - struct qcom_glink *glink = channel->glink; - struct glink_core_rx_intent *intent, *tmp; struct { u16 id; u16 lcid; u32 liid; } __packed cmd; - unsigned int cid = channel->lcid; - unsigned int iid; - bool reuse; + unsigned int iid = intent->id; + bool reuse = intent->reuse; + int ret; + + cmd.id = reuse ? 
RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; + cmd.lcid = channel->lcid; + cmd.liid = iid; + + ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, wait); + if (ret) + return ret; + + if (!reuse) { + kfree(intent->data); + kfree(intent); + } + + CH_INFO(channel, "reuse:%d liid:%d", reuse, iid); + return 0; +} + +static void qcom_glink_rx_done_work(struct kthread_work *work) +{ + struct glink_channel *channel = container_of(work, struct glink_channel, + intent_work); + struct qcom_glink *glink = channel->glink; + struct glink_core_rx_intent *intent, *tmp; unsigned long flags; spin_lock_irqsave(&channel->intent_lock, flags); list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { list_del(&intent->node); spin_unlock_irqrestore(&channel->intent_lock, flags); - iid = intent->id; - reuse = intent->reuse; - cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; - cmd.lcid = cid; - cmd.liid = iid; + __qcom_glink_rx_done(glink, channel, intent, true); - CH_INFO(channel, "reuse:%d liid:%d", reuse, iid); - qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); - if (!reuse) { - kfree(intent->data); - kfree(intent); - } spin_lock_irqsave(&channel->intent_lock, flags); } spin_unlock_irqrestore(&channel->intent_lock, flags); @@ -547,6 +581,8 @@ static void qcom_glink_rx_done(struct qcom_glink *glink, struct glink_channel *channel, struct glink_core_rx_intent *intent) { + int ret = -EAGAIN; + /* We don't send RX_DONE to intentless systems */ if (glink->intentless) { kfree(intent->data); @@ -563,10 +599,14 @@ static void qcom_glink_rx_done(struct qcom_glink *glink, /* Schedule the sending of a rx_done indication */ spin_lock(&channel->intent_lock); - list_add_tail(&intent->node, &channel->done_intents); - spin_unlock(&channel->intent_lock); + if (list_empty(&channel->done_intents)) + ret = __qcom_glink_rx_done(glink, channel, intent, false); - schedule_work(&channel->intent_work); + if (ret) { + list_add_tail(&intent->node, &channel->done_intents); + 
kthread_queue_work(&glink->kworker, &channel->intent_work); + } + spin_unlock_irqrestore(&channel->intent_lock, flags); } /** @@ -1224,13 +1264,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink, /* * Send a close request to "undo" our open-ack. The close-ack will - * release the last reference. + * release qcom_glink_send_open_req() reference and the last reference + * will be released after receiving remote_close or transport unregister + * by calling qcom_glink_native_remove(). */ qcom_glink_send_close_req(glink, channel); - /* Release qcom_glink_send_open_req() reference */ - kref_put(&channel->refcount, qcom_glink_channel_release); - return ret; } @@ -1343,7 +1382,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, mutex_lock(&channel->intent_req_lock); - reinit_completion(&channel->intent_req_comp); + atomic_set(&channel->intent_req_comp, 0); cmd.id = RPM_CMD_RX_INTENT_REQ; cmd.cid = channel->lcid; @@ -1355,10 +1394,15 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, if (ret) goto unlock; - ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); + ret = wait_event_timeout(channel->intent_req_event, + atomic_read(&channel->intent_req_comp) || + atomic_read(&glink->in_reset), 10 * HZ); if (!ret) { dev_err(glink->dev, "intent request timed out\n"); ret = -ETIMEDOUT; + } else if (atomic_read(&glink->in_reset)) { + CH_INFO(channel, "ssr detected\n"); + ret = -ECONNRESET; } else { ret = channel->intent_req_result ? 
0 : -ECANCELED; } @@ -1610,7 +1654,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid) CH_INFO(channel, "\n"); /* cancel pending rx_done work */ - cancel_work_sync(&channel->intent_work); + kthread_cancel_work_sync(&channel->intent_work); if (channel->rpdev) { strlcpy(chinfo.name, channel->name, sizeof(chinfo.name)); @@ -1786,6 +1830,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, spin_lock_init(&glink->idr_lock); idr_init(&glink->lcids); idr_init(&glink->rcids); + atomic_set(&glink->in_reset, 0); ret = of_property_read_string(dev->of_node, "label", &glink->name); if (ret < 0) @@ -1800,6 +1845,15 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, return ERR_CAST(glink->mbox_chan); } + kthread_init_worker(&glink->kworker); + glink->task = kthread_run(kthread_worker_fn, &glink->kworker, + "glink_%s", glink->name); + if (IS_ERR(glink->task)) { + dev_err(dev, "failed to spawn intent kthread %d\n", + PTR_ERR(glink->task)); + return ERR_CAST(glink->task); + } + irq = of_irq_get(dev->of_node, 0); ret = devm_request_irq(dev, irq, qcom_glink_native_intr, @@ -1842,28 +1896,50 @@ void qcom_glink_native_remove(struct qcom_glink *glink) int ret; unsigned long flags; + atomic_inc(&glink->in_reset); disable_irq(glink->irq); cancel_work_sync(&glink->rx_work); + /* Signal all threads to cancel tx */ + spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->lcids, channel, cid) { + wake_up(&channel->intent_req_event); + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device); if (ret) dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret); spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->lcids, channel, cid) { + spin_unlock_irqrestore(&glink->idr_lock, flags); + /* cancel pending rx_done work for each channel*/ + kthread_cancel_work_sync(&channel->intent_work); + spin_lock_irqsave(&glink->idr_lock, 
flags); + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + + spin_lock_irqsave(&glink->idr_lock, flags); + /* Release any defunct local channels, waiting for close-ack */ idr_for_each_entry(&glink->lcids, channel, cid) { - if (kref_put(&channel->refcount, qcom_glink_channel_release)) - idr_remove(&glink->lcids, cid); + kref_put(&channel->refcount, qcom_glink_channel_release); + idr_remove(&glink->lcids, cid); } /* Release any defunct local channels, waiting for close-req */ - idr_for_each_entry(&glink->lcids, channel, cid) + idr_for_each_entry(&glink->rcids, channel, cid) { kref_put(&channel->refcount, qcom_glink_channel_release); + idr_remove(&glink->rcids, cid); + } idr_destroy(&glink->lcids); idr_destroy(&glink->rcids); spin_unlock_irqrestore(&glink->idr_lock, flags); + kthread_flush_worker(&glink->kworker); + kthread_stop(glink->task); qcom_glink_pipe_reset(glink); mbox_free_channel(glink->mbox_chan); } diff --git a/drivers/rpmsg/qcom_glink_spi.c b/drivers/rpmsg/qcom_glink_spi.c index 438afd5a83c1931d8801f9d4b45c02350ad0d70a..9f3ef04083401b439d4f0a4e5de10cfc053b1458 100644 --- a/drivers/rpmsg/qcom_glink_spi.c +++ b/drivers/rpmsg/qcom_glink_spi.c @@ -200,9 +200,8 @@ struct glink_spi { struct wcd_spi_ops spi_ops; struct glink_cmpnt cmpnt; - u32 activity_flag; - spinlock_t activity_lock; - bool in_reset; + atomic_t activity_cnt; + atomic_t in_reset; void *ilc; }; @@ -297,21 +296,6 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops; static void glink_spi_rx_done_work(struct work_struct *work); static void glink_spi_remove(struct glink_spi *glink); -/** - * spi_suspend() - Vote for the spi device suspend - * @cmpnt: Component to identify the spi device. - * - * Return: 0 on success, standard Linux error codes on failure. 
- */ -static int spi_suspend(struct glink_cmpnt *cmpnt) -{ - if (!cmpnt || !cmpnt->master_dev || !cmpnt->master_ops || - !cmpnt->master_ops->suspend) - return 0; - - return cmpnt->master_ops->suspend(cmpnt->master_dev); -} - /** * spi_resume() - Vote for the spi device resume * @cmpnt: Component to identify the spi device. @@ -337,12 +321,7 @@ static int spi_resume(struct glink_cmpnt *cmpnt) */ static void glink_spi_xprt_set_poll_mode(struct glink_spi *glink) { - unsigned long flags; - - spin_lock_irqsave(&glink->activity_lock, flags); - glink->activity_flag |= ACTIVE_RX; - spin_unlock_irqrestore(&glink->activity_lock, flags); - + atomic_inc(&glink->activity_cnt); spi_resume(&glink->cmpnt); } @@ -355,13 +334,7 @@ static void glink_spi_xprt_set_poll_mode(struct glink_spi *glink) */ static void glink_spi_xprt_set_irq_mode(struct glink_spi *glink) { - unsigned long flags; - - spin_lock_irqsave(&glink->activity_lock, flags); - glink->activity_flag &= ~ACTIVE_RX; - spin_unlock_irqrestore(&glink->activity_lock, flags); - - spi_suspend(&glink->cmpnt); + atomic_dec(&glink->activity_cnt); } static struct glink_channel *glink_spi_alloc_channel(struct glink_spi *glink, @@ -504,7 +477,7 @@ static size_t glink_spi_rx_avail(struct glink_spi *glink) u32 tail; int ret; - if (unlikely(glink->in_reset)) + if (atomic_read(&glink->in_reset)) return 0; if (unlikely(!pipe->fifo_base)) { @@ -578,7 +551,7 @@ static size_t glink_spi_tx_avail(struct glink_spi *glink) u32 tail; int ret; - if (unlikely(glink->in_reset)) + if (atomic_read(&glink->in_reset)) return 0; if (unlikely(!pipe->fifo_base)) { @@ -682,7 +655,7 @@ static int glink_spi_tx(struct glink_spi *glink, void *hdr, size_t hlen, goto out; } - if (unlikely(glink->in_reset)) { + if (atomic_read(&glink->in_reset)) { ret = -ENXIO; goto out; } @@ -1177,7 +1150,7 @@ static int glink_spi_send_short(struct glink_channel *channel, return -EAGAIN; } - if (unlikely(glink->in_reset)) { + if (atomic_read(&glink->in_reset)) { 
mutex_unlock(&glink->tx_lock); return -EINVAL; } @@ -1226,7 +1199,7 @@ static int glink_spi_send_data(struct glink_channel *channel, return -EAGAIN; } - if (unlikely(glink->in_reset)) { + if (atomic_read(&glink->in_reset)) { mutex_unlock(&glink->tx_lock); return -EINVAL; } @@ -1259,11 +1232,8 @@ static int __glink_spi_send(struct glink_channel *channel, CH_INFO(channel, "size:%d, wait:%d\n", len, wait); - spin_lock_irqsave(&glink->activity_lock, flags); - glink->activity_flag |= ACTIVE_TX; - spin_unlock_irqrestore(&glink->activity_lock, flags); - - kref_get(&channel->refcount); + atomic_inc(&glink->activity_cnt); + spi_resume(&glink->cmpnt); while (!intent) { spin_lock_irqsave(&channel->intent_lock, flags); idr_for_each_entry(&channel->riids, tmp, iid) { @@ -1315,11 +1285,8 @@ static int __glink_spi_send(struct glink_channel *channel, /* Mark intent available if we failed */ if (ret && intent) intent->in_use = false; - kref_put(&channel->refcount, glink_spi_channel_release); - spin_lock_irqsave(&glink->activity_lock, flags); - glink->activity_flag &= ~ACTIVE_TX; - spin_unlock_irqrestore(&glink->activity_lock, flags); + atomic_dec(&glink->activity_cnt); return ret; } @@ -1360,30 +1327,38 @@ static void glink_spi_handle_rx_done(struct glink_spi *glink, spin_unlock_irqrestore(&channel->intent_lock, flags); } -static void glink_spi_rx_done(struct glink_spi *glink, - struct glink_channel *channel, - struct glink_core_rx_intent *intent) +static int __glink_spi_rx_done(struct glink_spi *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent, + bool wait) { - /* We don't send RX_DONE to intentless systems */ - if (glink->intentless) { + struct { + u16 id; + u16 lcid; + u32 liid; + u64 reserved; + } __packed cmd; + unsigned int cid = channel->lcid; + unsigned int iid = intent->id; + bool reuse = intent->reuse; + int ret; + + cmd.id = reuse ? 
SPI_CMD_RX_DONE_W_REUSE : SPI_CMD_RX_DONE; + cmd.lcid = cid; + cmd.liid = iid; + + ret = glink_spi_tx(glink, &cmd, sizeof(cmd), NULL, 0, wait); + if (ret) + return ret; + + intent->offset = 0; + if (!reuse) { kfree(intent->data); kfree(intent); - return; } - /* Take it off the tree of receive intents */ - if (!intent->reuse) { - spin_lock(&channel->intent_lock); - idr_remove(&channel->liids, intent->id); - spin_unlock(&channel->intent_lock); - } - - /* Schedule the sending of a rx_done indication */ - spin_lock(&channel->intent_lock); - list_add_tail(&intent->node, &channel->done_intents); - spin_unlock(&channel->intent_lock); - - schedule_work(&channel->intent_work); + CH_INFO(channel, "reuse:%d liid:%d", reuse, iid); + return 0; } static void glink_spi_rx_done_work(struct work_struct *work) @@ -1392,39 +1367,56 @@ static void glink_spi_rx_done_work(struct work_struct *work) intent_work); struct glink_spi *glink = channel->glink; struct glink_core_rx_intent *intent, *tmp; - struct { - u16 id; - u16 lcid; - u32 liid; - u64 reserved; - } __packed cmd; - - unsigned int lcid = channel->lcid; - unsigned int iid; - bool reuse; unsigned long flags; + atomic_inc(&glink->activity_cnt); + spi_resume(&glink->cmpnt); + spin_lock_irqsave(&channel->intent_lock, flags); list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { list_del(&intent->node); spin_unlock_irqrestore(&channel->intent_lock, flags); - iid = intent->id; - reuse = intent->reuse; - cmd.id = reuse ? 
SPI_CMD_RX_DONE_W_REUSE : SPI_CMD_RX_DONE; - cmd.lcid = lcid; - cmd.liid = iid; + __glink_spi_rx_done(glink, channel, intent, true); - CH_INFO(channel, "reuse:%d liid:%d", reuse, iid); - glink_spi_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); - intent->offset = 0; - if (!reuse) { - kfree(intent->data); - kfree(intent); - } spin_lock_irqsave(&channel->intent_lock, flags); } spin_unlock_irqrestore(&channel->intent_lock, flags); + + atomic_dec(&glink->activity_cnt); +} + +static void glink_spi_rx_done(struct glink_spi *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent) +{ + unsigned long flags; + int ret = -EAGAIN; + + /* We don't send RX_DONE to intentless systems */ + if (glink->intentless) { + kfree(intent->data); + kfree(intent); + return; + } + + /* Take it off the tree of receive intents */ + if (!intent->reuse) { + spin_lock_irqsave(&channel->intent_lock, flags); + idr_remove(&channel->liids, intent->id); + spin_unlock_irqrestore(&channel->intent_lock, flags); + } + + /* Schedule the sending of a rx_done indication */ + if (list_empty(&channel->done_intents)) + ret = __glink_spi_rx_done(glink, channel, intent, false); + + if (ret) { + spin_lock_irqsave(&channel->intent_lock, flags); + list_add_tail(&intent->node, &channel->done_intents); + schedule_work(&channel->intent_work); + spin_unlock_irqrestore(&channel->intent_lock, flags); + } } /* Locally initiated rpmsg_create_ept */ @@ -1501,13 +1493,12 @@ static int glink_spi_create_remote(struct glink_spi *glink, /* * Send a close request to "undo" our open-ack. The close-ack will - * release the last reference. + * release glink_spi_send_open_req() reference and the last reference + * will be released after rx_close or transport unregister by calling + * glink_spi_remove(). 
*/ glink_spi_send_close_req(glink, channel); - /* Release glink_spi_send_open_req() reference */ - kref_put(&channel->refcount, glink_spi_channel_release); - return ret; } @@ -1594,7 +1585,6 @@ static void glink_spi_destroy_ept(struct rpmsg_endpoint *ept) { struct glink_channel *channel = to_glink_channel(ept); struct glink_spi *glink = channel->glink; - struct rpmsg_channel_info chinfo; unsigned long flags; spin_lock_irqsave(&channel->recv_lock, flags); @@ -1602,13 +1592,6 @@ static void glink_spi_destroy_ept(struct rpmsg_endpoint *ept) spin_unlock_irqrestore(&channel->recv_lock, flags); /* Decouple the potential rpdev from the channel */ - if (channel->rpdev) { - strlcpy(chinfo.name, channel->name, sizeof(chinfo.name)); - chinfo.src = RPMSG_ADDR_ANY; - chinfo.dst = RPMSG_ADDR_ANY; - - rpmsg_unregister_device(&glink->dev, &chinfo); - } channel->rpdev = NULL; glink_spi_send_close_req(glink, channel); @@ -1637,7 +1620,6 @@ static void glink_spi_rx_close(struct glink_spi *glink, unsigned int rcid) rpmsg_unregister_device(&glink->dev, &chinfo); } - channel->rpdev = NULL; glink_spi_send_close_ack(glink, channel->rcid); @@ -1945,7 +1927,7 @@ static int glink_spi_rx_data(struct glink_spi *glink, /* Handle message when no fragments remain to be received */ if (!left_size) { - spin_lock(&channel->recv_lock); + spin_lock_irqsave(&channel->recv_lock, flags); if (channel->ept.cb) { channel->ept.cb(channel->ept.rpdev, intent->data, @@ -1953,7 +1935,7 @@ static int glink_spi_rx_data(struct glink_spi *glink, channel->ept.priv, RPMSG_ADDR_ANY); } - spin_unlock(&channel->recv_lock); + spin_unlock_irqrestore(&channel->recv_lock, flags); intent->offset = 0; channel->buf = NULL; @@ -2011,7 +1993,7 @@ static int glink_spi_rx_short_data(struct glink_spi *glink, /* Handle message when no fragments remain to be received */ if (!left_size) { - spin_lock(&channel->recv_lock); + spin_lock_irqsave(&channel->recv_lock, flags); if (channel->ept.cb) { channel->ept.cb(channel->ept.rpdev, 
intent->data, @@ -2019,7 +2001,7 @@ static int glink_spi_rx_short_data(struct glink_spi *glink, channel->ept.priv, RPMSG_ADDR_ANY); } - spin_unlock(&channel->recv_lock); + spin_unlock_irqrestore(&channel->recv_lock, flags); intent->offset = 0; channel->buf = NULL; @@ -2043,6 +2025,8 @@ static void glink_spi_defer_work(struct work_struct *work) unsigned int param4; unsigned int cmd; + atomic_inc(&glink->activity_cnt); + spi_resume(&glink->cmpnt); for (;;) { spin_lock_irqsave(&glink->rx_lock, flags); if (list_empty(&glink->rx_queue)) { @@ -2078,6 +2062,7 @@ static void glink_spi_defer_work(struct work_struct *work) kfree(dcmd); } + atomic_dec(&glink->activity_cnt); } static int glink_spi_rx_defer(struct glink_spi *glink, @@ -2236,7 +2221,8 @@ static void glink_spi_work(struct kthread_work *work) kfree(rx_data); glink_spi_rx_advance(glink, rx_avail); - } while (inactive_cycles < MAX_INACTIVE_CYCLES && !glink->in_reset); + } while (inactive_cycles < MAX_INACTIVE_CYCLES && + !atomic_read(&glink->in_reset)); glink_spi_xprt_set_irq_mode(glink); } @@ -2256,7 +2242,7 @@ static int glink_spi_cmpnt_event_handler(struct device *dev, void *priv, { struct glink_spi *glink = dev_get_drvdata(dev); struct glink_cmpnt *cmpnt = &glink->cmpnt; - int ret; + int ret = 0; switch (event) { case WDSP_EVENT_PRE_BOOTUP: @@ -2270,7 +2256,7 @@ static int glink_spi_cmpnt_event_handler(struct device *dev, void *priv, GLINK_ERR(glink, "Failed to get transport device\n"); break; case WDSP_EVENT_POST_BOOTUP: - glink->in_reset = false; + atomic_set(&glink->in_reset, 0); ret = glink_spi_send_version(glink); if (ret) GLINK_ERR(glink, "failed to send version %d\n", ret); @@ -2285,13 +2271,15 @@ static int glink_spi_cmpnt_event_handler(struct device *dev, void *priv, case WDSP_EVENT_RESUME: break; case WDSP_EVENT_SUSPEND: + if (atomic_read(&glink->activity_cnt)) + ret = -EBUSY; break; default: GLINK_INFO(glink, "unhandled event %d", event); break; } - return 0; + return ret; } /* glink_spi_cmpnt_ops - 
Callback operations registered wtih wdsp framework */ @@ -2412,9 +2400,8 @@ struct glink_spi *qcom_glink_spi_register(struct device *parent, idr_init(&glink->lcids); idr_init(&glink->rcids); - glink->in_reset = true; - glink->activity_flag = 0; - spin_lock_init(&glink->activity_lock); + atomic_set(&glink->in_reset, 1); + atomic_set(&glink->activity_cnt, 0); ret = glink_spi_init_pipe("tx-descriptors", node, &glink->tx_pipe); if (ret) @@ -2468,7 +2455,7 @@ static void glink_spi_remove(struct glink_spi *glink) GLINK_INFO(glink, "\n"); - glink->in_reset = true; + atomic_set(&glink->in_reset, 1); kthread_cancel_work_sync(&glink->rx_work); cancel_work_sync(&glink->rx_defer_work); @@ -2476,16 +2463,27 @@ static void glink_spi_remove(struct glink_spi *glink) if (ret) dev_warn(&glink->dev, "Can't remove GLINK devices: %d\n", ret); + spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->lcids, channel, cid) { + spin_unlock_irqrestore(&glink->idr_lock, flags); + /* cancel_work_sync may sleep */ + cancel_work_sync(&channel->intent_work); + spin_lock_irqsave(&glink->idr_lock, flags); + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + spin_lock_irqsave(&glink->idr_lock, flags); /* Release any defunct local channels, waiting for close-ack */ idr_for_each_entry(&glink->lcids, channel, cid) { - if (kref_put(&channel->refcount, glink_spi_channel_release)) - idr_remove(&glink->lcids, cid); + kref_put(&channel->refcount, glink_spi_channel_release); + idr_remove(&glink->lcids, cid); } /* Release any defunct local channels, waiting for close-req */ - idr_for_each_entry(&glink->lcids, channel, cid) + idr_for_each_entry(&glink->lcids, channel, cid) { kref_put(&channel->refcount, glink_spi_channel_release); + idr_remove(&glink->lcids, cid); + } idr_destroy(&glink->lcids); idr_destroy(&glink->rcids); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index c0c75731adc16ead6ababbd96ae6f80af4b6c724..bef6d430559e679704bd949cc4455a73800a8690 100644 --- 
a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -359,6 +359,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; + if (!rtc->ops) + return -ENODEV; + else if (!rtc->ops->set_alarm) + return -EINVAL; + err = rtc_valid_tm(&alarm->time); if (err != 0) return err; diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c index a3418a8a37965a909fc4a86cb1cd513d2c9f8c9a..97fdc99bfeefbf1eda692be0a7b464c773102060 100644 --- a/drivers/rtc/rtc-tps6586x.c +++ b/drivers/rtc/rtc-tps6586x.c @@ -276,14 +276,15 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, rtc); - rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), - &tps6586x_rtc_ops, THIS_MODULE); + rtc->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc->rtc)) { ret = PTR_ERR(rtc->rtc); - dev_err(&pdev->dev, "RTC device register: ret %d\n", ret); + dev_err(&pdev->dev, "RTC allocate device: ret %d\n", ret); goto fail_rtc_register; } + rtc->rtc->ops = &tps6586x_rtc_ops; + ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, tps6586x_rtc_irq, IRQF_ONESHOT, @@ -294,6 +295,13 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) goto fail_rtc_register; } disable_irq(rtc->irq); + + ret = rtc_register_device(rtc->rtc); + if (ret) { + dev_err(&pdev->dev, "RTC device register: ret %d\n", ret); + goto fail_rtc_register; + } + return 0; fail_rtc_register: diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index d0244d7979fcb8913bfaf6093334b6f3f5938bf4..a56b526db89a415771930f06539878c95a4a3f0d 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -380,6 +380,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev) if (!tps_rtc) return -ENOMEM; + tps_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(tps_rtc->rtc)) + return PTR_ERR(tps_rtc->rtc); + /* Clear pending interrupts */ ret = 
regmap_read(tps65910->regmap, TPS65910_RTC_STATUS, &rtc_reg); if (ret < 0) @@ -421,10 +425,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev) tps_rtc->irq = irq; device_set_wakeup_capable(&pdev->dev, 1); - tps_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &tps65910_rtc_ops, THIS_MODULE); - if (IS_ERR(tps_rtc->rtc)) { - ret = PTR_ERR(tps_rtc->rtc); + tps_rtc->rtc->ops = &tps65910_rtc_ops; + + ret = rtc_register_device(tps_rtc->rtc); + if (ret) { dev_err(&pdev->dev, "RTC device register: err %d\n", ret); return ret; } diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index 7ce22967fd167d53717f86cb54895a8f4e810b2a..7ed010714f2968a243031e780fcad208b4da05d4 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c @@ -292,13 +292,14 @@ static int rtc_probe(struct platform_device *pdev) goto err_rtc1_iounmap; } - rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops, - THIS_MODULE); + rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc)) { retval = PTR_ERR(rtc); goto err_iounmap_all; } + rtc->ops = &vr41xx_rtc_ops; + rtc->max_user_freq = MAX_PERIODIC_RATE; spin_lock_irq(&rtc_lock); @@ -340,6 +341,10 @@ static int rtc_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n"); + retval = rtc_register_device(rtc); + if (retval) + goto err_iounmap_all; + return 0; err_iounmap_all: diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index b415ba42ca73a7430d387d75f578cbac00761563..599447032e50af8d594c051879d9ea2297780459 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, struct list_head *entry; unsigned long flags; + lockdep_assert_held(&adapter->erp_lock); + if (unlikely(!debug_level_enabled(dbf->rec, level))) return; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 
00e7968a1d70f6b120aa0309087d196f77f5f145..a1388842e17e561e901726ccb61bc44163332c24 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -886,6 +886,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file) unsigned int minor_number; int retval = TW_IOCTL_ERROR_OS_ENODEV; + if (!capable(CAP_SYS_ADMIN)) { + retval = -EACCES; + goto out; + } + minor_number = iminor(inode); if (minor_number >= twa_device_extension_count) goto out; diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 33261b690774a8deeb8ec20835d0b24100a926b0..f6179e3d695397e368cdd57e913d8863e829fa31 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + minor_number = iminor(inode); if (minor_number >= tw_device_extension_count) return -ENODEV; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 0b6467206f8ea7834bfe14ae68a6bae4e6f1fb7c..737314cac8d84fa5640b036503fae866b06c64d6 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -946,9 +946,9 @@ static void cxlflash_remove(struct pci_dev *pdev) return; } - /* If a Task Management Function is active, wait for it to complete - * before continuing with remove. 
- */ + /* Yield to running recovery threads before continuing with remove */ + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && + cfg->state != STATE_PROBING); spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) wait_event_interruptible_lock_irq(cfg->tmf_waitq, @@ -1303,7 +1303,10 @@ static void afu_err_intr_init(struct afu *afu) for (i = 0; i < afu->num_hwqs; i++) { hwq = get_hwq(afu, i); - writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl); + reg = readq_be(&hwq->host_map->ctx_ctrl); + WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); + reg |= SISL_MSI_SYNC_ERROR; + writeq_be(reg, &hwq->host_map->ctx_ctrl); writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); } } diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 09daa86670fcbb412ddb8f4ea01aefd707798c9e..0892fb1f0a1ee35afc4d452e130d1a42f2ad9f77 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -284,6 +284,7 @@ struct sisl_host_map { __be64 cmd_room; __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ #define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */ +#define SISL_CTX_CTRL_LISN_MASK (0xFFULL) __be64 mbox_w; /* restricted use */ __be64 sq_start; /* Submission Queue (R/W): write sequence and */ __be64 sq_end; /* inclusion semantics are the same as RRQ */ diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 2e5fa9717be89acdcc3b1565c0b1c85a5ab089c1..871962b2e2f64df1ae436a5b2ea8b32ca843d925 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -328,10 +328,11 @@ enum { #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 -#define CMD_IS_UNCONSTRAINT(cmd) \ - ((cmd == ATA_CMD_READ_LOG_EXT) || \ - (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \ - (cmd == ATA_CMD_DEV_RESET)) +#define FIS_CMD_IS_UNCONSTRAINED(fis) \ + ((fis.command == ATA_CMD_READ_LOG_EXT) || \ + (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ + 
((fis.command == ATA_CMD_DEV_RESET) && \ + ((fis.control & ATA_SRST) != 0))) static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { @@ -1044,7 +1045,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; - if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command)) + if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; hdr->dw1 = cpu_to_le32(dw1); diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 7195cff51d4c4181f60ab3330796818771590798..9b6f5d024dbae8df9af3625e3d2b35c4a5543462 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4199,6 +4199,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) int irq, i, j; int error = -ENODEV; + if (hba_count >= MAX_CONTROLLERS) + goto out; + if (pci_enable_device(pdev)) goto out; pci_set_master(pdev); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index d8f626567f590723e4e6f626e33c302b2101f100..06a2e3d9fc5b2e15849bfb91a027e44f3e245713 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2677,6 +2677,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); } else { + if (os_timeout_value) + os_timeout_value++; + /* system pd Fast Path */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; timeout_limit = (scmd->device->type == TYPE_DISK) ? 
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7c0064500cc528a9d8c29e3ecd4ff66f005717d6..382edb79a0de99f4bda0551cf999ec3a20a3ad56 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1649,6 +1649,15 @@ static int qedf_vport_destroy(struct fc_vport *vport) struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; + struct qedf_ctx *qedf = lport_priv(vn_port); + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + goto out; + } + + /* Set unloading bit on vport qedf_ctx to prevent more I/O */ + set_bit(QEDF_UNLOADING, &qedf->flags); mutex_lock(&n_port->lp_mutex); list_del(&vn_port->list); @@ -1675,6 +1684,7 @@ static int qedf_vport_destroy(struct fc_vport *vport) if (vn_port->host) scsi_host_put(vn_port->host); +out: return 0; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 9ce28c4f9812b6a4890fd909ae3fec8f6b39bd0c..b09d29931393265ce4989cd5b35b30a98326aa2d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2142,6 +2142,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) msleep(1000); qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); vha->flags.delete_progress = 1; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f852ca60c49fd182192efaeb6853bf5c379f015c..89706341514e29b57ef94f8b96c27fcd6e556bd9 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -200,6 +200,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_abort_cmd(srb_t *); +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); /* * Global Functions in qla_mid.c source file. 
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 59ecc4eda6cdec096d53927b3a994262141f9cd3..2a19ec0660cbbb031c28b9b382cb4563466ea6ef 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3368,6 +3368,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) return rval; done_free_sp: + spin_lock_irqsave(&vha->hw->vport_slock, flags); + list_del(&sp->elem); + spin_unlock_irqrestore(&vha->hw->vport_slock, flags); + if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index bcde6130f12149026b29314b68a811af94905810..1d42d38f5a45a48c9755ec29a280ceb60b385d7f 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1326,11 +1326,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, wait_for_completion(&tm_iocb->u.tmf.comp); - rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? 
- QLA_SUCCESS : QLA_FUNCTION_FAILED; + rval = tm_iocb->u.tmf.data; - if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { - ql_dbg(ql_dbg_taskm, vha, 0x8030, + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8030, "TM IOCB failed (%x).\n", rval); } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 9a2c86eacf44add92137dc94d5bf2846987e89e0..3f5a0f0f8b62824c90f3e155611f6e51a2e3b49b 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -221,6 +221,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) sp->fcport = fcport; sp->iocbs = 1; sp->vha = qpair->vha; + INIT_LIST_HEAD(&sp->elem); + done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index d77dde89118e3ff0e1a6e1e0b18a02743a98d5dd..375a88e18afe6a0c910822bc233b02265e60dc83 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -152,10 +152,15 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) { unsigned long flags; int ret; + fc_port_t *fcport; ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + + qla2x00_mark_all_devices_lost(vha, 0); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->vport_slock, flags); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1be76695e6924331e7e16b57137de694eef3caae..7d7fb5bbb600791de6e4b88bbcfbc115ab03c105 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1136,7 +1136,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) * qla2x00_wait_for_sess_deletion can only be called from remove_one. 
* it has dependency on UNLOADING flag to stop device discovery */ -static void +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { qla2x00_mark_all_devices_lost(vha, 0); @@ -5794,8 +5794,9 @@ qla2x00_do_dpc(void *data) set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); } - if (test_and_clear_bit(ISP_ABORT_NEEDED, - &base_vha->dpc_flags)) { + if (test_and_clear_bit + (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && + !test_bit(UNLOADING, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index a5e30e9449efed76883e62cc2cd2a77b38feeb3d..375cede0c534bb364940a2f0aba077f923c247a4 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"IBM", "3526", "rdac", }, {"IBM", "3542", "rdac", }, {"IBM", "3552", "rdac", }, - {"SGI", "TP9", "rdac", }, + {"SGI", "TP9300", "rdac", }, + {"SGI", "TP9400", "rdac", }, + {"SGI", "TP9500", "rdac", }, + {"SGI", "TP9700", "rdac", }, {"SGI", "IS", "rdac", }, {"STK", "OPENstorage", "rdac", }, {"STK", "FLEXLINE 380", "rdac", }, diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index ea9e1e0ed5b8502be262940459d3cad7d4ec0392..f4944dde6c8e8e1696fe22ba6cab34e44e61aa95 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -430,7 +430,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, * Check that all zones of the device are equal. The last zone can however * be smaller. The zone size must also be a power of two number of LBAs. * - * Returns the zone size in bytes upon success or an error code upon failure. + * Returns the zone size in number of blocks upon success or an error code + * upon failure. 
*/ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) { @@ -440,7 +441,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) unsigned char *rec; unsigned int buf_len; unsigned int list_length; - int ret; + s64 ret; u8 same; /* Get a buffer */ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0576fe79bd1fbb7406ef9fcea7535db9334aa5fd..6dc7f6150c131c00778920e020a3852e0cfc77f2 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2190,6 +2190,7 @@ sg_add_sfp(Sg_device * sdp) write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); + kfree(sfp); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c index 415d4495656aed8877ca4300d7a5eed8fac5073f..c44a0a141a41bd3f39cf2015c08a2e465b7e7e00 100644 --- a/drivers/scsi/ufs/ufs-debugfs.c +++ b/drivers/scsi/ufs/ufs-debugfs.c @@ -1230,7 +1230,8 @@ static int ufsdbg_config_pwr_mode(struct ufs_hba *hba, * hibern8 manually, this is to avoid auto hibern8 * racing during clock frequency scaling sequence. 
*/ - if (ufshcd_is_auto_hibern8_supported(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) { ret = ufshcd_uic_hibern8_enter(hba); if (ret) goto out; @@ -1240,7 +1241,8 @@ static int ufsdbg_config_pwr_mode(struct ufs_hba *hba, if (ret) goto out; - if (ufshcd_is_auto_hibern8_supported(hba)) + if (ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) ret = ufshcd_uic_hibern8_exit(hba); if (scale_up) { diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index a7430a46da4523e7f4281176194eb7241299213a..48676f7693e016ca719ecf53dd9fb708a6a2e559 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1621,7 +1621,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, * If auto hibern8 is supported then the link will already * be in hibern8 state and the ref clock can be gated. */ - if (ufshcd_is_auto_hibern8_supported(hba) || + if ((ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) || !ufs_qcom_is_link_active(hba)) { /* disable device ref_clk */ ufs_qcom_dev_ref_clk_ctrl(host, false); @@ -2100,6 +2101,7 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name, dev_dbg(dev, "%s: unable to find %s err %d, using default\n", __func__, prop_name, ret); vreg->min_uV = VDDP_REF_CLK_MIN_UV; + ret = 0; } snprintf(prop_name, MAX_PROP_SIZE, "%s-max-uV", name); @@ -2108,6 +2110,7 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name, dev_dbg(dev, "%s: unable to find %s err %d, using default\n", __func__, prop_name, ret); vreg->max_uV = VDDP_REF_CLK_MAX_UV; + ret = 0; } out: diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index fd5be8522774a312db57647a6e28924a4891e90d..603829048be5abf67a1bd9d11e7d6ecef6fc7e19 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -445,7 +445,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba); static int 
ufshcd_enable_clocks(struct ufs_hba *hba); static int ufshcd_disable_clocks(struct ufs_hba *hba, bool is_gating_context); -static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba, +static int ufshcd_disable_clocks_keep_link_active(struct ufs_hba *hba, bool is_gating_context); static void ufshcd_hold_all(struct ufs_hba *hba); static void ufshcd_release_all(struct ufs_hba *hba); @@ -1791,7 +1791,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) * hibern8 manually, this is to avoid auto hibern8 * racing during clock frequency scaling sequence. */ - if (ufshcd_is_auto_hibern8_supported(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) { ret = ufshcd_uic_hibern8_enter(hba); if (ret) /* link will be bad state so no need to scale_up_gear */ @@ -1804,7 +1805,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) goto scale_up_gear; ufshcd_custom_cmd_log(hba, "Clk-freq-switched"); - if (ufshcd_is_auto_hibern8_supported(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) { ret = ufshcd_uic_hibern8_exit(hba); if (ret) /* link will be bad state so no need to scale_up_gear */ @@ -2267,15 +2269,16 @@ static void ufshcd_gate_work(struct work_struct *work) } /* - * If auto hibern8 is supported then the link will already + * If auto hibern8 is supported and enabled then the link will already * be in hibern8 state and the ref clock can be gated. 
*/ - if ((ufshcd_is_auto_hibern8_supported(hba) || + if ((((ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled)) || !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating) ufshcd_disable_clocks(hba, true); else /* If link is active, device ref_clk can't be switched off */ - ufshcd_disable_clocks_skip_ref_clk(hba, true); + ufshcd_disable_clocks_keep_link_active(hba, true); /* Put the host controller in low power mode if possible */ ufshcd_hba_vreg_set_lpm(hba); @@ -2757,6 +2760,7 @@ static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, /* wait for all the outstanding requests to finish */ ufshcd_wait_for_doorbell_clr(hba, U64_MAX); ufshcd_set_auto_hibern8_timer(hba, delay_ms); + hba->hibern8_on_idle.is_enabled = !!delay_ms; up_write(&hba->lock); ufshcd_scsi_unblock_requests(hba); ufshcd_release_all(hba); @@ -2871,7 +2875,7 @@ static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev, if (ufshcd_is_auto_hibern8_supported(hba)) { __ufshcd_set_auto_hibern8_timer(hba, value ? hba->hibern8_on_idle.delay_ms : value); - goto update; + goto out; } if (value) { @@ -2887,7 +2891,6 @@ static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev, spin_unlock_irqrestore(hba->host->host_lock, flags); } -update: hba->hibern8_on_idle.is_enabled = value; out: return count; @@ -6282,7 +6285,8 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) if (hba->uic_async_done) { complete(hba->uic_async_done); retval = IRQ_HANDLED; - } else if (ufshcd_is_auto_hibern8_supported(hba)) { + } else if (ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) { /* * If uic_async_done flag is not set then this * is an Auto hibern8 err interrupt. @@ -6952,7 +6956,8 @@ static void ufshcd_err_handler(struct work_struct *work) * process of gating when the err handler runs. 
*/ if (unlikely((hba->clk_gating.state != CLKS_ON) && - ufshcd_is_auto_hibern8_supported(hba))) { + ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled)) { spin_unlock_irqrestore(hba->host->host_lock, flags); hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK; ufshcd_hold(hba, false); @@ -8708,7 +8713,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) * Enable auto hibern8 if supported, after full host and * device initialization. */ - if (ufshcd_is_auto_hibern8_supported(hba)) + if (ufshcd_is_auto_hibern8_supported(hba) && + hba->hibern8_on_idle.is_enabled) ufshcd_set_auto_hibern8_timer(hba, hba->hibern8_on_idle.delay_ms); out: @@ -9422,7 +9428,7 @@ static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused) } static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on, - bool skip_ref_clk, bool is_gating_context) + bool keep_link_active, bool is_gating_context) { int ret = 0; struct ufs_clk_info *clki; @@ -9454,7 +9460,13 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on, list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk)) { - if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) + /* + * To keep link active, both device ref clock and unipro + * clock should be kept ON. 
+ */ + if (keep_link_active && + (!strcmp(clki->name, "ref_clk") || + !strcmp(clki->name, "core_clk_unipro"))) continue; clk_state_changed = on ^ clki->enabled; @@ -9529,7 +9541,7 @@ static int ufshcd_disable_clocks(struct ufs_hba *hba, return ufshcd_setup_clocks(hba, false, false, is_gating_context); } -static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba, +static int ufshcd_disable_clocks_keep_link_active(struct ufs_hba *hba, bool is_gating_context) { return ufshcd_setup_clocks(hba, false, true, is_gating_context); @@ -10012,8 +10024,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (!ufshcd_is_link_active(hba)) ret = ufshcd_disable_clocks(hba, false); else - /* If link is active, device ref_clk can't be switched off */ - ret = ufshcd_disable_clocks_skip_ref_clk(hba, false); + /* + * If link is active, device ref_clk and unipro clock can't be + * switched off. + */ + ret = ufshcd_disable_clocks_keep_link_active(hba, false); if (ret) goto set_link_active; @@ -10614,8 +10629,6 @@ void ufshcd_remove(struct ufs_hba *hba) devfreq_remove_device(hba->devfreq); } - ufshcd_exit_latency_hist(hba); - ufshcd_hba_exit(hba); ufsdbg_remove_debugfs(hba); } diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index afc7ecc3c1876158d33e45933458fc035593c39f..f4e3bd40c72e60c0448c98456f7b53f6be7936bd 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -155,7 +155,7 @@ static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd) return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false); } -static struct imx7_pgc_domain imx7_pgc_domains[] = { +static const struct imx7_pgc_domain imx7_pgc_domains[] = { [IMX7_POWER_DOMAIN_MIPI_PHY] = { .genpd = { .name = "mipi-phy", @@ -321,11 +321,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) continue; } - domain = &imx7_pgc_domains[domain_index]; - domain->regmap = regmap; - domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; - domain->genpd.power_off = 
imx7_gpc_pu_pgc_sw_pdn_req; - pd_pdev = platform_device_alloc("imx7-pgc-domain", domain_index); if (!pd_pdev) { @@ -334,7 +329,20 @@ static int imx_gpcv2_probe(struct platform_device *pdev) return -ENOMEM; } - pd_pdev->dev.platform_data = domain; + ret = platform_device_add_data(pd_pdev, + &imx7_pgc_domains[domain_index], + sizeof(imx7_pgc_domains[domain_index])); + if (ret) { + platform_device_put(pd_pdev); + of_node_put(np); + return ret; + } + + domain = pd_pdev->dev.platform_data; + domain->regmap = regmap; + domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; + domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; + pd_pdev->dev.parent = dev; pd_pdev->dev.of_node = np; diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 8534f1d036a1dfd6efcf3f3ec8262c900efcf3ba..05febcc529e757a7e1df4efdaff13252a4469fbd 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -48,8 +48,8 @@ obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o obj-$(CONFIG_MSM_SYSMON_QMI_COMM) += sysmon-qmi.o obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/ -obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o obj-$(CONFIG_MSM_PIL) += peripheral-loader.o +obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o obj-$(CONFIG_QCOM_RUN_QUEUE_STATS) += rq_stats.o obj-$(CONFIG_MSM_SPCOM) += spcom.o obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/ diff --git a/drivers/soc/qcom/cx_ipeak.c b/drivers/soc/qcom/cx_ipeak.c index a74806446445af0b6a7d54c70013c81f29280a4d..cff789449a8eddc9d84ac8081d5efde171715df2 100644 --- a/drivers/soc/qcom/cx_ipeak.c +++ b/drivers/soc/qcom/cx_ipeak.c @@ -182,6 +182,7 @@ static int cx_ipeak_probe(struct platform_device *pdev) static const struct of_device_id cx_ipeak_match_table[] = { { .compatible = "qcom,cx-ipeak-sdm660"}, + { .compatible = "qcom,cx-ipeak-sm6150"}, {} }; diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c index 
397134fa1ad27c5bb81a1bee581bcb0d1203f56f..dab747433a713f2700c13f859f5d0833e9b8fac8 100644 --- a/drivers/soc/qcom/dcc_v2.c +++ b/drivers/soc/qcom/dcc_v2.c @@ -37,8 +37,6 @@ #define dcc_readl(drvdata, off) \ __raw_readl(drvdata->base + off) -#define dcc_sram_writel(drvdata, val, off) \ - __raw_writel((val), drvdata->ram_base + off) #define dcc_sram_readl(drvdata, off) \ __raw_readl(drvdata->ram_base + off) @@ -157,6 +155,17 @@ struct dcc_drvdata { uint8_t cti_trig; }; +static int dcc_sram_writel(struct dcc_drvdata *drvdata, + uint32_t val, uint32_t off) +{ + if (unlikely(off > (drvdata->ram_size - 4))) + return -EINVAL; + + __raw_writel((val), drvdata->ram_base + off); + + return 0; +} + static bool dcc_ready(struct dcc_drvdata *drvdata) { uint32_t val; @@ -287,7 +296,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) * processing the list */ link |= ((0x1 << 8) & BM(8, 14)); - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, + link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; /* Reset link and prev_off */ addr = 0x00; @@ -297,13 +309,21 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) } addr = DCC_RD_MOD_WR_DESCRIPTOR; - dcc_sram_writel(drvdata, addr, sram_offset); + ret = dcc_sram_writel(drvdata, addr, sram_offset); + if (ret) + goto overstep; sram_offset += 4; - dcc_sram_writel(drvdata, entry->mask, sram_offset); + ret = dcc_sram_writel(drvdata, + entry->mask, sram_offset); + if (ret) + goto overstep; sram_offset += 4; - dcc_sram_writel(drvdata, entry->write_val, sram_offset); + ret = dcc_sram_writel(drvdata, + entry->write_val, sram_offset); + if (ret) + goto overstep; sram_offset += 4; addr = 0; break; @@ -313,7 +333,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) { /* Check if we need to write link of prev entry */ if (link) { - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, + link, sram_offset); + if (ret) + goto 
overstep; sram_offset += 4; } @@ -323,7 +346,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) loop |= DCC_LOOP_DESCRIPTOR; total_len += (total_len - loop_len) * loop_cnt; - dcc_sram_writel(drvdata, loop, sram_offset); + ret = dcc_sram_writel(drvdata, + loop, sram_offset); + if (ret) + goto overstep; sram_offset += 4; loop_start = false; @@ -352,7 +378,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) * processing the list */ link |= ((0x1 << 8) & BM(8, 14)); - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, + link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; /* Reset link and prev_off */ addr = 0x00; @@ -375,13 +404,20 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) addr |= DCC_ADDR_DESCRIPTOR | DCC_WRITE_IND | DCC_AHB_IND; - dcc_sram_writel(drvdata, addr, sram_offset); + ret = dcc_sram_writel(drvdata, addr, sram_offset); + if (ret) + goto overstep; sram_offset += 4; - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; - dcc_sram_writel(drvdata, entry->write_val, sram_offset); + ret = dcc_sram_writel(drvdata, + entry->write_val, sram_offset); + if (ret) + goto overstep; sram_offset += 4; addr = 0x00; link = 0; @@ -405,8 +441,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) if (!prev_addr || prev_addr != addr || prev_off > off) { /* Check if we need to write prev link entry */ if (link) { - dcc_sram_writel(drvdata, + ret = dcc_sram_writel(drvdata, link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; } dev_dbg(drvdata->dev, @@ -414,7 +452,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) sram_offset); /* Write address */ - dcc_sram_writel(drvdata, addr, sram_offset); + ret = dcc_sram_writel(drvdata, + addr, sram_offset); + if (ret) + goto overstep; sram_offset += 4; /* Reset link and prev_off */ @@ 
-457,7 +498,10 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) link |= DCC_LINK_DESCRIPTOR; if (pos) { - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, + link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; link = 0; } @@ -469,7 +513,9 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) } if (link) { - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; } @@ -485,13 +531,17 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list) addr = (0xC105E) & BM(0, 27); addr |= DCC_ADDR_DESCRIPTOR; - dcc_sram_writel(drvdata, addr, sram_offset); + ret = dcc_sram_writel(drvdata, addr, sram_offset); + if (ret) + goto overstep; sram_offset += 4; } /* Setting zero to indicate end of the list */ link = DCC_LINK_DESCRIPTOR; - dcc_sram_writel(drvdata, link, sram_offset); + ret = dcc_sram_writel(drvdata, link, sram_offset); + if (ret) + goto overstep; sram_offset += 4; /* Update ram_cfg and check if the data will overstep */ diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c index 7a701d971b204ad378041cfcb85a2319c9a9332a..1fcf9609a486332aa99a1de126ab6adfb571b46a 100644 --- a/drivers/soc/qcom/dfc_qmi.c +++ b/drivers/soc/qcom/dfc_qmi.c @@ -16,7 +16,6 @@ #include #include -#include #include "qmi_rmnet_i.h" #define CREATE_TRACE_POINTS #include @@ -61,20 +60,6 @@ struct dfc_qmi_data { int restart_state; }; -struct dfc_svc_ind { - struct work_struct work; - struct dfc_qmi_data *data; - void *dfc_info; -}; - -struct dfc_burst_ind { - struct work_struct work; - struct net_device *dev; - struct qos_info *qos; - struct rmnet_bearer_map *bearer; - struct dfc_qmi_data *data; -}; - static void dfc_svc_init(struct work_struct *work); static void dfc_do_burst_flow_control(struct work_struct *work); @@ -257,6 +242,12 @@ struct dfc_flow_status_ind_msg_v01 { u8 eod_ack_reqd; }; +struct 
dfc_svc_ind { + struct work_struct work; + struct dfc_qmi_data *data; + struct dfc_flow_status_ind_msg_v01 dfc_info; +}; + static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, @@ -596,7 +587,7 @@ static int dfc_bearer_flow_ctl(struct net_device *dev, itm = list_entry(p, struct rmnet_flow_map, list); if (itm->bearer_id == bearer->bearer_id) { - qlen = tc_qdisc_flow_control(dev, itm->tcm_handle, + qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle, enable); trace_dfc_qmi_tc(itm->bearer_id, itm->flow_id, bearer->grant_size, qlen, @@ -618,10 +609,9 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev, struct dfc_flow_status_info_type_v01 *fc_info) { struct list_head *p; - struct rmnet_flow_map *flow_itm; - struct rmnet_bearer_map *bearer_itm; + struct rmnet_bearer_map *bearer_itm = NULL; int enable; - int rc = 0, len; + int rc = 0; list_for_each(p, &qos->bearer_head) { bearer_itm = list_entry(p, struct rmnet_bearer_map, list); @@ -635,15 +625,12 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev, enable = fc_info->num_bytes > 0 ? 
1 : 0; - list_for_each(p, &qos->flow_head) { - flow_itm = list_entry(p, struct rmnet_flow_map, list); + if (enable) + netif_tx_wake_all_queues(dev); + else + netif_tx_stop_all_queues(dev); - len = tc_qdisc_flow_control(dev, flow_itm->tcm_handle, enable); - trace_dfc_qmi_tc(flow_itm->bearer_id, flow_itm->flow_id, - fc_info->num_bytes, len, - flow_itm->tcm_handle, enable); - rc++; - } + trace_dfc_qmi_tc(0xFF, 0, fc_info->num_bytes, 0, 0, enable); if (enable == 0 && ack_req) dfc_send_ack(dev, fc_info->bearer_id, @@ -686,33 +673,19 @@ static void dfc_do_burst_flow_control(struct work_struct *work) { struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work; struct dfc_flow_status_ind_msg_v01 *ind = - (struct dfc_flow_status_ind_msg_v01 *)svc_ind->dfc_info; + (struct dfc_flow_status_ind_msg_v01 *)&svc_ind->dfc_info; struct net_device *dev; struct qos_info *qos; struct dfc_flow_status_info_type_v01 *flow_status; u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0; - int i, rc; + int i; - if (!svc_ind->data->rmnet_port) { - kfree(ind); + if (unlikely(svc_ind->data->restart_state)) { kfree(svc_ind); return; } - local_bh_disable(); - while (!rtnl_trylock()) { - if (!svc_ind->data->restart_state) { - cond_resched_softirq(); - } else { - kfree(ind); - kfree(svc_ind); - local_bh_enable(); - return; - } - } - - if (unlikely(svc_ind->data->restart_state)) - goto clean_out; + rcu_read_lock(); for (i = 0; i < ind->flow_status_len; i++) { flow_status = &ind->flow_status[i]; @@ -731,74 +704,20 @@ static void dfc_do_burst_flow_control(struct work_struct *work) if (!qos) continue; + spin_lock_bh(&qos->qos_lock); + if (unlikely(flow_status->bearer_id == 0xFF)) - rc = dfc_all_bearer_flow_ctl( + dfc_all_bearer_flow_ctl( dev, qos, ack_req, flow_status); else - rc = dfc_update_fc_map(dev, qos, ack_req, flow_status); + dfc_update_fc_map(dev, qos, ack_req, flow_status); + + spin_unlock_bh(&qos->qos_lock); } clean_out: - kfree(ind); + rcu_read_unlock(); kfree(svc_ind); - 
rtnl_unlock(); - local_bh_enable(); -} - -static void dfc_bearer_limit_work(struct work_struct *work) -{ - struct dfc_burst_ind *dfc_ind = (struct dfc_burst_ind *)work; - struct rmnet_flow_map *itm; - struct list_head *p; - int qlen, fc; - - local_bh_disable(); - - /* enable transmit on device so that the other - * flows which transmit proceed normally. - * do it here under bh disabled so that the TX softirq - * may not run here - */ - netif_start_queue(dfc_ind->dev); - - while (!rtnl_trylock()) { - if (!dfc_ind->data->restart_state) { - cond_resched_softirq(); - } else { - kfree(dfc_ind); - local_bh_enable(); - return; - } - } - - fc = dfc_ind->bearer->grant_size ? 1 : 0; - /* if grant size is non zero here, we must have already - * got an updated grant. do nothing in that case - */ - if (fc) - goto done; - - list_for_each(p, &dfc_ind->qos->flow_head) { - itm = list_entry(p, struct rmnet_flow_map, list); - - if (itm->bearer_id == dfc_ind->bearer->bearer_id) { - qlen = tc_qdisc_flow_control(dfc_ind->dev, - itm->tcm_handle, fc); - trace_dfc_qmi_tc_limit(itm->bearer_id, itm->flow_id, - dfc_ind->bearer->grant_size, - qlen, itm->tcm_handle, fc); - } - } - - if (dfc_ind->bearer->ack_req) - dfc_send_ack(dfc_ind->dev, dfc_ind->bearer->bearer_id, - dfc_ind->bearer->seq, dfc_ind->qos->mux_id, - DFC_ACK_TYPE_DISABLE); - -done: - kfree(dfc_ind); - rtnl_unlock(); - local_bh_enable(); } static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, @@ -809,9 +728,6 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, struct dfc_flow_status_ind_msg_v01 *ind_msg; struct dfc_svc_ind *svc_ind; - if (!dfc->rmnet_port) - return; - if (qmi != &dfc->handle) return; @@ -829,14 +745,10 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, INIT_WORK((struct work_struct *)svc_ind, dfc_do_burst_flow_control); - svc_ind->dfc_info = kzalloc(sizeof(*ind_msg), GFP_ATOMIC); - if (!svc_ind->dfc_info) { - kfree(svc_ind); - 
return; - } - memcpy(svc_ind->dfc_info, ind_msg, sizeof(*ind_msg)); + memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg)); svc_ind->data = dfc; + queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind); } } @@ -921,7 +833,7 @@ int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi) data->index = index; data->restart_state = 0; - data->dfc_wq = alloc_workqueue("dfc_wq", WQ_HIGHPRI, 1); + data->dfc_wq = create_singlethread_workqueue("dfc_wq"); if (!data->dfc_wq) { pr_err("%s Could not create workqueue\n", __func__); goto err0; @@ -974,35 +886,32 @@ void dfc_qmi_client_exit(void *dfc_data) } void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, - struct sk_buff *skb, struct qmi_info *qmi) + int ip_type, u32 mark, unsigned int len) { struct rmnet_bearer_map *bearer; - struct dfc_burst_ind *dfc_ind; struct rmnet_flow_map *itm; - struct dfc_qmi_data *data; - int ip_type; u32 start_grant; - ip_type = (ip_hdr(skb)->version == IP_VER_6) ? AF_INET6 : AF_INET; + spin_lock(&qos->qos_lock); - itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type); + itm = qmi_rmnet_get_flow_map(qos, mark, ip_type); if (unlikely(!itm)) - return; + goto out; bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id); if (unlikely(!bearer)) - return; + goto out; - trace_dfc_flow_check(bearer->bearer_id, skb->len, bearer->grant_size); + trace_dfc_flow_check(bearer->bearer_id, len, bearer->grant_size); if (!bearer->grant_size) - return; + goto out; start_grant = bearer->grant_size; - if (skb->len >= bearer->grant_size) + if (len >= bearer->grant_size) bearer->grant_size = 0; else - bearer->grant_size -= skb->len; + bearer->grant_size -= len; if (start_grant > bearer->grant_thresh && bearer->grant_size <= bearer->grant_thresh) { @@ -1011,27 +920,9 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, DFC_ACK_TYPE_THRESHOLD); } - if (bearer->grant_size) - return; - - data = (struct dfc_qmi_data *)qmi_rmnet_has_dfc_client(qmi); - if (!data) - return; - - 
dfc_ind = kzalloc(sizeof(*dfc_ind), GFP_ATOMIC); - if (!dfc_ind) - return; - - INIT_WORK((struct work_struct *)dfc_ind, dfc_bearer_limit_work); - - dfc_ind->dev = dev; - dfc_ind->qos = qos; - dfc_ind->bearer = bearer; - dfc_ind->data = data; + if (!bearer->grant_size) + dfc_bearer_flow_ctl(dev, bearer, qos); - /* stop the flow in hope that the worker thread is - * immediately scheduled beyond this point of time - */ - netif_stop_queue(dev); - queue_work(data->dfc_wq, (struct work_struct *)dfc_ind); +out: + spin_unlock(&qos->qos_lock); } diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c index ad4b233b2054215bac54673e4fe730c43802e201..c415881a25193720d3c62a5439acf40fa673f5f1 100644 --- a/drivers/soc/qcom/fsa4480-i2c.c +++ b/drivers/soc/qcom/fsa4480-i2c.c @@ -194,6 +194,7 @@ int fsa4480_unreg_notifier(struct notifier_block *nb, if (!fsa_priv) return -EINVAL; + atomic_set(&(fsa_priv->usbc_mode), 0); fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98); return blocking_notifier_chain_unregister (&fsa_priv->fsa4480_notifier, nb); diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c index 89655eda90d04f1055eaf40a4fef6e54c9bba118..e846eff8ca720f3670c087fd8f3dc37e8722c886 100644 --- a/drivers/soc/qcom/glink_probe.c +++ b/drivers/soc/qcom/glink_probe.c @@ -118,9 +118,11 @@ static int glink_ssr_ssr_cb(struct notifier_block *this, ssr->seq_num); ret = rpmsg_send(ssr->ept, &msg, sizeof(msg)); - if (ret) + if (ret) { GLINK_ERR(dev, "fail to send do cleanup to %s %d\n", nb->ssr_label, ret); + return NOTIFY_DONE; + } ret = wait_for_completion_timeout(&ssr->completion, HZ); if (!ret) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 4749ffe6c9cecb736d8dc15e25eb8d3c2a0eac5b..5c354fdb0df1b3b65b5cbd0f942eef250a37d29a 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -800,6 +800,7 @@ static int icnss_driver_event_server_arrive(void *data) err_setup_msa: icnss_assign_msa_perm_all(penv, 
ICNSS_MSA_PERM_HLOS_ALL); + clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state); err_power_on: icnss_hw_power_off(penv); clear_server: diff --git a/drivers/soc/qcom/llcc-sm8150.c b/drivers/soc/qcom/llcc-sm8150.c index 5ad72b7fc92d1ae7ded1ed7264b6bab461ff35fc..63e98ceb3489ede33f0606fd334e5790e5a00787 100644 --- a/drivers/soc/qcom/llcc-sm8150.c +++ b/drivers/soc/qcom/llcc-sm8150.c @@ -60,15 +60,15 @@ static struct llcc_slice_config sm8150_data[] = { SCT_ENTRY("cpuss", 1, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1), SCT_ENTRY("vidsc0", 2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("voice", 5, 5, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("audio", 6, 6, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("modemhp_grow", 7, 7, 1024, 2, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("voice", 5, 5, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("audio", 6, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modemhp_grow", 7, 7, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 0, 1, 0), SCT_ENTRY("modem", 8, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("modemhw", 9, 9, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("compute", 10, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("gpuhtw", 11, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("gpu", 12, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("mmuhwt", 13, 13, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1), + SCT_ENTRY("mmuhwt", 13, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1), SCT_ENTRY("compute_dma", 15, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("display", 16, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("modemhp_fix", 20, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), @@ -76,7 +76,9 @@ static struct llcc_slice_config sm8150_data[] = { SCT_ENTRY("audiohw", 22, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("npu", 23, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("wlan_hw", 24, 24, 3072, 1, 1, 0xFFF, 0x0, 
0, 0, 0, 1, 0), - SCT_ENTRY("pimem", 25, 25, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modem_vpe", 29, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("ap_tcm", 30, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0), + SCT_ENTRY("write_cache", 31, 31, 128, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 0), }; static int sm8150_qcom_llcc_probe(struct platform_device *pdev) diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index aecfb18f1ca83b9e3a65a8458aad6e1fc548036c..672e4eeb560868a21f1125722725cf7c0a09c4cc 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -73,6 +73,9 @@ static int proxy_timeout_ms = -1; module_param(proxy_timeout_ms, int, 0644); static bool disable_timeouts; + +static struct workqueue_struct *pil_wq; + /** * struct pil_mdt - Representation of .mdt file in memory * @hdr: ELF32 header @@ -803,8 +806,11 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg) if (fw->size != seg->filesz) { pil_err(desc, "Blob size %u doesn't match %lu\n", ret, seg->filesz); + release_firmware(fw); return -EPERM; } + + release_firmware(fw); } /* Zero out trailing memory */ @@ -893,20 +899,19 @@ struct pil_seg_data { int seg_id; struct pil_desc *desc; struct pil_seg *seg; - struct completion load_done; + struct work_struct load_seg_work; int retval; }; -static int pil_load_seg_thread_fn(void *data) +static void pil_load_seg_work_fn(struct work_struct *work) { - struct pil_seg_data *pil_seg_data = data; + struct pil_seg_data *pil_seg_data = container_of(work, + struct pil_seg_data, + load_seg_work); struct pil_desc *desc = pil_seg_data->desc; struct pil_seg *seg = pil_seg_data->seg; pil_seg_data->retval = pil_load_seg(desc, seg); - complete(&pil_seg_data->load_done); - - return 0; } static int pil_load_segs(struct pil_desc *desc) @@ -915,7 +920,6 @@ static int pil_load_segs(struct pil_desc *desc) struct pil_priv *priv = desc->priv; struct pil_seg_data *pil_seg_data; struct pil_seg 
*seg; - struct task_struct *task_load_seg; DECLARE_BITMAP(err_map, priv->num_segs); pil_seg_data = kcalloc(priv->num_segs, sizeof(*pil_seg_data), @@ -928,23 +932,10 @@ static int pil_load_segs(struct pil_desc *desc) pil_seg_data[seg_id].seg_id = seg_id; pil_seg_data[seg_id].desc = desc; pil_seg_data[seg_id].seg = seg; - init_completion(&pil_seg_data[seg_id].load_done); - task_load_seg = kthread_run(pil_load_seg_thread_fn, - &pil_seg_data[seg_id], - "%s-%d", desc->name, seg_id); - /* - * For error handling, do not block/kill other threads. Just - * set the error return code for this thread and call its - * completion. Errors can be handled while the threads are being - * collected. - */ - if (IS_ERR(task_load_seg)) { - pil_seg_data[seg_id].retval = PTR_ERR(task_load_seg); - complete(&pil_seg_data[seg_id].load_done); - pil_err(desc, - "Failed to spawn the thread for seg_id: %d, rc: %d\n", - seg_id, pil_seg_data[seg_id].retval); - } + + INIT_WORK(&pil_seg_data[seg_id].load_seg_work, + pil_load_seg_work_fn); + queue_work(pil_wq, &pil_seg_data[seg_id].load_seg_work); seg_id++; } @@ -954,9 +945,7 @@ static int pil_load_segs(struct pil_desc *desc) /* Wait for the parallel loads to finish */ seg_id = 0; list_for_each_entry(seg, &desc->priv->segs, list) { - if (wait_for_completion_interruptible( - &pil_seg_data[seg_id].load_done)) - pil_seg_data[seg_id].retval = -ERESTARTSYS; + flush_work(&pil_seg_data[seg_id].load_seg_work); /* Don't exit if one of the thread fails. Wait for others to * complete. Bitmap the return codes we get from the threads. 
@@ -990,6 +979,7 @@ int pil_boot(struct pil_desc *desc) { int ret; char fw_name[30]; + struct pil_seg *seg; const struct pil_mdt *mdt; const struct elf32_hdr *ehdr; const struct firmware *fw; @@ -1100,9 +1090,21 @@ int pil_boot(struct pil_desc *desc) trace_pil_event("before_load_seg", desc); - ret = pil_load_segs(desc); - if (ret) - goto err_deinit_image; + /** + * Fallback to serial loading of blobs if the + * workqueue creatation failed during module init. + */ + if (pil_wq) { + ret = pil_load_segs(desc); + if (ret) + goto err_deinit_image; + } else { + list_for_each_entry(seg, &desc->priv->segs, list) { + ret = pil_load_seg(desc, seg); + if (ret) + goto err_deinit_image; + } + } if (desc->subsys_vmid > 0) { trace_pil_event("before_reclaim_mem", desc); @@ -1390,6 +1392,11 @@ static int __init msm_pil_init(void) pr_err("SMEM is not initialized.\n"); return -EPROBE_DEFER; } + + pil_wq = alloc_workqueue("pil_workqueue", WQ_HIGHPRI | WQ_UNBOUND, 0); + if (!pil_wq) + pr_warn("pil: Defaulting to sequential firmware loading.\n"); + out: return register_pm_notifier(&pil_pm_notifier); } @@ -1397,6 +1404,8 @@ subsys_initcall(msm_pil_init); static void __exit msm_pil_exit(void) { + if (pil_wq) + destroy_workqueue(pil_wq); unregister_pm_notifier(&pil_pm_notifier); if (pil_info_base) iounmap(pil_info_base); diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index 79921d2259d366fb9e32cf2c29607fb680366a58..bb67e8296ed431228b35b8c65922315578c3a39e 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include #include @@ -39,6 +41,28 @@ module_param(poolsize, int, 0644); static int itemsize = QDSS_BUF_SIZE; module_param(itemsize, int, 0644); +static struct class *mhi_class; + +static const char * const str_mhi_transfer_mode[] = { + [MHI_TRANSFER_TYPE_USB] = "usb", + [MHI_TRANSFER_TYPE_UCI] = "uci", +}; + +static int qdss_destroy_mhi_buf_tbl(struct 
qdss_bridge_drvdata *drvdata) +{ + struct list_head *start, *temp; + struct qdss_mhi_buf_tbl_t *entry = NULL; + + list_for_each_safe(start, temp, &drvdata->mhi_buf_tbl) { + entry = list_entry(start, struct qdss_mhi_buf_tbl_t, link); + list_del(&entry->link); + kfree(entry->buf); + kfree(entry); + } + + return 0; +} + static int qdss_destroy_buf_tbl(struct qdss_bridge_drvdata *drvdata) { struct list_head *start, *temp; @@ -67,7 +91,7 @@ static int qdss_create_buf_tbl(struct qdss_bridge_drvdata *drvdata) if (!entry) goto err; - buf = kzalloc(QDSS_BUF_SIZE, GFP_KERNEL); + buf = kzalloc(drvdata->mtu, GFP_KERNEL); usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL); entry->buf = buf; @@ -129,11 +153,99 @@ static void qdss_buf_tbl_remove(struct qdss_bridge_drvdata *drvdata, static void mhi_ch_close(struct qdss_bridge_drvdata *drvdata) { - flush_workqueue(drvdata->mhi_wq); - qdss_destroy_buf_tbl(drvdata); - mhi_unprepare_from_transfer(drvdata->mhi_dev); + if (drvdata->mode == MHI_TRANSFER_TYPE_USB) { + flush_workqueue(drvdata->mhi_wq); + qdss_destroy_buf_tbl(drvdata); + } else if (drvdata->mode == MHI_TRANSFER_TYPE_UCI) { + qdss_destroy_mhi_buf_tbl(drvdata); + if (drvdata->cur_buf) + kfree(drvdata->cur_buf->buf); + + drvdata->cur_buf = NULL; + } +} + +static ssize_t mhi_show_transfer_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qdss_bridge_drvdata *drvdata = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + str_mhi_transfer_mode[drvdata->mode]); +} + +static ssize_t mhi_store_transfer_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct qdss_bridge_drvdata *drvdata = dev_get_drvdata(dev); + char str[10] = ""; + int ret; + + if (strlen(buf) >= 10) + return -EINVAL; + if (sscanf(buf, "%3s", str) != 1) + return -EINVAL; + + spin_lock_bh(&drvdata->lock); + if (!strcmp(str, str_mhi_transfer_mode[MHI_TRANSFER_TYPE_UCI])) { + if (drvdata->mode == MHI_TRANSFER_TYPE_USB) { + if 
(drvdata->opened == ENABLE) { + drvdata->opened = DISABLE; + drvdata->mode = MHI_TRANSFER_TYPE_UCI; + spin_unlock_bh(&drvdata->lock); + usb_qdss_close(drvdata->usb_ch); + mhi_unprepare_from_transfer(drvdata->mhi_dev); + mhi_ch_close(drvdata); + } else if (drvdata->opened == DISABLE) { + drvdata->mode = MHI_TRANSFER_TYPE_UCI; + spin_unlock_bh(&drvdata->lock); + } else { + ret = -ERESTARTSYS; + goto out; + } + } else + spin_unlock_bh(&drvdata->lock); + + } else if (!strcmp(str, str_mhi_transfer_mode[MHI_TRANSFER_TYPE_USB])) { + if (drvdata->mode == MHI_TRANSFER_TYPE_UCI) { + if (drvdata->opened == ENABLE) { + drvdata->opened = DISABLE; + spin_unlock_bh(&drvdata->lock); + wake_up(&drvdata->uci_wq); + mhi_unprepare_from_transfer(drvdata->mhi_dev); + mhi_ch_close(drvdata); + drvdata->mode = MHI_TRANSFER_TYPE_USB; + queue_work(drvdata->mhi_wq, + &drvdata->open_work); + } else if (drvdata->opened == DISABLE) { + drvdata->mode = MHI_TRANSFER_TYPE_USB; + spin_unlock_bh(&drvdata->lock); + queue_work(drvdata->mhi_wq, + &drvdata->open_work); + } else { + ret = -ERESTARTSYS; + goto out; + } + } else + spin_unlock_bh(&drvdata->lock); + + } else { + ret = -EINVAL; + goto out; + } + + ret = size; + return ret; +out: + spin_unlock_bh(&drvdata->lock); + return ret; } +static DEVICE_ATTR(mode, 0644, + mhi_show_transfer_mode, mhi_store_transfer_mode); + + static void mhi_read_work_fn(struct work_struct *work) { int err = 0; @@ -146,14 +258,14 @@ static void mhi_read_work_fn(struct work_struct *work) read_work); do { - if (!drvdata->opened) + if (drvdata->opened != ENABLE) break; entry = qdss_get_entry(drvdata); if (!entry) break; err = mhi_queue_transfer(drvdata->mhi_dev, DMA_FROM_DEVICE, - entry->buf, QDSS_BUF_SIZE, mhi_flags); + entry->buf, drvdata->mtu, mhi_flags); if (err) { pr_err_ratelimited("Unable to read from MHI buffer err:%d", err); @@ -203,7 +315,7 @@ static void mhi_read_done_work_fn(struct work_struct *work) LIST_HEAD(head); do { - if (!(drvdata->opened)) + if 
(drvdata->opened != ENABLE) break; spin_lock_bh(&drvdata->lock); if (list_empty(&drvdata->read_done_list)) { @@ -227,7 +339,7 @@ static void mhi_read_done_work_fn(struct work_struct *work) * read, discard the buffers here and do not forward * them to the mux layer. */ - if (drvdata->opened) { + if (drvdata->opened == ENABLE) { err = usb_write(drvdata, buf, len); if (err) qdss_buf_tbl_remove(drvdata, buf); @@ -282,18 +394,26 @@ static int mhi_ch_open(struct qdss_bridge_drvdata *drvdata) { int ret; - if (drvdata->opened) + spin_lock_bh(&drvdata->lock); + if (drvdata->opened == ENABLE) return 0; + if (drvdata->opened == SSR) + return -ERESTARTSYS; + drvdata->opened = ENABLE; + spin_unlock_bh(&drvdata->lock); + ret = mhi_prepare_for_transfer(drvdata->mhi_dev); if (ret) { pr_err("Unable to open MHI channel\n"); - return ret; + goto err; } + return 0; +err: spin_lock_bh(&drvdata->lock); - drvdata->opened = 1; + drvdata->opened = DISABLE; spin_unlock_bh(&drvdata->lock); - return 0; + return ret; } static void qdss_bridge_open_work_fn(struct work_struct *work) @@ -320,6 +440,7 @@ static void qdss_bridge_open_work_fn(struct work_struct *work) return; err: + mhi_unprepare_from_transfer(drvdata->mhi_dev); mhi_ch_close(drvdata); err_open: pr_err("Open work failed with err:%d\n", ret); @@ -342,8 +463,10 @@ static void qdss_mhi_read_cb(struct mhi_device *mhi_dev, return; buf = result->buf_addr; - if (drvdata->opened && + spin_lock_bh(&drvdata->lock); + if (drvdata->opened == ENABLE && result->transaction_status != -ENOTCONN) { + spin_unlock_bh(&drvdata->lock); tp = kmalloc(sizeof(*tp), GFP_ATOMIC); if (!tp) return; @@ -352,12 +475,243 @@ static void qdss_mhi_read_cb(struct mhi_device *mhi_dev, spin_lock_bh(&drvdata->lock); list_add_tail(&tp->link, &drvdata->read_done_list); spin_unlock_bh(&drvdata->lock); - queue_work(drvdata->mhi_wq, &drvdata->read_done_work); + if (drvdata->mode == MHI_TRANSFER_TYPE_USB) + queue_work(drvdata->mhi_wq, &drvdata->read_done_work); + else + 
wake_up(&drvdata->uci_wq); } else { - qdss_buf_tbl_remove(drvdata, buf); + if (drvdata->mode == MHI_TRANSFER_TYPE_USB) { + spin_unlock_bh(&drvdata->lock); + qdss_buf_tbl_remove(drvdata, buf); + } else { + spin_unlock_bh(&drvdata->lock); + return; + } } + } +static int mhi_uci_release(struct inode *inode, struct file *file) +{ + struct qdss_bridge_drvdata *drvdata = file->private_data; + + spin_lock_bh(&drvdata->lock); + if (drvdata->mode == MHI_TRANSFER_TYPE_UCI) { + if (drvdata->opened == ENABLE) { + drvdata->opened = DISABLE; + spin_unlock_bh(&drvdata->lock); + wake_up(&drvdata->uci_wq); + mhi_unprepare_from_transfer(drvdata->mhi_dev); + mhi_ch_close(drvdata); + } else if (drvdata->opened == SSR) { + spin_unlock_bh(&drvdata->lock); + complete(&drvdata->completion); + } else + spin_unlock_bh(&drvdata->lock); + } else + spin_unlock_bh(&drvdata->lock); + + return 0; +} + +static ssize_t mhi_uci_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct qdss_bridge_drvdata *drvdata = file->private_data; + struct mhi_device *mhi_dev = drvdata->mhi_dev; + struct qdss_mhi_buf_tbl_t *uci_buf; + char *ptr; + size_t to_copy; + int ret = 0; + + if (!buf) + return -EINVAL; + + pr_debug("Client provided buf len:%lu\n", count); + + /* confirm channel is active */ + spin_lock_bh(&drvdata->lock); + if (drvdata->opened != ENABLE || + drvdata->mode != MHI_TRANSFER_TYPE_UCI) { + spin_unlock_bh(&drvdata->lock); + return -ERESTARTSYS; + } + + /* No data available to read, wait */ + if (!drvdata->cur_buf && list_empty(&drvdata->read_done_list)) { + spin_unlock_bh(&drvdata->lock); + + pr_debug("No data available to read waiting\n"); + ret = wait_event_interruptible(drvdata->uci_wq, + ((drvdata->opened != ENABLE + || !list_empty(&drvdata->read_done_list)))); + if (ret == -ERESTARTSYS) { + pr_debug("Exit signal caught for node\n"); + return -ERESTARTSYS; + } + + spin_lock_bh(&drvdata->lock); + if (drvdata->opened != ENABLE) { + spin_unlock_bh(&drvdata->lock); 
+ pr_debug("node was disabled or SSR occurred.\n"); + ret = -ERESTARTSYS; + return ret; + } + } + + /* new read, get the next descriptor from the list */ + if (!drvdata->cur_buf) { + uci_buf = list_first_entry_or_null(&drvdata->read_done_list, + struct qdss_mhi_buf_tbl_t, link); + if (unlikely(!uci_buf)) { + ret = -EIO; + goto read_error; + } + + list_del(&uci_buf->link); + drvdata->cur_buf = uci_buf; + drvdata->rx_size = uci_buf->len; + pr_debug("Got pkt of size:%zu\n", drvdata->rx_size); + } + + uci_buf = drvdata->cur_buf; + spin_unlock_bh(&drvdata->lock); + + /* Copy the buffer to user space */ + to_copy = min_t(size_t, count, drvdata->rx_size); + ptr = uci_buf->buf + (uci_buf->len - drvdata->rx_size); + ret = copy_to_user(buf, ptr, to_copy); + if (ret) + return ret; + + pr_debug("Copied %lu of %lu bytes\n", to_copy, drvdata->rx_size); + drvdata->rx_size -= to_copy; + + /* we finished with this buffer, queue it back to hardware */ + if (!drvdata->rx_size) { + spin_lock_bh(&drvdata->lock); + drvdata->cur_buf = NULL; + + if (drvdata->opened == ENABLE) + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, + uci_buf->buf, drvdata->mtu, + MHI_EOT); + else + ret = -ERESTARTSYS; + + if (ret) { + pr_err("Failed to recycle element, ret: %d\n", ret); + kfree(uci_buf->buf); + kfree(uci_buf); + uci_buf->buf = NULL; + uci_buf = NULL; + goto read_error; + } + + spin_unlock_bh(&drvdata->lock); + } + + pr_debug("Returning %lu bytes\n", to_copy); + return to_copy; + +read_error: + spin_unlock_bh(&drvdata->lock); + return ret; +} + +static int mhi_queue_inbound(struct qdss_bridge_drvdata *drvdata) +{ + struct mhi_device *mhi_dev = drvdata->mhi_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + void *buf; + struct qdss_mhi_buf_tbl_t *entry; + int ret = -EIO, i; + + for (i = 0; i < nr_trbs; i++) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto err; + + buf = kzalloc(drvdata->mtu, GFP_KERNEL); + if (!buf) { + kfree(entry); + goto err; + 
} + + entry->buf = buf; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, + drvdata->mtu, + MHI_EOT); + if (ret) { + kfree(buf); + kfree(entry); + pr_err("Failed to queue buffer %d\n", i); + return ret; + } + list_add_tail(&entry->link, &drvdata->mhi_buf_tbl); + } + + return ret; +err: + return -ENOMEM; + +} + +static int mhi_uci_open(struct inode *inode, struct file *filp) +{ + int ret = -EIO; + struct qdss_mhi_buf_tbl_t *buf_itr, *tmp; + struct qdss_bridge_drvdata *drvdata = container_of(inode->i_cdev, + struct qdss_bridge_drvdata, + cdev); + + spin_lock_bh(&drvdata->lock); + if (drvdata->opened) { + pr_err("Node was opened or SSR occurred\n"); + spin_unlock_bh(&drvdata->lock); + return ret; + } + drvdata->opened = ENABLE; + spin_unlock_bh(&drvdata->lock); + + ret = mhi_prepare_for_transfer(drvdata->mhi_dev); + if (ret) { + pr_err("Error starting transfer channels\n"); + goto error_open_chan; + } + + ret = mhi_queue_inbound(drvdata); + if (ret) + goto error_rx_queue; + + filp->private_data = drvdata; + return ret; + +error_rx_queue: + mhi_unprepare_from_transfer(drvdata->mhi_dev); + list_for_each_entry_safe(buf_itr, tmp, &drvdata->read_done_list, link) { + list_del(&buf_itr->link); + kfree(buf_itr->buf); + } + +error_open_chan: + spin_lock_bh(&drvdata->lock); + drvdata->opened = DISABLE; + spin_unlock_bh(&drvdata->lock); + return ret; +} + + + +static const struct file_operations mhidev_fops = { + .open = mhi_uci_open, + .release = mhi_uci_release, + .read = mhi_uci_read, +}; + static void qdss_mhi_remove(struct mhi_device *mhi_dev) { struct qdss_bridge_drvdata *drvdata = NULL; @@ -367,14 +721,26 @@ static void qdss_mhi_remove(struct mhi_device *mhi_dev) drvdata = mhi_dev->priv_data; if (!drvdata) return; - if (!drvdata->opened) - return; spin_lock_bh(&drvdata->lock); - drvdata->opened = 0; - spin_unlock_bh(&drvdata->lock); - usb_qdss_close(drvdata->usb_ch); - flush_workqueue(drvdata->mhi_wq); - qdss_destroy_buf_tbl(drvdata); + if (drvdata->opened == 
ENABLE) { + drvdata->opened = SSR; + if (drvdata->mode == MHI_TRANSFER_TYPE_UCI) { + spin_unlock_bh(&drvdata->lock); + wake_up(&drvdata->uci_wq); + wait_for_completion(&drvdata->completion); + } else { + spin_unlock_bh(&drvdata->lock); + usb_qdss_close(drvdata->usb_ch); + } + mhi_ch_close(drvdata); + + } else + spin_unlock_bh(&drvdata->lock); + + device_remove_file(drvdata->dev, &dev_attr_mode); + device_destroy(mhi_class, drvdata->cdev.dev); + cdev_del(&drvdata->cdev); + unregister_chrdev_region(drvdata->cdev.dev, 1); } int qdss_mhi_init(struct qdss_bridge_drvdata *drvdata) @@ -388,8 +754,11 @@ int qdss_mhi_init(struct qdss_bridge_drvdata *drvdata) INIT_WORK(&(drvdata->read_done_work), mhi_read_done_work_fn); INIT_WORK(&(drvdata->open_work), qdss_bridge_open_work_fn); INIT_LIST_HEAD(&drvdata->buf_tbl); + INIT_LIST_HEAD(&drvdata->mhi_buf_tbl); + init_waitqueue_head(&drvdata->uci_wq); + init_completion(&drvdata->completion); INIT_LIST_HEAD(&drvdata->read_done_list); - drvdata->opened = 0; + drvdata->opened = DISABLE; return 0; } @@ -398,7 +767,10 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { int ret; + unsigned int baseminor = 0; + unsigned int count = 1; struct qdss_bridge_drvdata *drvdata; + dev_t dev; drvdata = devm_kzalloc(&mhi_dev->dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) { @@ -406,21 +778,63 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev, return ret; } + ret = alloc_chrdev_region(&dev, baseminor, count, "mhi_qdss"); + if (ret < 0) { + pr_err("alloc_chrdev_region failed %d\n", ret); + return ret; + } + cdev_init(&drvdata->cdev, &mhidev_fops); + + drvdata->cdev.owner = THIS_MODULE; + drvdata->cdev.ops = &mhidev_fops; + + ret = cdev_add(&drvdata->cdev, dev, 1); + if (ret) + goto exit_unreg_chrdev_region; + + drvdata->dev = device_create(mhi_class, NULL, + drvdata->cdev.dev, drvdata, + "mhi_qdss"); + if (IS_ERR(drvdata->dev)) { + pr_err("class_device_create failed %d\n", ret); + ret = -ENOMEM; + goto 
exit_cdev_add; + } + + drvdata->mode = MHI_TRANSFER_TYPE_USB; + drvdata->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); drvdata->mhi_dev = mhi_dev; mhi_device_set_devdata(mhi_dev, drvdata); + dev_set_drvdata(drvdata->dev, drvdata); + + ret = device_create_file(drvdata->dev, &dev_attr_mode); + if (ret) { + pr_err("sysfs node create failed error:%d\n", ret); + goto exit_destroy_device; + } ret = qdss_mhi_init(drvdata); - if (ret) - goto err; + if (ret) { + pr_err("Device probe failed err:%d\n", ret); + goto remove_sysfs_exit; + } queue_work(drvdata->mhi_wq, &drvdata->open_work); return 0; -err: - pr_err("Device probe failed err:%d\n", ret); + +remove_sysfs_exit: + device_remove_file(drvdata->dev, &dev_attr_mode); +exit_destroy_device: + device_destroy(mhi_class, drvdata->cdev.dev); +exit_cdev_add: + cdev_del(&drvdata->cdev); +exit_unreg_chrdev_region: + unregister_chrdev_region(drvdata->cdev.dev, 1); return ret; + } static const struct mhi_device_id qdss_mhi_match_table[] = { - { .chan = "QDSS" }, + { .chan = "QDSS", .driver_data = 0x4000 }, {}, }; @@ -438,7 +852,17 @@ static struct mhi_driver qdss_mhi_driver = { static int __init qdss_bridge_init(void) { - return mhi_driver_register(&qdss_mhi_driver); + int ret; + + mhi_class = class_create(THIS_MODULE, MODULE_NAME); + if (IS_ERR(mhi_class)) + return -ENODEV; + + ret = mhi_driver_register(&qdss_mhi_driver); + if (ret) + class_destroy(mhi_class); + + return ret; } static void __exit qdss_bridge_exit(void) diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h index 60c8b4c63cd2d168736fde27bdd48aea18f98bb5..f5e119b0ce844db2ea140afbcb9126517eea6a31 100644 --- a/drivers/soc/qcom/qdss_bridge.h +++ b/drivers/soc/qcom/qdss_bridge.h @@ -26,10 +26,26 @@ struct qdss_mhi_buf_tbl_t { size_t len; }; +enum mhi_transfer_mode { + MHI_TRANSFER_TYPE_USB, + MHI_TRANSFER_TYPE_UCI, +}; + +enum open_status { + DISABLE, + ENABLE, + SSR, +}; + struct qdss_bridge_drvdata { int alias; - bool opened; + enum 
open_status opened; + struct completion completion; + size_t mtu; + enum mhi_transfer_mode mode; spinlock_t lock; + struct device *dev; + struct cdev cdev; struct mhi_device *mhi_dev; struct work_struct read_work; struct work_struct read_done_work; @@ -39,8 +55,12 @@ struct qdss_bridge_drvdata { struct mhi_client_handle *hdl; struct mhi_client_info_t *client_info; struct list_head buf_tbl; + struct list_head mhi_buf_tbl; struct list_head read_done_list; struct usb_qdss_ch *usb_ch; + struct qdss_mhi_buf_tbl_t *cur_buf; + wait_queue_head_t uci_wq; + size_t rx_size; }; #endif diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c index 6651257f15a4a00f15c61507cc5a99ff46780720..8728f42d3aa8fcd7a0c3843e92f11c8f2d25b0a8 100644 --- a/drivers/soc/qcom/qmi_interface.c +++ b/drivers/soc/qcom/qmi_interface.c @@ -351,8 +351,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout) struct qmi_handle *qmi = txn->qmi; int ret; - ret = wait_for_completion_interruptible_timeout(&txn->completion, - timeout); + ret = wait_for_completion_timeout(&txn->completion, timeout); mutex_lock(&qmi->txn_lock); mutex_lock(&txn->lock); @@ -360,9 +359,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout) mutex_unlock(&txn->lock); mutex_unlock(&qmi->txn_lock); - if (ret < 0) - return ret; - else if (ret == 0) + if (ret == 0) return -ETIMEDOUT; else return txn->result; diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c index 812fc01a2150bf7d96c00c15c7c54d11ae518a84..c38d4c92de35f34e471e21811a04e12840856174 100644 --- a/drivers/soc/qcom/qmi_rmnet.c +++ b/drivers/soc/qcom/qmi_rmnet.c @@ -19,6 +19,7 @@ #include #include "qmi_rmnet_i.h" #include +#include #include #define NLMSG_FLOW_ACTIVATE 1 @@ -26,10 +27,15 @@ #define NLMSG_CLIENT_SETUP 4 #define NLMSG_CLIENT_DELETE 5 -#define FLAG_DFC_MASK 0x0001 +#define FLAG_DFC_MASK 0x000F #define FLAG_POWERSAVE_MASK 0x0010 +#define DFC_MODE_MULTIQ 2 -#define PS_INTERVAL (0x0004 * HZ) +unsigned 
int rmnet_wq_frequency __read_mostly = 4; +module_param(rmnet_wq_frequency, uint, 0644); +MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check"); + +#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ) #define NO_DELAY (0x0000 * HZ) #ifdef CONFIG_QCOM_QMI_DFC @@ -69,22 +75,11 @@ struct qmi_elem_info data_ep_id_type_v01_ei[] = { }; EXPORT_SYMBOL(data_ep_id_type_v01_ei); -static struct qmi_info *qmi_rmnet_qmi_init(void) -{ - struct qmi_info *qmi_info; - - qmi_info = kzalloc(sizeof(*qmi_info), GFP_KERNEL); - if (!qmi_info) - return NULL; - - return qmi_info; -} - void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi) { int i; - if (!qmi || !(qmi->flag & FLAG_DFC_MASK)) + if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ)) return NULL; for (i = 0; i < MAX_CLIENT_NUM; i++) { @@ -197,6 +192,25 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm, itm->tcm_handle = new_map->tcm_handle; } +int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable) +{ + struct netdev_queue *q; + + if (unlikely(tcm_handle >= dev->num_tx_queues)) + return 0; + + q = netdev_get_tx_queue(dev, tcm_handle); + if (unlikely(!q)) + return 0; + + if (enable) + netif_tx_wake_queue(q); + else + netif_tx_stop_queue(q); + + return 0; +} + static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, struct qmi_info *qmi) { @@ -221,14 +235,18 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id, new_map.ip_type, new_map.tcm_handle, 1); + spin_lock_bh(&qos_info->qos_lock); + itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id, new_map.ip_type); if (itm) { qmi_rmnet_update_flow_map(itm, &new_map); } else { - itm = kzalloc(sizeof(*itm), GFP_KERNEL); - if (!itm) + itm = kzalloc(sizeof(*itm), GFP_ATOMIC); + if (!itm) { + spin_unlock_bh(&qos_info->qos_lock); return -ENOMEM; + } qmi_rmnet_update_flow_link(qmi, dev, itm, 1); 
qmi_rmnet_update_flow_map(itm, &new_map); @@ -238,9 +256,11 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, if (bearer) { bearer->flow_ref++; } else { - bearer = kzalloc(sizeof(*bearer), GFP_KERNEL); - if (!bearer) + bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC); + if (!bearer) { + spin_unlock_bh(&qos_info->qos_lock); return -ENOMEM; + } bearer->bearer_id = new_map.bearer_id; bearer->flow_ref = 1; @@ -251,10 +271,12 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, list_add(&bearer->list, &qos_info->bearer_head); } - tc_qdisc_flow_control(dev, itm->tcm_handle, + qmi_rmnet_flow_control(dev, itm->tcm_handle, bearer->grant_size > 0 ? 1 : 0); } + spin_unlock_bh(&qos_info->qos_lock); + return 0; } @@ -277,6 +299,8 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, * tcm->tcm_ifindex - ip_type */ + spin_lock_bh(&qos_info->qos_lock); + new_map.bearer_id = tcm->tcm__pad1; new_map.flow_id = tcm->tcm_parent; new_map.ip_type = tcm->tcm_ifindex; @@ -300,6 +324,8 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, kfree(bearer); } + spin_unlock_bh(&qos_info->qos_lock); + return 0; } @@ -309,7 +335,7 @@ static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi) struct qos_info *qos; struct rmnet_flow_map *m; struct rmnet_bearer_map *bearer; - int qlen, need_enable = 0; + int qlen; if (!qmi_rmnet_has_dfc_client(qmi) || (qmi->flow_cnt == 0)) return 0; @@ -319,21 +345,25 @@ static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi) for (i = 0; i < qmi->flow_cnt; i++) { qos = (struct qos_info *)rmnet_get_qos_pt(qmi->flow[i].dev); m = qmi->flow[i].itm; + + spin_lock_bh(&qos->qos_lock); + bearer = qmi_rmnet_get_bearer_map(qos, m->bearer_id); if (bearer) { - if (bearer->grant_size == 0) - need_enable = 1; bearer->grant_size = DEFAULT_GRANT; bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT); - if (need_enable) { - qlen = tc_qdisc_flow_control(qmi->flow[i].dev, - m->tcm_handle, 1); - 
trace_dfc_qmi_tc(m->bearer_id, m->flow_id, - bearer->grant_size, qlen, - m->tcm_handle, 1); - } + bearer->seq = 0; + bearer->ack_req = 0; } + + qlen = qmi_rmnet_flow_control(qmi->flow[i].dev, + m->tcm_handle, 1); + trace_dfc_qmi_tc(m->bearer_id, m->flow_id, + DEFAULT_GRANT, qlen, + m->tcm_handle, 1); + + spin_unlock_bh(&qos->qos_lock); } return 0; @@ -410,7 +440,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm) idx = (tcm->tcm_handle == 0) ? 0 : 1; if (!qmi) { - qmi = qmi_rmnet_qmi_init(); + qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL); if (!qmi) return -ENOMEM; @@ -422,7 +452,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm) qmi->fc_info[idx].svc.ep_type = tcm->tcm_info; qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent; - if ((tcm->tcm_ifindex & FLAG_DFC_MASK) && + if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) && (qmi->fc_info[idx].dfc_client == NULL)) { rc = dfc_qmi_client_init(port, idx, qmi); if (rc < 0) @@ -484,20 +514,20 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) switch (tcm->tcm_family) { case NLMSG_FLOW_ACTIVATE: - if (!qmi || !(qmi->flag & FLAG_DFC_MASK) || + if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) || !qmi_rmnet_has_dfc_client(qmi)) return; qmi_rmnet_add_flow(dev, tcm, qmi); break; case NLMSG_FLOW_DEACTIVATE: - if (!qmi || !(qmi->flag & FLAG_DFC_MASK)) + if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ)) return; qmi_rmnet_del_flow(dev, tcm, qmi); break; case NLMSG_CLIENT_SETUP: - if (!(tcm->tcm_ifindex & FLAG_DFC_MASK) && + if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) && !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK)) return; @@ -553,16 +583,15 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) EXPORT_SYMBOL(qmi_rmnet_qmi_exit); #ifdef CONFIG_QCOM_QMI_DFC -void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb) +void qmi_rmnet_burst_fc_check(struct net_device *dev, + int 
ip_type, u32 mark, unsigned int len) { - void *port = rmnet_get_rmnet_port(dev); - struct qmi_info *qmi = rmnet_get_qmi_pt(port); struct qos_info *qos = rmnet_get_qos_pt(dev); - if (!qmi || !qos) + if (!qos) return; - dfc_qmi_burst_check(dev, qos, skb, qmi); + dfc_qmi_burst_check(dev, qos, ip_type, mark, len); } EXPORT_SYMBOL(qmi_rmnet_burst_fc_check); @@ -586,21 +615,22 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) qos->tran_num = 0; INIT_LIST_HEAD(&qos->flow_head); INIT_LIST_HEAD(&qos->bearer_head); + spin_lock_init(&qos->qos_lock); return qos; } EXPORT_SYMBOL(qmi_rmnet_qos_init); -void qmi_rmnet_qos_exit(struct net_device *dev) +void qmi_rmnet_qos_exit(struct net_device *dev, void *qos) { void *port = rmnet_get_rmnet_port(dev); struct qmi_info *qmi = rmnet_get_qmi_pt(port); - struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); + struct qos_info *qos_info = (struct qos_info *)qos; if (!qmi || !qos) return; - qmi_rmnet_clean_flow_list(qmi, dev, qos); + qmi_rmnet_clean_flow_list(qmi, dev, qos_info); kfree(qos); } EXPORT_SYMBOL(qmi_rmnet_qos_exit); @@ -651,6 +681,7 @@ static void qmi_rmnet_check_stats(struct work_struct *work) struct rmnet_powersave_work *real_work; u64 rxd, txd; u64 rx, tx; + unsigned long lock_delay; real_work = container_of(to_delayed_work(work), struct rmnet_powersave_work, work); @@ -658,13 +689,28 @@ static void qmi_rmnet_check_stats(struct work_struct *work) if (unlikely(!real_work || !real_work->port)) return; + /* Min Delay for retry errors */ + lock_delay = qmi_rmnet_work_get_active(real_work->port) ? 
+ PS_INTERVAL : (HZ / 50); + if (!rtnl_trylock()) { - queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL); + queue_delayed_work(rmnet_ps_wq, &real_work->work, lock_delay); return; } if (!qmi_rmnet_work_get_active(real_work->port)) { qmi_rmnet_work_set_active(real_work->port, 1); - qmi_rmnet_set_powersave_mode(real_work->port, 0); + /* Retry after small delay if qmi error + * This resumes UL grants by disabling + * powersave mode if successful. + */ + if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) { + qmi_rmnet_work_set_active(real_work->port, 0); + queue_delayed_work(rmnet_ps_wq, + &real_work->work, lock_delay); + rtnl_unlock(); + return; + + } goto end; } @@ -676,7 +722,17 @@ static void qmi_rmnet_check_stats(struct work_struct *work) if (!rxd && !txd) { qmi_rmnet_work_set_active(real_work->port, 0); - qmi_rmnet_set_powersave_mode(real_work->port, 1); + /* Retry after lock delay if enabling powersave fails. + * This will cause UL grants to continue being sent + * suboptimally. Keeps wq active until successful. 
+ */ + if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) { + qmi_rmnet_work_set_active(real_work->port, 1); + queue_delayed_work(rmnet_ps_wq, + &real_work->work, PS_INTERVAL); + + } + rtnl_unlock(); return; } diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h index a2c4ce1e48975c4934c212a900c6028e4f40b208..ab52ee208a8cbd7d0967a1335b31d14042e97c89 100644 --- a/drivers/soc/qcom/qmi_rmnet_i.h +++ b/drivers/soc/qcom/qmi_rmnet_i.h @@ -60,6 +60,7 @@ struct qos_info { struct list_head bearer_head; u32 default_grant; u32 tran_num; + spinlock_t qos_lock; }; struct flow_info { @@ -110,9 +111,11 @@ int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi); void dfc_qmi_client_exit(void *dfc_data); -void dfc_qmi_burst_check(struct net_device *dev, - struct qos_info *qos, struct sk_buff *skb, - struct qmi_info *qmi); +void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, + int ip_type, u32 mark, unsigned int len); + +int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable); + #else static inline struct rmnet_flow_map * qmi_rmnet_get_flow_map(struct qos_info *qos_info, @@ -139,7 +142,7 @@ static inline void dfc_qmi_client_exit(void *dfc_data) static inline void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, - struct sk_buff *skb, struct qmi_info *qmi) + int ip_type, u32 mark, unsigned int len) { } #endif diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index 3918a840a01d6b83fb7fbcfa1dc518f9f944a4f3..0a9d18e196336c7ead0ea692924e2debbb8cf241 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -137,7 +137,8 @@ static int secure_buffer_change_table(struct sg_table *table, int lock) * secure environment to ensure the data is actually present * in RAM */ - dmac_flush_range(chunk_list, chunk_list + chunk_list_len); + dmac_flush_range(chunk_list, + (void *)chunk_list + chunk_list_len); ret = 
secure_buffer_change_chunk(chunk_list_phys, nchunks, V2_CHUNK_SIZE, lock); diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c index faf3a22188368cf6da0763cadad3a97c08782278..b35cd3b27d7487b6541a9a58da3c821e5041bac8 100644 --- a/drivers/soc/qcom/service-locator.c +++ b/drivers/soc/qcom/service-locator.c @@ -195,7 +195,6 @@ static int service_locator_send_msg(struct pd_qmi_client_data *pd) req->domain_offset_valid = true; req->domain_offset = 0; - pd->domain_list = NULL; do { req->domain_offset += domains_read; rc = servreg_loc_send_msg(req, resp, pd); @@ -224,6 +223,7 @@ static int service_locator_send_msg(struct pd_qmi_client_data *pd) pr_err("Service Locator DB updated for client %s\n", pd->client_name); kfree(pd->domain_list); + pd->domain_list = NULL; rc = -EAGAIN; goto out; } @@ -316,7 +316,7 @@ int get_service_location(char *client_name, char *service_name, goto err; } - pqcd = kmalloc(sizeof(struct pd_qmi_client_data), GFP_KERNEL); + pqcd = kzalloc(sizeof(struct pd_qmi_client_data), GFP_KERNEL); if (!pqcd) { rc = -ENOMEM; pr_err("Allocation failed\n"); @@ -357,7 +357,7 @@ static void pd_locator_work(struct work_struct *work) pr_err("Unable to connect to service locator!, rc = %d\n", rc); pdqw->notifier->notifier_call(pdqw->notifier, LOCATOR_DOWN, NULL); - goto err; + goto err_init_servloc; } rc = service_locator_send_msg(data); if (rc) { @@ -365,11 +365,13 @@ static void pd_locator_work(struct work_struct *work) data->service_name, data->client_name, rc); pdqw->notifier->notifier_call(pdqw->notifier, LOCATOR_DOWN, NULL); - goto err; + goto err_servloc_send_msg; } pdqw->notifier->notifier_call(pdqw->notifier, LOCATOR_UP, data); -err: +err_servloc_send_msg: + kfree(data->domain_list); +err_init_servloc: kfree(data); kfree(pdqw); } diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index e6cb274083a50d637fc604ed1a291d0bd99a65a8..93949de7b0ddcd4a24f48de38744599e7779fecb 100644 --- a/drivers/soc/qcom/smp2p.c +++ 
b/drivers/soc/qcom/smp2p.c @@ -420,6 +420,8 @@ static int smp2p_update_bits(void *data, u32 mask, u32 value) val |= value; writel(val, entry->value); spin_unlock(&entry->lock); + SMP2P_INFO("%d: %s: orig:0x%0x new:0x%0x\n", + entry->smp2p->remote_pid, entry->name, orig, val); if (val != orig) qcom_smp2p_kick(entry->smp2p); diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c index 19978d912922b3e0101238f48786b6c57791473e..443f4c23b4fa9ad1246409eeb94160083fb10962 100644 --- a/drivers/soc/qcom/smp2p_sleepstate.c +++ b/drivers/soc/qcom/smp2p_sleepstate.c @@ -13,10 +13,16 @@ #include #include #include +#include +#include +#include +#include +#include #define PROC_AWAKE_ID 12 /* 12th bit */ #define AWAKE_BIT BIT(PROC_AWAKE_ID) static struct qcom_smem_state *state; +struct wakeup_source notify_ws; /** * sleepstate_pm_notifier() - PM notifier callback function. @@ -48,9 +54,18 @@ static struct notifier_block sleepstate_pm_nb = { .priority = INT_MAX, }; +static irqreturn_t smp2p_sleepstate_handler(int irq, void *ctxt) +{ + __pm_wakeup_event(¬ify_ws, 200); + return IRQ_HANDLED; +} + static int smp2p_sleepstate_probe(struct platform_device *pdev) { int ret; + int irq = -1; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; state = qcom_smem_state_get(&pdev->dev, 0, &ret); if (IS_ERR(state)) @@ -59,9 +74,32 @@ static int smp2p_sleepstate_probe(struct platform_device *pdev) ret = register_pm_notifier(&sleepstate_pm_nb); if (ret) - pr_err("%s: power state notif error %d\n", __func__, ret); + dev_err(&pdev->dev, "%s: power state notif error %d\n", + __func__, ret); + + wakeup_source_init(¬ify_ws, "smp2p-sleepstate"); + irq = of_irq_get_byname(node, "smp2p-sleepstate-in"); + if (irq <= 0) { + dev_err(&pdev->dev, + "failed for irq getbyname for smp2p_sleep_state\n"); + ret = -EPROBE_DEFER; + goto err; + } + dev_info(&pdev->dev, "got smp2p-sleepstate-in irq %d\n", irq); + ret = devm_request_threaded_irq(dev, irq, NULL, + 
(irq_handler_t)smp2p_sleepstate_handler, + IRQF_TRIGGER_RISING, "smp2p_sleepstate", dev); + if (ret) { + dev_err(&pdev->dev, "fail to register smp2p threaded_irq=%d\n", + irq); + goto err; + } return 0; +err: + wakeup_source_trash(¬ify_ws); + unregister_pm_notifier(&sleepstate_pm_nb); + return ret; } static const struct of_device_id smp2p_slst_match_table[] = { diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c index 1010f47ddb6e8ee7d3b3be8983c8576d25a9852f..891f94113d1405dc016cef1409939e7e575eab9b 100644 --- a/drivers/soc/qcom/spcom.c +++ b/drivers/soc/qcom/spcom.c @@ -643,6 +643,8 @@ static int spcom_handle_send_command(struct spcom_channel *ch, } /* may fail when RX intent not queued by SP */ ret = rpmsg_trysend(ch->rpdev->ept, tx_buf, tx_buf_size); + if (ret == 0) + break; time_msec += TX_RETRY_DELAY_MSEC; mutex_unlock(&ch->lock); msleep(TX_RETRY_DELAY_MSEC); @@ -845,6 +847,8 @@ static int spcom_handle_send_modified_command(struct spcom_channel *ch, } /* may fail when RX intent not queued by SP */ ret = rpmsg_trysend(ch->rpdev->ept, tx_buf, tx_buf_size); + if (ret == 0) + break; time_msec += TX_RETRY_DELAY_MSEC; mutex_unlock(&ch->lock); msleep(TX_RETRY_DELAY_MSEC); @@ -1961,15 +1965,21 @@ static void spcom_rpdev_remove(struct rpmsg_device *rpdev) pr_err("channel %s not found\n", rpdev->id.name); return; } - /* release all ion buffers locked by the channel */ - for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) { - if (ch->dmabuf_handle_table[i]) { - dma_buf_put(ch->dmabuf_handle_table[i]); - ch->dmabuf_handle_table[i] = NULL; - dev_info(&rpdev->dev, "dma_buf_put(%d)\n", i); + + mutex_lock(&ch->lock); + // unlock all ion buffers of sp_kernel channel + if (strcmp(ch->name, "sp_kernel") == 0) { + for (i = 0; i < ARRAY_SIZE(ch->dmabuf_handle_table); i++) { + if (ch->dmabuf_handle_table[i] != NULL) { + pr_debug("unlocked ion buf #%d fd [%d].\n", + i, ch->dmabuf_fd_table[i]); + dma_buf_put(ch->dmabuf_handle_table[i]); + 
ch->dmabuf_handle_table[i] = NULL; + ch->dmabuf_fd_table[i] = -1; + } } } - mutex_lock(&ch->lock); + ch->rpdev = NULL; ch->rpmsg_abort = true; complete_all(&ch->rx_done); diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c index cb603b4d053c1fbf5b6698a404690fc137ce65f6..985a2c0951d0d4275a8792acee98a5d5523a989d 100644 --- a/drivers/soc/qcom/sysmon-qmi.c +++ b/drivers/soc/qcom/sysmon-qmi.c @@ -79,6 +79,7 @@ static LIST_HEAD(sysmon_list); static DEFINE_MUTEX(sysmon_list_lock); static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = { + [0 ... SUBSYS_NOTIF_TYPE_COUNT - 1] = SSCTL_SSR_EVENT_INVALID, [SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP, [SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP, [SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN, @@ -118,6 +119,11 @@ static struct qmi_msg_handler qmi_indication_handler[] = { {} }; +static bool is_ssctl_event(enum subsys_notif_type notif) +{ + return notif_map[notif] != SSCTL_SSR_EVENT_INVALID; +} + static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc) { struct sysmon_qmi_data *data = container_of(qmi, @@ -258,8 +264,8 @@ int sysmon_send_event(struct subsys_desc *dest_desc, int ret; struct qmi_txn txn; - if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL - || dest_ss == NULL) + if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || + !is_ssctl_event(notif) || event_ss == NULL || dest_ss == NULL) return -EINVAL; mutex_lock(&sysmon_list_lock); diff --git a/drivers/soc/qcom/wda_qmi.c b/drivers/soc/qcom/wda_qmi.c index e01937090d4615f02c39698615e4cccd0cc79f63..8e34aad2574ba1fa592121a79e13d31baee20938 100644 --- a/drivers/soc/qcom/wda_qmi.c +++ b/drivers/soc/qcom/wda_qmi.c @@ -29,7 +29,7 @@ static void wda_svc_config(struct work_struct *work); /* **************************************************** */ #define WDA_SERVICE_ID_V01 0x1A #define WDA_SERVICE_VERS_V01 0x01 -#define WDA_TIMEOUT_MS 10000 +#define WDA_TIMEOUT_MS 20 #define 
QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D #define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 40fa60e6b613093df7d7d91077ace3d9d8bdc540..a03599b91bf3dc21401b602aa1f21aa407bd8d2b 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -89,7 +89,7 @@ #define TIMESTAMP_AFTER BIT(3) #define POST_CMD_DELAY BIT(4) -#define SPI_CORE2X_VOTE (10000) +#define SPI_CORE2X_VOTE (7600) /* GSI CONFIG0 TRE Params */ /* Flags bit fields */ #define GSI_LOOPBACK_EN (BIT(0)) @@ -155,6 +155,7 @@ struct spi_geni_master { int num_xfers; void *ipc; bool shared_se; + bool dis_autosuspend; }; static struct spi_master *get_spi_master(struct device *dev) @@ -311,7 +312,7 @@ static int select_xfer_mode(struct spi_master *spi, struct spi_message *spi_msg) { struct spi_geni_master *mas = spi_master_get_devdata(spi); - int mode = FIFO_MODE; + int mode = SE_DMA; int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) & FIFO_IF_DISABLE); bool dma_chan_valid = @@ -325,10 +326,10 @@ static int select_xfer_mode(struct spi_master *spi, */ if (fifo_disable && !dma_chan_valid) mode = -EINVAL; + else if (!fifo_disable) + mode = SE_DMA; else if (dma_chan_valid) mode = GSI_DMA; - else - mode = FIFO_MODE; return mode; } @@ -719,25 +720,20 @@ static int spi_geni_prepare_message(struct spi_master *spi, mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg); - if (mas->cur_xfer_mode == FIFO_MODE) { - geni_se_select_mode(mas->base, FIFO_MODE); - reinit_completion(&mas->xfer_done); - ret = setup_fifo_params(spi_msg->spi, spi); + if (mas->cur_xfer_mode < 0) { + dev_err(mas->dev, "%s: Couldn't select mode %d", __func__, + mas->cur_xfer_mode); + ret = -EINVAL; } else if (mas->cur_xfer_mode == GSI_DMA) { - mas->num_tx_eot = 0; - mas->num_rx_eot = 0; - mas->num_xfers = 0; - reinit_completion(&mas->tx_cb); - reinit_completion(&mas->rx_cb); memset(mas->gsi, 0, (sizeof(struct spi_geni_gsi) * NUM_SPI_XFER)); 
geni_se_select_mode(mas->base, GSI_DMA); ret = spi_geni_map_buf(mas, spi_msg); } else { - dev_err(mas->dev, "%s: Couldn't select mode %d", __func__, - mas->cur_xfer_mode); - ret = -EINVAL; + geni_se_select_mode(mas->base, mas->cur_xfer_mode); + ret = setup_fifo_params(spi_msg->spi, spi); } + return ret; } @@ -756,13 +752,12 @@ static int spi_geni_unprepare_message(struct spi_master *spi_mas, static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) { struct spi_geni_master *mas = spi_master_get_devdata(spi); - int ret = 0; + int ret = 0, count = 0; u32 max_speed = spi->cur_msg->spi->max_speed_hz; struct se_geni_rsc *rsc = &mas->spi_rsc; - /* Adjust the AB/IB based on the max speed of the slave.*/ + /* Adjust the IB based on the max speed of the slave.*/ rsc->ib = max_speed * DEFAULT_BUS_WIDTH; - rsc->ab = max_speed * DEFAULT_BUS_WIDTH; if (mas->shared_se) { struct se_geni_rsc *rsc; int ret = 0; @@ -784,7 +779,12 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) } else { ret = 0; } - + if (mas->dis_autosuspend) { + count = atomic_read(&mas->dev->power.usage_count); + if (count <= 0) + GENI_SE_ERR(mas->ipc, false, NULL, + "resume usage count mismatch:%d", count); + } if (unlikely(!mas->setup)) { int proto = get_se_proto(mas->base); unsigned int major; @@ -873,6 +873,9 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) mas->shared_se = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) & FIFO_IF_DISABLE); + if (mas->dis_autosuspend) + GENI_SE_DBG(mas->ipc, false, mas->dev, + "Auto Suspend is disabled\n"); } exit_prepare_transfer_hardware: return ret; @@ -881,7 +884,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi) { struct spi_geni_master *mas = spi_master_get_devdata(spi); - + int count = 0; if (mas->shared_se) { struct se_geni_rsc *rsc; int ret = 0; @@ -894,8 +897,16 @@ static int 
spi_geni_unprepare_transfer_hardware(struct spi_master *spi) "%s: Error %d pinctrl_select_state\n", __func__, ret); } - pm_runtime_mark_last_busy(mas->dev); - pm_runtime_put_autosuspend(mas->dev); + if (mas->dis_autosuspend) { + pm_runtime_put_sync(mas->dev); + count = atomic_read(&mas->dev->power.usage_count); + if (count < 0) + GENI_SE_ERR(mas->ipc, false, NULL, + "suspend usage count mismatch:%d", count); + } else { + pm_runtime_mark_last_busy(mas->dev); + pm_runtime_put_autosuspend(mas->dev); + } return 0; } @@ -967,25 +978,65 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN); mas->rx_rem_bytes = xfer->len; } + + if (trans_len > (mas->tx_fifo_depth * mas->tx_fifo_width)) { + if (mas->cur_xfer_mode != SE_DMA) { + mas->cur_xfer_mode = SE_DMA; + geni_se_select_mode(mas->base, mas->cur_xfer_mode); + } + } else { + if (mas->cur_xfer_mode != FIFO_MODE) { + mas->cur_xfer_mode = FIFO_MODE; + geni_se_select_mode(mas->base, mas->cur_xfer_mode); + } + } + geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG); geni_setup_m_cmd(mas->base, m_cmd, m_param); GENI_SE_DBG(mas->ipc, false, mas->dev, - "%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x\n", - __func__, trans_len, xfer->len, spi_tx_cfg, m_cmd); - if (m_cmd & SPI_TX_ONLY) - geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG); + "%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d\n", + __func__, trans_len, xfer->len, spi_tx_cfg, m_cmd, + xfer->cs_change, mas->cur_xfer_mode); + if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) { + int ret = 0; + + ret = geni_se_rx_dma_prep(mas->wrapper_dev, mas->base, + xfer->rx_buf, xfer->len, &xfer->rx_dma); + if (ret) + GENI_SE_ERR(mas->ipc, true, mas->dev, + "Failed to setup Rx dma %d\n", ret); + } + if (m_cmd & SPI_TX_ONLY) { + if (mas->cur_xfer_mode == FIFO_MODE) { + geni_write_reg(mas->tx_wm, mas->base, + SE_GENI_TX_WATERMARK_REG); + } else if (mas->cur_xfer_mode == SE_DMA) { + int 
ret = 0; + + ret = geni_se_tx_dma_prep(mas->wrapper_dev, mas->base, + (void *)xfer->tx_buf, xfer->len, + &xfer->tx_dma); + if (ret) + GENI_SE_ERR(mas->ipc, true, mas->dev, + "Failed to setup tx dma %d\n", ret); + } + } + + /* Ensure all writes are done before the WM interrupt */ mb(); } -static void handle_fifo_timeout(struct spi_geni_master *mas) +static void handle_fifo_timeout(struct spi_geni_master *mas, + struct spi_transfer *xfer) { unsigned long timeout; geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc); reinit_completion(&mas->xfer_done); geni_cancel_m_cmd(mas->base); - geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG); + if (mas->cur_xfer_mode == FIFO_MODE) + geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG); /* Ensure cmd cancel is written */ mb(); timeout = wait_for_completion_timeout(&mas->xfer_done, HZ); @@ -1000,6 +1051,15 @@ static void handle_fifo_timeout(struct spi_geni_master *mas) dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); } + if (mas->cur_xfer_mode == SE_DMA) { + if (xfer->tx_buf) + geni_se_tx_dma_unprep(mas->wrapper_dev, + xfer->tx_dma, xfer->len); + if (xfer->rx_buf) + geni_se_rx_dma_unprep(mas->wrapper_dev, + xfer->rx_dma, xfer->len); + } + } static int spi_geni_transfer_one(struct spi_master *spi, @@ -1015,7 +1075,8 @@ static int spi_geni_transfer_one(struct spi_master *spi, return -EINVAL; } - if (mas->cur_xfer_mode == FIFO_MODE) { + if (mas->cur_xfer_mode != GSI_DMA) { + reinit_completion(&mas->xfer_done); setup_fifo_xfer(xfer, mas, slv->mode, spi); timeout = wait_for_completion_timeout(&mas->xfer_done, msecs_to_jiffies(SPI_XFER_TIMEOUT_MS)); @@ -1029,7 +1090,22 @@ static int spi_geni_transfer_one(struct spi_master *spi, ret = -ETIMEDOUT; goto err_fifo_geni_transfer_one; } + + if (mas->cur_xfer_mode == SE_DMA) { + if (xfer->tx_buf) + geni_se_tx_dma_unprep(mas->wrapper_dev, + xfer->tx_dma, xfer->len); + if (xfer->rx_buf) + geni_se_rx_dma_unprep(mas->wrapper_dev, + xfer->rx_dma, xfer->len); + } } else { + 
mas->num_tx_eot = 0; + mas->num_rx_eot = 0; + mas->num_xfers = 0; + reinit_completion(&mas->tx_cb); + reinit_completion(&mas->rx_cb); + setup_gsi_xfer(xfer, mas, slv, spi); if ((mas->num_xfers >= NUM_SPI_XFER) || (list_is_last(&xfer->transfer_list, @@ -1073,7 +1149,7 @@ static int spi_geni_transfer_one(struct spi_master *spi, dmaengine_terminate_all(mas->tx); return ret; err_fifo_geni_transfer_one: - handle_fifo_timeout(mas); + handle_fifo_timeout(mas, xfer); return ret; } @@ -1189,33 +1265,59 @@ static irqreturn_t geni_spi_irq(int irq, void *dev) goto exit_geni_spi_irq; } m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS); - if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN)) - geni_spi_handle_rx(mas); - - if ((m_irq & M_TX_FIFO_WATERMARK_EN)) - geni_spi_handle_tx(mas); - - if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) || - (m_irq & M_CMD_ABORT_EN)) { - complete(&mas->xfer_done); - /* - * If this happens, then a CMD_DONE came before all the buffer - * bytes were sent out. This is unusual, log this condition and - * disable the WM interrupt to prevent the system from stalling - * due an interrupt storm. - * If this happens when all Rx bytes haven't been received, log - * the condition. - */ - if (mas->tx_rem_bytes) { - geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG); - GENI_SE_DBG(mas->ipc, false, mas->dev, - "%s:Premature Done.tx_rem%d bpw%d\n", - __func__, mas->tx_rem_bytes, mas->cur_word_len); + if (mas->cur_xfer_mode == FIFO_MODE) { + if ((m_irq & M_RX_FIFO_WATERMARK_EN) || + (m_irq & M_RX_FIFO_LAST_EN)) + geni_spi_handle_rx(mas); + + if ((m_irq & M_TX_FIFO_WATERMARK_EN)) + geni_spi_handle_tx(mas); + + if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) || + (m_irq & M_CMD_ABORT_EN)) { + complete(&mas->xfer_done); + /* + * If this happens, then a CMD_DONE came before all the + * buffer bytes were sent out. 
This is unusual, log this + * condition and disable the WM interrupt to prevent the + * system from stalling due an interrupt storm. + * If this happens when all Rx bytes haven't been + * received, log the condition. + */ + if (mas->tx_rem_bytes) { + geni_write_reg(0, mas->base, + SE_GENI_TX_WATERMARK_REG); + GENI_SE_DBG(mas->ipc, false, mas->dev, + "%s:Premature Done.tx_rem%d bpw%d\n", + __func__, mas->tx_rem_bytes, + mas->cur_word_len); + } + if (mas->rx_rem_bytes) + GENI_SE_DBG(mas->ipc, false, mas->dev, + "%s:Premature Done.rx_rem%d bpw%d\n", + __func__, mas->rx_rem_bytes, + mas->cur_word_len); } - if (mas->rx_rem_bytes) - GENI_SE_DBG(mas->ipc, false, mas->dev, - "%s:Premature Done.rx_rem%d bpw%d\n", - __func__, mas->rx_rem_bytes, mas->cur_word_len); + } else if (mas->cur_xfer_mode == SE_DMA) { + u32 dma_tx_status = geni_read_reg(mas->base, + SE_DMA_TX_IRQ_STAT); + u32 dma_rx_status = geni_read_reg(mas->base, + SE_DMA_RX_IRQ_STAT); + + if (dma_tx_status) + geni_write_reg(dma_tx_status, mas->base, + SE_DMA_TX_IRQ_CLR); + if (dma_rx_status) + geni_write_reg(dma_rx_status, mas->base, + SE_DMA_RX_IRQ_CLR); + if (dma_tx_status & TX_DMA_DONE) + mas->tx_rem_bytes = 0; + if (dma_rx_status & RX_DMA_DONE) + mas->rx_rem_bytes = 0; + if (!mas->tx_rem_bytes && !mas->rx_rem_bytes) + complete(&mas->xfer_done); + if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN)) + complete(&mas->xfer_done); } exit_geni_spi_irq: geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR); @@ -1312,6 +1414,15 @@ static int spi_geni_probe(struct platform_device *pdev) goto spi_geni_probe_err; } + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "could not set DMA mask\n"); + goto spi_geni_probe_err; + } + } + if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency", &spi->max_speed_hz)) { dev_err(&pdev->dev, "Max frequency not specified.\n"); @@ -1329,7 +1440,9 
@@ static int spi_geni_probe(struct platform_device *pdev) rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt"); if (rt_pri) spi->rt = true; - + geni_mas->dis_autosuspend = + of_property_read_bool(pdev->dev.of_node, + "qcom,disable-autosuspend"); geni_mas->phys_addr = res->start; geni_mas->size = resource_size(res); geni_mas->base = devm_ioremap(&pdev->dev, res->start, @@ -1369,8 +1482,11 @@ static int spi_geni_probe(struct platform_device *pdev) init_completion(&geni_mas->tx_cb); init_completion(&geni_mas->rx_cb); pm_runtime_set_suspended(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTO_SUSPEND_DELAY); - pm_runtime_use_autosuspend(&pdev->dev); + if (!geni_mas->dis_autosuspend) { + pm_runtime_set_autosuspend_delay(&pdev->dev, + SPI_AUTO_SUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + } pm_runtime_enable(&pdev->dev); ret = spi_register_master(spi); if (ret) { diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index 7f84296355025d9f95d660c14587609fd6921975..a5b0df7e6131e56d9eb63abf995dab6d9b89d13b 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -574,10 +574,15 @@ static int meson_spicc_probe(struct platform_device *pdev) master->max_speed_hz = rate >> 2; ret = devm_spi_register_master(&pdev->dev, master); - if (!ret) - return 0; + if (ret) { + dev_err(&pdev->dev, "spi master registration failed\n"); + goto out_clk; + } - dev_err(&pdev->dev, "spi master registration failed\n"); + return 0; + +out_clk: + clk_disable_unprepare(spicc->core); out_master: spi_master_put(master); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index b392cca8fa4f5ba3c1c499ea8b1bc228fcabdbee..1a6ec226d6e46b36bc36051d3229b49e6441cb03 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -1273,8 +1273,6 @@ static int s3c64xx_spi_resume(struct device *dev) if (ret < 0) return ret; - s3c64xx_spi_hwinit(sdd, sdd->port_id); - return spi_master_resume(master); } 
#endif /* CONFIG_PM_SLEEP */ @@ -1312,6 +1310,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev) if (ret != 0) goto err_disable_src_clk; + s3c64xx_spi_hwinit(sdd, sdd->port_id); + return 0; err_disable_src_clk: diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 52056535f54e07fc882e94e97bbcf12595ea3a62..0fea18ab970e30424d0799f0b265d24f6d26dfc2 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -555,14 +555,16 @@ static int sh_msiof_spi_setup(struct spi_device *spi) /* Configure native chip select mode/polarity early */ clr = MDR1_SYNCMD_MASK; - set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI; + set = MDR1_SYNCMD_SPI; if (spi->mode & SPI_CS_HIGH) clr |= BIT(MDR1_SYNCAC_SHIFT); else set |= BIT(MDR1_SYNCAC_SHIFT); pm_runtime_get_sync(&p->pdev->dev); tmp = sh_msiof_read(p, TMDR1) & ~clr; - sh_msiof_write(p, TMDR1, tmp | set); + sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON); + tmp = sh_msiof_read(p, RMDR1) & ~clr; + sh_msiof_write(p, RMDR1, tmp | set); pm_runtime_put(&p->pdev->dev); p->native_cs_high = spi->mode & SPI_CS_HIGH; p->native_cs_inited = true; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 84dfef4bd6ae6b20090581b9a42fe5d59b62bedd..f85d30dc91878dbb8472b3bc8fec69ff2244fb6a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1222,6 +1222,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) if (!was_busy && ctlr->auto_runtime_pm) { ret = pm_runtime_get_sync(ctlr->dev.parent); if (ret < 0) { + pm_runtime_put_noidle(ctlr->dev.parent); dev_err(&ctlr->dev, "Failed to power device: %d\n", ret); mutex_unlock(&ctlr->io_mutex); diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 639c40f2c242b73db0cee5ce03d5f2fe7dfb6803..a602be653956feee715deee233012617b1d31298 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -232,11 +232,34 @@ static int 
ion_secure_cma_allocate( return ret; } +static void *ion_secure_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + if (!hlos_accessible_buffer(buffer)) { + pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n", + __func__); + return NULL; + } + return ion_heap_map_kernel(heap, buffer); +} + +static int ion_secure_cma_map_user(struct ion_heap *mapper, + struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + if (!hlos_accessible_buffer(buffer)) { + pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n", + __func__); + return -EINVAL; + } + return ion_heap_map_user(mapper, buffer, vma); +} + static struct ion_heap_ops ion_secure_cma_ops = { .allocate = ion_secure_cma_allocate, .free = ion_secure_cma_free, - .map_user = ion_heap_map_user, - .map_kernel = ion_heap_map_kernel, + .map_user = ion_secure_cma_map_user, + .map_kernel = ion_secure_cma_map_kernel, .unmap_kernel = ion_heap_unmap_kernel, }; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index b4505a164a2adb2d793eae1801f24d2822ca6289..fb93dfb830f9035a01400a2593a2a257ee8b065c 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -56,6 +56,11 @@ struct pages_mem { u32 size; }; +int ion_heap_is_system_heap_type(enum ion_heap_type type) +{ + return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM); +} + static struct page *alloc_buffer_page(struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long order, @@ -280,6 +285,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap, if (size / PAGE_SIZE > totalram_pages / 2) return -ENOMEM; + if (ion_heap_is_system_heap_type(buffer->heap->type) && + is_secure_vmid_valid(vmid)) { + pr_info("%s: System heap doesn't support secure allocations\n", + __func__); + return -EINVAL; + } + data.size = 0; INIT_LIST_HEAD(&pages); INIT_LIST_HEAD(&pages_from_pool); @@ -377,7 +389,7 @@ static int 
ion_system_heap_allocate(struct ion_heap *heap, err_free_sg2: /* We failed to zero buffers. Bypass pool */ - buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE; + buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; if (vmid > 0) ion_hyp_unassign_sg(table, &vmid, 1, true); diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c index 7de44a7660e253eab05e5861ef52ab6c70ae27ce..12bb465d2fe691edbae0dd252f1b10259cfabff2 100644 --- a/drivers/staging/android/ion/ion_system_secure_heap.c +++ b/drivers/staging/android/ion/ion_system_secure_heap.c @@ -109,8 +109,8 @@ static void process_one_prefetch(struct ion_heap *sys_heap, int ret; int vmid; + memset(&buffer, 0, sizeof(struct ion_buffer)); buffer.heap = sys_heap; - buffer.flags = 0; ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size, buffer.flags); @@ -167,6 +167,7 @@ static void process_one_shrink(struct ion_heap *sys_heap, size_t pool_size, size; int ret; + memset(&buffer, 0, sizeof(struct ion_buffer)); buffer.heap = sys_heap; buffer.flags = info->vmid; diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index d7f56b29eb90084f8acd6f90a83a6d9d00cbbf33..ca86bfecb401e97c2c94068793858ce90fdf3507 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -106,6 +106,7 @@ static unsigned long lowmem_count(struct shrinker *s, global_node_page_state(NR_INACTIVE_FILE); } +bool lmk_kill_possible(void); static atomic_t shift_adj = ATOMIC_INIT(0); static short adj_max_shift = 353; module_param_named(adj_max_shift, adj_max_shift, short, 0644); @@ -128,6 +129,20 @@ module_param_named(vmpressure_file_min, vmpressure_file_min, int, 0644); static int oom_reaper; module_param_named(oom_reaper, oom_reaper, int, 0644); +/* Variable that helps in feed to the reclaim path */ +static atomic64_t lmk_feed = ATOMIC64_INIT(0); + +/* + * This function can be called whether to include the 
anon LRU pages + * for accounting in the page reclaim. + */ +bool lmk_kill_possible(void) +{ + unsigned long val = atomic64_read(&lmk_feed); + + return !val || time_after_eq(jiffies, val); +} + enum { VMPRESSURE_NO_ADJUST = 0, VMPRESSURE_ADJUST_ENCROACH, @@ -457,9 +472,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) int array_size = ARRAY_SIZE(lowmem_adj); int other_free; int other_file; - - if (!mutex_trylock(&scan_mutex)) - return 0; + bool lock_required = true; other_free = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; @@ -473,6 +486,13 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) else other_file = 0; + if (!get_nr_swap_pages() && (other_free <= lowmem_minfree[0] >> 1) && + (other_file <= lowmem_minfree[0] >> 1)) + lock_required = false; + + if (likely(lock_required) && !mutex_trylock(&scan_mutex)) + return 0; + tune_lmk_param(&other_free, &other_file, sc); scale_percent = get_minfree_scalefactor(sc->gfp_mask); @@ -498,7 +518,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) trace_almk_shrink(0, ret, other_free, other_file, 0); lowmem_print(5, "%s %lu, %x, return 0\n", __func__, sc->nr_to_scan, sc->gfp_mask); - mutex_unlock(&scan_mutex); + if (lock_required) + mutex_unlock(&scan_mutex); return 0; } @@ -529,7 +550,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) lowmem_deathpending_timeout)) { task_unlock(p); rcu_read_unlock(); - mutex_unlock(&scan_mutex); + if (lock_required) + mutex_unlock(&scan_mutex); return 0; } } @@ -538,7 +560,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) lowmem_deathpending_timeout)) if (test_task_lmk_waiting(tsk)) { rcu_read_unlock(); - mutex_unlock(&scan_mutex); + if (lock_required) + mutex_unlock(&scan_mutex); return 0; } @@ -574,13 +597,15 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) long cache_limit = 
minfree * (long)(PAGE_SIZE / 1024); long free = other_free * (long)(PAGE_SIZE / 1024); + atomic64_set(&lmk_feed, 0); if (test_task_lmk_waiting(selected) && (test_task_state(selected, TASK_UNINTERRUPTIBLE))) { lowmem_print(2, "'%s' (%d) is already killed\n", selected->comm, selected->pid); rcu_read_unlock(); - mutex_unlock(&scan_mutex); + if (lock_required) + mutex_unlock(&scan_mutex); return 0; } @@ -638,11 +663,18 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) } else { trace_almk_shrink(1, ret, other_free, other_file, 0); rcu_read_unlock(); + if (other_free < lowmem_minfree[0] && + other_file < lowmem_minfree[0]) + atomic64_set(&lmk_feed, jiffies + HZ); + else + atomic64_set(&lmk_feed, 0); + } lowmem_print(4, "%s %lu, %x, return %lu\n", __func__, sc->nr_to_scan, sc->gfp_mask, rem); - mutex_unlock(&scan_mutex); + if (lock_required) + mutex_unlock(&scan_mutex); return rem; } diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 284cdd44a2ee39f88f8b98db184826bc97337ed9..8b92cf06d06355ab0698bcdb7afb9ff674f42f19 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -1710,7 +1710,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, return 0; } spin_unlock(&fps->fps_lock); - rc = -EBUSY; + rc = -EAGAIN; } spin_lock(&fps->fps_lock); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 29e10021b906b22acfeec08b3cb672d9f5640b77..4b4a2014989488d2286b6020dc316c70135f3e4b 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -47,7 +47,7 @@ static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, __u64 dstcookie); static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn); static void 
kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn); -static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx); +static void kiblnd_unmap_tx(struct kib_tx *tx); static void kiblnd_check_sends_locked(struct kib_conn *conn); static void @@ -65,7 +65,7 @@ kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx) LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ LASSERT(tx->tx_pool); - kiblnd_unmap_tx(ni, tx); + kiblnd_unmap_tx(tx); /* tx may have up to 2 lnet msgs to finalise */ lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; @@ -590,13 +590,9 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc * return 0; } -static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx) +static void kiblnd_unmap_tx(struct kib_tx *tx) { - struct kib_net *net = ni->ni_data; - - LASSERT(net); - - if (net->ibn_fmr_ps) + if (tx->fmr.fmr_pfmr || tx->fmr.fmr_frd) kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); if (tx->tx_nfrags) { @@ -1289,11 +1285,6 @@ kiblnd_connect_peer(struct kib_peer *peer) goto failed2; } - LASSERT(cmid->device); - CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", - libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, - &dev->ibd_ifip, cmid->device->name); - return; failed2: @@ -2995,8 +2986,19 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } else { rc = rdma_resolve_route( cmid, *kiblnd_tunables.kib_timeout * 1000); - if (!rc) + if (!rc) { + struct kib_net *net = peer->ibp_ni->ni_data; + struct kib_dev *dev = net->ibn_dev; + + CDEBUG(D_NET, "%s: connection bound to "\ + "%s:%pI4h:%s\n", + libcfs_nid2str(peer->ibp_nid), + dev->ibd_ifname, + &dev->ibd_ifip, cmid->device->name); + return 0; + } + /* Can't initiate route resolution */ CERROR("Can't resolve route for %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index 
b5d84f3f6071913572606a2e8306e0772804b388..11e01c48f51ac59073e5ad090c9562daff5358e7 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -1571,8 +1571,10 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, return ERR_CAST(res); lock = ldlm_lock_new(res); - if (!lock) + if (!lock) { + ldlm_resource_putref(res); return ERR_PTR(-ENOMEM); + } lock->l_req_mode = mode; lock->l_ast_data = data; @@ -1615,6 +1617,8 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, return ERR_PTR(rc); } + + /** * Enqueue (request) a lock. * On the client this is called from ldlm_cli_enqueue_fini diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 0be55623bac4b0d9141f4e00760c7188dece302f..364d697b26906664db2ae166844c8d81c3509a36 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -93,7 +93,11 @@ ll_xattr_set_common(const struct xattr_handler *handler, __u64 valid; int rc; - if (flags == XATTR_REPLACE) { + /* When setxattr() is called with a size of 0 the value is + * unconditionally replaced by "". When removexattr() is + * called we get a NULL value and XATTR_REPLACE for flags. 
+ */ + if (!value && flags == XATTR_REPLACE) { ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); valid = OBD_MD_FLXATTRRM; } else { diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c index 51b7d61df0f5513173d98f6bc6f2d3e5d40be9ef..17957622431901d729a03f9e715d855d36e9d80d 100644 --- a/drivers/staging/media/atomisp/i2c/ov2680.c +++ b/drivers/staging/media/atomisp/i2c/ov2680.c @@ -396,12 +396,11 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg, { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov2680_device *dev = to_ov2680_sensor(sd); - u16 vts,hts; + u16 vts; int ret,exp_val; dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain); - hts = ov2680_res[dev->fmt_idx].pixels_per_line; vts = ov2680_res[dev->fmt_idx].lines_per_frame; /* group hold */ @@ -1190,7 +1189,8 @@ static int ov2680_detect(struct i2c_client *client) OV2680_SC_CMMN_SUB_ID, &high); revision = (u8) high & 0x0f; - dev_info(&client->dev, "sensor_revision id = 0x%x\n", id); + dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n", + id, revision); return 0; } diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c index 0592ac1f2832eedd947dcc3c1d39dad917454751..cfe6bb61001455e44cc274b6b4d972e7b1f680d9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c @@ -81,7 +81,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, get_user(kp->flags, &up->flags)) return -EFAULT; - kp->base = compat_ptr(tmp); + kp->base = (void __force *)compat_ptr(tmp); get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt, &up->fmt); return 0; } @@ -232,10 +232,10 @@ static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp, 
get_user(ycoords_uv, &up->ycoords_uv)) return -EFAULT; - kp->xcoords_y = compat_ptr(xcoords_y); - kp->ycoords_y = compat_ptr(ycoords_y); - kp->xcoords_uv = compat_ptr(xcoords_uv); - kp->ycoords_uv = compat_ptr(ycoords_uv); + kp->xcoords_y = (void __force *)compat_ptr(xcoords_y); + kp->ycoords_y = (void __force *)compat_ptr(ycoords_y); + kp->xcoords_uv = (void __force *)compat_ptr(xcoords_uv); + kp->ycoords_uv = (void __force *)compat_ptr(ycoords_uv); return 0; } @@ -296,7 +296,7 @@ static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp, return -EFAULT; kp->data = compat_ptr(data); - kp->effective_width = compat_ptr(effective_width); + kp->effective_width = (void __force *)compat_ptr(effective_width); return 0; } @@ -360,7 +360,7 @@ static int get_atomisp_metadata_by_type_stat32( return -EFAULT; kp->data = compat_ptr(data); - kp->effective_width = compat_ptr(effective_width); + kp->effective_width = (void __force *)compat_ptr(effective_width); return 0; } @@ -437,7 +437,7 @@ static int get_atomisp_overlay32(struct atomisp_overlay *kp, get_user(kp->overlay_start_x, &up->overlay_start_y)) return -EFAULT; - kp->frame = compat_ptr(frame); + kp->frame = (void __force *)compat_ptr(frame); return 0; } @@ -481,7 +481,7 @@ static int get_atomisp_calibration_group32( get_user(calb_grp_values, &up->calb_grp_values)) return -EFAULT; - kp->calb_grp_values = compat_ptr(calb_grp_values); + kp->calb_grp_values = (void __force *)compat_ptr(calb_grp_values); return 0; } @@ -703,8 +703,8 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, return -EFAULT; while (n >= 0) { - compat_uptr_t *src = (compat_uptr_t *)up + n; - uintptr_t *dst = (uintptr_t *)kp + n; + compat_uptr_t __user *src = ((compat_uptr_t __user *)up) + n; + uintptr_t *dst = ((uintptr_t *)kp) + n; if (get_user((*dst), src)) return -EFAULT; @@ -751,12 +751,12 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->shading_table = user_ptr + offset; + 
kp->shading_table = (void __force *)user_ptr + offset; offset = sizeof(struct atomisp_shading_table); if (!kp->shading_table) return -EFAULT; - if (copy_to_user(kp->shading_table, + if (copy_to_user((void __user *)kp->shading_table, &karg.shading_table, sizeof(struct atomisp_shading_table))) return -EFAULT; @@ -777,13 +777,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->morph_table = user_ptr + offset; + kp->morph_table = (void __force *)user_ptr + offset; offset += sizeof(struct atomisp_morph_table); if (!kp->morph_table) return -EFAULT; - if (copy_to_user(kp->morph_table, &karg.morph_table, - sizeof(struct atomisp_morph_table))) + if (copy_to_user((void __user *)kp->morph_table, + &karg.morph_table, + sizeof(struct atomisp_morph_table))) return -EFAULT; } @@ -802,13 +803,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->dvs2_coefs = user_ptr + offset; + kp->dvs2_coefs = (void __force *)user_ptr + offset; offset += sizeof(struct atomisp_dis_coefficients); if (!kp->dvs2_coefs) return -EFAULT; - if (copy_to_user(kp->dvs2_coefs, &karg.dvs2_coefs, - sizeof(struct atomisp_dis_coefficients))) + if (copy_to_user((void __user *)kp->dvs2_coefs, + &karg.dvs2_coefs, + sizeof(struct atomisp_dis_coefficients))) return -EFAULT; } /* handle dvs 6axis configuration */ @@ -826,13 +828,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->dvs_6axis_config = user_ptr + offset; + kp->dvs_6axis_config = (void __force *)user_ptr + offset; offset += sizeof(struct atomisp_dvs_6axis_config); if (!kp->dvs_6axis_config) return -EFAULT; - if (copy_to_user(kp->dvs_6axis_config, &karg.dvs_6axis_config, - sizeof(struct atomisp_dvs_6axis_config))) + if (copy_to_user((void __user *)kp->dvs_6axis_config, + &karg.dvs_6axis_config, + sizeof(struct atomisp_dvs_6axis_config))) return -EFAULT; } } @@ -891,7 +894,7 @@ static int 
get_atomisp_sensor_ae_bracketing_lut( get_user(lut, &up->lut)) return -EFAULT; - kp->lut = compat_ptr(lut); + kp->lut = (void __force *)compat_ptr(lut); return 0; } diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index d99daf69e501bf88e3274cb41a5420a20a481ae5..fe229d63deec166cfe892c6b97160122c673a835 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -207,11 +207,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, int chars_sent = 0; char __user *cp; char *init; + size_t bytes_per_ch = unicode ? 3 : 1; u16 ch; int empty; unsigned long flags; DEFINE_WAIT(wait); + if (count < bytes_per_ch) + return -EINVAL; + spin_lock_irqsave(&speakup_info.spinlock, flags); while (1) { prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); @@ -237,7 +241,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, init = get_initstring(); /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ - while (chars_sent <= count - 3) { + while (chars_sent <= count - bytes_per_ch) { if (speakup_info.flushing) { speakup_info.flushing = 0; ch = '\x18'; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 486be990d7fc7b6ac96053d2b2246beaf6d3a7af..a457034818c33b9a1d99932bef73b627d59dde3f 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -601,6 +601,7 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking) } if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { + up(&state->slot_available_event); pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos); return NULL; } diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 
c34d711bed21a44222dbb4a936eb65bd98c5417b..29497584533f0e2722dee380ca1782feae75f8bf 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -156,7 +156,7 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev, static int cpufreq_cooling_pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) { - struct cpufreq_cooling_device *cpufreq_cdev; + struct cpufreq_cooling_device *cpufreq_cdev, *next; unsigned int cpu; switch (mode) { @@ -168,8 +168,8 @@ static int cpufreq_cooling_pm_notify(struct notifier_block *nb, case PM_POST_HIBERNATION: case PM_POST_RESTORE: case PM_POST_SUSPEND: - mutex_lock(&cooling_list_lock); - list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) { + list_for_each_entry_safe(cpufreq_cdev, next, &cpufreq_cdev_list, + node) { if (cpufreq_cdev->cpu_id == -1) continue; mutex_lock(&core_isolate_lock); @@ -193,7 +193,6 @@ static int cpufreq_cooling_pm_notify(struct notifier_block *nb, } mutex_unlock(&core_isolate_lock); } - mutex_unlock(&cooling_list_lock); atomic_set(&in_suspend, 0); break; diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index 4ab8144a17b510c0660fc61b5725fcb1f76532d8..0efbe21ca0064acfe8f481fe3571f5a66443b079 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -51,6 +51,15 @@ config QTI_QMI_COOLING_DEVICE The QMI cooling device will interface with remote subsystem using QTI QMI interface. +config QTI_QMI_SENSOR + bool "QTI QMI sensor driver" + depends on QCOM_QMI_HELPERS && THERMAL_OF + help + This enables to list the QTI remote subsystem temperature sensors. + This driver can read the temperature of the remote sensor. + These sensors can take thresholds and notify the thermal + framework when the threshold is reached. 
+ config REGULATOR_COOLING_DEVICE bool "Regulator voltage floor cooling device" depends on REGULATOR && THERMAL_OF diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index 278721d22c39e25842f6de968750a802242d5e51..87e4a8694940af5a89ce7fb07d4355ec28c4a9db 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o obj-$(CONFIG_QTI_AOP_REG_COOLING_DEVICE) += regulator_aop_cdev.o obj-$(CONFIG_REGULATOR_COOLING_DEVICE) += regulator_cdev.o obj-$(CONFIG_QTI_QMI_COOLING_DEVICE) += thermal_mitigation_device_service_v01.o qmi_cooling.o +obj-$(CONFIG_QTI_QMI_SENSOR) += thermal_sensor_service_v01.o qmi_sensors.o obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o obj-$(CONFIG_QTI_BCL_PMIC5) += bcl_pmic5.o obj-$(CONFIG_QTI_BCL_SOC_DRIVER) += bcl_soc.o diff --git a/drivers/thermal/qcom/bcl_pmic5.c b/drivers/thermal/qcom/bcl_pmic5.c index 42d5817e3c12cf756ec4c0f2e5eb70e0f8940065..7a5124e11f26816aed636600706fe4d1dbc832c0 100644 --- a/drivers/thermal/qcom/bcl_pmic5.c +++ b/drivers/thermal/qcom/bcl_pmic5.c @@ -496,7 +496,7 @@ static void bcl_fetch_trip(struct platform_device *pdev, enum bcl_dev_type type, data->irq_num = 0; data->irq_enabled = false; irq_num = platform_get_irq_byname(pdev, int_name); - if (irq_num && handle) { + if (irq_num > 0 && handle) { ret = devm_request_threaded_irq(&pdev->dev, irq_num, NULL, handle, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -510,7 +510,7 @@ static void bcl_fetch_trip(struct platform_device *pdev, enum bcl_dev_type type, } disable_irq_nosync(irq_num); data->irq_num = irq_num; - } else if (irq_num && !handle) { + } else if (irq_num > 0 && !handle) { disable_irq_nosync(irq_num); data->irq_num = irq_num; } @@ -559,6 +559,9 @@ static void bcl_ibat_init(struct platform_device *pdev, ibat->type = type; ibat->dev = bcl_perph; bcl_fetch_trip(pdev, type, ibat, NULL); + if (ibat->irq_num <= 0) + return; + ibat->ops.get_temp = 
bcl_read_ibat; ibat->ops.set_trips = bcl_set_ibat; diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c index 436a30c8211ec86d28243f8a1d0d313a8680f01e..9e7b01d93d17589517c8eaac61e2426724720ac0 100644 --- a/drivers/thermal/qcom/qmi_cooling.c +++ b/drivers/thermal/qcom/qmi_cooling.c @@ -25,7 +25,7 @@ #include "thermal_mitigation_device_service_v01.h" #define QMI_CDEV_DRIVER "qmi-cooling-device" -#define QMI_TMD_RESP_TOUT_MSEC 50 +#define QMI_TMD_RESP_TOUT msecs_to_jiffies(100) #define QMI_CLIENT_NAME_LENGTH 40 enum qmi_device_type { @@ -166,7 +166,7 @@ static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev, goto qmi_send_exit; } - ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TOUT_MSEC); + ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TOUT); if (ret < 0) { pr_err("qmi set state:%d txn wait failed for %s ret:%d\n", state, qmi_cdev->cdev_name, ret); @@ -324,7 +324,7 @@ static int verify_devices_and_register(struct qmi_tmd_instance *tmd) goto reg_exit; } - ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TOUT_MSEC); + ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TOUT); if (ret < 0) { pr_err("Transaction wait error for inst_id:0x%x ret:%d\n", tmd->inst_id, ret); diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c new file mode 100644 index 0000000000000000000000000000000000000000..fb11a0844bb2440efab6be26ab256f24fa31c6b2 --- /dev/null +++ b/drivers/thermal/qcom/qmi_sensors.c @@ -0,0 +1,666 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "thermal_sensor_service_v01.h" +#include "../thermal_core.h" + +#define QMI_SENS_DRIVER "qmi-therm-sensors" +#define QMI_TS_RESP_TOUT msecs_to_jiffies(100) +#define QMI_CLIENT_NAME_LENGTH 40 +#define QMI_FL_SIGN 0x80000000 +#define QMI_FL_EXP 0x7f800000 +#define QMI_FL_MANTISSA 0x007fffff +#define QMI_FL_NORM 0x00800000 +#define QMI_FL_SIGN_BIT 31 +#define QMI_MANTISSA_MSB 23 + +enum qmi_ts_sensor { + QMI_TS_PA, + QMI_TS_PA_1, + QMI_TS_QFE_PA_0, + QMI_TS_QFE_WTR_0, + QMI_TS_MAX_NR +}; + +struct qmi_sensor { + struct device *dev; + char qmi_name[QMI_CLIENT_NAME_LENGTH]; + bool connection_active; + struct list_head ts_node; + struct thermal_zone_device *tz_dev; + int32_t last_reading; + int32_t high_thresh; + int32_t low_thresh; + struct qmi_ts_instance *ts; + enum qmi_ts_sensor sens_type; + struct work_struct therm_notify_work; +}; + +struct qmi_ts_instance { + struct device *dev; + struct qmi_handle handle; + struct mutex mutex; + uint32_t inst_id; + struct list_head ts_sensor_list; + struct work_struct svc_arrive_work; +}; + +static struct qmi_ts_instance *ts_instances; +static atomic_t in_suspend; + +static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = { + {"pa"}, + {"pa_1"}, + {"qfe_pa0"}, + {"qfe_wtr0"}, +}; + +static int32_t encode_qmi(int32_t val) +{ + uint32_t shift = 0, local_val = 0; + int32_t temp_val = 0; + + if (val == INT_MAX || val == INT_MIN) + return 0; + + temp_val = val = val / 1000; + if (val < 0) { + temp_val *= -1; + local_val |= 1 << QMI_FL_SIGN_BIT; + } + shift = find_last_bit((const unsigned long *)&temp_val, + sizeof(temp_val) * 8); + local_val |= ((shift + 127) << QMI_MANTISSA_MSB); + temp_val &= ~(1 << shift); + + local_val |= temp_val << (QMI_MANTISSA_MSB - shift); + pr_debug("inp:%d shift:%d out:%x temp_val:%x\n", + val, shift, local_val, 
temp_val); + + return local_val; +} + +static int32_t decode_qmi(int32_t val) +{ + int32_t sign = 0, shift = 0, local_val; + + sign = (val & QMI_FL_SIGN) ? -1 : 1; + shift = (val & QMI_FL_EXP) >> QMI_MANTISSA_MSB; + shift = QMI_MANTISSA_MSB - (shift - 127); + local_val = (val & QMI_FL_MANTISSA) | QMI_FL_NORM; + pr_debug("val:0x%x sign:%d shift:%d mantissa:%x temp:%d\n", + val, sign, shift, local_val, + sign * (local_val >> shift)); + + return sign * (local_val >> shift); +} + +static int qmi_sensor_pm_notify(struct notifier_block *nb, + unsigned long mode, void *_unused) +{ + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + case PM_SUSPEND_PREPARE: + atomic_set(&in_suspend, 1); + break; + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + atomic_set(&in_suspend, 0); + break; + default: + break; + } + return 0; +} + +static struct notifier_block qmi_sensor_pm_nb = { + .notifier_call = qmi_sensor_pm_notify, +}; + +static void qmi_ts_thresh_notify(struct work_struct *work) +{ + struct qmi_sensor *qmi_sens = container_of(work, + struct qmi_sensor, + therm_notify_work); + + of_thermal_handle_trip(qmi_sens->tz_dev); +}; + +static void qmi_ts_update_temperature(struct qmi_ts_instance *ts, + const struct ts_temp_report_ind_msg_v01 *ind_msg, + uint8_t notify) +{ + struct qmi_sensor *qmi_sens; + + list_for_each_entry(qmi_sens, &ts->ts_sensor_list, + ts_node) { + if ((strncasecmp(qmi_sens->qmi_name, + ind_msg->sensor_id.sensor_id, + QMI_TS_SENSOR_ID_LENGTH_MAX_V01))) + continue; + + qmi_sens->last_reading = + decode_qmi(ind_msg->temp) * 1000; + pr_debug("sensor:%s temperature:%d\n", + qmi_sens->qmi_name, qmi_sens->last_reading); + if (!qmi_sens->tz_dev) + return; + if (notify && + ((qmi_sens->high_thresh != INT_MAX && + qmi_sens->last_reading >= qmi_sens->high_thresh) || + (qmi_sens->low_thresh != INT_MIN && + qmi_sens->last_reading <= qmi_sens->low_thresh))) { + pr_debug("Sensor:%s Notify. 
temp:%d\n", + ind_msg->sensor_id.sensor_id, + qmi_sens->last_reading); + queue_work(system_highpri_wq, + &qmi_sens->therm_notify_work); + } + return; + } +} + +void qmi_ts_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *decoded) +{ + const struct ts_temp_report_ind_msg_v01 *ind_msg = decoded; + uint8_t notify = 0; + struct qmi_ts_instance *ts = container_of(qmi, struct qmi_ts_instance, + handle); + + if (!txn) { + pr_err("Invalid transaction\n"); + return; + } + + if ((ind_msg->report_type != QMI_TS_TEMP_REPORT_CURRENT_TEMP_V01) || + ind_msg->seq_num_valid) + notify = 1; + + if (ind_msg->temp_valid) + qmi_ts_update_temperature(ts, ind_msg, notify); + else + pr_err("Error invalid temperature field."); +} + +static int qmi_ts_request(struct qmi_sensor *qmi_sens, + bool send_current_temp_report) +{ + int ret = 0; + struct ts_register_notification_temp_resp_msg_v01 resp; + struct ts_register_notification_temp_req_msg_v01 req; + struct qmi_ts_instance *ts = qmi_sens->ts; + struct qmi_txn txn; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + strlcpy(req.sensor_id.sensor_id, qmi_sens->qmi_name, + QMI_TS_SENSOR_ID_LENGTH_MAX_V01); + req.seq_num = 0; + if (send_current_temp_report) { + req.send_current_temp_report = 1; + req.seq_num_valid = true; + } else { + req.seq_num_valid = false; + req.temp_threshold_high_valid = + qmi_sens->high_thresh != INT_MAX; + req.temp_threshold_high = + encode_qmi(qmi_sens->high_thresh); + req.temp_threshold_low_valid = + qmi_sens->low_thresh != INT_MIN; + req.temp_threshold_low = + encode_qmi(qmi_sens->low_thresh); + } + + mutex_lock(&ts->mutex); + + ret = qmi_txn_init(&ts->handle, &txn, + ts_register_notification_temp_resp_msg_v01_ei, &resp); + if (ret < 0) { + pr_err("qmi txn init failed for %s ret:%d\n", + qmi_sens->qmi_name, ret); + goto qmi_send_exit; + } + + ret = qmi_send_request(&ts->handle, NULL, &txn, + QMI_TS_REGISTER_NOTIFICATION_TEMP_REQ_V01, + 
TS_REGISTER_NOTIFICATION_TEMP_REQ_MSG_V01_MAX_MSG_LEN, + ts_register_notification_temp_req_msg_v01_ei, &req); + if (ret < 0) { + pr_err("qmi txn send failed for %s ret:%d\n", + qmi_sens->qmi_name, ret); + qmi_txn_cancel(&txn); + goto qmi_send_exit; + } + + ret = qmi_txn_wait(&txn, QMI_TS_RESP_TOUT); + if (ret < 0) { + pr_err("qmi txn wait failed for %s ret:%d\n", + qmi_sens->qmi_name, ret); + goto qmi_send_exit; + } + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ret = resp.resp.result; + pr_err("qmi NOT success for %s ret:%d\n", + qmi_sens->qmi_name, ret); + goto qmi_send_exit; + } + ret = 0; + +qmi_send_exit: + mutex_unlock(&ts->mutex); + return ret; +} + +static int qmi_sensor_read(void *data, int *temp) +{ + struct qmi_sensor *qmi_sens = (struct qmi_sensor *)data; + + if (qmi_sens->connection_active && !atomic_read(&in_suspend)) + qmi_ts_request(qmi_sens, true); + *temp = qmi_sens->last_reading; + + return 0; +} + +static int qmi_sensor_set_trips(void *data, int low, int high) +{ + struct qmi_sensor *qmi_sens = (struct qmi_sensor *)data; + int ret = 0; + + if (qmi_sens->high_thresh == high && + qmi_sens->low_thresh == low) + return ret; + qmi_sens->high_thresh = high; + qmi_sens->low_thresh = low; + if (!qmi_sens->connection_active) + return ret; + ret = qmi_ts_request(qmi_sens, false); + if (ret) + pr_err("Sensor:%s set high trip:%d low trip:%d error%d\n", + qmi_sens->qmi_name, + qmi_sens->high_thresh, + qmi_sens->low_thresh, + ret); + + return ret; +} + +static struct thermal_zone_of_device_ops qmi_sensor_ops = { + .get_temp = qmi_sensor_read, + .set_trips = qmi_sensor_set_trips, +}; + +static struct qmi_msg_handler handlers[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_TS_TEMP_REPORT_IND_V01, + .ei = ts_temp_report_ind_msg_v01_ei, + .decoded_size = sizeof(struct ts_temp_report_ind_msg_v01), + .fn = qmi_ts_ind_cb + }, + {} +}; + +static int qmi_register_sensor_device(struct qmi_sensor *qmi_sens) +{ + int ret = 0; + + qmi_sens->tz_dev = 
thermal_zone_of_sensor_register( + qmi_sens->dev, + qmi_sens->sens_type, + qmi_sens, &qmi_sensor_ops); + if (IS_ERR(qmi_sens->tz_dev)) { + ret = PTR_ERR(qmi_sens->tz_dev); + if (ret != -ENODEV) + pr_err("sensor register failed for %s, ret:%ld\n", + qmi_sens->qmi_name, ret); + qmi_sens->tz_dev = NULL; + return ret; + } + pr_debug("Sensor register success for %s\n", qmi_sens->qmi_name); + + return 0; +} + +static int verify_sensor_and_register(struct qmi_ts_instance *ts) +{ + struct ts_get_sensor_list_req_msg_v01 req; + struct ts_get_sensor_list_resp_msg_v01 *ts_resp; + int ret = 0, i; + struct qmi_txn txn; + + memset(&req, 0, sizeof(req)); + /* size of ts_resp is very high, use heap memory rather than stack */ + ts_resp = kzalloc(sizeof(*ts_resp), GFP_KERNEL); + if (!ts_resp) + return -ENOMEM; + + mutex_lock(&ts->mutex); + ret = qmi_txn_init(&ts->handle, &txn, + ts_get_sensor_list_resp_msg_v01_ei, ts_resp); + if (ret < 0) { + pr_err("Transaction Init error for inst_id:0x%x ret:%d\n", + ts->inst_id, ret); + goto reg_exit; + } + + ret = qmi_send_request(&ts->handle, NULL, &txn, + QMI_TS_GET_SENSOR_LIST_REQ_V01, + TS_GET_SENSOR_LIST_REQ_MSG_V01_MAX_MSG_LEN, + ts_get_sensor_list_req_msg_v01_ei, + &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + goto reg_exit; + } + + ret = qmi_txn_wait(&txn, QMI_TS_RESP_TOUT); + if (ret < 0) { + pr_err("Transaction wait error for inst_id:0x%x ret:%d\n", + ts->inst_id, ret); + goto reg_exit; + } + if (ts_resp->resp.result != QMI_RESULT_SUCCESS_V01) { + ret = ts_resp->resp.result; + pr_err("Get sensor list NOT success for inst_id:0x%x ret:%d\n", + ts->inst_id, ret); + goto reg_exit; + } + mutex_unlock(&ts->mutex); + + for (i = 0; i < ts_resp->sensor_list_len; i++) { + struct qmi_sensor *qmi_sens = NULL; + + list_for_each_entry(qmi_sens, &ts->ts_sensor_list, + ts_node) { + if ((strncasecmp(qmi_sens->qmi_name, + ts_resp->sensor_list[i].sensor_id, + QMI_TS_SENSOR_ID_LENGTH_MAX_V01))) + continue; + + qmi_sens->connection_active = true; + /* 
+ * Send a temperature request notification. + */ + qmi_ts_request(qmi_sens, true); + if (!qmi_sens->tz_dev) + ret = qmi_register_sensor_device(qmi_sens); + break; + } + } + + kfree(ts_resp); + return ret; + +reg_exit: + mutex_unlock(&ts->mutex); + kfree(ts_resp); + + return ret; +} + +static void qmi_ts_svc_arrive(struct work_struct *work) +{ + struct qmi_ts_instance *ts = container_of(work, + struct qmi_ts_instance, + svc_arrive_work); + + verify_sensor_and_register(ts); +} + +static void thermal_qmi_net_reset(struct qmi_handle *qmi) +{ + struct qmi_ts_instance *ts = container_of(qmi, + struct qmi_ts_instance, + handle); + struct qmi_sensor *qmi_sens = NULL; + int ret; + + pr_debug("reset QMI server\n"); + list_for_each_entry(qmi_sens, &ts->ts_sensor_list, + ts_node) { + if (!qmi_sens->connection_active) + continue; + qmi_ts_request(qmi_sens, true); + ret = qmi_ts_request(qmi_sens, false); + if (ret) + pr_err("Sensor:%s set high trip:%d low trip:%d err%d\n", + qmi_sens->tz_dev->type, + qmi_sens->high_thresh, + qmi_sens->low_thresh, + ret); + } +} + +static void thermal_qmi_del_server(struct qmi_handle *qmi, + struct qmi_service *service) +{ + struct qmi_ts_instance *ts = container_of(qmi, + struct qmi_ts_instance, + handle); + struct qmi_sensor *qmi_sens = NULL; + + pr_debug("QMI server deleted\n"); + list_for_each_entry(qmi_sens, &ts->ts_sensor_list, ts_node) + qmi_sens->connection_active = false; +} + +static int thermal_qmi_new_server(struct qmi_handle *qmi, + struct qmi_service *service) +{ + struct qmi_ts_instance *ts = container_of(qmi, + struct qmi_ts_instance, + handle); + struct sockaddr_qrtr sq = {AF_QIPCRTR, service->node, service->port}; + + mutex_lock(&ts->mutex); + kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0); + mutex_unlock(&ts->mutex); + queue_work(system_highpri_wq, &ts->svc_arrive_work); + + return 0; +} + +static struct qmi_ops thermal_qmi_event_ops = { + .new_server = thermal_qmi_new_server, + .del_server = 
thermal_qmi_del_server, + .net_reset = thermal_qmi_net_reset, +}; + +static void qmi_ts_cleanup(void) +{ + struct qmi_ts_instance *ts = ts_instances; + struct qmi_sensor *qmi_sens, *c_next; + + mutex_lock(&ts->mutex); + list_for_each_entry_safe(qmi_sens, c_next, + &ts->ts_sensor_list, ts_node) { + qmi_sens->connection_active = false; + if (qmi_sens->tz_dev) + thermal_zone_of_sensor_unregister( + qmi_sens->dev, qmi_sens->tz_dev); + + list_del(&qmi_sens->ts_node); + } + qmi_handle_release(&ts->handle); + + mutex_unlock(&ts->mutex); +} + +static int of_get_qmi_ts_platform_data(struct device *dev) +{ + int ret = 0, i = 0; + struct device_node *np = dev->of_node; + struct device_node *subsys_np; + struct qmi_ts_instance *ts; + struct qmi_sensor *qmi_sens; + int sens_name_max = 0, sens_idx = 0; + + ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + subsys_np = of_get_next_child(np, NULL); + if (!subsys_np) { + dev_err(dev, "No child node\n"); + return -EINVAL; + } + + ret = of_property_read_u32(subsys_np, "qcom,instance-id", + &ts->inst_id); + if (ret) { + dev_err(dev, "error reading qcom,insance-id. ret:%d\n", ret); + goto data_fetch_err; + } + + ts->dev = dev; + mutex_init(&ts->mutex); + INIT_LIST_HEAD(&ts->ts_sensor_list); + INIT_WORK(&ts->svc_arrive_work, qmi_ts_svc_arrive); + + sens_name_max = of_property_count_strings(subsys_np, + "qcom,qmi-sensor-names"); + if (sens_name_max <= 0) { + dev_err(dev, "Invalid or no sensor. 
err:%d\n", sens_name_max); + ret = -EINVAL; + goto data_fetch_err; + } + + for (sens_idx = 0; sens_idx < sens_name_max; sens_idx++) { + const char *qmi_name; + + qmi_sens = devm_kzalloc(dev, sizeof(*qmi_sens), GFP_KERNEL); + if (!qmi_sens) { + ret = -ENOMEM; + goto data_fetch_err; + } + + of_property_read_string_index(subsys_np, + "qcom,qmi-sensor-names", sens_idx, + &qmi_name); + strlcpy(qmi_sens->qmi_name, qmi_name, + QMI_CLIENT_NAME_LENGTH); + /* Check for supported qmi sensors */ + for (i = 0; i < QMI_TS_MAX_NR; i++) { + if (!strcmp(sensor_clients[i], qmi_sens->qmi_name)) + break; + } + + if (i >= QMI_TS_MAX_NR) { + dev_err(dev, "Unknown sensor:%s\n", + qmi_sens->qmi_name); + ret = -EINVAL; + goto data_fetch_err; + } + dev_dbg(dev, "QMI sensor:%s available\n", qmi_name); + qmi_sens->sens_type = i; + qmi_sens->ts = ts; + qmi_sens->dev = dev; + qmi_sens->last_reading = 0; + qmi_sens->high_thresh = INT_MAX; + qmi_sens->low_thresh = INT_MIN; + INIT_WORK(&qmi_sens->therm_notify_work, + qmi_ts_thresh_notify); + list_add(&qmi_sens->ts_node, &ts->ts_sensor_list); + } + ts_instances = ts; + +data_fetch_err: + of_node_put(subsys_np); + return ret; +} + +static int qmi_sens_device_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret = 0; + struct qmi_ts_instance *ts; + + ret = of_get_qmi_ts_platform_data(dev); + if (ret) + goto probe_err; + + if (!ts_instances) { + dev_err(dev, "Empty ts instances\n"); + return -EINVAL; + } + + ts = ts_instances; + if (list_empty(&ts->ts_sensor_list)) + goto probe_err; + + ret = qmi_handle_init(&ts->handle, + TS_GET_SENSOR_LIST_RESP_MSG_V01_MAX_MSG_LEN, + &thermal_qmi_event_ops, handlers); + if (ret < 0) { + dev_err(dev, "QMI[0x%x] handle init failed. 
err:%d\n", + ts->inst_id, ret); + goto probe_err; + } + ret = qmi_add_lookup(&ts->handle, TS_SERVICE_ID_V01, + TS_SERVICE_VERS_V01, ts->inst_id); + if (ret < 0) { + dev_err(dev, "QMI register failed for 0x%x, ret:%d\n", + ts->inst_id, ret); + goto probe_err; + } + atomic_set(&in_suspend, 0); + register_pm_notifier(&qmi_sensor_pm_nb); + return 0; + +probe_err: + qmi_ts_cleanup(); + return ret; +} + +static int qmi_sens_device_remove(struct platform_device *pdev) +{ + qmi_ts_cleanup(); + unregister_pm_notifier(&qmi_sensor_pm_nb); + + return 0; +} + +static const struct of_device_id qmi_sens_device_match[] = { + {.compatible = "qcom,qmi-sensors"}, + {} +}; + +static struct platform_driver qmi_sens_device_driver = { + .probe = qmi_sens_device_probe, + .remove = qmi_sens_device_remove, + .driver = { + .name = QMI_SENS_DRIVER, + .owner = THIS_MODULE, + .of_match_table = qmi_sens_device_match, + }, +}; + +builtin_platform_driver(qmi_sens_device_driver); diff --git a/drivers/thermal/qcom/thermal_sensor_service_v01.c b/drivers/thermal/qcom/thermal_sensor_service_v01.c new file mode 100644 index 0000000000000000000000000000000000000000..b09f23bb8ecbeca1146016951815b236c8608ea6 --- /dev/null +++ b/drivers/thermal/qcom/thermal_sensor_service_v01.c @@ -0,0 +1,260 @@ + /* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + **/ + +#include +#include "thermal_sensor_service_v01.h" + +static struct qmi_elem_info ts_sensor_type_v01_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = QMI_TS_SENSOR_ID_LENGTH_MAX_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ts_sensor_type_v01, + sensor_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ts_get_sensor_list_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ts_get_sensor_list_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = + offsetof(struct ts_get_sensor_list_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = + offsetof(struct ts_get_sensor_list_resp_msg_v01, + sensor_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = + offsetof(struct ts_get_sensor_list_resp_msg_v01, + sensor_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_TS_SENSOR_LIST_MAX_V01, + .elem_size = sizeof(struct ts_sensor_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = + offsetof(struct ts_get_sensor_list_resp_msg_v01, + sensor_list), + .ei_array = ts_sensor_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ts_register_notification_temp_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ts_sensor_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct 
ts_register_notification_temp_req_msg_v01, + sensor_id), + .ei_array = ts_sensor_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + send_current_temp_report), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + temp_threshold_high_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(int), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + temp_threshold_high), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + temp_threshold_low_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(int), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + temp_threshold_low), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + seq_num_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ts_register_notification_temp_req_msg_v01, + seq_num), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ts_register_notification_temp_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + 
.is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ts_register_notification_temp_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ts_temp_report_ind_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ts_sensor_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ts_temp_report_ind_msg_v01, + sensor_id), + .ei_array = ts_sensor_type_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum ts_temp_report_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ts_temp_report_ind_msg_v01, + report_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ts_temp_report_ind_msg_v01, + temp_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(int), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ts_temp_report_ind_msg_v01, + temp), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct ts_temp_report_ind_msg_v01, + seq_num_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct ts_temp_report_ind_msg_v01, + seq_num), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + diff --git a/drivers/thermal/qcom/thermal_sensor_service_v01.h b/drivers/thermal/qcom/thermal_sensor_service_v01.h new file mode 100644 index 0000000000000000000000000000000000000000..58a15bd225ddefda3eb74da5a7d327e5da4cb5ad --- /dev/null +++ 
b/drivers/thermal/qcom/thermal_sensor_service_v01.h @@ -0,0 +1,89 @@ + /* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + **/ + +#ifndef THERMAL_SENSOR_SERVICE_V01_H +#define THERMAL_SENSOR_SERVICE_V01_H + +#define TS_SERVICE_ID_V01 0x17 +#define TS_SERVICE_VERS_V01 0x01 + +#define QMI_TS_GET_SENSOR_LIST_RESP_V01 0x0020 +#define QMI_TS_GET_SUPPORTED_MSGS_REQ_V01 0x001E +#define QMI_TS_GET_SUPPORTED_MSGS_RESP_V01 0x001E +#define QMI_TS_REGISTER_NOTIFICATION_TEMP_REQ_V01 0x0021 +#define QMI_TS_REGISTER_NOTIFICATION_TEMP_RESP_V01 0x0021 +#define QMI_TS_GET_SUPPORTED_FIELDS_RESP_V01 0x001F +#define QMI_TS_GET_SENSOR_LIST_REQ_V01 0x0020 +#define QMI_TS_TEMP_REPORT_IND_V01 0x0022 +#define QMI_TS_GET_SUPPORTED_FIELDS_REQ_V01 0x001F + +#define QMI_TS_SENSOR_ID_LENGTH_MAX_V01 32 +#define QMI_TS_SENSOR_LIST_MAX_V01 32 + +struct ts_sensor_type_v01 { + char sensor_id[QMI_TS_SENSOR_ID_LENGTH_MAX_V01 + 1]; +}; + +struct ts_get_sensor_list_req_msg_v01 { + char placeholder; +}; +#define TS_GET_SENSOR_LIST_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct qmi_elem_info ts_get_sensor_list_req_msg_v01_ei[]; + +struct ts_get_sensor_list_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t sensor_list_valid; + uint32_t sensor_list_len; + struct ts_sensor_type_v01 sensor_list[QMI_TS_SENSOR_LIST_MAX_V01]; +}; +#define TS_GET_SENSOR_LIST_RESP_MSG_V01_MAX_MSG_LEN 1067 +extern struct qmi_elem_info ts_get_sensor_list_resp_msg_v01_ei[]; + +struct ts_register_notification_temp_req_msg_v01 { + struct ts_sensor_type_v01 
sensor_id; + uint8_t send_current_temp_report; + uint8_t temp_threshold_high_valid; + int temp_threshold_high; + uint8_t temp_threshold_low_valid; + int temp_threshold_low; + uint8_t seq_num_valid; + uint32_t seq_num; +}; +#define TS_REGISTER_NOTIFICATION_TEMP_REQ_MSG_V01_MAX_MSG_LEN 61 +extern struct qmi_elem_info ts_register_notification_temp_req_msg_v01_ei[]; + +struct ts_register_notification_temp_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; +#define TS_REGISTER_NOTIFICATION_TEMP_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info ts_register_notification_temp_resp_msg_v01_ei[]; + +enum ts_temp_report_type_enum_v01 { + TS_TEMP_REPORT_TYPE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_TS_TEMP_REPORT_CURRENT_TEMP_V01 = 0, + QMI_TS_TEMP_REPORT_THRESHOLD_HIGH_V01 = 1, + QMI_TS_TEMP_REPORT_THRESHOLD_LOW_V01 = 2, + TS_TEMP_REPORT_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +struct ts_temp_report_ind_msg_v01 { + struct ts_sensor_type_v01 sensor_id; + enum ts_temp_report_type_enum_v01 report_type; + uint8_t temp_valid; + long temp; + uint8_t seq_num_valid; + uint32_t seq_num; +}; +#define TS_TEMP_REPORT_IND_MSG_V01_MAX_MSG_LEN 57 +extern struct qmi_elem_info ts_temp_report_ind_msg_v01_ei[]; + +#endif diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ac83f721db24d78cff24d9349ecf75edafe9f2d4..d60069b5dc98dee7bb4ccf82f11c057fc9519a11 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -598,6 +598,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev) threshold_code = temp_to_code(data, temp); rising_threshold = readl(data->base + rising_reg_offset); + rising_threshold &= ~(0xff << j * 8); rising_threshold |= (threshold_code << j * 8); writel(rising_threshold, data->base + rising_reg_offset); diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c index 16331a90c1e89f3430e6c5e5909a05e7e8914aed..9da8474fe50afce9f30ff4426e7b053bf3700370 100644 --- 
a/drivers/tty/hvc/hvc_opal.c +++ b/drivers/tty/hvc/hvc_opal.c @@ -332,7 +332,6 @@ static void udbg_init_opal_common(void) udbg_putc = udbg_opal_putc; udbg_getc = udbg_opal_getc; udbg_getc_poll = udbg_opal_getc_poll; - tb_ticks_per_usec = 0x200; /* Make udelay not suck */ } void __init hvc_opal_init_early(void) diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 64338442050ef9d4e47e48edc72676159952090c..899e8fe5e00f512a75fe82c25b42f341eb9a338e 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -110,16 +110,19 @@ static void pty_unthrottle(struct tty_struct *tty) static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; + unsigned long flags; if (tty->stopped) return 0; if (c > 0) { + spin_lock_irqsave(&to->port->lock, flags); /* Stuff the data into the input queue of the other end */ c = tty_insert_flip_string(to->port, buf, c); /* And shovel */ if (c) tty_flip_buffer_push(to->port); + spin_unlock_irqrestore(&to->port->lock, flags); } return c; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3b9aadd007f5b60e5741283a4dcff0a7da4e07d7..f2f31fc16f2909720bd1fdf2b53d77a405098a9e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1844,6 +1844,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ }, + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ .driver_info = CLEAR_HALT_CONDITIONS, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index cc2a4c43231635dfeb818fb365e2e968a13e5782..01dde0450b55b7d39354406c612833812a69daa1 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1152,10 +1152,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) if 
(!udev || udev->state == USB_STATE_NOTATTACHED) { /* Tell hub_wq to disconnect the device or - * check for a new connection + * check for a new connection or over current condition. + * Based on USB2.0 Spec Section 11.12.5, + * C_PORT_OVER_CURRENT could be set while + * PORT_OVER_CURRENT is not. So check for any of them. */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || - (portstatus & USB_PORT_STAT_OVERCURRENT)) + (portstatus & USB_PORT_STAT_OVERCURRENT) || + (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); } else if (portstatus & USB_PORT_STAT_ENABLE) { @@ -3368,6 +3372,10 @@ static int wait_for_connected(struct usb_device *udev, while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; + if (!port_is_power_on(hub, *portstatus)) { + status = -ENODEV; + break; + } msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 87484f71b2abbb29f446ee4aec57c4545a0d8f66..46d3b0fc00c5c6dadc91d61d3bcfeb3571697449 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -2606,34 +2606,29 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, #define DWC2_USB_DMA_ALIGN 4 -struct dma_aligned_buffer { - void *kmalloc_ptr; - void *old_xfer_buffer; - u8 data[0]; -}; - static void dwc2_free_dma_aligned_buffer(struct urb *urb) { - struct dma_aligned_buffer *temp; + void *stored_xfer_buffer; if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) return; - temp = container_of(urb->transfer_buffer, - struct dma_aligned_buffer, data); + /* Restore urb->transfer_buffer from the end of the allocated area */ + memcpy(&stored_xfer_buffer, urb->transfer_buffer + + urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); if (usb_urb_dir_in(urb)) - memcpy(temp->old_xfer_buffer, temp->data, + memcpy(stored_xfer_buffer, urb->transfer_buffer, urb->transfer_buffer_length); - urb->transfer_buffer = 
temp->old_xfer_buffer; - kfree(temp->kmalloc_ptr); + kfree(urb->transfer_buffer); + urb->transfer_buffer = stored_xfer_buffer; urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; } static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) { - struct dma_aligned_buffer *temp, *kmalloc_ptr; + void *kmalloc_ptr; size_t kmalloc_size; if (urb->num_sgs || urb->sg || @@ -2641,22 +2636,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) return 0; - /* Allocate a buffer with enough padding for alignment */ + /* + * Allocate a buffer with enough padding for original transfer_buffer + * pointer. This allocation is guaranteed to be aligned properly for + * DMA + */ kmalloc_size = urb->transfer_buffer_length + - sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; + sizeof(urb->transfer_buffer); kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); if (!kmalloc_ptr) return -ENOMEM; - /* Position our struct dma_aligned_buffer such that data is aligned */ - temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; - temp->kmalloc_ptr = kmalloc_ptr; - temp->old_xfer_buffer = urb->transfer_buffer; + /* + * Position value of original urb->transfer_buffer pointer to the end + * of allocation for later referencing + */ + memcpy(kmalloc_ptr + urb->transfer_buffer_length, + &urb->transfer_buffer, sizeof(urb->transfer_buffer)); + if (usb_urb_dir_out(urb)) - memcpy(temp->data, urb->transfer_buffer, + memcpy(kmalloc_ptr, urb->transfer_buffer, urb->transfer_buffer_length); - urb->transfer_buffer = temp->data; + urb->transfer_buffer = kmalloc_ptr; urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 4f48541f12c60f1ed7c490ffa073fdc302582483..31639315c9d62b41892e4173498d66edafd7a6b8 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -102,18 +102,18 @@ #define GSI_TRB_ADDR_BIT_53_MASK (1 << 
21) #define GSI_TRB_ADDR_BIT_55_MASK (1 << 23) -#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC) +#define GSI_GENERAL_CFG_REG(offset) (QSCRATCH_REG_OFFSET + offset) #define GSI_RESTART_DBL_PNTR_MASK BIT(20) #define GSI_CLK_EN_MASK BIT(12) #define BLOCK_GSI_WR_GO_MASK BIT(1) #define GSI_EN_MASK BIT(0) -#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4)) -#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4)) -#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4)) -#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x144) + (n*4)) +#define GSI_DBL_ADDR_L(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4)) +#define GSI_DBL_ADDR_H(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4)) +#define GSI_RING_BASE_ADDR_L(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4)) +#define GSI_RING_BASE_ADDR_H(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4)) -#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4) +#define GSI_IF_STS(offset) (QSCRATCH_REG_OFFSET + offset) #define GSI_WR_CTRL_STATE_MASK BIT(15) #define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31) @@ -121,6 +121,16 @@ #define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16) #define DWC3_GEVENT_TYPE_GSI 0x3 +enum usb_gsi_reg { + GENERAL_CFG_REG, + DBL_ADDR_L, + DBL_ADDR_H, + RING_BASE_ADDR_L, + RING_BASE_ADDR_H, + IF_STS, + GSI_REG_MAX, +}; + struct dwc3_msm_req_complete { struct list_head list_item; struct usb_request *req; @@ -272,6 +282,8 @@ struct dwc3_msm { struct mutex suspend_resume_mutex; enum usb_device_speed override_usb_speed; + u32 *gsi_reg; + int gsi_reg_offset_cnt; }; #define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */ @@ -938,8 +950,9 @@ static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent); int n = ep->ep_intr_num - 1; - dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n), - dwc3_trb_dma_offset(dep, &dep->trb_pool[0])); + dwc3_msm_write_reg(mdwc->base, + 
GSI_RING_BASE_ADDR_L(mdwc->gsi_reg[RING_BASE_ADDR_L], (n)), + dwc3_trb_dma_offset(dep, &dep->trb_pool[0])); if (request->mapped_db_reg_phs_addr_lsb) dma_unmap_resource(dwc->sysdev, @@ -956,12 +969,16 @@ static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, ep->name, request->db_reg_phs_addr_lsb, (unsigned long long)request->mapped_db_reg_phs_addr_lsb); - dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), - (u32)request->mapped_db_reg_phs_addr_lsb); + dwc3_msm_write_reg(mdwc->base, + GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n)), + (u32)request->mapped_db_reg_phs_addr_lsb); dev_dbg(mdwc->dev, "Ring Base Addr %d: %x (LSB)\n", n, - dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n))); + dwc3_msm_read_reg(mdwc->base, + GSI_RING_BASE_ADDR_L(mdwc->gsi_reg[RING_BASE_ADDR_L], + (n)))); dev_dbg(mdwc->dev, "GSI DB Addr %d: %x (LSB)\n", n, - dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n))); + dwc3_msm_read_reg(mdwc->base, + GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n)))); } /** @@ -1295,14 +1312,18 @@ static void gsi_enable(struct usb_ep *ep) struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent); dwc3_msm_write_reg_field(mdwc->base, - GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1); + GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), + GSI_CLK_EN_MASK, 1); dwc3_msm_write_reg_field(mdwc->base, - GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1); + GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), + GSI_RESTART_DBL_PNTR_MASK, 1); dwc3_msm_write_reg_field(mdwc->base, - GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0); + GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), + GSI_RESTART_DBL_PNTR_MASK, 0); dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__); dwc3_msm_write_reg_field(mdwc->base, - GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1); + GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), + GSI_EN_MASK, 1); } /** @@ -1321,7 +1342,8 @@ static void gsi_set_clear_dbell(struct usb_ep *ep, struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent); 
dwc3_msm_write_reg_field(mdwc->base, - GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db); + GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), + BLOCK_GSI_WR_GO_MASK, block_db); } /** @@ -1338,7 +1360,7 @@ static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend) struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent); while (dwc3_msm_read_reg_field(mdwc->base, - GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) { + GSI_IF_STS(mdwc->gsi_reg[IF_STS]), GSI_WR_CTRL_STATE_MASK)) { if (!timeout--) { dev_err(mdwc->dev, "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n"); @@ -3086,8 +3108,8 @@ static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc) return 0; } -#define SMMU_BASE 0x60000000 /* Device address range base */ -#define SMMU_SIZE 0x90000000 /* Device address range size */ +#define SMMU_BASE 0x90000000 /* Device address range base */ +#define SMMU_SIZE 0x60000000 /* Device address range size */ static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc) { @@ -3316,7 +3338,7 @@ static int dwc3_msm_probe(struct platform_device *pdev) struct dwc3_msm *mdwc; struct dwc3 *dwc; struct resource *res; - int ret = 0, i; + int ret = 0, size = 0, i; u32 val; unsigned long irq_type; @@ -3469,6 +3491,29 @@ static int dwc3_msm_probe(struct platform_device *pdev) ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs", &mdwc->num_gsi_event_buffers); + if (mdwc->num_gsi_event_buffers) { + of_get_property(node, "qcom,gsi-reg-offset", &size); + if (size) { + mdwc->gsi_reg = devm_kzalloc(dev, size, GFP_KERNEL); + if (!mdwc->gsi_reg) + return -ENOMEM; + + mdwc->gsi_reg_offset_cnt = + (size / sizeof(*mdwc->gsi_reg)); + if (mdwc->gsi_reg_offset_cnt != GSI_REG_MAX) { + dev_err(dev, "invalid reg offset count\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,gsi-reg-offset", mdwc->gsi_reg, + mdwc->gsi_reg_offset_cnt); + } else { + dev_err(dev, "err provide qcom,gsi-reg-offset\n"); + return -EINVAL; + } + } + mdwc->use_pdc_interrupts = 
of_property_read_bool(node, "qcom,use-pdc-interrupts"); dwc3_set_notifier(&dwc3_msm_notify_event); @@ -3709,6 +3754,14 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb, udev->dev.parent->parent == &dwc->xhci->dev) { if (event == USB_DEVICE_ADD && udev->actconfig) { if (!dwc3_msm_is_ss_rhport_connected(mdwc)) { + /* + * controller may wakeup ss phy during hs data + * transfers or doorbell rings. Power down the + * ss phy to avoid turning on pipe clock during + * these wake-ups. + */ + usb_phy_powerdown(mdwc->ss_phy); + /* * Core clock rate can be reduced only if root * hub SS port is not enabled/connected. @@ -3724,6 +3777,7 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb, mdwc->max_rh_port_speed = USB_SPEED_SUPER; } } else { + usb_phy_powerup(mdwc->ss_phy); /* set rate back to default core clk rate */ clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate); dev_dbg(mdwc->dev, "set core clk rate %ld\n", diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f54d8346a1626a52328326564c7558035dee091b..06c53abc766799a6e83bb6aa625d40add678e5a8 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -231,7 +231,8 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep) mult = 3; if ((dep->endpoint.maxburst > 2) && - dep->endpoint.ep_type == EP_TYPE_GSI) + dep->endpoint.ep_type == EP_TYPE_GSI + && dwc3_is_usb31(dwc)) mult = 6; tmp = ((max_packet + mdwidth) * mult) + mdwidth; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 5bbf99dc3208d01bbe9010db3e633d6bb7c366cd..a770d23ac301a12a7795e190196d4e983e47d47e 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3461,9 +3461,11 @@ static int ffs_func_set_alt(struct usb_function *f, ffs->func = func; ret = ffs_func_eps_enable(func); - if (likely(ret >= 0)) + if (likely(ret >= 0)) { ffs_event_add(ffs, FUNCTIONFS_ENABLE); - + /* Disable USB LPM later on bus_suspend */ + 
usb_gadget_autopm_get_async(ffs->gadget); + } ffs_log("exit: ret %d", ret); return ret; @@ -3471,8 +3473,13 @@ static int ffs_func_set_alt(struct usb_function *f, static void ffs_func_disable(struct usb_function *f) { + struct ffs_function *func = ffs_func_from_usb(f); + struct ffs_data *ffs = func->ffs; + ffs_log("enter"); ffs_func_set_alt(f, 0, (unsigned)-1); + /* matching put to allow LPM on disconnect */ + usb_gadget_autopm_put_async(ffs->gadget); ffs_log("exit"); } @@ -3537,7 +3544,7 @@ static int ffs_func_setup(struct usb_function *f, ffs_log("exit"); - return USB_GADGET_DELAYED_STATUS; + return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; } static bool ffs_func_req_match(struct usb_function *f, diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index d40fb9a2fa750f433097b8b915b95774ccf98f05..f2d922dacbd4339fdfb823c5ebafa53a447ff801 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -827,9 +827,11 @@ static void ipa_work_handler(struct work_struct *w) ipa_resume_work_handler(d_port); d_port->sm_state = STATE_CONNECTED; } else if (event == EVT_DISCONNECTED) { + usb_gadget_autopm_get(d_port->gadget); ipa_disconnect_work_handler(d_port); d_port->sm_state = STATE_INITIALIZED; log_event_dbg("%s: ST_SUS_EVT_DIS", __func__); + usb_gadget_autopm_put_async(d_port->gadget); } break; default: @@ -1173,7 +1175,7 @@ static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned int cmd, struct f_gsi *gsi; struct gsi_ctrl_pkt *cpkt; struct ep_info info; - struct data_buf_info data_info; + struct data_buf_info data_info = {0}; enum ipa_usb_teth_prot prot_id = *(enum ipa_usb_teth_prot *)(fp->private_data); struct gsi_inst_status *inst_cur = &inst_status[prot_id]; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 4cfa72cb0a91443ffe3ca1d9446b8f8dda380c1b..c12a1a6554bad90bd36d805e3805453f433c5121 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ 
b/drivers/usb/gadget/udc/renesas_usb3.c @@ -334,6 +334,7 @@ struct renesas_usb3 { struct usb_gadget_driver *driver; struct extcon_dev *extcon; struct work_struct extcon_work; + struct dentry *dentry; struct renesas_usb3_ep *usb3_ep; int num_usb3_eps; @@ -2397,8 +2398,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3, file = debugfs_create_file("b_device", 0644, root, usb3, &renesas_usb3_b_device_fops); - if (!file) + if (!file) { dev_info(dev, "%s: Can't create debugfs mode\n", __func__); + debugfs_remove_recursive(root); + } else { + usb3->dentry = root; + } } /*------- platform_driver ------------------------------------------------*/ @@ -2406,6 +2411,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) { struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); + debugfs_remove_recursive(usb3->dentry); device_remove_file(&pdev->dev, &dev_attr_role); usb_del_gadget_udc(&usb3->gadget); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 8ce6979af0ad7757c66938996b2b54946c13fa9e..27d267a54e68da860e55c61c1a82459cf14f29cc 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -864,6 +864,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) spin_unlock_irqrestore(&xhci->lock, flags); } +static bool xhci_pending_portevent(struct xhci_hcd *xhci) +{ + __le32 __iomem **port_array; + int port_index; + u32 status; + u32 portsc; + + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) + return true; + /* + * Checking STS_EINT is not enough as there is a lag between a change + * bit being set and the Port Status Change Event that it generated + * being written to the Event Ring. See note in xhci 1.1 section 4.19.2. 
+ */ + + port_index = xhci->num_usb2_ports; + port_array = xhci->usb2_ports; + while (port_index--) { + portsc = readl(port_array[port_index]); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + port_index = xhci->num_usb3_ports; + port_array = xhci->usb3_ports; + while (port_index--) { + portsc = readl(port_array[port_index]); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + return false; +} + /* * Stop HC (not bus-specific) * @@ -963,7 +998,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend); */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0, status; + u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1085,8 +1120,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { /* Resume root hubs only when have pending events. */ - status = readl(&xhci->op_regs->status); - if (status & STS_EINT) { + if (xhci_pending_portevent(xhci)) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index c068371c106fde76205c37df95fcceb8bb1aabca..955bfad5ab188754c4c123859ab4146caa2c68ce 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -392,6 +392,10 @@ struct xhci_op_regs { #define PORT_PLC (1 << 22) /* port configure error change - port failed to configure its link partner */ #define PORT_CEC (1 << 23) +#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ + PORT_RC | PORT_PLC | PORT_CEC) + + /* Cold Attach Status - xHC can set this bit to report device attached during * Sx state. Warm port reset should be perfomed to clear this bit and move port * to connected state. 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 2a12fdac79fb7371c576858dd0d488b58875f165..901f01435a9d0f817e1f2ec780d33aea4dd3cd77 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -51,7 +51,6 @@ enum usbpd_state { PE_SRC_READY, PE_SRC_HARD_RESET, PE_SRC_SOFT_RESET, - PE_SRC_SEND_SOFT_RESET, PE_SRC_DISCOVERY, PE_SRC_TRANSITION_TO_DEFAULT, PE_SNK_STARTUP, @@ -63,7 +62,6 @@ enum usbpd_state { PE_SNK_READY, PE_SNK_HARD_RESET, PE_SNK_SOFT_RESET, - PE_SNK_SEND_SOFT_RESET, PE_SNK_TRANSITION_TO_DEFAULT, PE_DRS_SEND_DR_SWAP, PE_PRS_SNK_SRC_SEND_SWAP, @@ -72,6 +70,7 @@ enum usbpd_state { PE_PRS_SRC_SNK_SEND_SWAP, PE_PRS_SRC_SNK_TRANSITION_TO_OFF, PE_PRS_SRC_SNK_WAIT_SOURCE_ON, + PE_SEND_SOFT_RESET, PE_VCS_WAIT_FOR_VCONN, }; @@ -88,7 +87,6 @@ static const char * const usbpd_state_strings[] = { "SRC_Ready", "SRC_Hard_Reset", "SRC_Soft_Reset", - "SRC_Send_Soft_Reset", "SRC_Discovery", "SRC_Transition_to_default", "SNK_Startup", @@ -100,7 +98,6 @@ static const char * const usbpd_state_strings[] = { "SNK_Ready", "SNK_Hard_Reset", "SNK_Soft_Reset", - "SNK_Send_Soft_Reset", "SNK_Transition_to_default", "DRS_Send_DR_Swap", "PRS_SNK_SRC_Send_Swap", @@ -109,6 +106,7 @@ static const char * const usbpd_state_strings[] = { "PRS_SRC_SNK_Send_Swap", "PRS_SRC_SNK_Transition_to_off", "PRS_SRC_SNK_Wait_Source_on", + "Send_Soft_Reset", "VCS_Wait_for_VCONN", }; @@ -514,21 +512,6 @@ enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd) } EXPORT_SYMBOL(usbpd_get_plug_orientation); -static unsigned int get_connector_type(struct usbpd *pd) -{ - int ret; - union power_supply_propval val; - - ret = power_supply_get_property(pd->usb_psy, - POWER_SUPPLY_PROP_CONNECTOR_TYPE, &val); - - if (ret) { - dev_err(&pd->dev, "Unable to read CONNECTOR TYPE: %d\n", ret); - return ret; - } - return val.intval; -} - static inline void stop_usb_host(struct usbpd *pd) { extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0); @@ -1337,7 
+1320,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) /* send Reject */ ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -1358,7 +1341,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) /* PE_SRC_TRANSITION_SUPPLY pseudo-state */ ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -1373,7 +1356,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) */ ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -1425,8 +1408,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) kick_sm(pd, 0); break; - case PE_SRC_SEND_SOFT_RESET: - case PE_SNK_SEND_SOFT_RESET: + case PE_SEND_SOFT_RESET: pd_reset_protocol(pd); ret = pd_send_msg(pd, MSG_SOFT_RESET, NULL, 0, SOP_MSG); @@ -1516,7 +1498,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) ret = pd_send_msg(pd, MSG_REQUEST, &pd->rdo, 1, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -1940,9 +1922,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type) /* retry when hitting PE_SRC/SNK_Ready again */ if (ret != -EBUSY && sop_type == SOP_MSG) - usbpd_set_state(pd, pd->current_pr == PR_SRC ? - PE_SRC_SEND_SOFT_RESET : - PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); return; } @@ -2043,9 +2023,7 @@ static void vconn_swap(struct usbpd *pd) ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, pd->current_pr == PR_SRC ? 
- PE_SRC_SEND_SOFT_RESET : - PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); return; } } @@ -2371,7 +2349,7 @@ static void usbpd_sm(struct work_struct *w) usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY); } else if (rx_msg) { usbpd_err(&pd->dev, "Unexpected message received\n"); - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); } else { usbpd_set_state(pd, PE_SRC_HARD_RESET); } @@ -2386,7 +2364,7 @@ static void usbpd_sm(struct work_struct *w) pd->sink_caps, pd->num_sink_caps, SOP_MSG); if (ret) - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); } else if (IS_DATA(rx_msg, MSG_REQUEST)) { pd->rdo = *(u32 *)rx_msg->payload; usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY); @@ -2398,7 +2376,7 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -2407,7 +2385,7 @@ static void usbpd_sm(struct work_struct *w) /* we'll happily accept Src->Sink requests anytime */ ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -2416,7 +2394,7 @@ static void usbpd_sm(struct work_struct *w) } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) { ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -2428,13 +2406,13 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0, SOP_MSG); if (ret) - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } else if (pd->send_pr_swap) { pd->send_pr_swap = false; ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, 
PE_SEND_SOFT_RESET); break; } @@ -2444,7 +2422,7 @@ static void usbpd_sm(struct work_struct *w) pd->send_dr_swap = false; ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -2608,7 +2586,7 @@ static void usbpd_sm(struct work_struct *w) PE_SNK_WAIT_FOR_CAPABILITIES); } else if (rx_msg) { usbpd_err(&pd->dev, "Invalid response to sink request\n"); - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); } else { /* timed out; go to hard reset */ usbpd_set_state(pd, PE_SNK_HARD_RESET); @@ -2647,21 +2625,22 @@ static void usbpd_sm(struct work_struct *w) pd->src_cap_id++; usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY); + break; } else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) { ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES, pd->sink_caps, pd->num_sink_caps, SOP_MSG); if (ret) - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) && pd->spec_rev == USBPD_REV_20) { ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps, ARRAY_SIZE(default_src_caps), SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } + if (ret) + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; } else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) { if (pd->vdm_state == MODE_ENTERED) { usbpd_set_state(pd, PE_SNK_HARD_RESET); @@ -2670,17 +2649,18 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } dr_swap(pd); + break; } else if (IS_CTRL(rx_msg, MSG_PR_SWAP) && pd->spec_rev == USBPD_REV_20) { /* TODO: should we Reject in certain circumstances? 
*/ ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } @@ -2697,57 +2677,22 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG); if (ret) - usbpd_set_state(pd, - PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; } vconn_swap(pd); + break; } else if (IS_DATA(rx_msg, MSG_VDM)) { handle_vdm_rx(pd, rx_msg); - } else if (pd->send_get_src_cap_ext && is_sink_tx_ok(pd)) { - pd->send_get_src_cap_ext = false; - ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL, - 0, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (rx_msg && - IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) { - if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) { - usbpd_err(&pd->dev, "Invalid src cap ext db\n"); - break; - } - memcpy(&pd->src_cap_ext_db, rx_msg->payload, - sizeof(pd->src_cap_ext_db)); - complete(&pd->is_ready); - } else if (pd->send_get_pps_status && is_sink_tx_ok(pd)) { - pd->send_get_pps_status = false; - ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL, - 0, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (rx_msg && - IS_EXT(rx_msg, MSG_PPS_STATUS)) { - if (rx_msg->data_len != sizeof(pd->pps_status_db)) { - usbpd_err(&pd->dev, "Invalid pps status db\n"); - break; - } - memcpy(&pd->pps_status_db, rx_msg->payload, - sizeof(pd->pps_status_db)); - complete(&pd->is_ready); + break; } else if (IS_DATA(rx_msg, MSG_ALERT)) { u32 ado; @@ -2765,15 +2710,24 @@ static void usbpd_sm(struct work_struct *w) */ pd->send_get_status = true; kick_sm(pd, 150); - } else if (pd->send_get_status && 
is_sink_tx_ok(pd)) { - pd->send_get_status = false; - ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, 0, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + break; + } else if (IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) { + if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) { + usbpd_err(&pd->dev, "Invalid src cap ext db\n"); break; } - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (rx_msg && IS_EXT(rx_msg, MSG_STATUS)) { + memcpy(&pd->src_cap_ext_db, rx_msg->payload, + sizeof(pd->src_cap_ext_db)); + complete(&pd->is_ready); + } else if (IS_EXT(rx_msg, MSG_PPS_STATUS)) { + if (rx_msg->data_len != sizeof(pd->pps_status_db)) { + usbpd_err(&pd->dev, "Invalid pps status db\n"); + break; + } + memcpy(&pd->pps_status_db, rx_msg->payload, + sizeof(pd->pps_status_db)); + complete(&pd->is_ready); + } else if (IS_EXT(rx_msg, MSG_STATUS)) { if (rx_msg->data_len != PD_STATUS_DB_LEN) { usbpd_err(&pd->dev, "Invalid status db\n"); break; @@ -2781,17 +2735,8 @@ static void usbpd_sm(struct work_struct *w) memcpy(&pd->status_db, rx_msg->payload, sizeof(pd->status_db)); kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); - } else if (pd->send_get_battery_cap && is_sink_tx_ok(pd)) { - pd->send_get_battery_cap = false; - ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP, - &pd->get_battery_cap_db, 1, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (rx_msg && - IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) { + complete(&pd->is_ready); + } else if (IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) { if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) { usbpd_err(&pd->dev, "Invalid battery cap db\n"); break; @@ -2799,17 +2744,7 @@ static void usbpd_sm(struct work_struct *w) memcpy(&pd->battery_cap_db, rx_msg->payload, sizeof(pd->battery_cap_db)); complete(&pd->is_ready); - } else if (pd->send_get_battery_status && is_sink_tx_ok(pd)) { - pd->send_get_battery_status = false; - ret = pd_send_ext_msg(pd, 
MSG_GET_BATTERY_STATUS, - &pd->get_battery_status_db, 1, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (rx_msg && - IS_EXT(rx_msg, MSG_BATTERY_STATUS)) { + } else if (IS_EXT(rx_msg, MSG_BATTERY_STATUS)) { if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) { usbpd_err(&pd->dev, "Invalid bat sts dobj\n"); break; @@ -2822,33 +2757,88 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0, SOP_MSG); if (ret) - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + usbpd_set_state(pd, PE_SEND_SOFT_RESET); break; - } else if (pd->send_request) { - pd->send_request = false; - usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY); - } else if (pd->send_pr_swap && is_sink_tx_ok(pd)) { - pd->send_pr_swap = false; - ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } + } - pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP; - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (pd->send_dr_swap && is_sink_tx_ok(pd)) { - pd->send_dr_swap = false; - ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG); - if (ret) { - usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); - break; - } + /* handle outgoing requests */ + if (is_sink_tx_ok(pd)) { + if (pd->send_get_src_cap_ext) { + pd->send_get_src_cap_ext = false; + ret = pd_send_msg(pd, + MSG_GET_SOURCE_CAP_EXTENDED, + NULL, 0, SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (pd->send_get_pps_status) { + pd->send_get_pps_status = false; + ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, + NULL, 0, SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (pd->send_get_status) { + pd->send_get_status = false; + ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, 0, + SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + 
break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (pd->send_get_battery_cap) { + pd->send_get_battery_cap = false; + ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP, + &pd->get_battery_cap_db, 1, SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (pd->send_get_battery_status) { + pd->send_get_battery_status = false; + ret = pd_send_ext_msg(pd, + MSG_GET_BATTERY_STATUS, + &pd->get_battery_status_db, 1, + SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (pd->send_request) { + pd->send_request = false; + usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY); + } else if (pd->send_pr_swap) { + pd->send_pr_swap = false; + ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, + SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; + } - pd->current_state = PE_DRS_SEND_DR_SWAP; - kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (is_sink_tx_ok(pd)) { - handle_vdm_tx(pd, SOP_MSG); + pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP; + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (pd->send_dr_swap) { + pd->send_dr_swap = false; + ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, + SOP_MSG); + if (ret) { + usbpd_set_state(pd, PE_SEND_SOFT_RESET); + break; + } + + pd->current_state = PE_DRS_SEND_DR_SWAP; + kick_sm(pd, SENDER_RESPONSE_TIME); + } else { + handle_vdm_tx(pd, SOP_MSG); + } } break; @@ -2872,8 +2862,7 @@ static void usbpd_sm(struct work_struct *w) PE_SNK_WAIT_FOR_CAPABILITIES); break; - case PE_SRC_SEND_SOFT_RESET: - case PE_SNK_SEND_SOFT_RESET: + case PE_SEND_SOFT_RESET: if (IS_CTRL(rx_msg, MSG_ACCEPT)) { usbpd_set_state(pd, pd->current_pr == PR_SRC ? 
PE_SRC_SEND_CAPABILITIES : @@ -4115,11 +4104,6 @@ struct usbpd *usbpd_create(struct device *parent) goto destroy_wq; } - if (get_connector_type(pd) == POWER_SUPPLY_CONNECTOR_MICRO_USB) { - usbpd_dbg(&pd->dev, "USB connector is microAB hence failing pdphy_probe\n"); - ret = -EINVAL; - goto put_psy; - } /* * associate extcon with the parent dev as it could have a DT * node which will be useful for extcon_get_edev_by_phandle() diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index 7bbed6e3005294c7624df82bb0b7010577ed9a11..c28a7509a3c0b9503f32b0c5c1fb08d7fac036e8 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -31,6 +31,6 @@ obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o -obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o +obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o phy-msm-qusb-v2.o obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o obj-$(CONFIG_MSM_SNPS_FEMTO_PHY) += phy-qcom-snps-28nm-hs.o diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..8f9b0499a83ff3aa9545b869e28eaa33800a896e --- /dev/null +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -0,0 +1,1176 @@ +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* QUSB2PHY_PWR_CTRL1 register related bits */ +#define PWR_CTRL1_POWR_DOWN BIT(0) + +/* QUSB2PHY_PLL_COMMON_STATUS_ONE register related bits */ +#define CORE_READY_STATUS BIT(0) + +/* Get TUNE value from efuse bit-mask */ +#define TUNE_VAL_MASK(val, pos, mask) ((val >> pos) & mask) + +/* QUSB2PHY_INTR_CTRL register related bits */ +#define DMSE_INTR_HIGH_SEL BIT(4) +#define DPSE_INTR_HIGH_SEL BIT(3) +#define CHG_DET_INTR_EN BIT(2) +#define DMSE_INTR_EN BIT(1) +#define DPSE_INTR_EN BIT(0) + +/* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE register related bits */ +#define CORE_PLL_RATE BIT(0) +#define CORE_PLL_RATE_MUX BIT(1) +#define CORE_PLL_EN BIT(2) +#define CORE_PLL_EN_MUX BIT(3) +#define CORE_PLL_EN_FROM_RESET BIT(4) +#define CORE_RESET BIT(5) +#define CORE_RESET_MUX BIT(6) + +#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */ +#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */ +#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */ + +#define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */ +#define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */ +#define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */ + +#define LINESTATE_DP BIT(0) +#define LINESTATE_DM BIT(1) + +#define BIAS_CTRL_2_OVERRIDE_VAL 0x28 + +#define DEBUG_CTRL1_OVERRIDE_VAL 0x09 + +/* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */ +#define BANDGAP_BYPASS BIT(0) + +/* DEBUG_CTRL2 register value to program VSTATUS MUX for PHY status */ +#define DEBUG_CTRL2_MUX_PLL_LOCK_STATUS 0x4 + +/* STAT5 register bits */ +#define VSTATUS_PLL_LOCK_STATUS_MASK BIT(0) + +enum qusb_phy_reg { + PORT_TUNE1, + PLL_COMMON_STATUS_ONE, + PWR_CTRL1, + INTR_CTRL, + PLL_CORE_INPUT_OVERRIDE, + TEST1, + BIAS_CTRL_2, + DEBUG_CTRL1, + DEBUG_CTRL2, + STAT5, + USB2_PHY_REG_MAX, +}; + +struct qusb_phy { + struct usb_phy phy; + struct mutex lock; + void __iomem *base; + void __iomem *efuse_reg; + void __iomem 
*refgen_north_bg_reg; + + struct clk *ref_clk_src; + struct clk *ref_clk; + struct clk *cfg_ahb_clk; + struct reset_control *phy_reset; + + struct regulator *vdd; + struct regulator *vdda33; + struct regulator *vdda18; + int vdd_levels[3]; /* none, low, high */ + int init_seq_len; + int *qusb_phy_init_seq; + int host_init_seq_len; + int *qusb_phy_host_init_seq; + + unsigned int *phy_reg; + int qusb_phy_reg_offset_cnt; + + u32 tune_val; + int efuse_bit_pos; + int efuse_num_of_bits; + + int power_enabled_ref; + bool clocks_enabled; + bool cable_connected; + bool suspended; + bool dpdm_enable; + + struct regulator_desc dpdm_rdesc; + struct regulator_dev *dpdm_rdev; + + /* emulation targets specific */ + void __iomem *emu_phy_base; + bool emulation; + int *emu_init_seq; + int emu_init_seq_len; + int *phy_pll_reset_seq; + int phy_pll_reset_seq_len; + int *emu_dcm_reset_seq; + int emu_dcm_reset_seq_len; + + /* override TUNEX registers value */ + struct dentry *root; + u8 tune[5]; + u8 bias_ctrl2; + + bool override_bias_ctrl2; +}; + +static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on) +{ + dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n", + __func__, qphy->clocks_enabled, on); + + if (!qphy->clocks_enabled && on) { + clk_prepare_enable(qphy->ref_clk_src); + if (qphy->ref_clk) + clk_prepare_enable(qphy->ref_clk); + + if (qphy->cfg_ahb_clk) + clk_prepare_enable(qphy->cfg_ahb_clk); + + qphy->clocks_enabled = true; + } + + if (qphy->clocks_enabled && !on) { + if (qphy->cfg_ahb_clk) + clk_disable_unprepare(qphy->cfg_ahb_clk); + + if (qphy->ref_clk) + clk_disable_unprepare(qphy->ref_clk); + + clk_disable_unprepare(qphy->ref_clk_src); + qphy->clocks_enabled = false; + } + + dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__, + qphy->clocks_enabled); +} + +static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high) +{ + int min, ret; + + min = high ? 1 : 0; /* low or none? 
*/ + ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min], + qphy->vdd_levels[2]); + if (ret) { + dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n"); + return ret; + } + + dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n", + qphy->vdd_levels[min], qphy->vdd_levels[2]); + return ret; +} + +static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on) +{ + int ret = 0; + + mutex_lock(&qphy->lock); + + dev_dbg(qphy->phy.dev, + "%s:req to turn %s regulators. power_enabled_ref:%d\n", + __func__, on ? "on" : "off", qphy->power_enabled_ref); + + if (on && ++qphy->power_enabled_ref > 1) { + dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n"); + goto done; + } + + if (!on) { + if (on == qphy->power_enabled_ref) { + dev_dbg(qphy->phy.dev, + "PHYs' regulators are already off\n"); + goto done; + } + + qphy->power_enabled_ref--; + if (!qphy->power_enabled_ref) + goto disable_vdda33; + + dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n"); + goto done; + } + + ret = qusb_phy_config_vdd(qphy, true); + if (ret) { + dev_err(qphy->phy.dev, "Unable to config VDD:%d\n", + ret); + goto err_vdd; + } + + ret = regulator_enable(qphy->vdd); + if (ret) { + dev_err(qphy->phy.dev, "Unable to enable VDD\n"); + goto unconfig_vdd; + } + + ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD); + if (ret < 0) { + dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret); + goto disable_vdd; + } + + ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN, + QUSB2PHY_1P8_VOL_MAX); + if (ret) { + dev_err(qphy->phy.dev, + "Unable to set voltage for vdda18:%d\n", ret); + goto put_vdda18_lpm; + } + + ret = regulator_enable(qphy->vdda18); + if (ret) { + dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret); + goto unset_vdda18; + } + + ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD); + if (ret < 0) { + dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret); + goto disable_vdda18; + } + + ret = 
regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN, + QUSB2PHY_3P3_VOL_MAX); + if (ret) { + dev_err(qphy->phy.dev, + "Unable to set voltage for vdda33:%d\n", ret); + goto put_vdda33_lpm; + } + + ret = regulator_enable(qphy->vdda33); + if (ret) { + dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret); + goto unset_vdd33; + } + + pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__); + + mutex_unlock(&qphy->lock); + return ret; + +disable_vdda33: + ret = regulator_disable(qphy->vdda33); + if (ret) + dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret); + +unset_vdd33: + ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX); + if (ret) + dev_err(qphy->phy.dev, + "Unable to set (0) voltage for vdda33:%d\n", ret); + +put_vdda33_lpm: + ret = regulator_set_load(qphy->vdda33, 0); + if (ret < 0) + dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n"); + +disable_vdda18: + ret = regulator_disable(qphy->vdda18); + if (ret) + dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret); + +unset_vdda18: + ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX); + if (ret) + dev_err(qphy->phy.dev, + "Unable to set (0) voltage for vdda18:%d\n", ret); + +put_vdda18_lpm: + ret = regulator_set_load(qphy->vdda18, 0); + if (ret < 0) + dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n"); + +disable_vdd: + ret = regulator_disable(qphy->vdd); + if (ret) + dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n", + ret); + +unconfig_vdd: + ret = qusb_phy_config_vdd(qphy, false); + if (ret) + dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n", + ret); +err_vdd: + dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n"); + + /* in case of error in turning on regulators */ + if (qphy->power_enabled_ref) + qphy->power_enabled_ref--; +done: + mutex_unlock(&qphy->lock); + return ret; +} + +static void qusb_phy_get_tune1_param(struct qusb_phy *qphy) +{ + u8 reg; + u32 bit_mask = 1; + + pr_debug("%s(): num_of_bits:%d 
bit_pos:%d\n", __func__, + qphy->efuse_num_of_bits, + qphy->efuse_bit_pos); + + /* get bit mask based on number of bits to use with efuse reg */ + bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1; + + /* + * if efuse reg is updated (i.e non-zero) then use it to program + * tune parameters + */ + qphy->tune_val = readl_relaxed(qphy->efuse_reg); + pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n", + __func__, bit_mask, qphy->tune_val); + + qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val, + qphy->efuse_bit_pos, bit_mask); + reg = readb_relaxed(qphy->base + qphy->phy_reg[PORT_TUNE1]); + if (qphy->tune_val) { + reg = reg & 0x0f; + reg |= (qphy->tune_val << 4); + } + + qphy->tune_val = reg; +} + +static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt, + unsigned long delay) +{ + int i; + + pr_debug("Seq count:%d\n", cnt); + for (i = 0; i < cnt; i = i+2) { + pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]); + writel_relaxed(seq[i], base + seq[i+1]); + if (delay) + usleep_range(delay, (delay + 2000)); + } +} + +static void qusb_phy_reset(struct qusb_phy *qphy) +{ + int ret; + + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(qphy->phy.dev, "%s: phy_reset assert failed\n", + __func__); + usleep_range(100, 150); + + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(qphy->phy.dev, "%s: phy_reset deassert failed\n", + __func__); +} + +static bool qusb_phy_pll_locked(struct qusb_phy *qphy) +{ + u32 val; + + writel_relaxed(DEBUG_CTRL2_MUX_PLL_LOCK_STATUS, + qphy->base + qphy->phy_reg[DEBUG_CTRL2]); + + val = readl_relaxed(qphy->base + qphy->phy_reg[STAT5]); + + return (val & VSTATUS_PLL_LOCK_STATUS_MASK); +} + +static void qusb_phy_host_init(struct usb_phy *phy) +{ + u8 reg; + int p_index; + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + dev_dbg(phy->dev, "%s\n", __func__); + + qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq, + qphy->host_init_seq_len, 0); + + if (qphy->efuse_reg) 
{ + if (!qphy->tune_val) + qusb_phy_get_tune1_param(qphy); + } else { + /* For non fused chips we need to write the TUNE1 param as + * specified in DT otherwise we will end up writing 0 to + * to TUNE1 + */ + qphy->tune_val = readb_relaxed(qphy->base + + qphy->phy_reg[PORT_TUNE1]); + } + + writel_relaxed(qphy->tune_val | BIT(7), + qphy->base + qphy->phy_reg[PORT_TUNE1]); + pr_debug("%s(): Programming TUNE1 parameter as:%x\n", + __func__, readb_relaxed(qphy->base + + qphy->phy_reg[PORT_TUNE1])); + writel_relaxed(DEBUG_CTRL1_OVERRIDE_VAL, + qphy->base + qphy->phy_reg[DEBUG_CTRL1]); + + /* if debugfs based tunex params are set, use that value. */ + for (p_index = 0; p_index < 5; p_index++) { + if (qphy->tune[p_index]) + writel_relaxed(qphy->tune[p_index], + qphy->base + qphy->phy_reg[PORT_TUNE1] + + (4 * p_index)); + } + + if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2) + if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS) + writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL, + qphy->base + qphy->phy_reg[BIAS_CTRL_2]); + + if (qphy->bias_ctrl2) + writel_relaxed(qphy->bias_ctrl2, + qphy->base + qphy->phy_reg[BIAS_CTRL_2]); + + /* Ensure above write is completed before turning ON ref clk */ + wmb(); + + /* Require to get phy pll lock successfully */ + usleep_range(150, 160); + + reg = readb_relaxed(qphy->base + qphy->phy_reg[PLL_COMMON_STATUS_ONE]); + dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg); + if (!(reg & CORE_READY_STATUS)) { + dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg); + WARN_ON(1); + } +} + +static int qusb_phy_init(struct usb_phy *phy) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + int ret, p_index; + u8 reg; + + dev_dbg(phy->dev, "%s\n", __func__); + + ret = qusb_phy_enable_power(qphy, true); + if (ret) + return ret; + + qusb_phy_enable_clocks(qphy, true); + + qusb_phy_reset(qphy); + + if (qphy->qusb_phy_host_init_seq && qphy->phy.flags & PHY_HOST_MODE) { + qusb_phy_host_init(phy); + return 0; 
+ } + + if (qphy->emulation) { + if (qphy->emu_init_seq) + qusb_phy_write_seq(qphy->emu_phy_base + 0x8000, + qphy->emu_init_seq, + qphy->emu_init_seq_len, 10000); + + if (qphy->qusb_phy_init_seq) + qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq, + qphy->init_seq_len, 0); + + /* Wait for 5ms as per QUSB2 RUMI sequence */ + usleep_range(5000, 7000); + + if (qphy->phy_pll_reset_seq) + qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq, + qphy->phy_pll_reset_seq_len, 10000); + + if (qphy->emu_dcm_reset_seq) + qusb_phy_write_seq(qphy->emu_phy_base, + qphy->emu_dcm_reset_seq, + qphy->emu_dcm_reset_seq_len, 10000); + + return 0; + } + + /* Disable the PHY */ + writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) | + PWR_CTRL1_POWR_DOWN, + qphy->base + qphy->phy_reg[PWR_CTRL1]); + + if (qphy->qusb_phy_init_seq) + qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq, + qphy->init_seq_len, 0); + if (qphy->efuse_reg) { + if (!qphy->tune_val) + qusb_phy_get_tune1_param(qphy); + + pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__, + qphy->tune_val); + writel_relaxed(qphy->tune_val, + qphy->base + qphy->phy_reg[PORT_TUNE1]); + } + + /* if debugfs based tunex params are set, use that value. 
*/ + for (p_index = 0; p_index < 5; p_index++) { + if (qphy->tune[p_index]) + writel_relaxed(qphy->tune[p_index], + qphy->base + qphy->phy_reg[PORT_TUNE1] + + (4 * p_index)); + } + + if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2) + if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS) + writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL, + qphy->base + qphy->phy_reg[BIAS_CTRL_2]); + + if (qphy->bias_ctrl2) + writel_relaxed(qphy->bias_ctrl2, + qphy->base + qphy->phy_reg[BIAS_CTRL_2]); + + /* ensure above writes are completed before re-enabling PHY */ + wmb(); + + /* Enable the PHY */ + writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) & + ~PWR_CTRL1_POWR_DOWN, + qphy->base + qphy->phy_reg[PWR_CTRL1]); + + /* Ensure above write is completed before turning ON ref clk */ + wmb(); + + /* Require to get phy pll lock successfully */ + usleep_range(150, 160); + + reg = readb_relaxed(qphy->base + qphy->phy_reg[PLL_COMMON_STATUS_ONE]); + dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg); + if (!(reg & CORE_READY_STATUS)) { + dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg); + WARN_ON(1); + } + return 0; +} + +static void qusb_phy_shutdown(struct usb_phy *phy) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + dev_dbg(phy->dev, "%s\n", __func__); + + qusb_phy_enable_power(qphy, false); + +} + +static u32 qusb_phy_get_linestate(struct qusb_phy *qphy) +{ + u32 linestate = 0; + + if (qphy->cable_connected) { + if (qphy->phy.flags & PHY_HSFS_MODE) + linestate |= LINESTATE_DP; + else if (qphy->phy.flags & PHY_LS_MODE) + linestate |= LINESTATE_DM; + } + return linestate; +} + +/** + * Performs QUSB2 PHY suspend/resume functionality. + * + * @uphy - usb phy pointer. + * @suspend - to enable suspend or not. 
1 - suspend, 0 - resume + * + */ +static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + u32 linestate = 0, intr_mask = 0; + + if (qphy->suspended && suspend) { + dev_dbg(phy->dev, "%s: USB PHY is already suspended\n", + __func__); + return 0; + } + + if (suspend) { + /* Bus suspend case */ + if (qphy->cable_connected || + (qphy->phy.flags & PHY_HOST_MODE)) { + /* Disable all interrupts */ + writel_relaxed(0x00, + qphy->base + qphy->phy_reg[INTR_CTRL]); + + linestate = qusb_phy_get_linestate(qphy); + /* + * D+/D- interrupts are level-triggered, but we are + * only interested if the line state changes, so enable + * the high/low trigger based on current state. In + * other words, enable the triggers _opposite_ of what + * the current D+/D- levels are. + * e.g. if currently D+ high, D- low (HS 'J'/Suspend), + * configure the mask to trigger on D+ low OR D- high + */ + intr_mask = DPSE_INTR_EN | DMSE_INTR_EN; + if (!(linestate & LINESTATE_DP)) /* D+ low */ + intr_mask |= DPSE_INTR_HIGH_SEL; + if (!(linestate & LINESTATE_DM)) /* D- low */ + intr_mask |= DMSE_INTR_HIGH_SEL; + + writel_relaxed(intr_mask, + qphy->base + qphy->phy_reg[INTR_CTRL]); + + if (linestate & (LINESTATE_DP | LINESTATE_DM)) { + /* enable phy auto-resume */ + writel_relaxed(0x91, + qphy->base + qphy->phy_reg[TEST1]); + /* Delay recommended between TEST1 writes */ + usleep_range(10, 20); + writel_relaxed(0x90, + qphy->base + qphy->phy_reg[TEST1]); + } + + dev_dbg(phy->dev, "%s: intr_mask = %x\n", + __func__, intr_mask); + + /* Makes sure that above write goes through */ + wmb(); + qusb_phy_enable_clocks(qphy, false); + } else { /* Cable disconnect case */ + /* Disable all interrupts */ + writel_relaxed(0x00, + qphy->base + qphy->phy_reg[INTR_CTRL]); + qusb_phy_reset(qphy); + qusb_phy_enable_clocks(qphy, false); + qusb_phy_enable_power(qphy, false); + } + qphy->suspended = true; + } else { + /* Bus resume case */ + if 
(qphy->cable_connected || + (qphy->phy.flags & PHY_HOST_MODE)) { + qusb_phy_enable_clocks(qphy, true); + /* Clear all interrupts on resume */ + writel_relaxed(0x00, + qphy->base + qphy->phy_reg[INTR_CTRL]); + + /* Reset PLL if needed */ + if (!qusb_phy_pll_locked(qphy)) { + dev_dbg(phy->dev, "%s: reset PLL\n", __func__); + /* hold core PLL into reset */ + writel_relaxed(CORE_PLL_EN_FROM_RESET | + CORE_RESET | CORE_RESET_MUX, + qphy->base + + qphy->phy_reg[PLL_CORE_INPUT_OVERRIDE]); + + /* Wait for PLL to get reset */ + usleep_range(10, 20); + + /* bring core PLL out of reset */ + writel_relaxed(CORE_PLL_EN_FROM_RESET, + qphy->base + + qphy->phy_reg[PLL_CORE_INPUT_OVERRIDE]); + + /* Makes sure that above write goes through */ + wmb(); + } + } else { /* Cable connect case */ + qusb_phy_enable_clocks(qphy, true); + } + qphy->suspended = false; + } + + return 0; +} + +static int qusb_phy_notify_connect(struct usb_phy *phy, + enum usb_device_speed speed) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + qphy->cable_connected = true; + + dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n", + qphy->cable_connected); + return 0; +} + +static int qusb_phy_notify_disconnect(struct usb_phy *phy, + enum usb_device_speed speed) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + qphy->cable_connected = false; + + dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n", + qphy->cable_connected); + return 0; +} + +static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev) +{ + int ret = 0; + struct qusb_phy *qphy = rdev_get_drvdata(rdev); + + dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n", + __func__, qphy->dpdm_enable); + + if (!qphy->dpdm_enable) { + ret = qusb_phy_enable_power(qphy, true); + if (ret < 0) { + dev_dbg(qphy->phy.dev, + "dpdm regulator enable failed:%d\n", ret); + return ret; + } + qphy->dpdm_enable = true; + qusb_phy_reset(qphy); + } + + return ret; +} + +static int 
qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev) +{ + int ret = 0; + struct qusb_phy *qphy = rdev_get_drvdata(rdev); + + dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n", + __func__, qphy->dpdm_enable); + + if (qphy->dpdm_enable) { + ret = qusb_phy_enable_power(qphy, false); + if (ret < 0) { + dev_dbg(qphy->phy.dev, + "dpdm regulator disable failed:%d\n", ret); + return ret; + } + qphy->dpdm_enable = false; + } + + return ret; +} + +static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct qusb_phy *qphy = rdev_get_drvdata(rdev); + + dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__, + qphy->dpdm_enable); + return qphy->dpdm_enable; +} + +static struct regulator_ops qusb_phy_dpdm_regulator_ops = { + .enable = qusb_phy_dpdm_regulator_enable, + .disable = qusb_phy_dpdm_regulator_disable, + .is_enabled = qusb_phy_dpdm_regulator_is_enabled, +}; + +static int qusb_phy_regulator_init(struct qusb_phy *qphy) +{ + struct device *dev = qphy->phy.dev; + struct regulator_config cfg = {}; + struct regulator_init_data *init_data; + + init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL); + if (!init_data) + return -ENOMEM; + + init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS; + qphy->dpdm_rdesc.owner = THIS_MODULE; + qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE; + qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops; + qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.driver_data = qphy; + cfg.of_node = dev->of_node; + + qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg); + if (IS_ERR(qphy->dpdm_rdev)) + return PTR_ERR(qphy->dpdm_rdev); + + return 0; +} + +static int qusb_phy_create_debugfs(struct qusb_phy *qphy) +{ + struct dentry *file; + int ret = 0, i; + char name[6]; + + qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL); + if (IS_ERR_OR_NULL(qphy->root)) { + dev_err(qphy->phy.dev, + "can't create debugfs root 
for %s\n", + dev_name(qphy->phy.dev)); + ret = -ENOMEM; + goto create_err; + } + + for (i = 0; i < 5; i++) { + snprintf(name, sizeof(name), "tune%d", (i + 1)); + file = debugfs_create_x8(name, 0644, qphy->root, + &qphy->tune[i]); + if (IS_ERR_OR_NULL(file)) { + dev_err(qphy->phy.dev, + "can't create debugfs entry for %s\n", name); + debugfs_remove_recursive(qphy->root); + ret = -ENOMEM; + goto create_err; + } + } + + file = debugfs_create_x8("bias_ctrl2", 0644, qphy->root, + &qphy->bias_ctrl2); + if (IS_ERR_OR_NULL(file)) { + dev_err(qphy->phy.dev, + "can't create debugfs entry for bias_ctrl2\n"); + debugfs_remove_recursive(qphy->root); + ret = -ENOMEM; + goto create_err; + } + +create_err: + return ret; +} + +static int qusb_phy_probe(struct platform_device *pdev) +{ + struct qusb_phy *qphy; + struct device *dev = &pdev->dev; + struct resource *res; + int ret = 0, size = 0; + + qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); + if (!qphy) + return -ENOMEM; + + qphy->phy.dev = dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "qusb_phy_base"); + qphy->base = devm_ioremap_resource(dev, res); + if (IS_ERR(qphy->base)) + return PTR_ERR(qphy->base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "emu_phy_base"); + if (res) { + qphy->emu_phy_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qphy->emu_phy_base)) { + dev_dbg(dev, "couldn't ioremap emu_phy_base\n"); + qphy->emu_phy_base = NULL; + } + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "efuse_addr"); + if (res) { + qphy->efuse_reg = devm_ioremap_nocache(dev, res->start, + resource_size(res)); + if (!IS_ERR_OR_NULL(qphy->efuse_reg)) { + ret = of_property_read_u32(dev->of_node, + "qcom,efuse-bit-pos", + &qphy->efuse_bit_pos); + if (!ret) { + ret = of_property_read_u32(dev->of_node, + "qcom,efuse-num-bits", + &qphy->efuse_num_of_bits); + } + + if (ret) { + dev_err(dev, + "DT Value for efuse is invalid.\n"); + return -EINVAL; + } + } + } + + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, + "refgen_north_bg_reg_addr"); + if (res) + qphy->refgen_north_bg_reg = devm_ioremap(dev, res->start, + resource_size(res)); + + /* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */ + qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src"); + if (IS_ERR(qphy->ref_clk_src)) { + dev_dbg(dev, "clk get failed for ref_clk_src\n"); + ret = PTR_ERR(qphy->ref_clk_src); + return ret; + } + + /* ref_clk is needed only for DIFF_CLK case, hence make it optional. */ + if (of_property_match_string(pdev->dev.of_node, + "clock-names", "ref_clk") >= 0) { + qphy->ref_clk = devm_clk_get(dev, "ref_clk"); + if (IS_ERR(qphy->ref_clk)) { + ret = PTR_ERR(qphy->ref_clk); + if (ret != -EPROBE_DEFER) + dev_dbg(dev, + "clk get failed for ref_clk\n"); + return ret; + } + + clk_set_rate(qphy->ref_clk, 19200000); + } + + if (of_property_match_string(pdev->dev.of_node, + "clock-names", "cfg_ahb_clk") >= 0) { + qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk"); + if (IS_ERR(qphy->cfg_ahb_clk)) { + ret = PTR_ERR(qphy->cfg_ahb_clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, + "clk get failed for cfg_ahb_clk ret %d\n", ret); + return ret; + } + } + + qphy->phy_reset = devm_reset_control_get(dev, "phy_reset"); + if (IS_ERR(qphy->phy_reset)) + return PTR_ERR(qphy->phy_reset); + + qphy->emulation = of_property_read_bool(dev->of_node, + "qcom,emulation"); + + of_get_property(dev->of_node, "qcom,emu-init-seq", &size); + if (size) { + qphy->emu_init_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (qphy->emu_init_seq) { + qphy->emu_init_seq_len = + (size / sizeof(*qphy->emu_init_seq)); + if (qphy->emu_init_seq_len % 2) { + dev_err(dev, "invalid emu_init_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,emu-init-seq", + qphy->emu_init_seq, + qphy->emu_init_seq_len); + } else { + dev_dbg(dev, + "error allocating memory for emu_init_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, 
"qcom,phy-pll-reset-seq", &size); + if (size) { + qphy->phy_pll_reset_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (qphy->phy_pll_reset_seq) { + qphy->phy_pll_reset_seq_len = + (size / sizeof(*qphy->phy_pll_reset_seq)); + if (qphy->phy_pll_reset_seq_len % 2) { + dev_err(dev, "invalid phy_pll_reset_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,phy-pll-reset-seq", + qphy->phy_pll_reset_seq, + qphy->phy_pll_reset_seq_len); + } else { + dev_dbg(dev, + "error allocating memory for phy_pll_reset_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size); + if (size) { + qphy->emu_dcm_reset_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (qphy->emu_dcm_reset_seq) { + qphy->emu_dcm_reset_seq_len = + (size / sizeof(*qphy->emu_dcm_reset_seq)); + if (qphy->emu_dcm_reset_seq_len % 2) { + dev_err(dev, "invalid emu_dcm_reset_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,emu-dcm-reset-seq", + qphy->emu_dcm_reset_seq, + qphy->emu_dcm_reset_seq_len); + } else { + dev_dbg(dev, + "error allocating memory for emu_dcm_reset_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, "qcom,qusb-phy-reg-offset", &size); + if (size) { + qphy->phy_reg = devm_kzalloc(dev, size, GFP_KERNEL); + if (qphy->phy_reg) { + qphy->qusb_phy_reg_offset_cnt = + size / sizeof(*qphy->phy_reg); + if (qphy->qusb_phy_reg_offset_cnt != USB2_PHY_REG_MAX) { + dev_err(dev, "invalid reg offset count\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,qusb-phy-reg-offset", + qphy->phy_reg, + qphy->qusb_phy_reg_offset_cnt); + } else { + dev_err(dev, "err mem alloc for qusb_phy_reg_offset\n"); + return -ENOMEM; + } + } else { + dev_err(dev, "err provide qcom,qmp-phy-reg-offset\n"); + return -EINVAL; + } + + size = 0; + of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size); + if (size) { + qphy->qusb_phy_init_seq = devm_kzalloc(dev, + size, 
GFP_KERNEL); + if (qphy->qusb_phy_init_seq) { + qphy->init_seq_len = + (size / sizeof(*qphy->qusb_phy_init_seq)); + if (qphy->init_seq_len % 2) { + dev_err(dev, "invalid init_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,qusb-phy-init-seq", + qphy->qusb_phy_init_seq, + qphy->init_seq_len); + } else { + dev_err(dev, + "error allocating memory for phy_init_seq\n"); + } + } + + qphy->host_init_seq_len = of_property_count_elems_of_size(dev->of_node, + "qcom,qusb-phy-host-init-seq", + sizeof(*qphy->qusb_phy_host_init_seq)); + if (qphy->host_init_seq_len > 0) { + qphy->qusb_phy_host_init_seq = devm_kcalloc(dev, + qphy->host_init_seq_len, + sizeof(*qphy->qusb_phy_host_init_seq), + GFP_KERNEL); + if (qphy->qusb_phy_host_init_seq) + of_property_read_u32_array(dev->of_node, + "qcom,qusb-phy-host-init-seq", + qphy->qusb_phy_host_init_seq, + qphy->host_init_seq_len); + else + return -ENOMEM; + } + + qphy->override_bias_ctrl2 = of_property_read_bool(dev->of_node, + "qcom,override-bias-ctrl2"); + + ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level", + (u32 *) qphy->vdd_levels, + ARRAY_SIZE(qphy->vdd_levels)); + if (ret) { + dev_err(dev, "error reading qcom,vdd-voltage-level property\n"); + return ret; + } + + qphy->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(qphy->vdd)) { + dev_err(dev, "unable to get vdd supply\n"); + return PTR_ERR(qphy->vdd); + } + + qphy->vdda33 = devm_regulator_get(dev, "vdda33"); + if (IS_ERR(qphy->vdda33)) { + dev_err(dev, "unable to get vdda33 supply\n"); + return PTR_ERR(qphy->vdda33); + } + + qphy->vdda18 = devm_regulator_get(dev, "vdda18"); + if (IS_ERR(qphy->vdda18)) { + dev_err(dev, "unable to get vdda18 supply\n"); + return PTR_ERR(qphy->vdda18); + } + + mutex_init(&qphy->lock); + platform_set_drvdata(pdev, qphy); + + qphy->phy.label = "msm-qusb-phy-v2"; + qphy->phy.init = qusb_phy_init; + qphy->phy.set_suspend = qusb_phy_set_suspend; + qphy->phy.shutdown = qusb_phy_shutdown; + 
qphy->phy.type = USB_PHY_TYPE_USB2; + qphy->phy.notify_connect = qusb_phy_notify_connect; + qphy->phy.notify_disconnect = qusb_phy_notify_disconnect; + + ret = usb_add_phy_dev(&qphy->phy); + if (ret) + return ret; + + ret = qusb_phy_regulator_init(qphy); + if (ret) + usb_remove_phy(&qphy->phy); + + qusb_phy_create_debugfs(qphy); + + return ret; +} + +static int qusb_phy_remove(struct platform_device *pdev) +{ + struct qusb_phy *qphy = platform_get_drvdata(pdev); + + usb_remove_phy(&qphy->phy); + qusb_phy_enable_clocks(qphy, false); + qusb_phy_enable_power(qphy, false); + debugfs_remove_recursive(qphy->root); + + return 0; +} + +static const struct of_device_id qusb_phy_id_table[] = { + { .compatible = "qcom,qusb2phy-v2", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qusb_phy_id_table); + +static struct platform_driver qusb_phy_driver = { + .probe = qusb_phy_probe, + .remove = qusb_phy_remove, + .driver = { + .name = "msm-qusb-phy-v2", + .of_match_table = of_match_ptr(qusb_phy_id_table), + }, +}; + +module_platform_driver(qusb_phy_driver); + +MODULE_DESCRIPTION("MSM QUSB2 PHY v2 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 4909c6b52d6e33c380568d67d43ae82b0a752e7c..d61ca0d89c97db38c7fdee9ee70f1c0afcead237 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -79,6 +79,7 @@ enum core_ldo_levels { /* USB3_DP_COM_PHY_MODE_CTRL bits */ #define USB3_MODE BIT(0) /* enables USB3 mode */ #define DP_MODE BIT(1) /* enables DP mode */ +#define USB3_DP_COMBO_MODE (USB3_MODE | DP_MODE) /*enables combo mode */ /* USB3 Gen2 link training indicator */ #define RX_EQUALIZATION_IN_PROGRESS BIT(3) @@ -91,6 +92,9 @@ enum qmp_phy_rev_reg { USB3_PHY_SW_RESET, USB3_PHY_START, + /* TypeC port select configuration (optional) */ + USB3_PHY_PCS_MISC_TYPEC_CTRL, + /* USB DP Combo PHY related */ USB3_DP_DP_PHY_PD_CTL, USB3_DP_COM_POWER_DOWN_CTRL, @@ -103,8 +107,6 @@ enum 
qmp_phy_rev_reg { USB3_DP_PCS_PCS_STATUS2, USB3_DP_PCS_INSIG_SW_CTRL3, USB3_DP_PCS_INSIG_MX_CTRL3, - /* TypeC port select configuration (optional) */ - USB3_PHY_PCS_MISC_TYPEC_CTRL, USB3_PHY_REG_MAX, }; @@ -344,6 +346,18 @@ static int configure_phy_regs(struct usb_phy *uphy, return 0; } +static void msm_ssphy_qmp_setmode(struct msm_ssphy_qmp *phy, u32 mode) +{ + mode = mode & USB3_DP_COMBO_MODE; + + writel_relaxed(mode, + phy->base + phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]); + + /* flush the write by reading it */ + readl_relaxed(phy->base + phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]); +} + + static void usb_qmp_update_portselect_phymode(struct msm_ssphy_qmp *phy) { int val; @@ -373,8 +387,7 @@ static void usb_qmp_update_portselect_phymode(struct msm_ssphy_qmp *phy) phy->phy_reg[USB3_DP_COM_TYPEC_CTRL]); } - writel_relaxed(USB3_MODE | DP_MODE, - phy->base + phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]); + msm_ssphy_qmp_setmode(phy, USB3_DP_COMBO_MODE); /* bring both QMP USB and QMP DP PHYs PCS block out of reset */ writel_relaxed(0x00, @@ -636,11 +649,14 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) } if (suspend) { - if (phy->cable_connected) + if (phy->cable_connected) { msm_ssusb_qmp_enable_autonomous(phy, 1); - else + } else { + if (uphy->type == USB_PHY_TYPE_USB3_AND_DP) + msm_ssphy_qmp_setmode(phy, USB3_MODE); writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + } /* Make sure above write completed with PHY */ wmb(); @@ -767,6 +783,23 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy, return 0; } +static int msm_ssphy_qmp_powerup(struct usb_phy *uphy, bool powerup) +{ + struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp, + phy); + u8 reg = powerup ? 
1 : 0; + u8 temp; + + writel_relaxed(reg, + phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + temp = readl_relaxed(phy->base + + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + + dev_dbg(uphy->dev, "P3 powerup:%x\n", temp); + + return 0; +} + static int msm_ssphy_qmp_get_clks(struct msm_ssphy_qmp *phy, struct device *dev) { int ret = 0; @@ -886,7 +919,7 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) phy->phy.type = USB_PHY_TYPE_USB3_AND_DP; if (of_device_is_compatible(dev->of_node, - "qcom,usb-ssphy-qmp-usb-or-dp")) + "qcom,usb-ssphy-qmp-usb3-or-dp")) phy->phy.type = USB_PHY_TYPE_USB3_OR_DP; ret = msm_ssphy_qmp_get_clks(phy, dev); @@ -1068,6 +1101,7 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) phy->phy.set_suspend = msm_ssphy_qmp_set_suspend; phy->phy.notify_connect = msm_ssphy_qmp_notify_connect; phy->phy.notify_disconnect = msm_ssphy_qmp_notify_disconnect; + phy->phy.powerup = msm_ssphy_qmp_powerup; if (of_property_read_bool(dev->of_node, "qcom,link-training-reset")) phy->phy.link_training = msm_ssphy_qmp_link_training; diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 126991046eb739f331d66e33bb56cd9bfb91c986..0212f0ee8aea7577246c01c99821e0ba12cf9373 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -66,34 +66,6 @@ uuid_le mdev_uuid(struct mdev_device *mdev) } EXPORT_SYMBOL(mdev_uuid); -static int _find_mdev_device(struct device *dev, void *data) -{ - struct mdev_device *mdev; - - if (!dev_is_mdev(dev)) - return 0; - - mdev = to_mdev_device(dev); - - if (uuid_le_cmp(mdev->uuid, *(uuid_le *)data) == 0) - return 1; - - return 0; -} - -static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid) -{ - struct device *dev; - - dev = device_find_child(parent->dev, &uuid, _find_mdev_device); - if (dev) { - put_device(dev); - return true; - } - - return false; -} - /* Should be called holding parent_list_lock */ static struct mdev_parent 
*__find_parent_device(struct device *dev) { @@ -221,7 +193,6 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) } kref_init(&parent->ref); - mutex_init(&parent->lock); parent->dev = dev; parent->ops = ops; @@ -297,6 +268,10 @@ static void mdev_device_release(struct device *dev) { struct mdev_device *mdev = to_mdev_device(dev); + mutex_lock(&mdev_list_lock); + list_del(&mdev->next); + mutex_unlock(&mdev_list_lock); + dev_dbg(&mdev->dev, "MDEV: destroying\n"); kfree(mdev); } @@ -304,7 +279,7 @@ static void mdev_device_release(struct device *dev) int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) { int ret; - struct mdev_device *mdev; + struct mdev_device *mdev, *tmp; struct mdev_parent *parent; struct mdev_type *type = to_mdev_type(kobj); @@ -312,21 +287,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) if (!parent) return -EINVAL; - mutex_lock(&parent->lock); + mutex_lock(&mdev_list_lock); /* Check for duplicate */ - if (mdev_device_exist(parent, uuid)) { - ret = -EEXIST; - goto create_err; + list_for_each_entry(tmp, &mdev_list, next) { + if (!uuid_le_cmp(tmp->uuid, uuid)) { + mutex_unlock(&mdev_list_lock); + ret = -EEXIST; + goto mdev_fail; + } } mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) { + mutex_unlock(&mdev_list_lock); ret = -ENOMEM; - goto create_err; + goto mdev_fail; } memcpy(&mdev->uuid, &uuid, sizeof(uuid_le)); + list_add(&mdev->next, &mdev_list); + mutex_unlock(&mdev_list_lock); + mdev->parent = parent; kref_init(&mdev->ref); @@ -338,35 +320,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) ret = device_register(&mdev->dev); if (ret) { put_device(&mdev->dev); - goto create_err; + goto mdev_fail; } ret = mdev_device_create_ops(kobj, mdev); if (ret) - goto create_failed; + goto create_fail; ret = mdev_create_sysfs_files(&mdev->dev, type); if (ret) { mdev_device_remove_ops(mdev, true); - goto create_failed; + goto 
create_fail; } mdev->type_kobj = kobj; + mdev->active = true; dev_dbg(&mdev->dev, "MDEV: created\n"); - mutex_unlock(&parent->lock); - - mutex_lock(&mdev_list_lock); - list_add(&mdev->next, &mdev_list); - mutex_unlock(&mdev_list_lock); - - return ret; + return 0; -create_failed: +create_fail: device_unregister(&mdev->dev); - -create_err: - mutex_unlock(&parent->lock); +mdev_fail: mdev_put_parent(parent); return ret; } @@ -377,44 +352,39 @@ int mdev_device_remove(struct device *dev, bool force_remove) struct mdev_parent *parent; struct mdev_type *type; int ret; - bool found = false; mdev = to_mdev_device(dev); mutex_lock(&mdev_list_lock); list_for_each_entry(tmp, &mdev_list, next) { - if (tmp == mdev) { - found = true; + if (tmp == mdev) break; - } } - if (found) - list_del(&mdev->next); + if (tmp != mdev) { + mutex_unlock(&mdev_list_lock); + return -ENODEV; + } - mutex_unlock(&mdev_list_lock); + if (!mdev->active) { + mutex_unlock(&mdev_list_lock); + return -EAGAIN; + } - if (!found) - return -ENODEV; + mdev->active = false; + mutex_unlock(&mdev_list_lock); type = to_mdev_type(mdev->type_kobj); parent = mdev->parent; - mutex_lock(&parent->lock); ret = mdev_device_remove_ops(mdev, force_remove); if (ret) { - mutex_unlock(&parent->lock); - - mutex_lock(&mdev_list_lock); - list_add(&mdev->next, &mdev_list); - mutex_unlock(&mdev_list_lock); - + mdev->active = true; return ret; } mdev_remove_sysfs_files(dev, type); device_unregister(dev); - mutex_unlock(&parent->lock); mdev_put_parent(parent); return 0; diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index a9cefd70a7050a45d88b77dfd17efeffd5d3e95f..b5819b7d7ef7016f2467f07b57495be54ce33940 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -20,7 +20,6 @@ struct mdev_parent { struct device *dev; const struct mdev_parent_ops *ops; struct kref ref; - struct mutex lock; struct list_head next; struct kset *mdev_types_kset; struct list_head type_list; @@ 
-34,6 +33,7 @@ struct mdev_device { struct kref ref; struct list_head next; struct kobject *type_kobj; + bool active; }; #define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index f041b1a6cf665e6410917d3608de5fe0ac557476..695b9d1a1aae235efd3e03ca4402513c4e33dcf4 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -746,6 +747,9 @@ static long vfio_pci_ioctl(void *device_data, if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) return -EINVAL; + info.index = array_index_nospec(info.index, + VFIO_PCI_NUM_REGIONS + + vdev->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 4c27f4be3c3d038598304729dbb1af90c6b2395a..aa9e792110e381d00177f95c2525debc1a4f118d 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -681,18 +681,23 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev, group = vfio_iommu_group_get(dev); if (!group) { pr_err("VFIO: No IOMMU group for device %s\n", vdev->name); - return -EINVAL; + ret = -EINVAL; + goto put_reset; } ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev); - if (ret) { - vfio_iommu_group_put(group, dev); - return ret; - } + if (ret) + goto put_iommu; mutex_init(&vdev->igate); return 0; + +put_iommu: + vfio_iommu_group_put(group, dev); +put_reset: + vfio_platform_put_reset(vdev); + return ret; } EXPORT_SYMBOL_GPL(vfio_platform_probe_common); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 63112c36ab2de129e41b70e2c9db77e17399a781..b4c68f3b82be9187f0a4fcb58d9b45c08c217082 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -457,17 +457,17 @@ static void 
tce_iommu_unuse_page(struct tce_container *container, } static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, - unsigned long tce, unsigned long size, + unsigned long tce, unsigned long shift, unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) { long ret = 0; struct mm_iommu_table_group_mem_t *mem; - mem = mm_iommu_lookup(container->mm, tce, size); + mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); if (!mem) return -EINVAL; - ret = mm_iommu_ua_to_hpa(mem, tce, phpa); + ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); if (ret) return -EINVAL; @@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container, if (!pua) return; - ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift, &hpa, &mem); if (ret) pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", @@ -609,7 +609,7 @@ static long tce_iommu_build_v2(struct tce_container *container, entry + i); ret = tce_iommu_prereg_ua_to_hpa(container, - tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); + tce, tbl->it_page_shift, &hpa, &mem); if (ret) break; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index d639378e36acc8634258c2d29a8b4057866d2c82..50eeb74ddc0aa71b07b911a393e21c93c9886e3a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -83,6 +83,7 @@ struct vfio_dma { size_t size; /* Map size (bytes) */ int prot; /* IOMMU_READ/WRITE */ bool iommu_mapped; + bool lock_cap; /* capable(CAP_IPC_LOCK) */ struct task_struct *task; struct rb_root pfn_list; /* Ex-user pinned pfn list */ }; @@ -246,29 +247,25 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) return ret; } -static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) +static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) { struct mm_struct *mm; - bool is_current; int ret; if (!npage) return 
0; - is_current = (task->mm == current->mm); - - mm = is_current ? task->mm : get_task_mm(task); + mm = async ? get_task_mm(dma->task) : dma->task->mm; if (!mm) return -ESRCH; /* process exited */ ret = down_write_killable(&mm->mmap_sem); if (!ret) { if (npage > 0) { - if (lock_cap ? !*lock_cap : - !has_capability(task, CAP_IPC_LOCK)) { + if (!dma->lock_cap) { unsigned long limit; - limit = task_rlimit(task, + limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (mm->locked_vm + npage > limit) @@ -282,7 +279,7 @@ static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) up_write(&mm->mmap_sem); } - if (!is_current) + if (async) mmput(mm); return ret; @@ -391,7 +388,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - bool lock_cap, unsigned long limit) + unsigned long limit) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -414,7 +411,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, * pages are already counted against the user. 
*/ if (!rsvd && !vfio_find_vpfn(dma, iova)) { - if (!lock_cap && current->mm->locked_vm + 1 > limit) { + if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { put_pfn(*pfn_base, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); @@ -440,7 +437,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, } if (!rsvd && !vfio_find_vpfn(dma, iova)) { - if (!lock_cap && + if (!dma->lock_cap && current->mm->locked_vm + lock_acct + 1 > limit) { put_pfn(pfn, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", @@ -453,7 +450,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, } out: - ret = vfio_lock_acct(current, lock_acct, &lock_cap); + ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: if (ret) { @@ -484,7 +481,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, } if (do_accounting) - vfio_lock_acct(dma->task, locked - unlocked, NULL); + vfio_lock_acct(dma, locked - unlocked, true); return unlocked; } @@ -501,7 +498,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { - ret = vfio_lock_acct(dma->task, 1, NULL); + ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); if (ret == -ENOMEM) @@ -528,7 +525,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); if (do_accounting) - vfio_lock_acct(dma->task, -unlocked, NULL); + vfio_lock_acct(dma, -unlocked, true); return unlocked; } @@ -723,7 +720,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->iommu_mapped = false; if (do_accounting) { - vfio_lock_acct(dma->task, -unlocked, NULL); + vfio_lock_acct(dma, -unlocked, true); return 0; } return unlocked; @@ -935,14 +932,12 @@ static int vfio_pin_map_dma(struct 
vfio_iommu *iommu, struct vfio_dma *dma, size_t size = map_size; long npage; unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - bool lock_cap = capable(CAP_IPC_LOCK); int ret = 0; while (size) { /* Pin a contiguous chunk of memory */ npage = vfio_pin_pages_remote(dma, vaddr + dma->size, - size >> PAGE_SHIFT, &pfn, - lock_cap, limit); + size >> PAGE_SHIFT, &pfn, limit); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1017,8 +1012,36 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, dma->iova = iova; dma->vaddr = vaddr; dma->prot = prot; - get_task_struct(current); - dma->task = current; + + /* + * We need to be able to both add to a task's locked memory and test + * against the locked memory limit and we need to be able to do both + * outside of this call path as pinning can be asynchronous via the + * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a + * task_struct and VM locked pages requires an mm_struct, however + * holding an indefinite mm reference is not recommended, therefore we + * only hold a reference to a task. We could hold a reference to + * current, however QEMU uses this call path through vCPU threads, + * which can be killed resulting in a NULL mm and failure in the unmap + * path when called via a different thread. Avoid this problem by + * using the group_leader as threads within the same group require + * both CLONE_THREAD and CLONE_VM and will therefore use the same + * mm_struct. + * + * Previously we also used the task for testing CAP_IPC_LOCK at the + * time of pinning and accounting, however has_capability() makes use + * of real_cred, a copy-on-write field, so we can't guarantee that it + * matches group_leader, or in fact that it might not change by the + * time it's evaluated. If a process were to call MAP_DMA with + * CAP_IPC_LOCK but later drop it, it doesn't make sense that they + * possibly see different results for an iommu_mapped vfio_dma vs + * externally mapped. 
Therefore track CAP_IPC_LOCK in vfio_dma at the + * time of calling MAP_DMA. + */ + get_task_struct(current->group_leader); + dma->task = current->group_leader; + dma->lock_cap = capable(CAP_IPC_LOCK); + dma->pfn_list = RB_ROOT; /* Insert zero-sized and grow as we map chunks of it */ @@ -1053,7 +1076,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *d; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - bool lock_cap = capable(CAP_IPC_LOCK); int ret; /* Arbitrarily pick the first domain in the list for lookups */ @@ -1100,8 +1122,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, npage = vfio_pin_pages_remote(dma, vaddr, n >> PAGE_SHIFT, - &pfn, lock_cap, - limit); + &pfn, limit); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1378,7 +1399,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) if (!is_invalid_reserved_pfn(vpfn->pfn)) locked++; } - vfio_lock_acct(dma->task, locked - unlocked, NULL); + vfio_lock_acct(dma, locked - unlocked, true); } } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index b0d606b2d06c34e8df9f4d78914ec995ec6a2d95..6123b4dd86381cf2c69b693bb60de730921b6116 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1186,7 +1186,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) if (ubufs) vhost_net_ubuf_put_wait_and_free(ubufs); err_ubufs: - sockfd_put(sock); + if (sock) + sockfd_put(sock); err_vq: mutex_unlock(&vq->mutex); err: diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 1c2289ddd555a64a418f29edeaf8e4b5643430b9..0fa7d2bd0e4811c6c404663451c67f6ecea6afee 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -301,14 +301,14 @@ static int pwm_backlight_probe(struct platform_device *pdev) /* * If the GPIO is not known to be already configured as output, that - * is, if gpiod_get_direction returns either GPIOF_DIR_IN or 
-EINVAL, - * change the direction to output and set the GPIO as active. + * is, if gpiod_get_direction returns either 1 or -EINVAL, change the + * direction to output and set the GPIO as active. * Do not force the GPIO to active when it was already output as it * could cause backlight flickering or we would enable the backlight too * early. Leave the decision of the initial backlight state for later. */ if (pb->enable_gpio && - gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT) + gpiod_get_direction(pb->enable_gpio) != 0) gpiod_direction_output(pb->enable_gpio, 1); pb->power_supply = devm_regulator_get(&pdev->dev, "power"); diff --git a/drivers/video/backlight/qcom-spmi-wled.c b/drivers/video/backlight/qcom-spmi-wled.c index de6c31564f929023e88fa7dfc86b104e7b4647ca..e09d700f3c87c91e3ce2ec82dabebc73ba6670ce 100644 --- a/drivers/video/backlight/qcom-spmi-wled.c +++ b/drivers/video/backlight/qcom-spmi-wled.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -222,6 +223,7 @@ struct wled { struct platform_device *pdev; struct regmap *regmap; struct pmic_revid_data *pmic_rev_id; + struct power_supply *bms_psy; struct mutex lock; struct wled_config cfg; ktime_t last_sc_event_time; @@ -252,6 +254,7 @@ struct wled { spinlock_t flash_lock; enum wled_flash_mode flash_mode; u8 num_strings; + u32 leds_per_string; }; enum wled5_mod_sel { @@ -1459,23 +1462,129 @@ static int wled_get_max_current(struct led_classdev *led_cdev, return 0; } +static int get_property_from_fg(struct wled *wled, + enum power_supply_property prop, int *val) +{ + int rc; + union power_supply_propval pval = {0, }; + + if (!wled->bms_psy) + wled->bms_psy = power_supply_get_by_name("bms"); + + if (!wled->bms_psy) + return -ENODEV; + + rc = power_supply_get_property(wled->bms_psy, prop, &pval); + if (rc < 0) { + pr_err("bms psy doesn't support reading prop %d rc = %d\n", + prop, rc); + return rc; + } + + *val = pval.intval; + return rc; +} + +#define V_HDRM_MV 400 
+#define V_DROOP_MV 400 +#define V_LED_MV 3100 +#define I_FLASH_MAX_MA 60 +#define EFF_FACTOR 700 static int wled_get_max_avail_current(struct led_classdev *led_cdev, int *max_current) { struct wled *wled; + int rc, ocv_mv, r_bat_mohms, i_bat_ma, i_sink_ma = 0, max_fsc_ma; + int64_t p_out_string, p_out, p_in, v_safe_mv, i_flash_ma, v_ph_mv; if (!strcmp(led_cdev->name, "wled_switch")) wled = container_of(led_cdev, struct wled, switch_cdev); else return -ENODEV; - /* - * For now, return the max brightness. Later this will be replaced with - * the available current predicted based on battery parameters. - */ - - *max_current = max(wled->flash_cdev.max_brightness, + max_fsc_ma = max(wled->flash_cdev.max_brightness, wled->torch_cdev.max_brightness); + if (!wled->leds_per_string || (wled->num_strings == 2 && + wled->leds_per_string == 8)) { + /* Allow max for 8s2p */ + *max_current = max_fsc_ma; + return 0; + } + + rc = get_property_from_fg(wled, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_mv); + if (rc < 0) { + pr_err("Error in getting OCV rc=%d\n", rc); + return rc; + } + ocv_mv /= 1000; + + rc = get_property_from_fg(wled, POWER_SUPPLY_PROP_CURRENT_NOW, + &i_bat_ma); + if (rc < 0) { + pr_err("Error in getting I_BAT rc=%d\n", rc); + return rc; + } + i_bat_ma /= 1000; + + rc = get_property_from_fg(wled, POWER_SUPPLY_PROP_RESISTANCE, + &r_bat_mohms); + if (rc < 0) { + pr_err("Error in getting R_BAT rc=%d\n", rc); + return rc; + } + r_bat_mohms /= 1000; + + pr_debug("ocv: %d i_bat: %d r_bat: %d\n", ocv_mv, i_bat_ma, + r_bat_mohms); + + p_out_string = ((wled->leds_per_string * V_LED_MV) + V_HDRM_MV) * + I_FLASH_MAX_MA; + p_out = p_out_string * wled->num_strings; + p_in = (p_out * 1000) / EFF_FACTOR; + + pr_debug("p_out_string: %lld, p_out: %lld, p_in: %lld\n", p_out_string, + p_out, p_in); + + v_safe_mv = ocv_mv - V_DROOP_MV - ((i_bat_ma * r_bat_mohms) / 1000); + if (v_safe_mv <= 0) { + pr_err("V_safe_mv: %d, cannot support flash\n", v_safe_mv); + *max_current = 0; + return 0; + } 
+ + i_flash_ma = p_in / v_safe_mv; + v_ph_mv = ocv_mv - ((i_bat_ma + i_flash_ma) * r_bat_mohms) / 1000; + + pr_debug("v_safe: %lld, i_flash: %lld, v_ph: %lld\n", v_safe_mv, + i_flash_ma, v_ph_mv); + + i_sink_ma = max_fsc_ma; + if (wled->num_strings == 3 && wled->leds_per_string == 8) { + if (v_ph_mv < 3410) { + /* For 8s3p, I_sink(mA) = 25.396 * Vph(V) - 26.154 */ + i_sink_ma = (((25396 * v_ph_mv) / 1000) - 26154) / 1000; + i_sink_ma *= wled->num_strings; + } + } else if (wled->num_strings == 3 && wled->leds_per_string == 6) { + if (v_ph_mv < 2800) { + /* For 6s3p, I_sink(mA) = 41.311 * Vph(V) - 52.334 */ + i_sink_ma = (((41311 * v_ph_mv) / 1000) - 52334) / 1000; + i_sink_ma *= wled->num_strings; + } + } else if (wled->num_strings == 4 && wled->leds_per_string == 6) { + if (v_ph_mv < 3400) { + /* For 6s4p, I_sink(mA) = 26.24 * Vph(V) - 24.834 */ + i_sink_ma = (((26240 * v_ph_mv) / 1000) - 24834) / 1000; + i_sink_ma *= wled->num_strings; + } + } else if (v_ph_mv < 3200) { + i_sink_ma = max_fsc_ma / 2; + } + + /* Clamp the sink current to maximum FSC */ + *max_current = min(i_sink_ma, max_fsc_ma); + + pr_debug("i_sink_ma: %d\n", i_sink_ma); return 0; } @@ -1758,6 +1867,9 @@ static int wled_flash_configure(struct wled *wled) if (is_wled4(wled)) return 0; + of_property_read_u32(wled->pdev->dev.of_node, "qcom,leds-per-string", + &wled->leds_per_string); + for_each_available_child_of_node(wled->pdev->dev.of_node, temp) { rc = of_property_read_string(temp, "label", &cdev_name); if (rc < 0) @@ -1765,7 +1877,7 @@ static int wled_flash_configure(struct wled *wled) if (!strcmp(cdev_name, "flash")) { /* Value read in mA */ - wled->fparams.fs_current = 40; + wled->fparams.fs_current = 50; rc = of_property_read_u32(temp, "qcom,wled-flash-fsc", &wled->fparams.fs_current); if (!rc) { @@ -1817,7 +1929,7 @@ static int wled_flash_configure(struct wled *wled) wled->flash_cdev.default_trigger = "wled_flash"; } else if (!strcmp(cdev_name, "torch")) { /* Value read in mA */ - 
wled->tparams.fs_current = 30; + wled->tparams.fs_current = 50; + rc = of_property_read_u32(temp, "qcom,wled-torch-fsc", + &wled->tparams.fs_current); + if (!rc) { diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig index e8f902bc0e19b87ec14ba0f7e2410457d6f952dc..791182dfc8f26c42eb2236fb600cb8db8048056d 100644 --- a/drivers/video/fbdev/msm/Kconfig +++ b/drivers/video/fbdev/msm/Kconfig @@ -82,6 +82,15 @@ config FB_MSM_MDSS_HDMI_MHL_SII8334 MHL (Mobile High-Definition Link) technology uses USB connector to output HDMI content +config FB_MSM_MDSS_SPI_PANEL + depends on SPI_QUP + bool "Support SPI panel feature" + ---help--- + The MDSS SPI Panel provides support for transmitting SPI signals of + MDSS frame buffer data to the connected panel. Limited by the SPI clock + rate, the current max frame rate can only reach ~30 fps at 240x240 + resolution, and limited by the MDP hardware architecture only GPU + composition is supported. + config FB_MSM_MDSS_MHL3 depends on FB_MSM_MDSS_HDMI_PANEL bool "MHL3 SII8620 Support" diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile index 81d4953828f099d1a65194dea02314c5d9c718f8..09f874f1d956fcb2a4b49d2736fd883ac9123b7a 100644 --- a/drivers/video/fbdev/msm/Makefile +++ b/drivers/video/fbdev/msm/Makefile @@ -64,6 +64,10 @@ obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o ccflags-y += -DTARGET_HW_MDSS_HDMI endif +obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_display.o +obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_client.o +obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_panel.o + obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o mdss_sync.o diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c index 003f5acef2f43e2c85d02bae5880ff77595f2b02..79884beb7bbba4caa7b40dca6b688134bb40772d 100644 --- a/drivers/video/fbdev/msm/mdss_compat_utils.c +++ 
b/drivers/video/fbdev/msm/mdss_compat_utils.c @@ -1336,10 +1336,10 @@ static int __from_user_pgc_lut_data_legacy( return -EFAULT; if (num_r_stages > GC_LUT_SEGMENTS || num_b_stages > GC_LUT_SEGMENTS - || num_r_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages + || num_g_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages || !num_g_stages) { pr_err("invalid number of stages r_stages %d b_stages %d g_stages %d\n", - num_r_stages, num_b_stages, num_r_stages); + num_r_stages, num_b_stages, num_g_stages); return -EFAULT; } diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index b5a858d3fdf400578ebd75e158451cdbaffb6c90..d46e9bb4298bdb82e947440a7995a14a6be79428 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -362,6 +362,9 @@ static ssize_t mdss_fb_get_type(struct device *dev, case EDP_PANEL: ret = snprintf(buf, PAGE_SIZE, "edp panel\n"); break; + case SPI_PANEL: + ret = snprintf(buf, PAGE_SIZE, "spi panel\n"); + break; default: ret = snprintf(buf, PAGE_SIZE, "unknown panel\n"); break; @@ -1278,6 +1281,9 @@ static int mdss_fb_probe(struct platform_device *pdev) mfd->pdev = pdev; + if (mfd->panel.type == SPI_PANEL) + mfd->fb_imgType = MDP_RGB_565; + mfd->split_fb_left = mfd->split_fb_right = 0; mdss_fb_set_split_mode(mfd, pdata); @@ -2122,8 +2128,9 @@ void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd) dma_buf_end_cpu_access(mfd->fbmem_buf, DMA_BIDIRECTIONAL); - if (mfd->mdp.fb_mem_get_iommu_domain && !(!mfd->fb_attachment || - !mfd->fb_attachment->dmabuf || + if ((mfd->mdp.fb_mem_get_iommu_domain || + (mfd->panel.type == SPI_PANEL)) && + !(!mfd->fb_attachment || !mfd->fb_attachment->dmabuf || !mfd->fb_attachment->dmabuf->ops)) { dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table, DMA_BIDIRECTIONAL); @@ -2174,6 +2181,20 @@ int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size) rc = PTR_ERR(mfd->fb_table); goto err_detach; } + } else if 
(mfd->panel.type == SPI_PANEL) { + mfd->fb_attachment = dma_buf_attach(mfd->fbmem_buf, + &mfd->pdev->dev); + if (IS_ERR(mfd->fb_attachment)) { + rc = PTR_ERR(mfd->fb_attachment); + goto err_put; + } + + mfd->fb_table = dma_buf_map_attachment(mfd->fb_attachment, + DMA_BIDIRECTIONAL); + if (IS_ERR(mfd->fb_table)) { + rc = PTR_ERR(mfd->fb_table); + goto err_detach; + } } else { pr_err("No IOMMU Domain\n"); rc = -EINVAL; diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h index 908c12fb628683eb8905197fd8c172fed01500b3..1bb7d3feb93d958c22646cc763cdad7421172c68 100644 --- a/drivers/video/fbdev/msm/mdss_panel.h +++ b/drivers/video/fbdev/msm/mdss_panel.h @@ -60,6 +60,7 @@ enum fps_resolution { #define WRITEBACK_PANEL 10 /* Wifi display */ #define LVDS_PANEL 11 /* LVDS */ #define EDP_PANEL 12 /* LVDS */ +#define SPI_PANEL 13 /* SPI */ #define DSC_PPS_LEN 128 @@ -108,6 +109,7 @@ enum { MDSS_PANEL_INTF_DSI, MDSS_PANEL_INTF_EDP, MDSS_PANEL_INTF_HDMI, + MDSS_PANEL_INTF_SPI, }; enum { @@ -429,6 +431,10 @@ struct edp_panel_info { char frame_rate; /* fps */ }; +struct spi_panel_info { + char frame_rate; /* fps */ +}; + /** * struct dynamic_fps_data - defines dynamic fps related data * @hfp: horizontal front porch @@ -741,6 +747,7 @@ struct mdss_panel_info { struct mipi_panel_info mipi; struct lvds_panel_info lvds; struct edp_panel_info edp; + struct spi_panel_info spi; bool is_dba_panel; @@ -877,6 +884,9 @@ static inline u32 mdss_panel_get_framerate(struct mdss_panel_info *panel_info, frame_rate = panel_info->lcdc.frame_rate; break; } + case SPI_PANEL: + frame_rate = panel_info->spi.frame_rate; + break; default: pixel_total = (panel_info->lcdc.h_back_porch + panel_info->lcdc.h_front_porch + diff --git a/drivers/video/fbdev/msm/mdss_spi_client.c b/drivers/video/fbdev/msm/mdss_spi_client.c new file mode 100644 index 0000000000000000000000000000000000000000..bdd64ac857c8fbb068b65e94e568530d0b78458d --- /dev/null +++ 
b/drivers/video/fbdev/msm/mdss_spi_client.c @@ -0,0 +1,218 @@ +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "mdss_spi_client.h" + +#define MAX_READ_SPEED_HZ 9600000 +#define SPI_PANEL_COMMAND_LEN 1 +static struct spi_device *mdss_spi_client; + +int mdss_spi_read_data(u8 reg_addr, u8 *data, u8 len) +{ + int rc = 0; + u32 max_speed_hz; + u8 memory_write_reg = 0x2c; + u8 empty_pack[] = {0x29, 0x29, 0x29}; + struct spi_transfer t[4] = { + [0] = { + .tx_buf = ®_addr, + .len = 1, + }, + [1] = { + .rx_buf = data, + .len = len, + }, + [2] = { + .tx_buf = &empty_pack, + .len = 3, + }, + [3] = { + .tx_buf = &memory_write_reg, + .len = 1, + } + }; + struct spi_message m; + + if (!mdss_spi_client) { + pr_err("%s: spi client not available\n", __func__); + return -EINVAL; + } + + mdss_spi_client->bits_per_word = 8; + max_speed_hz = mdss_spi_client->max_speed_hz; + mdss_spi_client->max_speed_hz = MAX_READ_SPEED_HZ; + + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + rc = spi_sync(mdss_spi_client, &m); + if (rc) { + pr_err("%s: send panel reg failed\n", __func__); + return rc; + } + + spi_message_init(&m); + spi_message_add_tail(&t[2], &m); + rc = spi_sync(mdss_spi_client, &m); + if (rc) { + pr_err("%s: send empty package failed\n", __func__); + return rc; + } + + spi_message_init(&m); + spi_message_add_tail(&t[3], &m); + rc = spi_sync(mdss_spi_client, &m); + mdss_spi_client->max_speed_hz = max_speed_hz; 
+ if (rc) { + pr_err("%s: send memory write reg failed\n", __func__); + return rc; + } + + return rc; +} + +int mdss_spi_tx_command(const void *buf) +{ + int rc = 0; + struct spi_transfer t = { + .tx_buf = buf, + .len = SPI_PANEL_COMMAND_LEN, + }; + struct spi_message m; + + if (!mdss_spi_client) { + pr_err("%s: spi client not available\n", __func__); + return -EINVAL; + } + + mdss_spi_client->bits_per_word = 8; + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + rc = spi_sync(mdss_spi_client, &m); + if (rc) + pr_err("%s: send panel command failed\n", __func__); + return rc; +} + +int mdss_spi_tx_parameter(const void *buf, size_t len) +{ + int rc = 0; + struct spi_transfer t = { + .tx_buf = buf, + .len = len, + }; + struct spi_message m; + + if (!mdss_spi_client) { + pr_err("%s: spi client not available\n", __func__); + return -EINVAL; + } + + mdss_spi_client->bits_per_word = 8; + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + rc = spi_sync(mdss_spi_client, &m); + if (rc) + pr_err("%s: send panel parameter failed\n", __func__); + + return rc; +} + +int mdss_spi_tx_pixel(const void *buf, size_t len, + void (*spi_tx_compelet)(void *), void *ctx) +{ + int rc = 0; + static struct spi_transfer t; + static struct spi_message m; + + if (!mdss_spi_client) { + pr_err("%s: spi client not available\n", __func__); + return -EINVAL; + } + + mdss_spi_client->bits_per_word = 16; + t.tx_buf = buf; + t.len = len; + spi_message_init(&m); + m.complete = spi_tx_compelet; + m.context = ctx; + + spi_message_add_tail(&t, &m); + rc = spi_async(mdss_spi_client, &m); + + if (rc) + pr_err("%s: send FrameBuffer data failed\n", __func__); + + return rc; +} + +static int mdss_spi_client_probe(struct spi_device *spidev) +{ + int irq; + int cs; + int cpha, cpol, cs_high; + u32 max_speed; + + irq = spidev->irq; + cs = spidev->chip_select; + cpha = (spidev->mode & SPI_CPHA) ? 1:0; + cpol = (spidev->mode & SPI_CPOL) ? 1:0; + cs_high = (spidev->mode & SPI_CS_HIGH) ? 
1:0; + max_speed = spidev->max_speed_hz; + + pr_debug("cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x] Max_speed[%d]\n", + cs, cpha, cpol, cs_high, max_speed); + mdss_spi_client = spidev; + + return 0; +} + +static const struct of_device_id mdss_spi_dt_match[] = { + { .compatible = "qcom,mdss-spi-client" }, + {}, +}; + +static struct spi_driver mdss_spi_client_driver = { + .probe = mdss_spi_client_probe, + .driver = { + .name = "mdss-spi-client", + .owner = THIS_MODULE, + .of_match_table = mdss_spi_dt_match, + }, +}; + +static int __init mdss_spi_init(void) +{ + int ret; + + ret = spi_register_driver(&mdss_spi_client_driver); + if (ret) { + pr_err("register mdss spi client driver failed!\n"); + return ret; + } + + return 0; +} +module_init(mdss_spi_init); + +static void __exit mdss_spi_exit(void) +{ + spi_unregister_driver(&mdss_spi_client_driver); +} +module_exit(mdss_spi_exit); + diff --git a/drivers/video/fbdev/msm/mdss_spi_client.h b/drivers/video/fbdev/msm/mdss_spi_client.h new file mode 100644 index 0000000000000000000000000000000000000000..05a3fa54816031efd9a0235f6cad96cd1621cc50 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_spi_client.h @@ -0,0 +1,22 @@ +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MDSS_SPI_CLIENT_H__ +#define __MDSS_SPI_CLIENT_H__ + +int mdss_spi_tx_command(const void *buf); +int mdss_spi_tx_parameter(const void *buf, size_t len); +int mdss_spi_tx_pixel(const void *buf, size_t len, + void (*spi_tx_compelet)(void *), void *ctx); +int mdss_spi_read_data(u8 reg_addr, u8 *data, u8 len); + +#endif /* End of __MDSS_SPI_CLIENT_H__ */ diff --git a/drivers/video/fbdev/msm/mdss_spi_display.c b/drivers/video/fbdev/msm/mdss_spi_display.c new file mode 100644 index 0000000000000000000000000000000000000000..66f179a05c6ebb63bf5401ab4f1866c4d6be2daf --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_spi_display.c @@ -0,0 +1,541 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdss_panel.h" +#include "mdss_spi_panel.h" +#include "mdss_spi_client.h" +#include "mdss_mdp.h" + +static int mdss_spi_get_img(struct spi_panel_data *ctrl_pdata, + struct mdp_layer_commit_v1 *commit, struct device *dev) +{ + struct msmfb_data image; + struct dma_buf *dmabuf; + void *vaddr; + + memset(&image, 0, sizeof(image)); + image.memory_id = commit->input_layers[0].buffer.planes[0].fd; + image.offset = commit->input_layers[0].buffer.planes[0].offset; + + dmabuf = dma_buf_get(image.memory_id); + if (IS_ERR(dmabuf)) { + pr_err("%s : error on dma_buf_get\n", __func__); + return PTR_ERR(dmabuf); + } + ctrl_pdata->image_data.srcp_attachment = + dma_buf_attach(dmabuf, dev); + if (IS_ERR(ctrl_pdata->image_data.srcp_attachment)) + goto err_put; + + ctrl_pdata->image_data.srcp_table = + dma_buf_map_attachment(ctrl_pdata->image_data.srcp_attachment, + DMA_TO_DEVICE); + if (IS_ERR(ctrl_pdata->image_data.srcp_table)) + goto err_detach; + + dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE); + + vaddr = dma_buf_kmap(dmabuf, 0); + if (!vaddr) { + pr_err("%s:ion memory mapping failed\n", __func__); + goto err_unmap; + }; + + ctrl_pdata->image_data.addr = vaddr; + ctrl_pdata->image_data.len = dmabuf->size; + ctrl_pdata->image_data.mapped = true; + ctrl_pdata->image_data.srcp_dma_buf = dmabuf; + + return 0; +err_unmap: + dma_buf_unmap_attachment(ctrl_pdata->image_data.srcp_attachment, + ctrl_pdata->image_data.srcp_table, DMA_BIDIRECTIONAL); +err_detach: + dma_buf_detach(ctrl_pdata->image_data.srcp_dma_buf, + ctrl_pdata->image_data.srcp_attachment); +err_put: + dma_buf_put(ctrl_pdata->image_data.srcp_dma_buf); + return -EINVAL; +} + +static void mdss_spi_put_img(struct spi_panel_data *ctrl_pdata) +{ + if (!ctrl_pdata->image_data.mapped) + return; + dma_buf_kunmap(ctrl_pdata->image_data.srcp_dma_buf, 0, + ctrl_pdata->image_data.addr); + 
dma_buf_end_cpu_access(ctrl_pdata->image_data.srcp_dma_buf, + DMA_BIDIRECTIONAL); + dma_buf_unmap_attachment(ctrl_pdata->image_data.srcp_attachment, + ctrl_pdata->image_data.srcp_table, DMA_TO_DEVICE); + dma_buf_detach(ctrl_pdata->image_data.srcp_dma_buf, + ctrl_pdata->image_data.srcp_attachment); + dma_buf_put(ctrl_pdata->image_data.srcp_dma_buf); + + ctrl_pdata->image_data.srcp_dma_buf = NULL; + ctrl_pdata->image_data.addr = NULL; + ctrl_pdata->image_data.len = 0; + ctrl_pdata->image_data.mapped = false; +} + +int mdss_spi_display_pre_commit(struct msm_fb_data_type *mfd, + struct file *file, struct mdp_layer_commit_v1 *commit) +{ + char *temp_buf; + int rc = 0, scan_count = 0; + int panel_yres, panel_xres; + int padding_length, byte_per_pixel; + int dma_stride, actual_stride; + struct mdss_panel_data *pdata; + struct spi_panel_data *ctrl_pdata = NULL; + + if (commit->input_layer_cnt == 0) { + pr_err("SPI display doesn't support NULL commit\n"); + return 0; + } + + pdata = dev_get_platdata(&mfd->pdev->dev); + ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data); + + rc = mdss_spi_get_img(ctrl_pdata, commit, &mfd->pdev->dev); + if (rc) { + pr_err("mdss_spi_get_img failed\n"); + return rc; + } + + panel_xres = ctrl_pdata->panel_data.panel_info.xres; + panel_yres = ctrl_pdata->panel_data.panel_info.yres; + dma_stride = mfd->fbi->fix.line_length; + byte_per_pixel = ctrl_pdata->panel_data.panel_info.bpp / 8; + actual_stride = panel_xres * byte_per_pixel; + padding_length = dma_stride - actual_stride; + + /* remove padding and copy to continuous buffer */ + while (scan_count < panel_yres) { + memcpy((ctrl_pdata->back_buf + scan_count * actual_stride), + (ctrl_pdata->image_data.addr + scan_count * + (actual_stride + padding_length)), actual_stride); + scan_count++; + } + + mdss_spi_put_img(ctrl_pdata); + + /* wait for SPI transfer done */ + rc = mdss_spi_wait_tx_done(ctrl_pdata); + if (!rc) { + pr_err("SPI transfer timeout\n"); + return -EINVAL; + } + + /* 
swap buffer */ + temp_buf = ctrl_pdata->front_buf; + ctrl_pdata->front_buf = ctrl_pdata->back_buf; + ctrl_pdata->back_buf = temp_buf; + + return 0; +} + +int mdss_spi_display_atomic_validate(struct msm_fb_data_type *mfd, + struct file *file, struct mdp_layer_commit_v1 *commit) +{ + struct mdss_panel_data *pdata; + + pdata = dev_get_platdata(&mfd->pdev->dev); + + if ((commit->input_layers->dst_rect.w != pdata->panel_info.xres) && + (commit->input_layers->dst_rect.h != pdata->panel_info.yres) && + (commit->input_layer_cnt > 1)) { + WARN_ONCE(1, "%s:Only support GPU composition layer_cnt %d\n", + __func__, commit->input_layer_cnt); + return -EINVAL; + } + + if (commit->input_layers[0].buffer.format != MDP_RGB_565) { + WARN_ONCE(1, "%s:SPI display only support RGB565 format %d\n", + __func__, commit->input_layers[0].buffer.format); + return -EINVAL; + } + + return 0; +} + +int mdss_spi_panel_kickoff(struct msm_fb_data_type *mfd, + struct mdp_display_commit *data) +{ + struct spi_panel_data *ctrl_pdata = NULL; + struct mdss_panel_data *pdata; + int rc = 0; + + pdata = dev_get_platdata(&mfd->pdev->dev); + if (WARN_ON(!pdata)) + return -EINVAL; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data); + + enable_spi_panel_te_irq(ctrl_pdata, true); + mutex_lock(&ctrl_pdata->spi_tx_mutex); + reinit_completion(&ctrl_pdata->spi_panel_te); + atomic_inc(&ctrl_pdata->koff_cnt); + + rc = wait_for_completion_timeout(&ctrl_pdata->spi_panel_te, + msecs_to_jiffies(SPI_PANEL_TE_TIMEOUT)); + if (rc == 0) { + pr_err("wait panel TE time out\n"); + mutex_unlock(&ctrl_pdata->spi_tx_mutex); + return rc; + } + + rc = mdss_spi_tx_pixel(ctrl_pdata->front_buf, + ctrl_pdata->byte_per_frame, + mdss_spi_tx_fb_complete, ctrl_pdata); + + mutex_unlock(&ctrl_pdata->spi_tx_mutex); + enable_spi_panel_te_irq(ctrl_pdata, false); + + return rc; +} + +static int spi_display_get_metadata(struct msm_fb_data_type *mfd, + struct msmfb_metadata *metadata) +{ + int ret = 0; + + switch 
(metadata->op) { + case metadata_op_frame_rate: + metadata->data.panel_frame_rate = + mfd->panel_info->spi.frame_rate; + break; + case metadata_op_get_caps: + metadata->data.caps.mdp_rev = 5; + metadata->data.caps.rgb_pipes = 0; + metadata->data.caps.vig_pipes = 0; + metadata->data.caps.dma_pipes = 1; + break; + + default: + pr_warn("Unsupported request to GET META IOCTL %d\n", + metadata->op); + ret = -EINVAL; + break; + } + return ret; +} + +static int spi_display_ioctl_handler(struct msm_fb_data_type *mfd, + u32 cmd, void __user *argp) +{ + int val, ret = 0; + struct mdss_panel_data *pdata; + struct msmfb_metadata metadata; + + pdata = dev_get_platdata(&mfd->pdev->dev); + + switch (cmd) { + case MSMFB_OVERLAY_VSYNC_CTRL: + if (!copy_from_user(&val, argp, sizeof(val))) { + mdss_spi_vsync_enable(pdata, val); + } else { + pr_err("overlay vsync ctrl copy from user failed\n"); + ret = -EFAULT; + } + break; + case MSMFB_METADATA_GET: + ret = copy_from_user(&metadata, argp, sizeof(metadata)); + if (ret) { + pr_err("get metadata from user failed (%d)\n", ret); + break; + } + ret = spi_display_get_metadata(mfd, &metadata); + if (ret) { + pr_err("spi_display_get_metadata failed (%d)\n", ret); + break; + } + ret = copy_to_user(argp, &metadata, sizeof(metadata)); + if (ret) + pr_err("copy to user failed (%d)\n", ret); + break; + default: + break; + } + + return ret; +} + +static int mdss_spi_display_off(struct msm_fb_data_type *mfd) +{ + int rc = 0; + struct mdss_panel_data *pdata; + struct spi_panel_data *ctrl_pdata = NULL; + + pdata = dev_get_platdata(&mfd->pdev->dev); + + ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data); + + ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_ACTIVE; + + if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) { + rc = mdss_spi_panel_off(&ctrl_pdata->panel_data); + if (rc) { + pr_err("%s: Panel off failed\n", __func__); + return rc; + } + ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT; + } + rc = mdss_spi_panel_power_ctrl(pdata, 
MDSS_PANEL_POWER_OFF); + + return rc; +} + +static int mdss_spi_display_on(struct msm_fb_data_type *mfd) +{ + int rc = 0; + struct mdss_panel_data *pdata; + struct spi_panel_data *ctrl_pdata = NULL; + + pdata = dev_get_platdata(&mfd->pdev->dev); + + ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data); + + rc = mdss_spi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_ON); + if (rc) { + pr_err("%s:Panel power on failed. rc=%d\n", + __func__, rc); + return rc; + } + + mdss_spi_panel_pinctrl_set_state(ctrl_pdata, true); + mdss_spi_panel_reset(pdata, 1); + ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_ACTIVE; + rc = mdss_spi_panel_on(&ctrl_pdata->panel_data); + return rc; +} + +u32 mdss_spi_display_fb_stride(u32 fb_index, u32 xres, int bpp) +{ + /* + * The adreno GPU hardware requires that the pitch be aligned to + * 32 pixels for color buffers, so for the cases where the GPU + * is writing directly to fb0, the framebuffer pitch + * also needs to be 32 pixels aligned + */ + + if (fb_index == 0) + return ALIGN(xres, 32) * bpp; + else + return xres * bpp; +} + +ssize_t mdss_spi_show_capabilities(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t len = PAGE_SIZE; + int cnt = 0; + + cnt += scnprintf(buf + cnt, len - cnt, "mdp_version=5\n"); + cnt += scnprintf(buf + cnt, len - cnt, "hw_rev=%d\n", 5); + cnt += scnprintf(buf + cnt, len - cnt, "pipe_count:%d\n", 1); + cnt += scnprintf(buf + cnt, len - cnt, + "pipe_num:3 pipe_type:rgb pipe_ndx:8 rects:1 pipe_is_handoff:0" + ); + cnt += scnprintf(buf + cnt, len - cnt, + "display_id:0 fmts_supported:51,224,0,22,0,191,248,255,1,"); + cnt += scnprintf(buf + cnt, len - cnt, + "0,0,0,0,0,,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n"); + cnt += scnprintf(buf + cnt, len - cnt, "rgb_pipes=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "vig_pipes=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "dma_pipes=%d\n", 1); + cnt += scnprintf(buf + cnt, len - cnt, "blending_stages=%d\n", 2); + cnt += scnprintf(buf 
+ cnt, len - cnt, "cursor_pipes=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "max_cursor_size=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "smp_count=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "smp_size=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "smp_mb_per_pipe=%d\n", 0); + cnt += scnprintf(buf + cnt, len - cnt, "max_bandwidth_low=3100000\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_bandwidth_high=3100000\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_pipe_width=2048\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_mixer_width=2048\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_bandwidth_low=3100000\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_pipe_bw=2300000\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_mdp_clk=320000000\n"); + cnt += scnprintf(buf + cnt, len - cnt, "rot_dwnscale_min=1\n"); + cnt += scnprintf(buf + cnt, len - cnt, "rot_dwnscale_max=1\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_downscale_ratio=1\n"); + cnt += scnprintf(buf + cnt, len - cnt, "max_upscale_ratio=1\n"); + + return cnt; +} + +static ssize_t mdss_spi_vsync_show_event(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + struct mdss_panel_data *pdata; + struct spi_panel_data *ctrl_pdata = NULL; + int rc = 0; + u64 vsync_ticks; + + pdata = dev_get_platdata(&mfd->pdev->dev); + ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data); + + if (!(ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_ACTIVE)) + return -EAGAIN; + + vsync_ticks = ktime_to_ns(ctrl_pdata->vsync_time); + pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks); + rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks); + + return rc; +} + +static DEVICE_ATTR(vsync_event, 0444, mdss_spi_vsync_show_event, NULL); +static DEVICE_ATTR(caps, 0444, mdss_spi_show_capabilities, NULL); + +static struct attribute 
*mdp_spi_sysfs_attrs[] = { + &dev_attr_caps.attr, + NULL, +}; + +static struct attribute *spi_vsync_fs_attr_group[] = { + &dev_attr_vsync_event.attr, + NULL, +}; + +static struct attribute_group mdp_spi_sysfs_group = { + .attrs = mdp_spi_sysfs_attrs, +}; + +static struct attribute_group spi_vsync_sysfs_group = { + .attrs = spi_vsync_fs_attr_group, +}; + +int mdss_spi_overlay_init(struct msm_fb_data_type *mfd) +{ + struct msm_mdp_interface *spi_display_interface = &mfd->mdp; + struct device *dev = mfd->fbi->dev; + struct mdss_data_type *spi_mdata; + struct mdss_panel_data *pdata; + struct spi_panel_data *ctrl_pdata = NULL; + int rc = 0; + + pdata = dev_get_platdata(&mfd->pdev->dev); + ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data); + + spi_mdata = dev_get_drvdata(mfd->pdev->dev.parent); + if (!spi_mdata) { + pr_err("unable to initialize spi mdata for fb%d\n", mfd->index); + return -ENODEV; + } + + spi_display_interface->on_fnc = mdss_spi_display_on; + spi_display_interface->off_fnc = mdss_spi_display_off; + spi_display_interface->do_histogram = NULL; + spi_display_interface->cursor_update = NULL; + + spi_display_interface->ioctl_handler = spi_display_ioctl_handler; + spi_display_interface->kickoff_fnc = mdss_spi_panel_kickoff; + spi_display_interface->pre_commit = mdss_spi_display_pre_commit; + spi_display_interface->atomic_validate = + mdss_spi_display_atomic_validate; + spi_display_interface->fb_mem_get_iommu_domain = NULL; + spi_display_interface->fb_stride = mdss_spi_display_fb_stride; + spi_display_interface->fb_mem_alloc_fnc = NULL; + spi_display_interface->check_dsi_status = NULL; + + rc = sysfs_create_group(&dev->kobj, &spi_vsync_sysfs_group); + if (rc) + pr_err("spi vsync sysfs group creation failed, ret=%d\n", rc); + + rc = sysfs_create_link_nowarn(&dev->kobj, + &spi_mdata->pdev->dev.kobj, "mdp"); + + ctrl_pdata->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd, + "vsync_event"); + if (!ctrl_pdata->vsync_event_sd) + pr_err("spi 
vsync_event sysfs lookup failed\n"); + + return rc; +} + +static int mdss_spi_display_probe(struct platform_device *pdev) +{ + int rc = 0; + struct mdss_data_type *mdata; + static struct msm_mdp_interface spi_display_interface = { + .init_fnc = mdss_spi_overlay_init, + .fb_stride = mdss_spi_display_fb_stride, + }; + struct device *dev = &pdev->dev; + + if (!pdev->dev.of_node) { + pr_err("spi display driver only supports device tree probe\n"); + return -ENOTSUPP; + } + + mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL); + if (mdata == NULL) + return -ENOMEM; + + pdev->id = 0; + mdata->pdev = pdev; + platform_set_drvdata(pdev, mdata); + + rc = mdss_fb_register_mdp_instance(&spi_display_interface); + if (rc) { + pr_err("unable to register SPI display instance\n"); + return rc; + } + + rc = sysfs_create_group(&dev->kobj, &mdp_spi_sysfs_group); + if (rc) { + pr_err("spi vsync sysfs group creation failed, ret=%d\n", rc); + return rc; + } + + return 0; +} + +static const struct of_device_id mdss_spi_display_match[] = { + { .compatible = "qcom,mdss-spi-display" }, + {}, +}; + +static struct platform_driver this_driver = { + .probe = mdss_spi_display_probe, + .driver = { + .name = "spi_display", + .owner = THIS_MODULE, + .of_match_table = mdss_spi_display_match, + }, +}; + +static int __init mdss_spi_display_init(void) +{ + int ret; + + ret = platform_driver_register(&this_driver); + return ret; +} + +module_init(mdss_spi_display_init); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, mdss_spi_display_match); diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.c b/drivers/video/fbdev/msm/mdss_spi_panel.c new file mode 100644 index 0000000000000000000000000000000000000000..c1911399fb1e96f8e5654ea8ed707e9e2269fa5b --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_spi_panel.c @@ -0,0 +1,1350 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdss_panel.h" +#include "mdss_spi_panel.h" +#include "mdss_spi_client.h" + +DEFINE_LED_TRIGGER(bl_led_trigger); +int mdss_spi_panel_reset(struct mdss_panel_data *pdata, int enable) +{ + struct spi_panel_data *ctrl_pdata = NULL; + struct mdss_panel_info *pinfo = NULL; + int i, rc = 0; + + if (WARN_ON(!pdata)) + return -EINVAL; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, + panel_data); + + if (!gpio_is_valid(ctrl_pdata->rst_gpio)) { + pr_debug("%s:%d, reset line not configured\n", + __func__, __LINE__); + return -EINVAL; + } + + if (!gpio_is_valid(ctrl_pdata->disp_dc_gpio)) { + pr_debug("%s:%d, dc line not configured\n", + __func__, __LINE__); + return -EINVAL; + } + + pinfo = &(ctrl_pdata->panel_data.panel_info); + + if (enable) { + rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n"); + if (rc) { + pr_err("display reset gpio request failed\n"); + return rc; + } + + rc = gpio_request(ctrl_pdata->disp_dc_gpio, "disp_dc"); + if (rc) { + pr_err("display dc gpio request failed\n"); + if (gpio_is_valid(ctrl_pdata->rst_gpio)) + gpio_free(ctrl_pdata->rst_gpio); + return rc; + } + + if (!pinfo->cont_splash_enabled) { + for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) { + gpio_direction_output((ctrl_pdata->rst_gpio), + pdata->panel_info.rst_seq[i]); + if (pdata->panel_info.rst_seq[++i]) + usleep_range(pinfo->rst_seq[i] * 1000, + pinfo->rst_seq[i] * 1000); + } + } + + if 
(ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) { + pr_debug("%s: Panel Not properly turned OFF\n", + __func__); + ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT; + } + } else { + gpio_direction_output((ctrl_pdata->rst_gpio), 0); + gpio_free(ctrl_pdata->rst_gpio); + + gpio_direction_output(ctrl_pdata->disp_dc_gpio, 0); + gpio_free(ctrl_pdata->disp_dc_gpio); + } + return 0; +} + +int mdss_spi_panel_pinctrl_set_state(struct spi_panel_data *ctrl_pdata, + bool active) +{ + struct pinctrl_state *pin_state; + int rc = -EINVAL; + + if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl)) + return -EINVAL; + + pin_state = active ? ctrl_pdata->pin_res.gpio_state_active + : ctrl_pdata->pin_res.gpio_state_suspend; + if (!IS_ERR_OR_NULL(pin_state)) { + rc = pinctrl_select_state(ctrl_pdata->pin_res.pinctrl, + pin_state); + if (rc) + pr_err("%s: can not set %s pins\n", __func__, + active ? MDSS_PINCTRL_STATE_DEFAULT + : MDSS_PINCTRL_STATE_SLEEP); + } else { + pr_err("%s: invalid '%s' pinstate\n", __func__, + active ? 
MDSS_PINCTRL_STATE_DEFAULT + : MDSS_PINCTRL_STATE_SLEEP); + } + return rc; +} + +static int mdss_spi_panel_pinctrl_init(struct platform_device *pdev) +{ + struct spi_panel_data *ctrl_pdata; + + ctrl_pdata = platform_get_drvdata(pdev); + ctrl_pdata->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl)) { + pr_err("%s: failed to get pinctrl\n", __func__); + return -EINVAL; + } + + ctrl_pdata->pin_res.gpio_state_active + = pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl, + MDSS_PINCTRL_STATE_DEFAULT); + if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_active)) + pr_warn("%s: can not get default pinstate\n", __func__); + + ctrl_pdata->pin_res.gpio_state_suspend + = pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl, + MDSS_PINCTRL_STATE_SLEEP); + if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_suspend)) + pr_warn("%s: can not get sleep pinstate\n", __func__); + + return 0; +} + +static int mdss_spi_panel_power_on(struct mdss_panel_data *pdata) +{ + int ret = 0; + struct spi_panel_data *ctrl_pdata = NULL; + + if (WARN_ON(!pdata)) + return -EINVAL; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, + panel_data); + ret = msm_dss_enable_vreg( + ctrl_pdata->panel_power_data.vreg_config, + ctrl_pdata->panel_power_data.num_vreg, 1); + if (ret) { + pr_err("%s: failed to enable vregs for PANEL_PM\n", + __func__); + return ret; + } + + /* + * If continuous splash screen feature is enabled, then we need to + * request all the GPIOs that have already been configured in the + * bootloader. This needs to be done irrespective of whether + * the lp11_init flag is set or not. + */ + if (pdata->panel_info.cont_splash_enabled) { + if (mdss_spi_panel_pinctrl_set_state(ctrl_pdata, true)) + pr_debug("reset enable: pinctrl not enabled\n"); + + ret = mdss_spi_panel_reset(pdata, 1); + if (ret) + pr_err("%s: Panel reset failed.
rc=%d\n", + __func__, ret); + } + + return ret; +} + +static int mdss_spi_panel_power_off(struct mdss_panel_data *pdata) +{ + int ret = 0; + struct spi_panel_data *ctrl_pdata = NULL; + + if (WARN_ON(!pdata)) + return -EINVAL; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, + panel_data); + + ret = mdss_spi_panel_reset(pdata, 0); + if (ret) + pr_warn("%s: Panel reset failed. rc=%d\n", __func__, ret); + + if (mdss_spi_panel_pinctrl_set_state(ctrl_pdata, false)) + pr_warn("reset disable: pinctrl not enabled\n"); + + ret = msm_dss_enable_vreg( + ctrl_pdata->panel_power_data.vreg_config, + ctrl_pdata->panel_power_data.num_vreg, 0); + if (ret) + pr_err("%s: failed to disable vregs for PANEL_PM\n", + __func__); + + return ret; +} + +int mdss_spi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state) +{ + int ret; + struct mdss_panel_info *pinfo; + + if (WARN_ON(!pdata)) + return -EINVAL; + + pinfo = &pdata->panel_info; + pr_debug("%s: cur_power_state=%d req_power_state=%d\n", __func__, + pinfo->panel_power_state, power_state); + + if (pinfo->panel_power_state == power_state) { + pr_debug("%s: no change needed\n", __func__); + return 0; + } + + switch (power_state) { + case MDSS_PANEL_POWER_OFF: + ret = mdss_spi_panel_power_off(pdata); + break; + case MDSS_PANEL_POWER_ON: + ret = mdss_spi_panel_power_on(pdata); + break; + default: + pr_err("%s: unknown panel power state requested (%d)\n", + __func__, power_state); + ret = -EINVAL; + } + + if (!ret) + pinfo->panel_power_state = power_state; + + return ret; +} + +void enable_spi_panel_te_irq(struct spi_panel_data *ctrl_pdata, + bool enable) +{ + static int te_irq_count; + + if (!gpio_is_valid(ctrl_pdata->disp_te_gpio)) { + pr_err("%s:%d,SPI panel TE GPIO not configured\n", + __func__, __LINE__); + return; + } + + if (enable) { + if (++te_irq_count == 1) + enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + } else { + if (--te_irq_count == 0) + disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + } +} 
+ +void mdss_spi_tx_fb_complete(void *ctx) +{ + struct spi_panel_data *ctrl_pdata = ctx; + + if (atomic_add_unless(&ctrl_pdata->koff_cnt, -1, 0)) { + if (atomic_read(&ctrl_pdata->koff_cnt)) { + pr_err("%s: too many kickoffs=%d\n", __func__, + atomic_read(&ctrl_pdata->koff_cnt)); + } + wake_up_all(&ctrl_pdata->tx_done_waitq); + } +} + +static int mdss_spi_read_panel_data(struct mdss_panel_data *pdata, + u8 reg_addr, u8 *data, u8 len) +{ + int rc = 0; + struct spi_panel_data *ctrl_pdata = NULL; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, + panel_data); + + mutex_lock(&ctrl_pdata->spi_tx_mutex); + gpio_direction_output(ctrl_pdata->disp_dc_gpio, 0); + rc = mdss_spi_read_data(reg_addr, data, len); + gpio_direction_output(ctrl_pdata->disp_dc_gpio, 1); + mutex_unlock(&ctrl_pdata->spi_tx_mutex); + + return rc; +} + +int mdss_spi_panel_on(struct mdss_panel_data *pdata) +{ + struct spi_panel_data *ctrl = NULL; + struct mdss_panel_info *pinfo; + int i; + + if (WARN_ON(!pdata)) + return -EINVAL; + + pinfo = &pdata->panel_info; + ctrl = container_of(pdata, struct spi_panel_data, + panel_data); + + for (i = 0; i < ctrl->on_cmds.cmd_cnt; i++) { + /* pull down dc gpio indicate this is command */ + gpio_direction_output(ctrl->disp_dc_gpio, 0); + mdss_spi_tx_command(ctrl->on_cmds.cmds[i].command); + gpio_direction_output((ctrl->disp_dc_gpio), 1); + + if (ctrl->on_cmds.cmds[i].dchdr.dlen > 1) { + mdss_spi_tx_parameter(ctrl->on_cmds.cmds[i].parameter, + ctrl->on_cmds.cmds[i].dchdr.dlen - 1); + } + if (ctrl->on_cmds.cmds[i].dchdr.wait != 0) + msleep(ctrl->on_cmds.cmds[i].dchdr.wait); + } + + pr_debug("%s:-\n", __func__); + return 0; +} + +int mdss_spi_panel_off(struct mdss_panel_data *pdata) +{ + struct spi_panel_data *ctrl = NULL; + struct mdss_panel_info *pinfo; + int i; + + if (WARN_ON(!pdata)) + return -EINVAL; + + pinfo = &pdata->panel_info; + ctrl = container_of(pdata, struct spi_panel_data, + panel_data); + + for (i = 0; i < ctrl->off_cmds.cmd_cnt; i++) { + /* 
pull down dc gpio indicate this is command */ + gpio_direction_output(ctrl->disp_dc_gpio, 0); + mdss_spi_tx_command(ctrl->off_cmds.cmds[i].command); + gpio_direction_output((ctrl->disp_dc_gpio), 1); + + if (ctrl->off_cmds.cmds[i].dchdr.dlen > 1) { + mdss_spi_tx_parameter(ctrl->off_cmds.cmds[i].parameter, + ctrl->off_cmds.cmds[i].dchdr.dlen-1); + } + + if (ctrl->off_cmds.cmds[i].dchdr.wait != 0) + msleep(ctrl->off_cmds.cmds[i].dchdr.wait); + } + + pr_debug("%s:-\n", __func__); + return 0; +} + +static void mdss_spi_put_dt_vreg_data(struct device *dev, + struct dss_module_power *module_power) +{ + if (module_power->vreg_config) { + devm_kfree(dev, module_power->vreg_config); + module_power->vreg_config = NULL; + } + module_power->num_vreg = 0; +} + +static int mdss_spi_get_panel_vreg_data(struct device *dev, + struct dss_module_power *mp) +{ + int i = 0, rc = 0; + u32 tmp = 0; + struct device_node *of_node = NULL, *supply_node = NULL; + struct device_node *supply_root_node = NULL; + + of_node = dev->of_node; + + mp->num_vreg = 0; + + supply_root_node = of_get_child_by_name(of_node, + "qcom,panel-supply-entries"); + + for_each_available_child_of_node(supply_root_node, supply_node) + mp->num_vreg++; + + if (mp->num_vreg == 0) { + pr_debug("%s: no vreg\n", __func__); + goto novreg; + } else { + pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg); + } + + mp->vreg_config = devm_kcalloc(dev, mp->num_vreg, + sizeof(*(mp->vreg_config)), GFP_KERNEL); + + if (mp->vreg_config != NULL) { + for_each_available_child_of_node(supply_root_node, + supply_node) { + const char *st = NULL; + /* vreg-name */ + rc = of_property_read_string(supply_node, + "qcom,supply-name", &st); + if (rc) { + pr_err("%s: error reading name. 
rc=%d\n", + __func__, rc); + goto error; + } + strlcpy(mp->vreg_config[i].vreg_name, st, + sizeof(mp->vreg_config[i].vreg_name)); + + /* vreg-min-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-min-voltage", &tmp); + if (rc) { + pr_err("%s: error reading min volt. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].min_voltage = tmp; + + /* vreg-max-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-max-voltage", &tmp); + if (rc) { + pr_err("%s: error reading max volt. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].max_voltage = tmp; + + /* enable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-enable-load", &tmp); + if (rc) { + pr_err("%s: error read enable load. rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp; + + /* disable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-disable-load", &tmp); + if (rc) { + pr_err("%s: error read disable load. 
rc=%d\n", + __func__, rc); + goto error; + } + mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp; + + /* pre-sleep */ + rc = of_property_read_u32(supply_node, + "qcom,supply-pre-on-sleep", &tmp); + if (rc) { + pr_debug("%s: error read pre on value\n", + __func__); + rc = 0; + } else { + mp->vreg_config[i].pre_on_sleep = tmp; + } + + rc = of_property_read_u32(supply_node, + "qcom,supply-pre-off-sleep", &tmp); + if (rc) { + pr_debug("%s: error read pre off value\n", + __func__); + rc = 0; + } else { + mp->vreg_config[i].pre_off_sleep = tmp; + } + + /* post-sleep */ + rc = of_property_read_u32(supply_node, + "qcom,supply-post-on-sleep", &tmp); + if (rc) { + pr_debug("%s: error read post on value\n", + __func__); + rc = 0; + } else { + mp->vreg_config[i].post_on_sleep = tmp; + } + + rc = of_property_read_u32(supply_node, + "qcom,supply-post-off-sleep", &tmp); + if (rc) { + pr_debug("%s: error read post off value\n", + __func__); + rc = 0; + } else { + mp->vreg_config[i].post_off_sleep = tmp; + } + + ++i; + } + } + return rc; +error: + kfree(mp->vreg_config); + mp->vreg_config = NULL; + +novreg: + mp->num_vreg = 0; + + return rc; + +} + +static int mdss_spi_panel_parse_cmds(struct device_node *np, + struct spi_panel_cmds *pcmds, char *cmd_key) +{ + const char *data; + int blen = 0, len; + char *buf, *bp; + struct spi_ctrl_hdr *dchdr; + int i, cnt; + struct platform_device *mdss_pdev; + + data = of_get_property(np, cmd_key, &blen); + if (!data) { + pr_err("%s: failed, key=%s\n", __func__, cmd_key); + return -ENOENT; + } + + mdss_pdev = of_find_device_by_node(np->parent); + if (!mdss_pdev) { + pr_err("Unable to find mdss for node: %s\n", np->full_name); + return -ENOENT; + } + + buf = devm_kcalloc(&mdss_pdev->dev, blen, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf = kmemdup(data, blen, GFP_KERNEL); + + /* scan dcs commands */ + bp = buf; + len = blen; + cnt = 0; + while (len >= sizeof(*dchdr)) { + dchdr = (struct spi_ctrl_hdr *)bp; + if (dchdr->dlen 
> len) { + pr_err("%s: dtsi parse error, len=%d\n", + __func__, dchdr->dlen); + goto exit_free; + } + bp += sizeof(*dchdr); + len -= sizeof(*dchdr); + bp += dchdr->dlen; + len -= dchdr->dlen; + cnt++; + } + + if (len != 0) { + pr_err("%s: dcs_cmd=%x len=%d error\n", + __func__, buf[0], len); + goto exit_free; + } + + pcmds->cmds = devm_kcalloc(&mdss_pdev->dev, cnt, sizeof(*(pcmds->cmds)), + GFP_KERNEL); + if (!pcmds->cmds) + goto exit_free; + + pcmds->cmd_cnt = cnt; + pcmds->buf = buf; + pcmds->blen = blen; + + bp = buf; + len = blen; + for (i = 0; i < cnt; i++) { + dchdr = (struct spi_ctrl_hdr *)bp; + len -= sizeof(*dchdr); + bp += sizeof(*dchdr); + pcmds->cmds[i].dchdr = *dchdr; + pcmds->cmds[i].command = bp; + pcmds->cmds[i].parameter = bp + sizeof(char); + bp += dchdr->dlen; + len -= dchdr->dlen; + } + + pr_debug("%s: dcs_cmd=%x, len=%d, cmd_cnt=%d\n", __func__, + pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt); + return 0; + +exit_free: + kfree(buf); + return -ENOMEM; +} +static int mdss_spi_panel_parse_reset_seq(struct device_node *np, + u32 rst_seq[MDSS_SPI_RST_SEQ_LEN], u32 *rst_len, + const char *name) +{ + int num = 0, i; + int rc; + struct property *data; + u32 tmp[MDSS_SPI_RST_SEQ_LEN]; + + *rst_len = 0; + data = of_find_property(np, name, &num); + num /= sizeof(u32); + if (!data || !num || num > MDSS_SPI_RST_SEQ_LEN || num % 2) { + pr_err("%s:%d, error reading %s, length found = %d\n", + __func__, __LINE__, name, num); + return -EINVAL; + } + rc = of_property_read_u32_array(np, name, tmp, num); + if (rc) { + pr_err("%s:%d, error reading %s, rc = %d\n", + __func__, __LINE__, name, rc); + return rc; + } + + for (i = 0; i < num; ++i) + rst_seq[i] = tmp[i]; + *rst_len = num; + + return 0; +} + +static bool mdss_send_panel_cmd_for_esd(struct spi_panel_data *ctrl_pdata) +{ + if (WARN_ON(!ctrl_pdata)) + return false; + + mutex_lock(&ctrl_pdata->spi_tx_mutex); + mdss_spi_panel_on(&ctrl_pdata->panel_data); + mutex_unlock(&ctrl_pdata->spi_tx_mutex); + + return true; 
+} + +static bool mdss_spi_reg_status_check(struct spi_panel_data *ctrl_pdata) +{ + int ret = 0; + int i = 0; + + if (WARN_ON(!ctrl_pdata)) + return false; + + pr_debug("%s: Checking Register status\n", __func__); + + ret = mdss_spi_read_panel_data(&ctrl_pdata->panel_data, + ctrl_pdata->panel_status_reg, + ctrl_pdata->act_status_value, + ctrl_pdata->status_cmds_rlen); + if (ret < 0) { + pr_err("%s: Read status register returned error\n", __func__); + return false; + } + + for (i = 0; i < ctrl_pdata->status_cmds_rlen; i++) { + pr_debug("act_value[%d] = %x, exp_value[%d] = %x\n", + i, ctrl_pdata->act_status_value[i], + i, ctrl_pdata->exp_status_value[i]); + if (ctrl_pdata->act_status_value[i] != + ctrl_pdata->exp_status_value[i]) + return false; + } + + return true; +} + +static void mdss_spi_parse_esd_params(struct device_node *np, + struct spi_panel_data *ctrl) +{ + u32 tmp; + int rc; + struct property *data; + const char *string; + struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info; + struct platform_device *mdss_pdev; + + mdss_pdev = of_find_device_by_node(np->parent); + if (!mdss_pdev) { + pr_err("Unable to find mdss for node: %s\n", np->full_name); + return; + } + + pinfo->esd_check_enabled = of_property_read_bool(np, + "qcom,esd-check-enabled"); + + if (!pinfo->esd_check_enabled) + return; + + ctrl->status_mode = SPI_ESD_MAX; + + rc = of_property_read_string(np, + "qcom,mdss-spi-panel-status-check-mode", &string); + if (!rc) { + if (!strcmp(string, "reg_read")) { + ctrl->status_mode = SPI_ESD_REG; + ctrl->check_status = + mdss_spi_reg_status_check; + } else if (!strcmp(string, "send_init_command")) { + ctrl->status_mode = SPI_SEND_PANEL_COMMAND; + ctrl->check_status = + mdss_send_panel_cmd_for_esd; + return; + } else { + pr_err("No valid panel-status-check-mode string\n"); + pinfo->esd_check_enabled = false; + return; + } + } + + rc = of_property_read_u8(np, "qcom,mdss-spi-panel-status-reg", + &ctrl->panel_status_reg); + if (rc) { + pr_warn("%s:%d, 
Read status reg failed, disable ESD check\n", + __func__, __LINE__); + pinfo->esd_check_enabled = false; + return; + } + + rc = of_property_read_u32(np, "qcom,mdss-spi-panel-status-read-length", + &tmp); + if (rc) { + pr_warn("%s:%d, Read reg length failed, disable ESD check\n", + __func__, __LINE__); + pinfo->esd_check_enabled = false; + return; + } + + ctrl->status_cmds_rlen = (!rc ? tmp : 1); + + ctrl->exp_status_value = devm_kzalloc(&mdss_pdev->dev, sizeof(u8) * + (ctrl->status_cmds_rlen + 1), GFP_KERNEL); + ctrl->act_status_value = devm_kzalloc(&mdss_pdev->dev, sizeof(u8) * + (ctrl->status_cmds_rlen + 1), GFP_KERNEL); + + if (!ctrl->exp_status_value || !ctrl->act_status_value) { + pinfo->esd_check_enabled = false; + return; + } + + data = of_find_property(np, "qcom,mdss-spi-panel-status-value", &tmp); + tmp /= sizeof(u8); + if (!data || (tmp != ctrl->status_cmds_rlen)) { + pr_err("%s: Panel status values not found\n", __func__); + pinfo->esd_check_enabled = false; + memset(ctrl->exp_status_value, 0, ctrl->status_cmds_rlen); + } else { + rc = of_property_read_u8_array(np, + "qcom,mdss-spi-panel-status-value", + ctrl->exp_status_value, tmp); + if (rc) { + pr_err("%s: Error reading panel status values\n", + __func__); + pinfo->esd_check_enabled = false; + kfree(ctrl->exp_status_value); + kfree(ctrl->act_status_value); + } + } +} + +static int mdss_spi_panel_parse_dt(struct device_node *np, + struct spi_panel_data *ctrl_pdata) +{ + u32 tmp; + int rc; + const char *data; + struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info); + + pinfo->cont_splash_enabled = of_property_read_bool(np, + "qcom,cont-splash-enabled"); + + rc = of_property_read_u32(np, "qcom,mdss-spi-panel-width", &tmp); + if (rc) { + pr_err("%s: panel width not specified\n", __func__); + return -EINVAL; + } + pinfo->xres = tmp; + + rc = of_property_read_u32(np, "qcom,mdss-spi-panel-height", &tmp); + if (rc) { + pr_err("%s:panel height not specified\n", __func__); + return -EINVAL; + } + 
pinfo->yres = tmp; + + rc = of_property_read_u32(np, + "qcom,mdss-pan-physical-width-dimension", &tmp); + pinfo->physical_width = (!rc ? tmp : 0); + rc = of_property_read_u32(np, + "qcom,mdss-pan-physical-height-dimension", &tmp); + pinfo->physical_height = (!rc ? tmp : 0); + rc = of_property_read_u32(np, "qcom,mdss-spi-panel-framerate", &tmp); + pinfo->spi.frame_rate = (!rc ? tmp : 30); + + /* + * Due to the SPI clock limit, the frame rate of an SPI display can + * only reach ~30fps at 240*240 resolution with rgb565 format. + * The VSYNC frequency should match the frame rate to avoid flicker, + * but some panels can't support a TE frequency lower than 30Hz, so + * we need to set double the TE frequency in this case. + */ + rc = of_property_read_u32(np, "qcom,mdss-spi-panel-te-per-vsync", &tmp); + ctrl_pdata->vsync_per_te = (!rc ? tmp : 2); + + rc = of_property_read_u32(np, "qcom,mdss-spi-bpp", &tmp); + if (rc) { + pr_err("%s: bpp not specified\n", __func__); + return -EINVAL; + } + pinfo->bpp = tmp; + + pinfo->pdest = DISPLAY_1; + + ctrl_pdata->bklt_ctrl = SPI_UNKNOWN_CTRL; + data = of_get_property(np, "qcom,mdss-spi-bl-pmic-control-type", NULL); + if (data) { + if (!strcmp(data, "bl_ctrl_wled")) { + led_trigger_register_simple("bkl-trigger", + &bl_led_trigger); + pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", + __func__); + ctrl_pdata->bklt_ctrl = SPI_BL_WLED; + } else if (!strcmp(data, "bl_ctrl_pwm")) { + ctrl_pdata->bklt_ctrl = SPI_BL_PWM; + ctrl_pdata->pwm_pmi = of_property_read_bool(np, + "qcom,mdss-spi-bl-pwm-pmi"); + rc = of_property_read_u32(np, + "qcom,mdss-spi-bl-pmic-pwm-frequency", &tmp); + if (rc) { + pr_err("%s: Error, panel pwm_period\n", + __func__); + return -EINVAL; + } + ctrl_pdata->pwm_period = tmp; + if (ctrl_pdata->pwm_pmi) { + ctrl_pdata->pwm_bl = of_pwm_get(np, NULL); + if (IS_ERR(ctrl_pdata->pwm_bl)) { + pr_err("%s: Error, pwm device\n", + __func__); + ctrl_pdata->pwm_bl = NULL; + return -EINVAL; + } + } else { + rc =
of_property_read_u32(np, + "qcom,mdss-spi-bl-pmic-bank-select", + &tmp); + if (rc) { + pr_err("%s: Error, lpg channel\n", + __func__); + return -EINVAL; + } + ctrl_pdata->pwm_lpg_chan = tmp; + tmp = of_get_named_gpio(np, + "qcom,mdss-spi-pwm-gpio", 0); + ctrl_pdata->pwm_pmic_gpio = tmp; + pr_debug("%s: Configured PWM bklt ctrl\n", + __func__); + } + } + } + rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp); + pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS); + rc = of_property_read_u32(np, "qcom,mdss-spi-bl-min-level", &tmp); + pinfo->bl_min = (!rc ? tmp : 0); + rc = of_property_read_u32(np, "qcom,mdss-spi-bl-max-level", &tmp); + pinfo->bl_max = (!rc ? tmp : 255); + ctrl_pdata->bklt_max = pinfo->bl_max; + + + mdss_spi_panel_parse_reset_seq(np, pinfo->rst_seq, + &(pinfo->rst_seq_len), + "qcom,mdss-spi-reset-sequence"); + + mdss_spi_panel_parse_cmds(np, &ctrl_pdata->on_cmds, + "qcom,mdss-spi-on-command"); + + mdss_spi_panel_parse_cmds(np, &ctrl_pdata->off_cmds, + "qcom,mdss-spi-off-command"); + + mdss_spi_parse_esd_params(np, ctrl_pdata); + + return 0; +} + +static void mdss_spi_panel_bklt_pwm(struct spi_panel_data *ctrl, int level) +{ + int ret; + u32 duty; + u32 period_ns; + + if (WARN_ON(!ctrl->pwm_bl)) + return; + + if (level == 0) { + if (ctrl->pwm_enabled) { + ret = pwm_config(ctrl->pwm_bl, level, + ctrl->pwm_period * NSEC_PER_USEC); + if (ret) + pr_err("%s: pwm_config() failed err=%d.\n", + __func__, ret); + pwm_disable(ctrl->pwm_bl); + } + ctrl->pwm_enabled = 0; + return; + } + + duty = level * ctrl->pwm_period; + duty /= ctrl->bklt_max; + + pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n", + __func__, ctrl->bklt_ctrl, ctrl->pwm_period, + ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan); + + period_ns = ctrl->pwm_period * NSEC_PER_USEC; + ret = pwm_config(ctrl->pwm_bl, + level * period_ns / ctrl->bklt_max, period_ns); + if (ret) { + pr_err("%s: pwm_config() failed err=%d\n", __func__, ret); + return; + } + + if 
(!ctrl->pwm_enabled) { + ret = pwm_enable(ctrl->pwm_bl); + if (ret) + pr_err("%s: pwm_enable() failed err=%d\n", __func__, + ret); + ctrl->pwm_enabled = 1; + } +} + +void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata, + u32 bl_level) +{ + struct spi_panel_data *ctrl_pdata = NULL; + + if (WARN_ON(!pdata)) + return; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, + panel_data); + + if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0)) + bl_level = pdata->panel_info.bl_min; + + switch (ctrl_pdata->bklt_ctrl) { + case SPI_BL_WLED: + led_trigger_event(bl_led_trigger, bl_level); + break; + case SPI_BL_PWM: + mdss_spi_panel_bklt_pwm(ctrl_pdata, bl_level); + break; + default: + pr_err("%s: Unknown bl_ctrl configuration %d\n", + __func__, ctrl_pdata->bklt_ctrl); + break; + } +} + +static int mdss_spi_panel_init(struct device_node *node, + struct spi_panel_data *ctrl_pdata) +{ + int rc = 0; + static const char *panel_name; + struct mdss_panel_info *pinfo; + + pinfo = &ctrl_pdata->panel_data.panel_info; + + pr_debug("%s:%d\n", __func__, __LINE__); + pinfo->panel_name[0] = '\0'; + panel_name = of_get_property(node, "qcom,mdss-spi-panel-name", NULL); + if (!panel_name) { + pr_info("%s:%d, Panel name not specified\n", + __func__, __LINE__); + } else { + pr_debug("%s: Panel Name = %s\n", __func__, panel_name); + strlcpy(&pinfo->panel_name[0], panel_name, MDSS_MAX_PANEL_LEN); + } + rc = mdss_spi_panel_parse_dt(node, ctrl_pdata); + if (rc) { + pr_err("%s:%d panel dt parse failed\n", __func__, __LINE__); + return rc; + } + + ctrl_pdata->panel_data.panel_info.is_prim_panel = true; + ctrl_pdata->byte_per_frame = pinfo->xres * pinfo->yres * pinfo->bpp/8; + + ctrl_pdata->front_buf = kzalloc(ctrl_pdata->byte_per_frame, GFP_KERNEL); + ctrl_pdata->back_buf = kzalloc(ctrl_pdata->byte_per_frame, GFP_KERNEL); + + pinfo->cont_splash_enabled = false; + + pr_info("%s: Continuous splash %s\n", __func__, + pinfo->cont_splash_enabled ? 
"enabled" : "disabled"); + + pinfo->dynamic_switch_pending = false; + pinfo->is_lpm_mode = false; + pinfo->esd_rdy = false; + + ctrl_pdata->panel_data.set_backlight = mdss_spi_panel_bl_ctrl_update; + + return 0; +} + +static void mdss_spi_display_handle_vsync(struct spi_panel_data *ctrl_pdata, + ktime_t t) +{ + ctrl_pdata->vsync_time = t; + sysfs_notify_dirent(ctrl_pdata->vsync_event_sd); +} + +static int mdss_spi_panel_regulator_init(struct platform_device *pdev) +{ + int rc = 0; + + struct spi_panel_data *ctrl_pdata = NULL; + + ctrl_pdata = platform_get_drvdata(pdev); + if (WARN_ON(!ctrl_pdata)) + return -EINVAL; + + rc = msm_dss_config_vreg(&pdev->dev, + ctrl_pdata->panel_power_data.vreg_config, + ctrl_pdata->panel_power_data.num_vreg, 1); + if (rc) + pr_err("%s: failed to init vregs for PANEL_PM\n", + __func__); + + return rc; +} + +static irqreturn_t spi_panel_te_handler(int irq, void *data) +{ + struct spi_panel_data *ctrl_pdata = (struct spi_panel_data *)data; + ktime_t vsync_time; + + complete(&ctrl_pdata->spi_panel_te); + + if (ctrl_pdata->vsync_enable && (ctrl_pdata->te_count == + ctrl_pdata->vsync_per_te)) { + vsync_time = ktime_get(); + mdss_spi_display_handle_vsync(ctrl_pdata, vsync_time); + ctrl_pdata->te_count = 0; + } + + return IRQ_HANDLED; +} + +void mdss_spi_vsync_enable(struct mdss_panel_data *pdata, int enable) +{ + struct spi_panel_data *ctrl_pdata = NULL; + + if (WARN_ON(!pdata)) + return; + + ctrl_pdata = container_of(pdata, struct spi_panel_data, + panel_data); + + mutex_lock(&ctrl_pdata->te_mutex); + if (enable) { + if (ctrl_pdata->vsync_enable == false) { + enable_spi_panel_te_irq(ctrl_pdata, true); + ctrl_pdata->vsync_enable = true; + } + } else { + if (ctrl_pdata->vsync_enable == true) { + enable_spi_panel_te_irq(ctrl_pdata, false); + ctrl_pdata->vsync_enable = false; + } + } + mutex_unlock(&ctrl_pdata->te_mutex); +} + +static struct device_node *mdss_spi_pref_prim_panel( + struct platform_device *pdev) +{ + struct device_node 
*spi_pan_node = NULL; + + pr_debug("%s:%d: Select primary panel from dt\n", + __func__, __LINE__); + spi_pan_node = of_parse_phandle(pdev->dev.of_node, + "qcom,spi-pref-prim-pan", 0); + if (!spi_pan_node) + pr_err("%s:can't find panel phandle\n", __func__); + + return spi_pan_node; +} + +static struct device_node *mdss_spi_get_fb_node_cb(struct platform_device *pdev) +{ + struct device_node *fb_node; + struct platform_device *spi_dev; + struct mdss_dsi_ctrl_pdata *ctrl_pdata; + + if (WARN_ON(!pdev)) + return NULL; + + ctrl_pdata = platform_get_drvdata(pdev); + spi_dev = of_find_device_by_node(pdev->dev.of_node); + if (!spi_dev) { + pr_err("Unable to find dsi master device: %s\n", + pdev->dev.of_node->full_name); + return NULL; + } + + fb_node = of_parse_phandle(spi_dev->dev.of_node, + "qcom,mdss-fb-map-prim", 0); + if (!fb_node) { + pr_err("Unable to find fb node for device: %s\n", pdev->name); + return NULL; + } + + return fb_node; +} + +static int spi_panel_device_register(struct device_node *pan_node, + struct spi_panel_data *ctrl_pdata) +{ + int rc; + struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info); + struct device_node *spi_ctrl_np = NULL; + struct platform_device *ctrl_pdev = NULL; + + pinfo->type = SPI_PANEL; + + spi_ctrl_np = of_parse_phandle(pan_node, + "qcom,mdss-spi-panel-controller", 0); + if (!spi_ctrl_np) { + pr_err("%s: SPI controller node not initialized\n", __func__); + return -EPROBE_DEFER; + } + + ctrl_pdev = of_find_device_by_node(spi_ctrl_np); + if (!ctrl_pdev) { + of_node_put(spi_ctrl_np); + pr_err("%s: SPI controller node not find\n", __func__); + return -EPROBE_DEFER; + } + + rc = mdss_spi_panel_regulator_init(ctrl_pdev); + if (rc) { + pr_err("%s: failed to init regulator, rc=%d\n", + __func__, rc); + return rc; + } + + pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo, + FPS_RESOLUTION_HZ); + pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo); + + ctrl_pdata->disp_te_gpio = 
of_get_named_gpio(ctrl_pdev->dev.of_node, + "qcom,platform-te-gpio", 0); + if (!gpio_is_valid(ctrl_pdata->disp_te_gpio)) + pr_err("%s:%d, TE gpio not specified\n", + __func__, __LINE__); + + ctrl_pdata->disp_dc_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node, + "qcom,platform-spi-dc-gpio", 0); + if (!gpio_is_valid(ctrl_pdata->disp_dc_gpio)) + pr_err("%s:%d, SPI DC gpio not specified\n", + __func__, __LINE__); + + ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node, + "qcom,platform-reset-gpio", 0); + if (!gpio_is_valid(ctrl_pdata->rst_gpio)) + pr_err("%s:%d, reset gpio not specified\n", + __func__, __LINE__); + + if (ctrl_pdata->bklt_ctrl == SPI_BL_PWM) { + if (ctrl_pdata->pwm_pmi) + return -EINVAL; + + ctrl_pdata->pwm_bl = devm_of_pwm_get(&ctrl_pdev->dev, + ctrl_pdev->dev.of_node, NULL); + if (IS_ERR_OR_NULL(ctrl_pdata->pwm_bl)) + pr_err("%s: Error: devm_of_pwm_get failed", + __func__); + ctrl_pdata->pwm_enabled = 0; + } + + ctrl_pdata->ctrl_state = CTRL_STATE_UNKNOWN; + ctrl_pdata->panel_data.get_fb_node = mdss_spi_get_fb_node_cb; + ctrl_pdata->panel_data.get_fb_node = NULL; + if (pinfo->cont_splash_enabled) { + rc = mdss_spi_panel_power_ctrl(&(ctrl_pdata->panel_data), + MDSS_PANEL_POWER_ON); + if (rc) { + pr_err("%s: Panel power on failed\n", __func__); + return rc; + } + if (ctrl_pdata->bklt_ctrl == SPI_BL_PWM) + ctrl_pdata->pwm_enabled = 1; + ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT; + } else { + pinfo->panel_power_state = MDSS_PANEL_POWER_OFF; + } + + rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data)); + if (rc) { + pr_err("%s: unable to register SPI panel\n", __func__); + return rc; + } + + pr_debug("%s: Panel data initialized\n", __func__); + return 0; +} + +int mdss_spi_wait_tx_done(struct spi_panel_data *ctrl_pdata) +{ + int rc = 0; + + rc = wait_event_timeout(ctrl_pdata->tx_done_waitq, + atomic_read(&ctrl_pdata->koff_cnt) == 0, + KOFF_TIMEOUT); + + return rc; +} + + +static int mdss_spi_panel_probe(struct platform_device 
*pdev) +{ + int rc = 0; + struct spi_panel_data *ctrl_pdata; + struct device_node *spi_pan_node = NULL; + char panel_cfg[MDSS_MAX_PANEL_LEN]; + const char *ctrl_name; + + if (!pdev->dev.of_node) { + pr_err("SPI driver only supports device tree probe\n"); + return -ENOTSUPP; + } + + ctrl_pdata = platform_get_drvdata(pdev); + if (!ctrl_pdata) { + ctrl_pdata = devm_kzalloc(&pdev->dev, + sizeof(struct spi_panel_data), + GFP_KERNEL); + if (!ctrl_pdata) { + pr_err("%s: FAILED: cannot alloc spi panel\n", + __func__); + return -ENOMEM; + } + platform_set_drvdata(pdev, ctrl_pdata); + } + + ctrl_name = of_get_property(pdev->dev.of_node, "label", NULL); + if (!ctrl_name) + pr_info("%s:%d, Ctrl name not specified\n", + __func__, __LINE__); + else + pr_debug("%s: Ctrl name = %s\n", + __func__, ctrl_name); + + + rc = of_platform_populate(pdev->dev.of_node, + NULL, NULL, &pdev->dev); + if (rc) { + dev_err(&pdev->dev, + "%s: failed to add child nodes, rc=%d\n", + __func__, rc); + goto error_no_mem; + } + + rc = mdss_spi_panel_pinctrl_init(pdev); + if (rc) + pr_warn("%s: failed to get pin resources\n", __func__); + + rc = mdss_spi_get_panel_vreg_data(&pdev->dev, + &ctrl_pdata->panel_power_data); + if (rc) { + dev_err(&pdev->dev, + "%s: failed to get panel vreg data, rc=%d\n", + __func__, rc); + goto error_vreg; + } + + /* find panel device node */ + spi_pan_node = mdss_spi_pref_prim_panel(pdev); + if (!spi_pan_node) { + pr_err("%s: can't find panel node %s\n", __func__, panel_cfg); + goto error_pan_node; + } + + rc = mdss_spi_panel_init(spi_pan_node, ctrl_pdata); + if (rc) { + pr_err("%s: spi panel init failed\n", __func__); + goto error_pan_node; + } + + rc = spi_panel_device_register(spi_pan_node, ctrl_pdata); + if (rc) { + pr_err("%s: spi panel dev reg failed\n", __func__); + goto error_pan_node; + } + + init_completion(&ctrl_pdata->spi_panel_te); + mutex_init(&ctrl_pdata->spi_tx_mutex); + mutex_init(&ctrl_pdata->te_mutex); + init_waitqueue_head(&ctrl_pdata->tx_done_waitq); + + 
rc = devm_request_irq(&pdev->dev, + gpio_to_irq(ctrl_pdata->disp_te_gpio), + spi_panel_te_handler, IRQF_TRIGGER_RISING, + "TE_GPIO", ctrl_pdata); + if (rc) { + pr_err("TE request_irq failed.\n"); + goto error_te_request; + } + disable_irq_nosync(gpio_to_irq(ctrl_pdata->disp_te_gpio)); + + pr_debug("%s: spi panel initialized\n", __func__); + return 0; + +error_te_request: + mutex_destroy(&ctrl_pdata->spi_tx_mutex); + mutex_destroy(&ctrl_pdata->te_mutex); +error_pan_node: + of_node_put(spi_pan_node); +error_vreg: + mdss_spi_put_dt_vreg_data(&pdev->dev, + &ctrl_pdata->panel_power_data); +error_no_mem: + devm_kfree(&pdev->dev, ctrl_pdata); + return rc; +} + +static const struct of_device_id mdss_spi_panel_match[] = { + { .compatible = "qcom,mdss-spi-panel" }, + {}, +}; + +static struct platform_driver this_driver = { + .probe = mdss_spi_panel_probe, + .driver = { + .name = "spi_panel", + .owner = THIS_MODULE, + .of_match_table = mdss_spi_panel_match, + }, +}; + +static int __init mdss_spi_display_panel_init(void) +{ + int ret; + + ret = platform_driver_register(&this_driver); + return ret; +} +module_init(mdss_spi_display_panel_init); + +MODULE_DEVICE_TABLE(of, mdss_spi_panel_match); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.h b/drivers/video/fbdev/msm/mdss_spi_panel.h new file mode 100644 index 0000000000000000000000000000000000000000..f81cbd3c31f3e333a9707543a147b5da0db2b8ff --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_spi_panel.h @@ -0,0 +1,163 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#ifndef __MDSS_SPI_PANEL_H__ +#define __MDSS_SPI_PANEL_H__ + +#include +#include +#include +#include +#include + +#include "mdss_panel.h" +#include "mdss_fb.h" + +#define MDSS_MAX_BL_BRIGHTNESS 255 + +#define MDSS_SPI_RST_SEQ_LEN 10 + +#define NONE_PANEL "none" + +#define CTRL_STATE_UNKNOWN 0x00 +#define CTRL_STATE_PANEL_INIT BIT(0) +#define CTRL_STATE_PANEL_ACTIVE BIT(1) + +#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default" +#define MDSS_PINCTRL_STATE_SLEEP "mdss_sleep" +#define SPI_PANEL_TE_TIMEOUT 400 + +#define KOFF_TIMEOUT_MS 84 +#define KOFF_TIMEOUT msecs_to_jiffies(KOFF_TIMEOUT_MS) + +enum spi_panel_bl_ctrl { + SPI_BL_PWM, + SPI_BL_WLED, + SPI_BL_DCS_CMD, + SPI_UNKNOWN_CTRL, +}; + +struct spi_pinctrl_res { + struct pinctrl *pinctrl; + struct pinctrl_state *gpio_state_active; + struct pinctrl_state *gpio_state_suspend; +}; + +struct spi_ctrl_hdr { + char wait; /* ms */ + char dlen; /* 8 bits */ +}; + +struct spi_cmd_desc { + struct spi_ctrl_hdr dchdr; + char *command; + char *parameter; +}; + +struct spi_panel_cmds { + char *buf; + int blen; + struct spi_cmd_desc *cmds; + int cmd_cnt; +}; + +enum spi_panel_status_mode { + SPI_ESD_REG, + SPI_SEND_PANEL_COMMAND, + SPI_ESD_MAX, +}; + +struct spi_display_notification { + void (*handler)(void *arg); + void *arg; +}; + +struct mdss_spi_img_data { + void *addr; + unsigned long len; + struct dma_buf *srcp_dma_buf; + struct dma_buf_attachment *srcp_attachment; + struct sg_table *srcp_table; + + bool mapped; +}; + +struct mdss_spi_fb_data { + void *tx_buf_addr; + atomic_t used; +}; + +struct spi_panel_data { + struct mdss_panel_data panel_data; + struct mdss_util_intf *mdss_util; + u8 ctrl_state; + + int disp_te_gpio; + int rst_gpio; + int disp_dc_gpio; /* command or data */ + struct spi_pinctrl_res pin_res; + + struct spi_panel_cmds on_cmds; + struct spi_panel_cmds off_cmds; + struct pwm_device *pwm_bl; + struct dss_module_power panel_power_data; + + 
int bklt_ctrl; /* backlight ctrl */ + bool pwm_pmi; + int pwm_period; + int pwm_pmic_gpio; + int pwm_lpg_chan; + int pwm_enabled; + int bklt_max; + + int status_mode; + u32 status_cmds_rlen; + u8 panel_status_reg; + u8 *exp_status_value; + u8 *act_status_value; + bool (*check_status)(struct spi_panel_data *pdata); + + atomic_t koff_cnt; + int byte_per_frame; + char *front_buf; + char *back_buf; + struct mutex spi_tx_mutex; + struct mutex te_mutex; + struct mdss_spi_img_data image_data; + struct completion spi_panel_te; + unsigned char *return_buf; + struct ion_client *iclient; + wait_queue_head_t tx_done_waitq; + + bool vsync_enable; + ktime_t vsync_time; + unsigned int vsync_status; + int vsync_per_te; + int te_count; + struct kernfs_node *vsync_event_sd; +}; + +int mdss_spi_panel_kickoff(struct msm_fb_data_type *mfd, + struct mdp_display_commit *data); +void mdss_spi_vsync_enable(struct mdss_panel_data *pdata, int enable); +void enable_spi_panel_te_irq(struct spi_panel_data *ctrl_pdata, bool enable); +void mdss_spi_tx_fb_complete(void *ctx); +int mdss_spi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state); +int mdss_spi_panel_pinctrl_set_state(struct spi_panel_data *ctrl_pdata, + bool active); +int mdss_spi_wait_tx_done(struct spi_panel_data *ctrl_pdata); +int mdss_spi_panel_reset(struct mdss_panel_data *pdata, int enable); +int mdss_spi_panel_on(struct mdss_panel_data *pdata); +int mdss_spi_panel_off(struct mdss_panel_data *pdata); + +#endif /* End of __MDSS_SPI_PANEL_H__ */ diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index f0b3a0b9d42f8b8b3ea9c6943a4010c3d7e15f18..36c9fbf70d44badd229661d65546d10e067d8728 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -490,7 +490,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, tell_host(vb, vb->inflate_vq); /* balloon's page migration 2nd step -- deflate "page" */ + spin_lock_irqsave(&vb_dev_info->pages_lock, 
flags); balloon_page_delete(page); + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb, vb->pfns, page); tell_host(vb, vb->deflate_vq); diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index 2a20fc163ed0b50b1d078b4179dcced844ad2159..4c62ad74aec0c135bc01175785eb14d32a580bfb 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -102,10 +102,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd, { struct da9063 *da9063 = watchdog_get_drvdata(wdd); unsigned int selector; - int ret; + int ret = 0; selector = da9063_wdt_timeout_to_sel(timeout); - ret = _da9063_wdt_set_timeout(da9063, selector); + + /* + * There are two cases when a set_timeout() will be called: + * 1. The watchdog is off and someone wants to set the timeout for the + * further use. + * 2. The watchdog is already running and a new timeout value should be + * set. + * + * The watchdog can't store a timeout value not equal zero without + * enabling the watchdog, so the timeout must be buffered by the driver. 
+ */ + if (watchdog_active(wdd)) + ret = _da9063_wdt_set_timeout(da9063, selector); + if (ret) dev_err(da9063->dev, "Failed to set watchdog timeout (err = %d)\n", ret); diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index b7c816f3940402f21002cb6e6cbc391b35cb32b9..6dd63981787a2f7f2b15e87c6f955ad36fb3510e 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -148,6 +148,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param) cmd); goto out; } + } else { + unsigned int inr = _IOC_NR(cmd); + + if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD || + inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD || + inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) { + err = -EINVAL; + goto out; + } } err = 0; @@ -284,7 +293,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp, dev_t devid; int err, fd; - /* param->path has already been checked */ + /* param->path has been checked in validate_dev_ioctl() */ + if (!param->openmount.devid) return -EINVAL; @@ -446,10 +456,7 @@ static int autofs_dev_ioctl_requester(struct file *fp, dev_t devid; int err = -ENOENT; - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { - err = -EINVAL; - goto out; - } + /* param->path has been checked in validate_dev_ioctl() */ devid = sbi->sb->s_dev; @@ -534,10 +541,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, unsigned int devid, magic; int err = -ENOENT; - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { - err = -EINVAL; - goto out; - } + /* param->path has been checked in validate_dev_ioctl() */ name = param->path; type = param->ismountpoint.in.type; diff --git a/fs/block_dev.c b/fs/block_dev.c index 789f55e851aeffb6b1212403188638d12a1d2540..3323eec5c164991e45228ec53c68f8ec1958e313 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -231,7 +231,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, ret = bio_iov_iter_get_pages(&bio, iter); if (unlikely(ret)) - return ret; + goto out; ret = bio.bi_iter.bi_size; if (iov_iter_rw(iter) == READ) { @@ -260,12 
+260,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, put_page(bvec->bv_page); } - if (vecs != inline_vecs) - kfree(vecs); - if (unlikely(bio.bi_status)) ret = blk_status_to_errno(bio.bi_status); +out: + if (vecs != inline_vecs) + kfree(vecs); + bio_uninit(&bio); return ret; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7fa50e12f18e7371032438689e46ea158684862e..5b62e06567a379bf3003c57c01083ee68e325764 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4280,6 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, struct extent_map *em; u64 start = page_offset(page); u64 end = start + PAGE_SIZE - 1; + struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); if (gfpflags_allow_blocking(mask) && page->mapping->host->i_size > SZ_16M) { @@ -4302,6 +4303,8 @@ int try_release_extent_mapping(struct extent_map_tree *map, extent_map_end(em) - 1, EXTENT_LOCKED | EXTENT_WRITEBACK, 0, NULL)) { + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &btrfs_inode->runtime_flags); remove_extent_mapping(map, em); /* once for the rb tree */ free_extent_map(em); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f5b90dc137ec5c80fdb54258db9faae38554f5e6..28a58f40f3a4746b2586f8155840ed7c692bea2e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3162,6 +3162,9 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) /* once for the tree */ btrfs_put_ordered_extent(ordered_extent); + /* Try to release some metadata so we don't get an OOM but don't wait */ + btrfs_btree_balance_dirty_nodelay(fs_info); + return ret; } @@ -4737,7 +4740,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, extent_num_bytes, 0, btrfs_header_owner(leaf), ino, extent_offset); - BUG_ON(ret); + if (ret) { + btrfs_abort_transaction(trans, ret); + break; + } if (btrfs_should_throttle_delayed_refs(trans, fs_info)) btrfs_async_run_delayed_refs(fs_info, trans->delayed_ref_updates * 2, @@ -5496,13 
+5502,18 @@ void btrfs_evict_inode(struct inode *inode) trans->block_rsv = rsv; ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); - if (ret != -ENOSPC && ret != -EAGAIN) + if (ret) { + trans->block_rsv = &fs_info->trans_block_rsv; + btrfs_end_transaction(trans); + btrfs_btree_balance_dirty(fs_info); + if (ret != -ENOSPC && ret != -EAGAIN) { + btrfs_orphan_del(NULL, BTRFS_I(inode)); + btrfs_free_block_rsv(fs_info, rsv); + goto no_delete; + } + } else { break; - - trans->block_rsv = &fs_info->trans_block_rsv; - btrfs_end_transaction(trans); - trans = NULL; - btrfs_btree_balance_dirty(fs_info); + } } btrfs_free_block_rsv(fs_info, rsv); @@ -5511,12 +5522,8 @@ void btrfs_evict_inode(struct inode *inode) * Errors here aren't a big deal, it just means we leave orphan items * in the tree. They will be cleaned up on the next mount. */ - if (ret == 0) { - trans->block_rsv = root->orphan_block_rsv; - btrfs_orphan_del(trans, BTRFS_I(inode)); - } else { - btrfs_orphan_del(NULL, BTRFS_I(inode)); - } + trans->block_rsv = root->orphan_block_rsv; + btrfs_orphan_del(trans, BTRFS_I(inode)); trans->block_rsv = &fs_info->trans_block_rsv; if (!(root == fs_info->tree_root || diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index e172d4843eae2d8eb6f0d29dce38fb7f693f4ed0..473ad5985aa378e7f412efa7ee1c469dfa595a63 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2499,6 +2499,21 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, spin_unlock(&fs_info->qgroup_lock); } +/* + * Check if the leaf is the last leaf. Which means all node pointers + * are at their last position. + */ +static bool is_last_leaf(struct btrfs_path *path) +{ + int i; + + for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { + if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) + return false; + } + return true; +} + /* * returns < 0 on error, 0 when more leafs are to be scanned. * returns 1 when done. 
@@ -2512,6 +2527,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct ulist *roots = NULL; struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); u64 num_bytes; + bool done; int slot; int ret; @@ -2540,6 +2556,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, mutex_unlock(&fs_info->qgroup_rescan_lock); return ret; } + done = is_last_leaf(path); btrfs_item_key_to_cpu(path->nodes[0], &found, btrfs_header_nritems(path->nodes[0]) - 1); @@ -2586,6 +2603,8 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, } btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); + if (done && !ret) + ret = 1; return ret; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index fc4c14a72366a90d54d52cd0b8f4122be8afb96c..e1b4a59485dffc85208cad4c1b45084080e878bd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3041,8 +3041,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, mutex_unlock(&log_root_tree->log_mutex); /* - * The barrier before waitqueue_active is implied by mutex_unlock + * The barrier before waitqueue_active is needed so all the updates + * above are seen by the woken threads. It might not be necessary, but + * proving that seems to be hard. */ + smp_mb(); if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) wake_up(&log_root_tree->log_commit_wait[index2]); out: @@ -3053,8 +3056,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, mutex_unlock(&root->log_mutex); /* - * The barrier before waitqueue_active is implied by mutex_unlock + * The barrier before waitqueue_active is needed so all the updates + * above are seen by the woken threads. It might not be necessary, but + * proving that seems to be hard. 
*/ + smp_mb(); if (waitqueue_active(&root->log_commit_wait[index1])) wake_up(&root->log_commit_wait[index1]); return ret; @@ -4214,6 +4220,110 @@ static int log_one_extent(struct btrfs_trans_handle *trans, return ret; } +/* + * Log all prealloc extents beyond the inode's i_size to make sure we do not + * lose them after doing a fast fsync and replaying the log. We scan the + * subvolume's root instead of iterating the inode's extent map tree because + * otherwise we can log incorrect extent items based on extent map conversion. + * That can happen due to the fact that extent maps are merged when they + * are not in the extent map tree's list of modified extents. + */ +static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, + struct btrfs_path *path) +{ + struct btrfs_root *root = inode->root; + struct btrfs_key key; + const u64 i_size = i_size_read(&inode->vfs_inode); + const u64 ino = btrfs_ino(inode); + struct btrfs_path *dst_path = NULL; + u64 last_extent = (u64)-1; + int ins_nr = 0; + int start_slot; + int ret; + + if (!(inode->flags & BTRFS_INODE_PREALLOC)) + return 0; + + key.objectid = ino; + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = i_size; + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + while (true) { + struct extent_buffer *leaf = path->nodes[0]; + int slot = path->slots[0]; + + if (slot >= btrfs_header_nritems(leaf)) { + if (ins_nr > 0) { + ret = copy_items(trans, inode, dst_path, path, + &last_extent, start_slot, + ins_nr, 1, 0); + if (ret < 0) + goto out; + ins_nr = 0; + } + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + if (ret > 0) { + ret = 0; + break; + } + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid > ino) + break; + if (WARN_ON_ONCE(key.objectid < ino) || + key.type < BTRFS_EXTENT_DATA_KEY || + key.offset < i_size) { + path->slots[0]++; + continue; + } + if (last_extent == (u64)-1) { + last_extent = 
key.offset; + /* + * Avoid logging extent items logged in past fsync calls + * and leading to duplicate keys in the log tree. + */ + do { + ret = btrfs_truncate_inode_items(trans, + root->log_root, + &inode->vfs_inode, + i_size, + BTRFS_EXTENT_DATA_KEY); + } while (ret == -EAGAIN); + if (ret) + goto out; + } + if (ins_nr == 0) + start_slot = slot; + ins_nr++; + path->slots[0]++; + if (!dst_path) { + dst_path = btrfs_alloc_path(); + if (!dst_path) { + ret = -ENOMEM; + goto out; + } + } + } + if (ins_nr > 0) { + ret = copy_items(trans, inode, dst_path, path, &last_extent, + start_slot, ins_nr, 1, 0); + if (ret > 0) + ret = 0; + } +out: + btrfs_release_path(path); + btrfs_free_path(dst_path); + return ret; +} + static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_inode *inode, @@ -4256,6 +4366,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, if (em->generation <= test_gen) continue; + /* We log prealloc extents beyond eof later. */ + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && + em->start >= i_size_read(&inode->vfs_inode)) + continue; + if (em->start < logged_start) logged_start = em->start; if ((em->start + em->len - 1) > logged_end) @@ -4268,31 +4383,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, num++; } - /* - * Add all prealloc extents beyond the inode's i_size to make sure we - * don't lose them after doing a fast fsync and replaying the log. - */ - if (inode->flags & BTRFS_INODE_PREALLOC) { - struct rb_node *node; - - for (node = rb_last(&tree->map); node; node = rb_prev(node)) { - em = rb_entry(node, struct extent_map, rb_node); - if (em->start < i_size_read(&inode->vfs_inode)) - break; - if (!list_empty(&em->list)) - continue; - /* Same as above loop. 
*/ - if (++num > 32768) { - list_del_init(&tree->modified_extents); - ret = -EFBIG; - goto process; - } - refcount_inc(&em->refs); - set_bit(EXTENT_FLAG_LOGGING, &em->flags); - list_add_tail(&em->list, &extents); - } - } - list_sort(NULL, &extents, extent_cmp); btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end); /* @@ -4337,6 +4427,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, up_write(&inode->dio_sem); btrfs_release_path(path); + if (!ret) + ret = btrfs_log_prealloc_extents(trans, inode, path); + return ret; } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 48ffe720bf09c1fb0d8d713cf5477fd415bcbde4..b79b1211a2b55540ddd3194ecf16b7e1ef0ce467 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -254,7 +254,7 @@ static int parse_fsopt_token(char *c, void *private) case Opt_rasize: if (intval < 0) return -EINVAL; - fsopt->rasize = ALIGN(intval + PAGE_SIZE - 1, PAGE_SIZE); + fsopt->rasize = ALIGN(intval, PAGE_SIZE); break; case Opt_caps_wanted_delay_min: if (intval < 1) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 0480cd9a9e81372b7230b759ac5ea7cfeb7b137b..71b81980787fce27a478e080faf276e80f9d65a1 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -338,10 +338,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, return rc; /* BB eventually switch this to SMB2 specific small buf size */ - if (smb2_command == SMB2_SET_INFO) - *request_buf = cifs_buf_get(); - else - *request_buf = cifs_small_buf_get(); + *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? 
*/ return -ENOMEM; @@ -3171,7 +3168,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, } rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); - cifs_buf_release(req); + cifs_small_buf_release(req); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 0758d32ad01bc284bb7aeb1ae4ced74000cb6195..2f646b1248bca7e85403c547ff94d13d7a54ec3f 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -433,8 +433,17 @@ int fscrypt_initialize(unsigned int cop_flags) */ static int __init fscrypt_init(void) { + /* + * Use an unbound workqueue to allow bios to be decrypted in parallel + * even when they happen to complete on the same CPU. This sacrifices + * locality, but it's worthwhile since decryption is CPU-intensive. + * + * Also use a high-priority workqueue to prioritize decryption work, + * which blocks reads from completing, over regular application tasks. + */ fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue", - WQ_HIGHPRI, 0); + WQ_UNBOUND | WQ_HIGHPRI, + num_online_cpus()); if (!fscrypt_read_workqueue) goto fail; diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c index 62dae83cf732f4c51273427a75a38ffa55d1563e..ae28cdfd83e42d27bef530d7d733f2759af74dd5 100644 --- a/fs/crypto/fscrypt_ice.c +++ b/fs/crypto/fscrypt_ice.c @@ -126,16 +126,29 @@ void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun) } EXPORT_SYMBOL(fscrypt_set_ice_dun); +void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) +{ +#ifdef CONFIG_DM_DEFAULT_KEY + bio->bi_crypt_skip = bi_crypt_skip; +#endif +} +EXPORT_SYMBOL(fscrypt_set_ice_skip); + /* * This function will be used for filesystem when deciding to merge bios. * Basic assumption is, if inline_encryption is set, single bio has to * guarantee consecutive LBAs as well as ino|pg->index. 
*/ -bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted) +bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted, + int bi_crypt_skip) { if (!bio) return true; +#ifdef CONFIG_DM_DEFAULT_KEY + if (bi_crypt_skip != bio->bi_crypt_skip) + return false; +#endif /* if both of them are not encrypted, no further check is needed */ if (!bio_dun(bio) && !bio_encrypted) return true; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 9c9eafd6bd7659f79a5ed59a64c0c452448a14cd..70266a3355dc320ef141c4c2b225b5a3e9211003 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -379,6 +379,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb, return -EFSCORRUPTED; ext4_lock_group(sb, block_group); + if (buffer_verified(bh)) + goto verified; if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, desc, bh))) { ext4_unlock_group(sb, block_group); @@ -401,6 +403,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb, return -EFSCORRUPTED; } set_buffer_verified(bh); +verified: ext4_unlock_group(sb, block_group); return 0; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 8ee65447898719820d920e91605d2004221c86f1..2f46564d3fca98c228522fa97b2ff0336483677a 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -91,6 +91,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, return -EFSCORRUPTED; ext4_lock_group(sb, block_group); + if (buffer_verified(bh)) + goto verified; blk = ext4_inode_bitmap(sb, desc); if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, EXT4_INODES_PER_GROUP(sb) / 8)) { @@ -108,6 +110,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, return -EFSBADCRC; } set_buffer_verified(bh); +verified: ext4_unlock_group(sb, block_group); return 0; } diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 29ed8dc155c09bf0cc6701304d594fe64eee3a59..a2deb66b1a6a8c76db4c965f0a51484411380ae3 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -702,6 +702,10 @@ int 
ext4_try_to_write_inline_data(struct address_space *mapping, goto convert; } + ret = ext4_journal_get_write_access(handle, iloc.bh); + if (ret) + goto out; + flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, 0, flags); @@ -730,7 +734,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, out_up_read: up_read(&EXT4_I(inode)->xattr_sem); out: - if (handle) + if (handle && (ret != 1)) ext4_journal_stop(handle); brelse(iloc.bh); return ret; @@ -772,6 +776,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, ext4_write_unlock_xattr(inode, &no_expand); brelse(iloc.bh); + mark_inode_dirty(inode); out: return copied; } @@ -918,7 +923,6 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping, goto out; } - page = grab_cache_page_write_begin(mapping, 0, flags); if (!page) { ret = -ENOMEM; @@ -936,6 +940,9 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping, if (ret < 0) goto out_release_page; } + ret = ext4_journal_get_write_access(handle, iloc.bh); + if (ret) + goto out_release_page; up_read(&EXT4_I(inode)->xattr_sem); *pagep = page; @@ -956,7 +963,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied, struct page *page) { - int i_size_changed = 0; int ret; ret = ext4_write_inline_data_end(inode, pos, len, copied, page); @@ -974,10 +980,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, * But it's important to update i_size while still holding page lock: * page writeout could otherwise come in and zero beyond i_size. */ - if (pos+copied > inode->i_size) { + if (pos+copied > inode->i_size) i_size_write(inode, pos+copied); - i_size_changed = 1; - } unlock_page(page); put_page(page); @@ -987,8 +991,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, * ordering of page lock and transaction start for journaling * filesystems. 
*/ - if (i_size_changed) - mark_inode_dirty(inode); + mark_inode_dirty(inode); return copied; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7e777ea246146738d6ff742f47a0b991ba9ff3d3..332d0224f595f52339522bc4a90c77bdc7e2ab81 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1400,10 +1400,11 @@ static int ext4_write_end(struct file *file, loff_t old_size = inode->i_size; int ret = 0, ret2; int i_size_changed = 0; + int inline_data = ext4_has_inline_data(inode); trace_android_fs_datawrite_end(inode, pos, len); trace_ext4_write_end(inode, pos, len, copied); - if (ext4_has_inline_data(inode)) { + if (inline_data) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); if (ret < 0) { @@ -1431,7 +1432,7 @@ static int ext4_write_end(struct file *file, * ordering of page lock and transaction start for journaling * filesystems. */ - if (i_size_changed) + if (i_size_changed || inline_data) ext4_mark_inode_dirty(handle, inode); if (pos + len > inode->i_size && ext4_can_truncate(inode)) @@ -1505,6 +1506,7 @@ static int ext4_journalled_write_end(struct file *file, int partial = 0; unsigned from, to; int size_changed = 0; + int inline_data = ext4_has_inline_data(inode); trace_android_fs_datawrite_end(inode, pos, len); trace_ext4_journalled_write_end(inode, pos, len, copied); @@ -1513,7 +1515,7 @@ static int ext4_journalled_write_end(struct file *file, BUG_ON(!ext4_handle_valid(handle)); - if (ext4_has_inline_data(inode)) { + if (inline_data) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); if (ret < 0) { @@ -1544,7 +1546,7 @@ static int ext4_journalled_write_end(struct file *file, if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); - if (size_changed) { + if (size_changed || inline_data) { ret2 = ext4_mark_inode_dirty(handle, inode); if (!ret) ret = ret2; @@ -2041,11 +2043,7 @@ static int __ext4_journalled_writepage(struct page *page, } if (inline_data) { - BUFFER_TRACE(inode_bh, "get write access"); - ret = 
ext4_journal_get_write_access(handle, inode_bh); - - err = ext4_handle_dirty_metadata(handle, inode, inode_bh); - + ret = ext4_mark_inode_dirty(handle, inode); } else { ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, do_journal_get_write_access); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 491d09a1f7bf24e5c2a16d2b48f0f731293e6a67..5951e6316ead377640a7dde1062ad2a0cd190c8c 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2304,7 +2304,7 @@ static int ext4_check_descriptors(struct super_block *sb, struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; - ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; + ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0); ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; @@ -4041,13 +4041,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount2; } } + sbi->s_gdb_count = db_count; if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); ret = -EFSCORRUPTED; goto failed_mount2; } - sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 39dc4810ea5f3c353f4c7ba9311fe6d7cffb4017..d503bdfd3c57801d8b88d4a20e53083532fe3cda 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -451,6 +451,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) if (f2fs_may_encrypt_bio(inode, fio)) fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page)); + fscrypt_set_ice_skip(bio, fio->encrypted_page ? 
1 : 0); if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); @@ -473,6 +474,7 @@ int f2fs_submit_page_write(struct f2fs_io_info *fio) struct page *bio_page; struct inode *inode; bool bio_encrypted; + int bi_crypt_skip; u64 dun; int err = 0; @@ -499,6 +501,7 @@ int f2fs_submit_page_write(struct f2fs_io_info *fio) bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; inode = fio->page->mapping->host; dun = PG_DUN(inode, fio->page); + bi_crypt_skip = fio->encrypted_page ? 1 : 0; bio_encrypted = f2fs_may_encrypt_bio(inode, fio); /* set submitted = true as a return value */ @@ -512,7 +515,7 @@ int f2fs_submit_page_write(struct f2fs_io_info *fio) __submit_merged_bio(io); /* ICE support */ - if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted)) + if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted, bi_crypt_skip)) __submit_merged_bio(io); alloc_new: @@ -528,7 +531,7 @@ int f2fs_submit_page_write(struct f2fs_io_info *fio) fio->type, fio->temp); if (bio_encrypted) fscrypt_set_ice_dun(inode, io->bio, dun); - + fscrypt_set_ice_skip(io->bio, bi_crypt_skip); io->fio = *fio; } @@ -1539,7 +1542,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping, dun = PG_DUN(inode, page); bio_encrypted = f2fs_may_encrypt_bio(inode, NULL); - if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) { + if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) { __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 5895204c39d1141460557edf236a9c684cd07c09..7e320817af207e00610a4acd210bb695f2163a9e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -180,6 +180,7 @@ enum { #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ #define DEF_MAX_DISCARD_LEN 512 /* Max. 
2MB per discard */ #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ +#define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */ #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ #define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */ #define DEF_CP_INTERVAL 60 /* 60 secs */ @@ -282,6 +283,7 @@ enum { struct discard_policy { int type; /* type of discard */ unsigned int min_interval; /* used for candidates exist */ + unsigned int mid_interval; /* used for device busy */ unsigned int max_interval; /* used for candidates not exist */ unsigned int max_requests; /* # of discards issued per round */ unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */ diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index ed72fc2cc68de5fda4430da920114a0dd27b35a6..490f8afed2ce91d98ac5b9e76d1b2bee2dcd498f 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1669,6 +1669,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) inode_lock(inode); + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + if (f2fs_is_atomic_file(inode)) goto out; @@ -1698,6 +1700,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) stat_inc_atomic_write(inode); stat_update_max_atomic_write(inode); out: + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -1850,9 +1853,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) if (get_user(in, (__u32 __user *)arg)) return -EFAULT; - ret = mnt_want_write_file(filp); - if (ret) - return ret; + if (in != F2FS_GOING_DOWN_FULLSYNC) { + ret = mnt_want_write_file(filp); + if (ret) + return ret; + } switch (in) { case F2FS_GOING_DOWN_FULLSYNC: @@ -1893,7 +1898,8 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) f2fs_update_time(sbi, REQ_TIME); out: - mnt_drop_write_file(filp); + if (in != F2FS_GOING_DOWN_FULLSYNC) + mnt_drop_write_file(filp); return ret; } @@ -2567,7 +2573,9 @@ static int 
f2fs_ioc_setproject(struct file *filp, __u32 projid) } f2fs_put_page(ipage, 1); - dquot_initialize(inode); + err = dquot_initialize(inode); + if (err) + goto out_unlock; transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); if (!IS_ERR(transfer_to[PRJQUOTA])) { diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 66044faf2b71c552118d390affd272c334083a5e..aede4ed719b69b549abf999778c18ccfab7fb977 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -779,9 +779,14 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type, set_cold_data(page); err = do_write_data_page(&fio); - if (err == -ENOMEM && is_dirty) { - congestion_wait(BLK_RW_ASYNC, HZ/50); - goto retry; + if (err) { + clear_cold_data(page); + if (err == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry; + } + if (is_dirty) + set_page_dirty(page); } } out: diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index b1e58b10425fe0f2deaa20ec552cff35792f3494..218d3aba46f763741fad0d89168508a731af044e 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -230,6 +230,8 @@ static int __revoke_inmem_pages(struct inode *inode, lock_page(page); + f2fs_wait_on_page_writeback(page, DATA, true); + if (recover) { struct dnode_of_data dn; struct node_info ni; @@ -478,6 +480,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) { + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) + return; + /* try to shrink extent cache when there is no enough memory */ if (!available_free_memory(sbi, EXTENT_CACHE)) f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); @@ -929,6 +934,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, if (discard_type == DPOLICY_BG) { dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME; dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; dpolicy->io_aware = true; dpolicy->sync = false; @@ -938,6 +944,7 @@ static void __init_discard_policy(struct 
f2fs_sb_info *sbi, } } else if (discard_type == DPOLICY_FORCE) { dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME; dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; dpolicy->io_aware = false; } else if (discard_type == DPOLICY_FSTRIM) { @@ -1377,6 +1384,8 @@ static int issue_discard_thread(void *data) struct discard_policy dpolicy; unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME; int issued; + unsigned long interval = sbi->interval_time[REQ_TIME] * HZ; + long delta; set_freezable(); @@ -1404,9 +1413,15 @@ static int issue_discard_thread(void *data) sb_start_intwrite(sbi->sb); issued = __issue_discard_cmd(sbi, &dpolicy); - if (issued) { + if (issued > 0) { __wait_all_discard_cmd(sbi, &dpolicy); wait_ms = dpolicy.min_interval; + } else if (issued == -1){ + delta = (sbi->last_time[REQ_TIME] + interval) - jiffies; + if (delta > 0) + wait_ms = jiffies_to_msecs(delta); + else + wait_ms = dpolicy.mid_interval; } else { wait_ms = dpolicy.max_interval; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a26bd6dda9b4a842dc40e9bebd945aa7fb4d7b0b..e9ea8b64512b7b002af411b2774423efc5000204 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1344,6 +1344,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_printf(seq, ",fsync_mode=%s", "posix"); else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) seq_printf(seq, ",fsync_mode=%s", "strict"); + else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) + seq_printf(seq, ",fsync_mode=%s", "nobarrier"); return 0; } @@ -3073,6 +3075,12 @@ static int __init init_f2fs_fs(void) { int err; + if (PAGE_SIZE != F2FS_BLKSIZE) { + printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n", + PAGE_SIZE, F2FS_BLKSIZE); + return -EINVAL; + } + f2fs_build_trace_ios(); err = init_inodecache(); diff --git a/fs/fat/inode.c b/fs/fat/inode.c index c7a4dee206b9023eecb13da5e7f8adeb9471e2d8..3b40937b942a428c45907daf945c16b662e694c9 100644 --- 
a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -696,13 +696,21 @@ static void fat_set_state(struct super_block *sb, brelse(bh); } +static void fat_reset_iocharset(struct fat_mount_options *opts) +{ + if (opts->iocharset != fat_default_iocharset) { + /* Note: opts->iocharset can be NULL here */ + kfree(opts->iocharset); + opts->iocharset = fat_default_iocharset; + } +} + static void delayed_free(struct rcu_head *p) { struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu); unload_nls(sbi->nls_disk); unload_nls(sbi->nls_io); - if (sbi->options.iocharset != fat_default_iocharset) - kfree(sbi->options.iocharset); + fat_reset_iocharset(&sbi->options); kfree(sbi); } @@ -1117,7 +1125,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, opts->fs_fmask = opts->fs_dmask = current_umask(); opts->allow_utime = -1; opts->codepage = fat_default_codepage; - opts->iocharset = fat_default_iocharset; + fat_reset_iocharset(opts); if (is_vfat) { opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; opts->rodir = 0; @@ -1274,8 +1282,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, /* vfat specific */ case Opt_charset: - if (opts->iocharset != fat_default_iocharset) - kfree(opts->iocharset); + fat_reset_iocharset(opts); iocharset = match_strdup(&args[0]); if (!iocharset) return -ENOMEM; @@ -1866,8 +1873,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, iput(fat_inode); unload_nls(sbi->nls_io); unload_nls(sbi->nls_disk); - if (sbi->options.iocharset != fat_default_iocharset) - kfree(sbi->options.iocharset); + fat_reset_iocharset(&sbi->options); sb->s_fs_info = NULL; kfree(sbi); return error; diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index c60f3d32ee911192c0cd8dae3b7cb11c0f416411..a6797986b625a34d19e097050c58f582c177c30c 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) if 
(size > PSIZE) { /* * To keep the rest of the code simple. Allocate a - * contiguous buffer to work with + * contiguous buffer to work with. Make the buffer large + * enough to make use of the whole extent. */ - ea_buf->xattr = kmalloc(size, GFP_KERNEL); + ea_buf->max_size = (size + sb->s_blocksize - 1) & + ~(sb->s_blocksize - 1); + + ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL); if (ea_buf->xattr == NULL) return -ENOMEM; ea_buf->flag = EA_MALLOC; - ea_buf->max_size = (size + sb->s_blocksize - 1) & - ~(sb->s_blocksize - 1); if (ea_size == 0) return 0; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 928bbc397818ad252eb64e4ceadbe9464d57ca97..43fbf44950904a1cc448e413f8ff06b63ce782fd 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -745,6 +745,13 @@ static int nfs41_sequence_process(struct rpc_task *task, slot->slot_nr, slot->seq_nr); goto out_retry; + case -NFS4ERR_RETRY_UNCACHED_REP: + case -NFS4ERR_SEQ_FALSE_RETRY: + /* + * The server thinks we tried to replay a request. + * Retry the call after bumping the sequence ID. + */ + goto retry_new_seq; case -NFS4ERR_BADSLOT: /* * The slot id we used was probably retired. Try again @@ -769,10 +776,6 @@ static int nfs41_sequence_process(struct rpc_task *task, goto retry_nowait; } goto session_recover; - case -NFS4ERR_SEQ_FALSE_RETRY: - if (interrupted) - goto retry_new_seq; - goto session_recover; default: /* Just update the slot sequence no. 
*/ slot->seq_done = 1; @@ -2692,7 +2695,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (ret != 0) goto out; - state = nfs4_opendata_to_nfs4_state(opendata); + state = _nfs4_opendata_to_nfs4_state(opendata); ret = PTR_ERR(state); if (IS_ERR(state)) goto out; @@ -2728,6 +2731,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, nfs4_schedule_stateid_recovery(server, state); } out: + nfs4_sequence_free_slot(&opendata->o_res.seq_res); return ret; } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 7b34534210ce7131987a0e4834c6f5ab463de377..96867fb159bf71b82a92e9ee41678e41d3ebfac9 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1126,7 +1126,7 @@ _pnfs_return_layout(struct inode *ino) LIST_HEAD(tmp_list); nfs4_stateid stateid; int status = 0; - bool send; + bool send, valid_layout; dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); @@ -1147,6 +1147,7 @@ _pnfs_return_layout(struct inode *ino) goto out_put_layout_hdr; spin_lock(&ino->i_lock); } + valid_layout = pnfs_layout_is_valid(lo); pnfs_clear_layoutcommit(ino, &tmp_list); pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0); @@ -1160,7 +1161,8 @@ _pnfs_return_layout(struct inode *ino) } /* Don't send a LAYOUTRETURN if list was initially empty */ - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) { + if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) || + !valid_layout) { spin_unlock(&ino->i_lock); dprintk("NFS: %s no layout segments to return\n", __func__); goto out_put_layout_hdr; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f6588cc6816c885ff8625ba33db318a85cdbe0b1..c1e92333401206dcb15a88de849de8edf75a492d 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1586,6 +1586,8 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp, gdev->gd_maxcount = be32_to_cpup(p++); num = be32_to_cpup(p++); if (num) { + if (num > 1000) + goto xdr_error; READ_BUF(4 * num); gdev->gd_notify_types = be32_to_cpup(p++); for (i = 1; i < 
num; i++) { diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 88a31e9340a0fb8c658a48b764e66b0926345848..d1516327b7875c9ce3e948c66a6620d9b5b8d5da 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -134,6 +134,19 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, return err; } +static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + int ret = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + down_read(&oi->ip_alloc_sem); + ret = ocfs2_get_block(inode, iblock, bh_result, create); + up_read(&oi->ip_alloc_sem); + + return ret; +} + int ocfs2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { @@ -2128,7 +2141,7 @@ static void ocfs2_dio_free_write_ctx(struct inode *inode, * called like this: dio->get_blocks(dio->inode, fs_startblk, * fs_count, map_bh, dio->rw == WRITE); */ -static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock, +static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -2154,12 +2167,9 @@ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock, * while file size will be changed. */ if (pos + total_len <= i_size_read(inode)) { - down_read(&oi->ip_alloc_sem); - /* This is the fast path for re-write. */ - ret = ocfs2_get_block(inode, iblock, bh_result, create); - - up_read(&oi->ip_alloc_sem); + /* This is the fast path for re-write. 
*/ + ret = ocfs2_lock_get_block(inode, iblock, bh_result, create); if (buffer_mapped(bh_result) && !buffer_new(bh_result) && ret == 0) @@ -2424,9 +2434,9 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) return 0; if (iov_iter_rw(iter) == READ) - get_block = ocfs2_get_block; + get_block = ocfs2_lock_get_block; else - get_block = ocfs2_dio_get_block; + get_block = ocfs2_dio_wr_get_block; return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, get_block, diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index b17d180bdc163544f05b341d98fe79e95ad5c582..c204ac9b49e5cd5c7f119c606a69a725025752dc 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c @@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "panic", /* O2NM_FENCE_PANIC */ }; +static inline void o2nm_lock_subsystem(void); +static inline void o2nm_unlock_subsystem(void); + struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; @@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ - return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); + if (node->nd_item.ci_parent) + return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); + else + return NULL; } enum { @@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); + struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; @@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ + o2nm_lock_subsystem(); + cluster = to_o2nm_cluster_from_node(node); + 
if (!cluster) { + o2nm_unlock_subsystem(); + return -EINVAL; + } + write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; @@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); + o2nm_unlock_subsystem(); + if (ret) return ret; @@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, size_t count) { struct o2nm_node *node = to_o2nm_node(item); - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); + struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; @@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } + o2nm_lock_subsystem(); + cluster = to_o2nm_cluster_from_node(node); + if (!cluster) { + o2nm_unlock_subsystem(); + return -EINVAL; + } + ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) @@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); + o2nm_unlock_subsystem(); + if (ret) return ret; @@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); + struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; @@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ + o2nm_lock_subsystem(); + cluster = to_o2nm_cluster_from_node(node); + if (!cluster) { + ret = -EINVAL; + goto out; + } + /* the only failure case is trying to set a new 
local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && - cluster->cl_local_node != node->nd_num) - return -EBUSY; + cluster->cl_local_node != node->nd_num) { + ret = -EBUSY; + goto out; + } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) - return ret; + goto out; } if (!tmp && cluster->cl_has_local && @@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, cluster->cl_local_node = node->nd_num; } - return count; + ret = count; + +out: + o2nm_unlock_subsystem(); + return ret; } CONFIGFS_ATTR(o2nm_node_, num); @@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = { }, }; +static inline void o2nm_lock_subsystem(void) +{ + mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); +} + +static inline void o2nm_unlock_subsystem(void) +{ + mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); +} + int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index b8f8d666e8d45b0a03fa00513ca910d56df59baa..ba20393d60efc725f41af10ec30ba63fad581f01 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -232,6 +232,7 @@ static void ovl_put_super(struct super_block *sb) kfree(ufs); } +/* Sync real dirty inodes in upper filesystem (if it exists) */ static int ovl_sync_fs(struct super_block *sb, int wait) { struct ovl_fs *ufs = sb->s_fs_info; @@ -240,14 +241,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait) if (!ufs->upper_mnt) return 0; - upper_sb = ufs->upper_mnt->mnt_sb; - if (!upper_sb->s_op->sync_fs) + + /* + * If this is a sync(2) call or an emergency sync, all the super blocks + * will be iterated, including upper_sb, so no need to do anything. 
+ * + * If this is a syncfs(2) call, then we do need to call + * sync_filesystem() on upper_sb, but enough if we do it when being + * called with wait == 1. + */ + if (!wait) return 0; - /* real inodes have already been synced by sync_filesystem(ovl_sb) */ + upper_sb = ufs->upper_mnt->mnt_sb; + down_read(&upper_sb->s_umount); - ret = upper_sb->s_op->sync_fs(upper_sb, wait); + ret = sync_filesystem(upper_sb); up_read(&upper_sb->s_umount); + return ret; } diff --git a/fs/proc/base.c b/fs/proc/base.c index d1ab37c8a52c5f24ae64bc60f087b0b14a27012c..97809bd5ea1a8cc27753c16c2a39bcda7a10ea6f 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2965,6 +2965,52 @@ static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns, } #endif /* CONFIG_TASK_IO_ACCOUNTING */ +#ifdef CONFIG_DETECT_HUNG_TASK +static ssize_t proc_hung_task_detection_enabled_read(struct file *file, + char __user *buf, size_t count, loff_t *ppos) +{ + struct task_struct *task = get_proc_task(file_inode(file)); + char buffer[PROC_NUMBUF]; + size_t len; + bool hang_detection_enabled; + + if (!task) + return -ESRCH; + hang_detection_enabled = task->hang_detection_enabled; + put_task_struct(task); + + len = snprintf(buffer, sizeof(buffer), "%d\n", hang_detection_enabled); + + return simple_read_from_buffer(buf, sizeof(buffer), ppos, buffer, len); +} + +static ssize_t proc_hung_task_detection_enabled_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct task_struct *task; + bool hang_detection_enabled; + int rv; + + rv = kstrtobool_from_user(buf, count, &hang_detection_enabled); + if (rv < 0) + return rv; + + task = get_proc_task(file_inode(file)); + if (!task) + return -ESRCH; + task->hang_detection_enabled = hang_detection_enabled; + put_task_struct(task); + + return count; +} + +static const struct file_operations proc_hung_task_detection_enabled_operations = { + .read = proc_hung_task_detection_enabled_read, + .write = 
proc_hung_task_detection_enabled_write, + .llseek = generic_file_llseek, +}; +#endif + #ifdef CONFIG_USER_NS static int proc_id_map_open(struct inode *inode, struct file *file, const struct seq_operations *seq_ops) @@ -3218,6 +3264,10 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_HARDWALL ONE("hardwall", S_IRUGO, proc_pid_hardwall), #endif +#ifdef CONFIG_DETECT_HUNG_TASK + REG("hang_detection_enabled", 0666, + proc_hung_task_detection_enabled_operations), +#endif #ifdef CONFIG_USER_NS REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), @@ -3612,6 +3662,10 @@ static const struct pid_entry tid_base_stuff[] = { #ifdef CONFIG_HARDWALL ONE("hardwall", S_IRUGO, proc_pid_hardwall), #endif +#ifdef CONFIG_DETECT_HUNG_TASK + REG("hang_detection_enabled", 0666, + proc_hung_task_detection_enabled_operations), +#endif #ifdef CONFIG_USER_NS REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c18dda53afefddbbf197d0a5e9208490479c83b9..ca443a232de4bcb12b2a2d53670477d30dbc9df8 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1348,8 +1348,9 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (pte_swp_soft_dirty(pte)) flags |= PM_SOFT_DIRTY; entry = pte_to_swp_entry(pte); - frame = swp_type(entry) | - (swp_offset(entry) << MAX_SWAPFILES_SHIFT); + if (pm->show_pfn) + frame = swp_type(entry) | + (swp_offset(entry) << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; if (is_migration_entry(entry)) page = migration_entry_to_page(entry); @@ -1400,11 +1401,14 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION else if (is_swap_pmd(pmd)) { swp_entry_t entry = pmd_to_swp_entry(pmd); - unsigned long offset = swp_offset(entry); + unsigned long offset; - offset += (addr & ~PMD_MASK) >> 
PAGE_SHIFT; - frame = swp_type(entry) | - (offset << MAX_SWAPFILES_SHIFT); + if (pm->show_pfn) { + offset = swp_offset(entry) + + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + frame = swp_type(entry) | + (offset << MAX_SWAPFILES_SHIFT); + } flags |= PM_SWAP; if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; @@ -1422,10 +1426,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, err = add_to_pagemap(addr, &pme, pm); if (err) break; - if (pm->show_pfn && (flags & PM_PRESENT)) - frame++; - else if (flags & PM_SWAP) - frame += (1 << MAX_SWAPFILES_SHIFT); + if (pm->show_pfn) { + if (flags & PM_PRESENT) + frame++; + else if (flags & PM_SWAP) + frame += (1 << MAX_SWAPFILES_SHIFT); + } } spin_unlock(ptl); return err; diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 64f49cafbc5bff7b7c34786e56851408f4f20b55..cfb0c9ac2de430494e8c4609bb04aeb2a7c65027 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key) } /* %k */ -static void sprintf_le_key(char *buf, struct reiserfs_key *key) +static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key) { if (key) - sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id), - le32_to_cpu(key->k_objectid), le_offset(key), - le_type(key)); + return scnprintf(buf, size, "[%d %d %s %s]", + le32_to_cpu(key->k_dir_id), + le32_to_cpu(key->k_objectid), le_offset(key), + le_type(key)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } /* %K */ -static void sprintf_cpu_key(char *buf, struct cpu_key *key) +static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key) { if (key) - sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id, - key->on_disk_key.k_objectid, reiserfs_cpu_offset(key), - cpu_type(key)); + return scnprintf(buf, size, "[%d %d %s %s]", + key->on_disk_key.k_dir_id, + key->on_disk_key.k_objectid, + reiserfs_cpu_offset(key), cpu_type(key)); else - sprintf(buf, "[NULL]"); 
+ return scnprintf(buf, size, "[NULL]"); } -static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh) +static int scnprintf_de_head(char *buf, size_t size, + struct reiserfs_de_head *deh) { if (deh) - sprintf(buf, - "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", - deh_offset(deh), deh_dir_id(deh), deh_objectid(deh), - deh_location(deh), deh_state(deh)); + return scnprintf(buf, size, + "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", + deh_offset(deh), deh_dir_id(deh), + deh_objectid(deh), deh_location(deh), + deh_state(deh)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_item_head(char *buf, struct item_head *ih) +static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih) { if (ih) { - strcpy(buf, - (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*"); - sprintf_le_key(buf + strlen(buf), &(ih->ih_key)); - sprintf(buf + strlen(buf), ", item_len %d, item_location %d, " - "free_space(entry_count) %d", - ih_item_len(ih), ih_location(ih), ih_free_space(ih)); + char *p = buf; + char * const end = buf + size; + + p += scnprintf(p, end - p, "%s", + (ih_version(ih) == KEY_FORMAT_3_6) ? + "*3.6* " : "*3.5*"); + + p += scnprintf_le_key(p, end - p, &ih->ih_key); + + p += scnprintf(p, end - p, + ", item_len %d, item_location %d, free_space(entry_count) %d", + ih_item_len(ih), ih_location(ih), + ih_free_space(ih)); + return p - buf; } else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de) +static int scnprintf_direntry(char *buf, size_t size, + struct reiserfs_dir_entry *de) { char name[20]; memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen); name[de->de_namelen > 19 ? 
19 : de->de_namelen] = 0; - sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid); + return scnprintf(buf, size, "\"%s\"==>[%d %d]", + name, de->de_dir_id, de->de_objectid); } -static void sprintf_block_head(char *buf, struct buffer_head *bh) +static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh) { - sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ", - B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); + return scnprintf(buf, size, + "level=%d, nr_items=%d, free_space=%d rdkey ", + B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); } -static void sprintf_buffer_head(char *buf, struct buffer_head *bh) +static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh) { - sprintf(buf, - "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", - bh->b_bdev, bh->b_size, - (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), - bh->b_state, bh->b_page, - buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", - buffer_dirty(bh) ? "DIRTY" : "CLEAN", - buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); + return scnprintf(buf, size, + "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", + bh->b_bdev, bh->b_size, + (unsigned long long)bh->b_blocknr, + atomic_read(&(bh->b_count)), + bh->b_state, bh->b_page, + buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", + buffer_dirty(bh) ? "DIRTY" : "CLEAN", + buffer_locked(bh) ? 
"LOCKED" : "UNLOCKED"); } -static void sprintf_disk_child(char *buf, struct disk_child *dc) +static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc) { - sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc), - dc_size(dc)); + return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]", + dc_block_number(dc), dc_size(dc)); } static char *is_there_reiserfs_struct(char *fmt, int *what) @@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args) char *fmt1 = fmt_buf; char *k; char *p = error_buf; + char * const end = &error_buf[sizeof(error_buf)]; int what; spin_lock(&error_lock); - strcpy(fmt1, fmt); + if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) { + strscpy(error_buf, "format string too long", end - error_buf); + goto out_unlock; + } while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) { *k = 0; - p += vsprintf(p, fmt1, args); + p += vscnprintf(p, end - p, fmt1, args); switch (what) { case 'k': - sprintf_le_key(p, va_arg(args, struct reiserfs_key *)); + p += scnprintf_le_key(p, end - p, + va_arg(args, struct reiserfs_key *)); break; case 'K': - sprintf_cpu_key(p, va_arg(args, struct cpu_key *)); + p += scnprintf_cpu_key(p, end - p, + va_arg(args, struct cpu_key *)); break; case 'h': - sprintf_item_head(p, va_arg(args, struct item_head *)); + p += scnprintf_item_head(p, end - p, + va_arg(args, struct item_head *)); break; case 't': - sprintf_direntry(p, - va_arg(args, - struct reiserfs_dir_entry *)); + p += scnprintf_direntry(p, end - p, + va_arg(args, struct reiserfs_dir_entry *)); break; case 'y': - sprintf_disk_child(p, - va_arg(args, struct disk_child *)); + p += scnprintf_disk_child(p, end - p, + va_arg(args, struct disk_child *)); break; case 'z': - sprintf_block_head(p, - va_arg(args, struct buffer_head *)); + p += scnprintf_block_head(p, end - p, + va_arg(args, struct buffer_head *)); break; case 'b': - sprintf_buffer_head(p, - va_arg(args, struct buffer_head *)); + p += 
scnprintf_buffer_head(p, end - p, + va_arg(args, struct buffer_head *)); break; case 'a': - sprintf_de_head(p, - va_arg(args, - struct reiserfs_de_head *)); + p += scnprintf_de_head(p, end - p, + va_arg(args, struct reiserfs_de_head *)); break; } - p += strlen(p); fmt1 = k + 2; } - vsprintf(p, fmt1, args); + p += vscnprintf(p, end - p, fmt1, args); +out_unlock: spin_unlock(&error_lock); } diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 05e42441d1065df13838c0fc4af5db085e4c1570..9d9d4aa9a7ba156e9da734de6b3ccbb327aadb9e 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -340,6 +340,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer, TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); + if (unlikely(length < 0)) + return -EIO; + while (length) { entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); if (entry->error) { diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index bb2e77ee4209cb38f7f95e06d39c52624e84c0b4..cd3c5c8211a5fc21960ea4fe4a8b249c8ff737a1 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -195,7 +195,11 @@ static long long read_indexes(struct super_block *sb, int n, } for (i = 0; i < blocks; i++) { - int size = le32_to_cpu(blist[i]); + int size = squashfs_block_size(blist[i]); + if (size < 0) { + err = size; + goto failure; + } block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); } n -= blocks; @@ -368,7 +372,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) sizeof(size)); if (res < 0) return res; - return le32_to_cpu(size); + return squashfs_block_size(size); } /* Copy data into page cache */ diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c index 0ed6edbc5c7170aa06f191e33df193721206cb3f..0681feab4a8499562ccad42cd53b31f05d5776f3 100644 --- a/fs/squashfs/fragment.c +++ b/fs/squashfs/fragment.c @@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, u64 *fragment_block) { struct squashfs_sb_info *msblk = 
sb->s_fs_info; - int block = SQUASHFS_FRAGMENT_INDEX(fragment); - int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); - u64 start_block = le64_to_cpu(msblk->fragment_index[block]); + int block, offset, size; struct squashfs_fragment_entry fragment_entry; - int size; + u64 start_block; + + if (fragment >= msblk->fragments) + return -EIO; + block = SQUASHFS_FRAGMENT_INDEX(fragment); + offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); + + start_block = le64_to_cpu(msblk->fragment_index[block]); size = squashfs_read_metadata(sb, &fragment_entry, &start_block, &offset, sizeof(fragment_entry)); @@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, return size; *fragment_block = le64_to_cpu(fragment_entry.start_block); - size = le32_to_cpu(fragment_entry.size); - - return size; + return squashfs_block_size(fragment_entry.size); } diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 24d12fd1417767689778302abf77b21f1efd6350..4e6853f084d071b6291da9891b8a16c730901e48 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -129,6 +129,12 @@ #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) +static inline int squashfs_block_size(__le32 raw) +{ + u32 size = le32_to_cpu(raw); + return (size >> 25) ? -EIO : size; +} + /* * Inode number ops. 
Inodes consist of a compressed block number, and an * uncompressed offset within that block diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 8a6995de02773d0387ab36070c1ed1aac8228e6b..3b767ce1e46dbc8bc0c5f590351387320fe0920d 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -75,6 +75,7 @@ struct squashfs_sb_info { unsigned short block_log; long long bytes_used; unsigned int inodes; + unsigned int fragments; int xattr_ids; }; #endif diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index e2a0a7342bf80b37bf01bc3ecd422f0a566636b1..445ce580f06db5b8310dc82880edfeddaf457cbb 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) msblk->inode_table = le64_to_cpu(sblk->inode_table_start); msblk->directory_table = le64_to_cpu(sblk->directory_table_start); msblk->inodes = le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); flags = le16_to_cpu(sblk->flags); TRACE("Found valid superblock on %pg\n", sb->s_bdev); @@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); TRACE("Block size %d\n", msblk->block_size); TRACE("Number of inodes %d\n", msblk->inodes); - TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); + TRACE("Number of fragments %d\n", msblk->fragments); TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); @@ -272,7 +273,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_export_op = &squashfs_export_ops; handle_fragments: - fragments = le32_to_cpu(sblk->fragments); + fragments = msblk->fragments; if (fragments == 0) goto check_directory_table; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 
0410facaf68bdca862c3470f58a2d6896546f3c1..23d5f3ff0b987a0459c814aabf6f0b5e09b531f3 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -628,8 +628,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, /* the various vma->vm_userfaultfd_ctx still points to it */ down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) - if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + } up_write(&mm->mmap_sem); userfaultfd_ctx_put(release_new_ctx); diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 5c16db86b38ffbcbb2c7e400cd04a4e84230a34e..40e53a4fc0a65ee479379a0e55c3c171539eb4a3 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -785,9 +785,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) ASSERT(blkno == 0); error = xfs_attr3_leaf_create(args, blkno, &bp); if (error) { - error = xfs_da_shrink_inode(args, 0, bp); - bp = NULL; - if (error) + /* xfs_attr3_leaf_create may not have instantiated a block */ + if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0)) goto out; xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 43005fbe8b1eefabc84ee762a9427ec784889814..544b5211221cdae3a61c5e1910e9286063fed6b0 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -305,6 +305,46 @@ xfs_reinit_inode( return error; } +/* + * If we are allocating a new inode, then check what was returned is + * actually a free, empty inode. If we are not allocating an inode, + * then check we didn't find a free inode. 
+ * + * Returns: + * 0 if the inode free state matches the lookup context + * -ENOENT if the inode is free and we are not allocating + * -EFSCORRUPTED if there is any state mismatch at all + */ +static int +xfs_iget_check_free_state( + struct xfs_inode *ip, + int flags) +{ + if (flags & XFS_IGET_CREATE) { + /* should be a free inode */ + if (VFS_I(ip)->i_mode != 0) { + xfs_warn(ip->i_mount, +"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", + ip->i_ino, VFS_I(ip)->i_mode); + return -EFSCORRUPTED; + } + + if (ip->i_d.di_nblocks != 0) { + xfs_warn(ip->i_mount, +"Corruption detected! Free inode 0x%llx has blocks allocated!", + ip->i_ino); + return -EFSCORRUPTED; + } + return 0; + } + + /* should be an allocated inode */ + if (VFS_I(ip)->i_mode == 0) + return -ENOENT; + + return 0; +} + /* * Check the validity of the inode we just found it the cache */ @@ -354,12 +394,12 @@ xfs_iget_cache_hit( } /* - * If lookup is racing with unlink return an error immediately. + * Check the inode free state is valid. This also detects lookup + * racing with unlinks. */ - if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) { - error = -ENOENT; + error = xfs_iget_check_free_state(ip, flags); + if (error) goto out_error; - } /* * If IRECLAIMABLE is set, we've torn down the VFS inode already. @@ -475,10 +515,14 @@ xfs_iget_cache_miss( trace_xfs_iget_miss(ip); - if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) { - error = -ENOENT; + + /* + * Check the inode free state is valid. This also detects lookup + * racing with unlinks. 
+ */ + error = xfs_iget_check_free_state(ip, flags); + if (error) goto out_destroy; - } /* * Preload the radix tree so we can insert safely under the diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index a2a914e7589ecfcb28fd00ad118ea166df99b262..180d2e86151d590a5c3748f8c2a9648444bd4cbf 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -454,6 +454,7 @@ # define DP_PSR_FRAME_CAPTURE (1 << 3) # define DP_PSR_SELECTIVE_UPDATE (1 << 4) # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) +# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) diff --git a/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h b/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h index cc9d2cfcfec0cd66f7f4457a51be5519ae3c5d0b..554ac9db0a4abb5aea98a9090e14395d6744309e 100644 --- a/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h +++ b/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h @@ -14,113 +14,100 @@ #ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SDMMAGPIE_H #define _DT_BINDINGS_CLK_QCOM_CAM_CC_SDMMAGPIE_H -#define CAM_CC_BPS_AHB_CLK 0 -#define CAM_CC_BPS_AREG_CLK 1 -#define CAM_CC_BPS_AXI_CLK 2 -#define CAM_CC_BPS_CLK 3 -#define CAM_CC_BPS_CLK_SRC 4 -#define CAM_CC_CAMNOC_AXI_CLK 5 -#define CAM_CC_CAMNOC_AXI_CLK_SRC 6 -#define CAM_CC_CAMNOC_DCD_XO_CLK 7 -#define CAM_CC_CCI_0_CLK 8 -#define CAM_CC_CCI_0_CLK_SRC 9 -#define CAM_CC_CCI_1_CLK 10 -#define CAM_CC_CCI_1_CLK_SRC 11 -#define CAM_CC_CORE_AHB_CLK 12 -#define CAM_CC_CPAS_AHB_CLK 13 -#define CAM_CC_CPHY_RX_CLK_SRC 14 -#define CAM_CC_CSI0PHYTIMER_CLK 15 -#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16 -#define CAM_CC_CSI1PHYTIMER_CLK 17 -#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18 -#define CAM_CC_CSI2PHYTIMER_CLK 19 -#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20 -#define CAM_CC_CSI3PHYTIMER_CLK 21 -#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22 -#define CAM_CC_CSIPHY0_CLK 23 -#define CAM_CC_CSIPHY1_CLK 24 -#define CAM_CC_CSIPHY2_CLK 25 -#define 
CAM_CC_CSIPHY3_CLK 26 -#define CAM_CC_FAST_AHB_CLK_SRC 27 -#define CAM_CC_FD_CORE_CLK 28 -#define CAM_CC_FD_CORE_CLK_SRC 29 -#define CAM_CC_FD_CORE_UAR_CLK 30 -#define CAM_CC_GDSC_CLK 31 -#define CAM_CC_ICP_AHB_CLK 32 -#define CAM_CC_ICP_CLK 33 -#define CAM_CC_ICP_CLK_SRC 34 -#define CAM_CC_IFE_0_AXI_CLK 35 -#define CAM_CC_IFE_0_CLK 36 -#define CAM_CC_IFE_0_CLK_SRC 37 -#define CAM_CC_IFE_0_CPHY_RX_CLK 38 -#define CAM_CC_IFE_0_CSID_CLK 39 -#define CAM_CC_IFE_0_CSID_CLK_SRC 40 -#define CAM_CC_IFE_0_DSP_CLK 41 -#define CAM_CC_IFE_1_AXI_CLK 42 -#define CAM_CC_IFE_1_CLK 43 -#define CAM_CC_IFE_1_CLK_SRC 44 -#define CAM_CC_IFE_1_CPHY_RX_CLK 45 -#define CAM_CC_IFE_1_CSID_CLK 46 -#define CAM_CC_IFE_1_CSID_CLK_SRC 47 -#define CAM_CC_IFE_1_DSP_CLK 48 -#define CAM_CC_IFE_LITE_CLK 49 -#define CAM_CC_IFE_LITE_CLK_SRC 50 -#define CAM_CC_IFE_LITE_CPHY_RX_CLK 51 -#define CAM_CC_IFE_LITE_CSID_CLK 52 -#define CAM_CC_IFE_LITE_CSID_CLK_SRC 53 -#define CAM_CC_IPE_0_AHB_CLK 54 -#define CAM_CC_IPE_0_AREG_CLK 55 -#define CAM_CC_IPE_0_AXI_CLK 56 -#define CAM_CC_IPE_0_CLK 57 -#define CAM_CC_IPE_0_CLK_SRC 58 -#define CAM_CC_IPE_1_AHB_CLK 59 -#define CAM_CC_IPE_1_AREG_CLK 60 -#define CAM_CC_IPE_1_AXI_CLK 61 -#define CAM_CC_IPE_1_CLK 62 -#define CAM_CC_JPEG_CLK 63 -#define CAM_CC_JPEG_CLK_SRC 64 -#define CAM_CC_LRME_CLK 65 -#define CAM_CC_LRME_CLK_SRC 66 -#define CAM_CC_MCLK0_CLK 67 -#define CAM_CC_MCLK0_CLK_SRC 68 -#define CAM_CC_MCLK1_CLK 69 -#define CAM_CC_MCLK1_CLK_SRC 70 -#define CAM_CC_MCLK2_CLK 71 -#define CAM_CC_MCLK2_CLK_SRC 72 -#define CAM_CC_MCLK3_CLK 73 -#define CAM_CC_MCLK3_CLK_SRC 74 -#define CAM_CC_PLL0 75 -#define CAM_CC_PLL0_OUT_EVEN 76 -#define CAM_CC_PLL0_OUT_ODD 77 -#define CAM_CC_PLL1 78 -#define CAM_CC_PLL1_OUT_EVEN 79 -#define CAM_CC_PLL2 80 -#define CAM_CC_PLL2_OUT_AUX 81 -#define CAM_CC_PLL2_OUT_MAIN 82 -#define CAM_CC_PLL3 83 -#define CAM_CC_PLL3_OUT_EVEN 84 -#define CAM_CC_PLL4 85 -#define CAM_CC_PLL4_OUT_EVEN 86 -#define CAM_CC_PLL_TEST_CLK 87 -#define 
CAM_CC_QDSS_DEBUG_CLK 88 -#define CAM_CC_QDSS_DEBUG_CLK_SRC 89 -#define CAM_CC_QDSS_DEBUG_XO_CLK 90 -#define CAM_CC_SLEEP_CLK 91 -#define CAM_CC_SLEEP_CLK_SRC 92 -#define CAM_CC_SLOW_AHB_CLK_SRC 93 -#define CAM_CC_SPDM_BPS_CLK 94 -#define CAM_CC_SPDM_IFE_0_CLK 95 -#define CAM_CC_SPDM_IFE_0_CSID_CLK 96 -#define CAM_CC_SPDM_IPE_0_CLK 97 -#define CAM_CC_SPDM_IPE_1_CLK 98 -#define CAM_CC_SPDM_JPEG_CLK 99 -#define CAM_CC_XO_CLK_SRC 100 +/* Hardware clocks */ +#define CAM_CC_PLL0_OUT_EVEN 0 +#define CAM_CC_PLL0_OUT_ODD 1 +#define CAM_CC_PLL1_OUT_EVEN 2 +#define CAM_CC_PLL2_OUT_EARLY 3 +#define CAM_CC_PLL3_OUT_EVEN 4 +#define CAM_CC_PLL4_OUT_EVEN 5 -#define BPS_GDSC 0 -#define IFE_0_GDSC 1 -#define IFE_1_GDSC 2 -#define IPE_0_GDSC 3 -#define IPE_1_GDSC 4 -#define TITAN_TOP_GDSC 5 +/* CAM_CC clock registers */ +#define CAM_CC_PLL0 6 +#define CAM_CC_PLL1 7 +#define CAM_CC_PLL2 8 +#define CAM_CC_PLL2_OUT_AUX 9 +#define CAM_CC_PLL2_OUT_MAIN 10 +#define CAM_CC_PLL3 11 +#define CAM_CC_PLL4 12 +#define CAM_CC_BPS_AHB_CLK 13 +#define CAM_CC_BPS_AREG_CLK 14 +#define CAM_CC_BPS_AXI_CLK 15 +#define CAM_CC_BPS_CLK 16 +#define CAM_CC_BPS_CLK_SRC 17 +#define CAM_CC_CAMNOC_AXI_CLK 18 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 19 +#define CAM_CC_CAMNOC_DCD_XO_CLK 20 +#define CAM_CC_CCI_0_CLK 21 +#define CAM_CC_CCI_0_CLK_SRC 22 +#define CAM_CC_CCI_1_CLK 23 +#define CAM_CC_CCI_1_CLK_SRC 24 +#define CAM_CC_CORE_AHB_CLK 25 +#define CAM_CC_CPAS_AHB_CLK 26 +#define CAM_CC_CPHY_RX_CLK_SRC 27 +#define CAM_CC_CSI0PHYTIMER_CLK 28 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 29 +#define CAM_CC_CSI1PHYTIMER_CLK 30 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 31 +#define CAM_CC_CSI2PHYTIMER_CLK 32 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 33 +#define CAM_CC_CSI3PHYTIMER_CLK 34 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 35 +#define CAM_CC_CSIPHY0_CLK 36 +#define CAM_CC_CSIPHY1_CLK 37 +#define CAM_CC_CSIPHY2_CLK 38 +#define CAM_CC_CSIPHY3_CLK 39 +#define CAM_CC_FAST_AHB_CLK_SRC 40 +#define CAM_CC_FD_CORE_CLK 41 +#define 
CAM_CC_FD_CORE_CLK_SRC 42 +#define CAM_CC_FD_CORE_UAR_CLK 43 +#define CAM_CC_GDSC_CLK 44 +#define CAM_CC_ICP_AHB_CLK 45 +#define CAM_CC_ICP_CLK 46 +#define CAM_CC_ICP_CLK_SRC 47 +#define CAM_CC_IFE_0_AXI_CLK 48 +#define CAM_CC_IFE_0_CLK 49 +#define CAM_CC_IFE_0_CLK_SRC 50 +#define CAM_CC_IFE_0_CPHY_RX_CLK 51 +#define CAM_CC_IFE_0_CSID_CLK 52 +#define CAM_CC_IFE_0_CSID_CLK_SRC 53 +#define CAM_CC_IFE_0_DSP_CLK 54 +#define CAM_CC_IFE_1_AXI_CLK 55 +#define CAM_CC_IFE_1_CLK 56 +#define CAM_CC_IFE_1_CLK_SRC 57 +#define CAM_CC_IFE_1_CPHY_RX_CLK 58 +#define CAM_CC_IFE_1_CSID_CLK 59 +#define CAM_CC_IFE_1_CSID_CLK_SRC 60 +#define CAM_CC_IFE_1_DSP_CLK 61 +#define CAM_CC_IFE_LITE_CLK 62 +#define CAM_CC_IFE_LITE_CLK_SRC 63 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 64 +#define CAM_CC_IFE_LITE_CSID_CLK 65 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 66 +#define CAM_CC_IPE_0_AHB_CLK 67 +#define CAM_CC_IPE_0_AREG_CLK 68 +#define CAM_CC_IPE_0_AXI_CLK 69 +#define CAM_CC_IPE_0_CLK 70 +#define CAM_CC_IPE_0_CLK_SRC 71 +#define CAM_CC_IPE_1_AHB_CLK 72 +#define CAM_CC_IPE_1_AREG_CLK 73 +#define CAM_CC_IPE_1_AXI_CLK 74 +#define CAM_CC_IPE_1_CLK 75 +#define CAM_CC_JPEG_CLK 76 +#define CAM_CC_JPEG_CLK_SRC 77 +#define CAM_CC_LRME_CLK 78 +#define CAM_CC_LRME_CLK_SRC 79 +#define CAM_CC_MCLK0_CLK 80 +#define CAM_CC_MCLK0_CLK_SRC 81 +#define CAM_CC_MCLK1_CLK 82 +#define CAM_CC_MCLK1_CLK_SRC 83 +#define CAM_CC_MCLK2_CLK 84 +#define CAM_CC_MCLK2_CLK_SRC 85 +#define CAM_CC_MCLK3_CLK 86 +#define CAM_CC_MCLK3_CLK_SRC 87 +#define CAM_CC_SLEEP_CLK 88 +#define CAM_CC_SLEEP_CLK_SRC 89 +#define CAM_CC_SLOW_AHB_CLK_SRC 90 +#define CAM_CC_XO_CLK_SRC 91 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-qcs405.h b/include/dt-bindings/clock/qcom,gcc-qcs405.h index 34a531d70bf2efb1f91650e13d6f34237e9d96d0..b67fb4fa1e11c6ad6d5ceb38754200efe703daa0 100644 --- a/include/dt-bindings/clock/qcom,gcc-qcs405.h +++ b/include/dt-bindings/clock/qcom,gcc-qcs405.h @@ -90,7 +90,7 @@ #define GCC_MDSS_PCLK0_CLK 73 #define 
GCC_MDSS_VSYNC_CLK 74 #define GCC_OXILI_AHB_CLK 75 -#define GCC_OXILI_GFX3D_CLK 76 +#define GFX3D_CLK_SRC 76 #define GCC_PCIE_0_AUX_CLK 77 #define GCC_PCIE_0_CFG_AHB_CLK 78 #define GCC_PCIE_0_MSTR_AXI_CLK 79 @@ -120,7 +120,7 @@ #define GCC_USB3_PHY_PIPE_CLK 103 #define GCC_USB_HS_PHY_CFG_AHB_CLK 104 #define GCC_USB_HS_SYSTEM_CLK 105 -#define GFX3D_CLK_SRC 106 +#define GCC_OXILI_GFX3D_CLK 106 #define GP1_CLK_SRC 107 #define GP2_CLK_SRC 108 #define GP3_CLK_SRC 109 diff --git a/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h b/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h index 1159fbbffd53021042db4a7315c5348dd43888cf..6f570779cb2ee87d6901ab555bfa33c6efb5f87c 100644 --- a/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h +++ b/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h @@ -63,127 +63,120 @@ #define GCC_GPU_MEMNOC_GFX_CLK 43 #define GCC_GPU_SNOC_DVM_GFX_CLK 44 #define GCC_GPU_VS_CLK 45 -#define GCC_MSS_AXIS2_CLK 46 -#define GCC_MSS_CFG_AHB_CLK 47 -#define GCC_MSS_GPLL0_DIV_CLK_SRC 48 -#define GCC_MSS_MFAB_AXIS_CLK 49 -#define GCC_MSS_Q6_MEMNOC_AXI_CLK 50 -#define GCC_MSS_SNOC_AXI_CLK 51 -#define GCC_MSS_VS_CLK 52 -#define GCC_NPU_AXI_CLK 53 -#define GCC_NPU_CFG_AHB_CLK 54 -#define GCC_NPU_GPLL0_CLK_SRC 55 -#define GCC_NPU_GPLL0_DIV_CLK_SRC 56 -#define GCC_PCIE_0_AUX_CLK 57 -#define GCC_PCIE_0_AUX_CLK_SRC 58 -#define GCC_PCIE_0_CFG_AHB_CLK 59 -#define GCC_PCIE_0_CLKREF_CLK 60 -#define GCC_PCIE_0_MSTR_AXI_CLK 61 -#define GCC_PCIE_0_PIPE_CLK 62 -#define GCC_PCIE_0_SLV_AXI_CLK 63 -#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 64 -#define GCC_PCIE_PHY_AUX_CLK 65 -#define GCC_PCIE_PHY_REFGEN_CLK 66 -#define GCC_PCIE_PHY_REFGEN_CLK_SRC 67 -#define GCC_PDM2_CLK 68 -#define GCC_PDM2_CLK_SRC 69 -#define GCC_PDM_AHB_CLK 70 -#define GCC_PDM_XO4_CLK 71 -#define GCC_PRNG_AHB_CLK 72 -#define GCC_QUPV3_WRAP0_CORE_2X_CLK 73 -#define GCC_QUPV3_WRAP0_CORE_CLK 74 -#define GCC_QUPV3_WRAP0_S0_CLK 75 -#define GCC_QUPV3_WRAP0_S0_CLK_SRC 76 -#define GCC_QUPV3_WRAP0_S1_CLK 77 -#define 
GCC_QUPV3_WRAP0_S1_CLK_SRC 78 -#define GCC_QUPV3_WRAP0_S2_CLK 79 -#define GCC_QUPV3_WRAP0_S2_CLK_SRC 80 -#define GCC_QUPV3_WRAP0_S3_CLK 81 -#define GCC_QUPV3_WRAP0_S3_CLK_SRC 82 -#define GCC_QUPV3_WRAP0_S4_CLK 83 -#define GCC_QUPV3_WRAP0_S4_CLK_SRC 84 -#define GCC_QUPV3_WRAP0_S5_CLK 85 -#define GCC_QUPV3_WRAP0_S5_CLK_SRC 86 -#define GCC_QUPV3_WRAP0_S6_CLK 87 -#define GCC_QUPV3_WRAP0_S6_CLK_SRC 88 -#define GCC_QUPV3_WRAP0_S7_CLK 89 -#define GCC_QUPV3_WRAP0_S7_CLK_SRC 90 -#define GCC_QUPV3_WRAP1_CORE_2X_CLK 91 -#define GCC_QUPV3_WRAP1_CORE_CLK 92 -#define GCC_QUPV3_WRAP1_S0_CLK 93 -#define GCC_QUPV3_WRAP1_S0_CLK_SRC 94 -#define GCC_QUPV3_WRAP1_S1_CLK 95 -#define GCC_QUPV3_WRAP1_S1_CLK_SRC 96 -#define GCC_QUPV3_WRAP1_S2_CLK 97 -#define GCC_QUPV3_WRAP1_S2_CLK_SRC 98 -#define GCC_QUPV3_WRAP1_S3_CLK 99 -#define GCC_QUPV3_WRAP1_S3_CLK_SRC 100 -#define GCC_QUPV3_WRAP1_S4_CLK 101 -#define GCC_QUPV3_WRAP1_S4_CLK_SRC 102 -#define GCC_QUPV3_WRAP1_S5_CLK 103 -#define GCC_QUPV3_WRAP1_S5_CLK_SRC 104 -#define GCC_QUPV3_WRAP1_S6_CLK 105 -#define GCC_QUPV3_WRAP1_S6_CLK_SRC 106 -#define GCC_QUPV3_WRAP1_S7_CLK 107 -#define GCC_QUPV3_WRAP1_S7_CLK_SRC 108 -#define GCC_QUPV3_WRAP_0_M_AHB_CLK 109 -#define GCC_QUPV3_WRAP_0_S_AHB_CLK 110 -#define GCC_QUPV3_WRAP_1_M_AHB_CLK 111 -#define GCC_QUPV3_WRAP_1_S_AHB_CLK 112 -#define GCC_SDCC1_AHB_CLK 113 -#define GCC_SDCC1_APPS_CLK 114 -#define GCC_SDCC1_APPS_CLK_SRC 115 -#define GCC_SDCC1_ICE_CORE_CLK 116 -#define GCC_SDCC1_ICE_CORE_CLK_SRC 117 -#define GCC_SDCC2_AHB_CLK 118 -#define GCC_SDCC2_APPS_CLK 119 -#define GCC_SDCC2_APPS_CLK_SRC 120 -#define GCC_SDCC4_AHB_CLK 121 -#define GCC_SDCC4_APPS_CLK 122 -#define GCC_SDCC4_APPS_CLK_SRC 123 -#define GCC_SYS_NOC_CPUSS_AHB_CLK 124 -#define GCC_TSIF_AHB_CLK 125 -#define GCC_TSIF_INACTIVITY_TIMERS_CLK 126 -#define GCC_TSIF_REF_CLK 127 -#define GCC_TSIF_REF_CLK_SRC 128 -#define GCC_UFS_MEM_CLKREF_CLK 129 -#define GCC_UFS_PHY_AHB_CLK 130 -#define GCC_UFS_PHY_AXI_CLK 131 -#define GCC_UFS_PHY_AXI_CLK_SRC 
132 -#define GCC_UFS_PHY_AXI_HW_CTL_CLK 133 -#define GCC_UFS_PHY_ICE_CORE_CLK 134 -#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 135 -#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 136 -#define GCC_UFS_PHY_PHY_AUX_CLK 137 -#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 138 -#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 139 -#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 140 -#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 141 -#define GCC_UFS_PHY_UNIPRO_CORE_CLK 142 -#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 143 -#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 144 -#define GCC_USB30_PRIM_MASTER_CLK 145 -#define GCC_USB30_PRIM_MASTER_CLK_SRC 146 -#define GCC_USB30_PRIM_MOCK_UTMI_CLK 147 -#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 148 -#define GCC_USB30_PRIM_SLEEP_CLK 149 -#define GCC_USB3_PRIM_CLKREF_CLK 150 -#define GCC_USB3_PRIM_PHY_AUX_CLK 151 -#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 152 -#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 153 -#define GCC_USB3_PRIM_PHY_PIPE_CLK 154 -#define GCC_USB_PHY_CFG_AHB2PHY_CLK 155 -#define GCC_VDDA_VS_CLK 156 -#define GCC_VDDCX_VS_CLK 157 -#define GCC_VDDMX_VS_CLK 158 -#define GCC_VIDEO_AHB_CLK 159 -#define GCC_VIDEO_AXI_CLK 160 -#define GCC_VIDEO_XO_CLK 161 -#define GCC_VS_CTRL_AHB_CLK 162 -#define GCC_VS_CTRL_CLK 163 -#define GCC_VS_CTRL_CLK_SRC 164 -#define GCC_VSENSOR_CLK_SRC 165 -#define GCC_GPLL0_MAIN_DIV_CDIV 167 +#define GCC_NPU_AXI_CLK 46 +#define GCC_NPU_CFG_AHB_CLK 47 +#define GCC_NPU_GPLL0_CLK_SRC 48 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 49 +#define GCC_PCIE_0_AUX_CLK 50 +#define GCC_PCIE_0_AUX_CLK_SRC 51 +#define GCC_PCIE_0_CFG_AHB_CLK 52 +#define GCC_PCIE_0_CLKREF_CLK 53 +#define GCC_PCIE_0_MSTR_AXI_CLK 54 +#define GCC_PCIE_0_PIPE_CLK 55 +#define GCC_PCIE_0_SLV_AXI_CLK 56 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57 +#define GCC_PCIE_PHY_AUX_CLK 58 +#define GCC_PCIE_PHY_REFGEN_CLK 59 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 60 +#define GCC_PDM2_CLK 61 +#define GCC_PDM2_CLK_SRC 62 +#define GCC_PDM_AHB_CLK 63 +#define GCC_PDM_XO4_CLK 64 +#define GCC_PRNG_AHB_CLK 65 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 
66 +#define GCC_QUPV3_WRAP0_CORE_CLK 67 +#define GCC_QUPV3_WRAP0_S0_CLK 68 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69 +#define GCC_QUPV3_WRAP0_S1_CLK 70 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71 +#define GCC_QUPV3_WRAP0_S2_CLK 72 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73 +#define GCC_QUPV3_WRAP0_S3_CLK 74 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75 +#define GCC_QUPV3_WRAP0_S4_CLK 76 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 78 +#define GCC_QUPV3_WRAP0_S5_CLK 79 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S6_CLK 81 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 82 +#define GCC_QUPV3_WRAP0_S7_CLK 83 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 84 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 85 +#define GCC_QUPV3_WRAP1_CORE_CLK 86 +#define GCC_QUPV3_WRAP1_S0_CLK 87 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 88 +#define GCC_QUPV3_WRAP1_S1_CLK 89 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 90 +#define GCC_QUPV3_WRAP1_S2_CLK 91 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 92 +#define GCC_QUPV3_WRAP1_S3_CLK 93 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 94 +#define GCC_QUPV3_WRAP1_S4_CLK 95 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 96 +#define GCC_QUPV3_WRAP1_S5_CLK 97 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 98 +#define GCC_QUPV3_WRAP1_S6_CLK 99 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 100 +#define GCC_QUPV3_WRAP1_S7_CLK 101 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 102 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 103 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 104 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 105 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 106 +#define GCC_SDCC1_AHB_CLK 107 +#define GCC_SDCC1_APPS_CLK 108 +#define GCC_SDCC1_APPS_CLK_SRC 109 +#define GCC_SDCC1_ICE_CORE_CLK 110 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 111 +#define GCC_SDCC2_AHB_CLK 112 +#define GCC_SDCC2_APPS_CLK 113 +#define GCC_SDCC2_APPS_CLK_SRC 114 +#define GCC_SDCC4_AHB_CLK 115 +#define GCC_SDCC4_APPS_CLK 116 +#define GCC_SDCC4_APPS_CLK_SRC 117 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 118 +#define GCC_TSIF_AHB_CLK 119 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 120 +#define GCC_TSIF_REF_CLK 121 +#define 
GCC_TSIF_REF_CLK_SRC 123 +#define GCC_UFS_MEM_CLKREF_CLK 124 +#define GCC_UFS_PHY_AHB_CLK 125 +#define GCC_UFS_PHY_AXI_CLK 126 +#define GCC_UFS_PHY_AXI_CLK_SRC 127 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 128 +#define GCC_UFS_PHY_ICE_CORE_CLK 129 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 130 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 131 +#define GCC_UFS_PHY_PHY_AUX_CLK 132 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 133 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 134 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 135 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 136 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 137 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 138 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 140 +#define GCC_USB30_PRIM_MASTER_CLK 141 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 142 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144 +#define GCC_USB30_PRIM_SLEEP_CLK 145 +#define GCC_USB3_PRIM_CLKREF_CLK 146 +#define GCC_USB3_PRIM_PHY_AUX_CLK 147 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 148 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 149 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 150 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 151 +#define GCC_VDDA_VS_CLK 152 +#define GCC_VDDCX_VS_CLK 153 +#define GCC_VDDMX_VS_CLK 154 +#define GCC_VIDEO_AHB_CLK 155 +#define GCC_VIDEO_AXI_CLK 156 +#define GCC_VIDEO_XO_CLK 157 +#define GCC_VS_CTRL_AHB_CLK 158 +#define GCC_VS_CTRL_CLK 159 +#define GCC_VS_CTRL_CLK_SRC 160 +#define GCC_VSENSOR_CLK_SRC 161 +#define GCC_GPLL0_MAIN_DIV_CDIV 162 /* GCC Resets */ #define GCC_PCIE_0_BCR 0 @@ -195,5 +188,6 @@ #define GCC_USB3_DP_PHY_SEC_BCR 6 #define GCC_USB3_PHY_PRIM_BCR 7 #define GCC_USB3_PHY_SEC_BCR 8 +#define GCC_QUSB2PHY_PRIM_BCR 9 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdxprairie.h b/include/dt-bindings/clock/qcom,gcc-sdxprairie.h index 18fe72a018d6ebf5de3ee252b8e5bc0c35436c26..efd86abff522e9dbf3a2f01e298daee66e2a6c68 100644 --- a/include/dt-bindings/clock/qcom,gcc-sdxprairie.h +++ b/include/dt-bindings/clock/qcom,gcc-sdxprairie.h @@ -15,118 +15,110 
@@ #define _DT_BINDINGS_CLK_MSM_GCC_SDXPRAIRIE_H /* GCC clock registers */ -#define GCC_BLSP1_AHB_CLK 0 -#define GCC_BLSP1_QUP1_I2C_APPS_CLK 1 -#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 2 -#define GCC_BLSP1_QUP1_SPI_APPS_CLK 3 -#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4 -#define GCC_BLSP1_QUP2_I2C_APPS_CLK 5 -#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 6 -#define GCC_BLSP1_QUP2_SPI_APPS_CLK 7 -#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 8 -#define GCC_BLSP1_QUP3_I2C_APPS_CLK 9 -#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 10 -#define GCC_BLSP1_QUP3_SPI_APPS_CLK 11 -#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 12 -#define GCC_BLSP1_QUP4_I2C_APPS_CLK 13 -#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 14 -#define GCC_BLSP1_QUP4_SPI_APPS_CLK 15 -#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 16 -#define GCC_BLSP1_SLEEP_CLK 17 -#define GCC_BLSP1_UART1_APPS_CLK 18 -#define GCC_BLSP1_UART1_APPS_CLK_SRC 19 -#define GCC_BLSP1_UART2_APPS_CLK 20 -#define GCC_BLSP1_UART2_APPS_CLK_SRC 21 -#define GCC_BLSP1_UART3_APPS_CLK 22 -#define GCC_BLSP1_UART3_APPS_CLK_SRC 23 -#define GCC_BLSP1_UART4_APPS_CLK 24 -#define GCC_BLSP1_UART4_APPS_CLK_SRC 25 -#define GCC_BOOT_ROM_AHB_CLK 26 -#define GCC_CE1_AHB_CLK 27 -#define GCC_CE1_AXI_CLK 28 -#define GCC_CE1_CLK 29 -#define GCC_CPUSS_AHB_CLK 30 -#define GCC_CPUSS_AHB_CLK_SRC 31 -#define GCC_CPUSS_GNOC_CLK 32 -#define GCC_CPUSS_RBCPR_CLK 33 -#define GCC_CPUSS_RBCPR_CLK_SRC 34 -#define GCC_EMAC_CLK_SRC 35 -#define GCC_EMAC_PTP_CLK_SRC 36 -#define GCC_ETH_AXI_CLK 37 -#define GCC_ETH_PTP_CLK 38 -#define GCC_ETH_RGMII_CLK 39 -#define GCC_ETH_SLAVE_AHB_CLK 40 -#define GCC_GP1_CLK 41 -#define GCC_GP1_CLK_SRC 42 -#define GCC_GP2_CLK 43 -#define GCC_GP2_CLK_SRC 44 -#define GCC_GP3_CLK 45 -#define GCC_GP3_CLK_SRC 46 -#define GCC_PCIE_0_CLKREF_CLK 47 -#define GCC_PCIE_AUX_CLK 48 -#define GCC_PCIE_AUX_PHY_CLK_SRC 49 -#define GCC_PCIE_CFG_AHB_CLK 50 -#define GCC_PCIE_MSTR_AXI_CLK 51 -#define GCC_PCIE_PHY_REFGEN_CLK 52 -#define GCC_PCIE_PHY_REFGEN_CLK_SRC 53 -#define GCC_PCIE_PIPE_CLK 54 
-#define GCC_PCIE_SLEEP_CLK 55 -#define GCC_PCIE_SLV_AXI_CLK 56 -#define GCC_PCIE_SLV_Q2A_AXI_CLK 57 -#define GCC_PDM2_CLK 58 -#define GCC_PDM2_CLK_SRC 59 -#define GCC_PDM_AHB_CLK 60 -#define GCC_PDM_XO4_CLK 61 -#define GCC_PRNG_AHB_CLK 62 -#define GCC_SDCC1_AHB_CLK 63 -#define GCC_SDCC1_APPS_CLK 64 -#define GCC_SDCC1_APPS_CLK_SRC 65 -#define GCC_SPMI_FETCHER_AHB_CLK 66 -#define GCC_SPMI_FETCHER_CLK 67 -#define GCC_SPMI_FETCHER_CLK_SRC 68 -#define GCC_SYS_NOC_CPUSS_AHB_CLK 69 -#define GCC_SYS_NOC_USB3_CLK 70 -#define GCC_USB30_MASTER_CLK 71 -#define GCC_USB30_MASTER_CLK_SRC 72 -#define GCC_USB30_MOCK_UTMI_CLK 73 -#define GCC_USB30_MOCK_UTMI_CLK_SRC 74 -#define GCC_USB30_SLEEP_CLK 75 -#define GCC_USB3_PHY_AUX_CLK 76 -#define GCC_USB3_PHY_AUX_CLK_SRC 77 -#define GCC_USB3_PHY_PIPE_CLK 78 -#define GCC_USB3_PRIM_CLKREF_CLK 79 -#define GCC_USB_PHY_CFG_AHB2PHY_CLK 80 -#define GPLL0 81 -#define GPLL0_OUT_EVEN 82 -#define GPLL4 83 -#define GPLL4_OUT_EVEN 84 +#define MEASURE_ONLY_BIMC_CLK 0 +#define MEASURE_ONLY_IPA_2X_CLK 1 +#define MEASURE_ONLY_SNOC_CLK 2 -/* CPU clocks */ -#define CLOCK_A7SS 0 +#define GPLL0 3 +#define GPLL0_OUT_EVEN 4 +#define GPLL4 5 +#define GPLL4_OUT_EVEN 6 +#define GPLL5 7 +#define GCC_AHB_PCIE_LINK_CLK 8 +#define GCC_BLSP1_AHB_CLK 9 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 10 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 11 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 12 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 13 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 14 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 15 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 16 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 17 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 18 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 19 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 20 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 21 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 22 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 23 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 24 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 25 +#define GCC_BLSP1_UART1_APPS_CLK 26 +#define 
GCC_BLSP1_UART1_APPS_CLK_SRC 27 +#define GCC_BLSP1_UART2_APPS_CLK 28 +#define GCC_BLSP1_UART2_APPS_CLK_SRC 29 +#define GCC_BLSP1_UART3_APPS_CLK 30 +#define GCC_BLSP1_UART3_APPS_CLK_SRC 31 +#define GCC_BLSP1_UART4_APPS_CLK 32 +#define GCC_BLSP1_UART4_APPS_CLK_SRC 33 +#define GCC_BOOT_ROM_AHB_CLK 34 +#define GCC_CE1_AHB_CLK 35 +#define GCC_CE1_AXI_CLK 36 +#define GCC_CE1_CLK 37 +#define GCC_CPUSS_AHB_CLK 38 +#define GCC_CPUSS_AHB_CLK_SRC 39 +#define GCC_CPUSS_GNOC_CLK 40 +#define GCC_CPUSS_RBCPR_CLK 41 +#define GCC_CPUSS_RBCPR_CLK_SRC 42 +#define GCC_EMAC_CLK_SRC 43 +#define GCC_EMAC_PTP_CLK_SRC 44 +#define GCC_ETH_AXI_CLK 45 +#define GCC_ETH_PTP_CLK 46 +#define GCC_ETH_RGMII_CLK 47 +#define GCC_ETH_SLAVE_AHB_CLK 48 +#define GCC_GP1_CLK 49 +#define GCC_GP1_CLK_SRC 50 +#define GCC_GP2_CLK 51 +#define GCC_GP2_CLK_SRC 52 +#define GCC_GP3_CLK 53 +#define GCC_GP3_CLK_SRC 54 +#define GCC_PCIE_0_CLKREF_CLK 55 +#define GCC_PCIE_AUX_CLK 56 +#define GCC_PCIE_AUX_PHY_CLK_SRC 57 +#define GCC_PCIE_CFG_AHB_CLK 58 +#define GCC_PCIE_MSTR_AXI_CLK 59 +#define GCC_PCIE_PIPE_CLK 60 +#define GCC_PCIE_RCHNG_PHY_CLK 61 +#define GCC_PCIE_RCHNG_PHY_CLK_SRC 62 +#define GCC_PCIE_SLEEP_CLK 63 +#define GCC_PCIE_SLV_AXI_CLK 64 +#define GCC_PCIE_SLV_Q2A_AXI_CLK 65 +#define GCC_PDM2_CLK 66 +#define GCC_PDM2_CLK_SRC 67 +#define GCC_PDM_AHB_CLK 68 +#define GCC_PDM_XO4_CLK 69 +#define GCC_SDCC1_AHB_CLK 70 +#define GCC_SDCC1_APPS_CLK 71 +#define GCC_SDCC1_APPS_CLK_SRC 72 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 73 +#define GCC_USB30_MASTER_CLK 74 +#define GCC_USB30_MASTER_CLK_SRC 75 +#define GCC_USB30_MOCK_UTMI_CLK 76 +#define GCC_USB30_MOCK_UTMI_CLK_SRC 77 +#define GCC_USB30_MSTR_AXI_CLK 78 +#define GCC_USB30_SLEEP_CLK 79 +#define GCC_USB30_SLV_AHB_CLK 80 +#define GCC_USB3_PHY_AUX_CLK 81 +#define GCC_USB3_PHY_AUX_CLK_SRC 82 +#define GCC_USB3_PHY_PIPE_CLK 83 +#define GCC_USB3_PRIM_CLKREF_CLK 84 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 85 +#define GCC_XO_DIV4_CLK 86 +#define GCC_XO_PCIE_LINK_CLK 87 -/* GCC reset 
clocks */ -#define GCC_BLSP1_QUP1_BCR 0 -#define GCC_BLSP1_QUP2_BCR 1 -#define GCC_BLSP1_QUP3_BCR 2 -#define GCC_BLSP1_QUP4_BCR 3 -#define GCC_BLSP1_UART2_BCR 4 -#define GCC_BLSP1_UART3_BCR 5 -#define GCC_BLSP1_UART4_BCR 6 -#define GCC_CE1_BCR 7 -#define GCC_PCIE_BCR 8 -#define GCC_PCIE_PHY_BCR 9 -#define GCC_PDM_BCR 10 -#define GCC_PRNG_BCR 11 -#define GCC_SDCC1_BCR 12 -#define GCC_SPMI_FETCHER_BCR 13 -#define GCC_USB30_BCR 14 -#define GCC_USB3_PHY_BCR 15 -#define GCC_USB3PHY_PHY_BCR 16 -#define GCC_QUSB2PHY_BCR 17 -#define GCC_USB_PHY_CFG_AHB2PHY_BCR 18 -#define GCC_EMAC_BCR 19 - -/* Dummy clocks for rate measurement */ -#define MEASURE_ONLY_IPA_2X_CLK 0 +#define GCC_EMAC_BCR 0 +#define GCC_PCIE_BCR 1 +#define GCC_PCIE_LINK_DOWN_BCR 2 +#define GCC_PCIE_NOCSR_COM_PHY_BCR 3 +#define GCC_PCIE_PHY_BCR 4 +#define GCC_PCIE_PHY_CFG_AHB_BCR 5 +#define GCC_PCIE_PHY_COM_BCR 6 +#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PDM_BCR 8 +#define GCC_QUSB2PHY_BCR 9 +#define GCC_TCSR_PCIE_BCR 10 +#define GCC_USB30_BCR 11 +#define GCC_USB3_PHY_BCR 12 +#define GCC_USB3PHY_PHY_BCR 13 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 14 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm6150.h b/include/dt-bindings/clock/qcom,gcc-sm6150.h index 94f9965d7e45679b1e8058b48aabf945a7e30d31..a6c5929e1ceb73c2da61a6ea475336f04f37cae5 100644 --- a/include/dt-bindings/clock/qcom,gcc-sm6150.h +++ b/include/dt-bindings/clock/qcom,gcc-sm6150.h @@ -204,7 +204,7 @@ #define GCC_PCIE_PHY_COM_BCR 9 #define GCC_UFS_PHY_BCR 10 #define GCC_USB20_SEC_BCR 11 -#define GCC_USB3_DP_PHY_PRIM_SP0_BCR 12 +#define GCC_USB3_PHY_PRIM_SP0_BCR 12 #define GCC_USB3PHY_PHY_PRIM_SP0_BCR 13 #endif diff --git a/include/dt-bindings/clock/qcom,scc-sm8150.h b/include/dt-bindings/clock/qcom,scc-sm8150.h new file mode 100644 index 0000000000000000000000000000000000000000..97a8f692b4c5a94ae7af44dbe642def5bc011e89 --- /dev/null +++ b/include/dt-bindings/clock/qcom,scc-sm8150.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, The 
Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_SCC_SM8150_H +#define _DT_BINDINGS_CLK_QCOM_SCC_SM8150_H + +#define SCC_MAIN_RCG_CLK_SRC 0 +#define SCC_PLL 1 +#define SCC_PLL_OUT_EVEN 2 +#define SCC_QUPV3_2XCORE_CLK 3 +#define SCC_QUPV3_CORE_CLK 4 +#define SCC_QUPV3_M_HCLK_CLK 5 +#define SCC_QUPV3_S_HCLK_CLK 6 +#define SCC_QUPV3_SE0_CLK 7 +#define SCC_QUPV3_SE0_CLK_SRC 8 +#define SCC_QUPV3_SE1_CLK 9 +#define SCC_QUPV3_SE1_CLK_SRC 10 +#define SCC_QUPV3_SE2_CLK 11 +#define SCC_QUPV3_SE2_CLK_SRC 12 +#define SCC_QUPV3_SE3_CLK 13 +#define SCC_QUPV3_SE3_CLK_SRC 14 +#define SCC_QUPV3_SE4_CLK 15 +#define SCC_QUPV3_SE4_CLK_SRC 16 +#define SCC_QUPV3_SE5_CLK 17 +#define SCC_QUPV3_SE5_CLK_SRC 18 + +#endif diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h index 3fb0f6aedd0cd35d72a8ef35a3a7a0c8d9eb56c3..f620407aa80c23cfe66016ac357ca45f18954913 100644 --- a/include/dt-bindings/msm/msm-bus-ids.h +++ b/include/dt-bindings/msm/msm-bus-ids.h @@ -89,12 +89,13 @@ #define MSM_BUS_BCM_CE0 7034 #define MSM_BUS_BCM_IP0 7035 #define MSM_BUS_BCM_CN0 7036 -#define MSM_BUS_BCM_ACV 7037 -#define MSM_BUS_BCM_ALC 7038 -#define MSM_BUS_BCM_QUP0 7039 -#define MSM_BUS_BCM_CO0 7040 -#define MSM_BUS_BCM_CO1 7041 -#define MSM_BUS_BCM_CO2 7042 +#define MSM_BUS_BCM_CN1 7037 +#define MSM_BUS_BCM_ACV 7038 +#define MSM_BUS_BCM_ALC 7039 +#define MSM_BUS_BCM_QUP0 7040 +#define MSM_BUS_BCM_CO0 7041 +#define MSM_BUS_BCM_CO1 7042 +#define MSM_BUS_BCM_CO2 7043 #define MSM_BUS_RSC_APPS 
8000 #define MSM_BUS_RSC_DISP 8001 @@ -278,6 +279,9 @@ #define MSM_BUS_MASTER_PCIE_3 167 #define MSM_BUS_MASTER_LPASS_ANOC 168 #define MSM_BUS_MASTER_USB2 169 +#define MSM_BUS_MASTER_SENSORS_AHB 170 +#define MSM_BUS_MASTER_CAMNOC_NRT 171 +#define MSM_BUS_MASTER_CAMNOC_RT 172 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001 @@ -365,6 +369,7 @@ #define MSM_BUS_SLAVE_SNOC_GEM_NOC_SF 10073 #define MSM_BUS_PNOC_SLV_10 10074 #define MSM_BUS_PNOC_SLV_11 10075 +#define MSM_BUS_SLAVE_CDSP_GEM_NOC 10076 #define MSM_BUS_INT_TEST_ID 20000 #define MSM_BUS_INT_TEST_LAST 20050 @@ -651,6 +656,11 @@ #define MSM_BUS_SLAVE_DC_NOC_GEMNOC 803 #define MSM_BUS_SLAVE_MEM_NOC_PCIE_SNOC 804 #define MSM_BUS_SLAVE_USB2 805 +#define MSM_BUS_SLAVE_EMMC_CFG 806 +#define MSM_BUS_SLAVE_AHB2PHY_NORTH 807 +#define MSM_BUS_SLAVE_CAMERA_NRT_THROTTLE_CFG 808 +#define MSM_BUS_SLAVE_CAMERA_RT_THROTTLE_CFG 809 +#define MSM_BUS_SLAVE_VENUS_CVP_THROTTLE_CFG 810 #define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512 #define MSM_BUS_SLAVE_LLCC_DISPLAY 20513 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 0c27515d2cf6db3683da2341a700283f82a99645..8124815eb1218b5653572fc4a04f5d4d734e3469 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -214,6 +214,7 @@ struct atmphy_ops { struct atm_skb_data { struct atm_vcc *vcc; /* ATM VCC */ unsigned long atm_options; /* ATM layer options */ + unsigned int acct_truesize; /* truesize accounted to vcc */ }; #define VCC_HTABLE_SIZE 32 @@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk); void atm_dev_release_vccs(struct atm_dev *dev); +static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) +{ + /* + * Because ATM skbs may not belong to a sock (and we don't + * necessarily want to), skb->truesize may be adjusted, + * escaping the hack in pskb_expand_head() which avoids + * doing so for some cases. 
So stash the value of truesize + * at the time we accounted it, and atm_pop_raw() can use + * that value later, in case it changes. + */ + refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); + ATM_SKB(skb)->acct_truesize = skb->truesize; + ATM_SKB(skb)->atm_options = vcc->atm_options; +} static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index eac387a3bfef74e59243c6fdacf13e9ba619287d..3c1beffc861a64693f0711c9f4830fc29c5f6331 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -22,7 +22,6 @@ struct dentry; */ enum wb_state { WB_registered, /* bdi_register() was done */ - WB_shutting_down, /* wb_shutdown() in progress */ WB_writeback_running, /* Writeback is in progress */ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ }; @@ -165,6 +164,7 @@ struct backing_dev_info { #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ + struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ #else struct bdi_writeback_congested *wb_congested; #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ba4b484c0008b702c10a60ba085336cee25ea3ef..30f317099366a47e1e092cf2fbe9fb39325689df 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -104,6 +104,9 @@ struct bio { /* Encryption key to use (NULL if none) */ const struct blk_encryption_key *bi_crypt_key; #endif +#ifdef CONFIG_DM_DEFAULT_KEY + int bi_crypt_skip; +#endif unsigned short bi_vcnt; /* how many bio_vec's */ diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h index ef8b519bb21866bdacb0dd42fb6886863814d64f..901dae0a5d08e161687d482c0ee58742a9913adc 100644 --- a/include/linux/bluetooth-power.h +++ b/include/linux/bluetooth-power.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017, 
The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -83,7 +83,9 @@ struct bluetooth_power_platform_data { }; int bt_register_slimdev(struct device *dev); +int get_chipset_version(void); #define BT_CMD_SLIM_TEST 0xbfac #define BT_CMD_PWR_CTRL 0xbfad +#define BT_CMD_CHIPSET_VERS 0xbfae #endif /* __LINUX_BLUETOOTH_POWER_H */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index f43113b8890b760ac73b0a370781e67998e055d7..c11032b06d68f0781aa43f52d02b2c0537d36ede 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -65,6 +65,18 @@ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #endif +/* + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not + * defined so the gnu89 semantics are the default. + */ +#ifdef __GNUC_STDC_INLINE__ +# define __gnu_inline __attribute__((gnu_inline)) +#else +# define __gnu_inline +#endif + /* * Force always-inline if the user requests it so via the .config, * or if gcc is too old. @@ -72,19 +84,22 @@ * -Wunused-function. This turns out to avoid the need for complex #ifdef * directives. Suppress the warning in clang as well by using "unused" * function attribute, which is redundant but not harmful for gcc. + * Prefer gnu_inline, so that extern inline functions do not emit an + * externally visible function. This makes extern inline behave as per gnu89 + * semantics rather than c99. This prevents multiple symbol definition errors + * of extern inline functions at link time. + * A lot of inline functions can cause havoc with function tracing. 
*/ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -#define inline inline __attribute__((always_inline,unused)) notrace -#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace -#define __inline __inline __attribute__((always_inline,unused)) notrace +#define inline \ + inline __attribute__((always_inline, unused)) notrace __gnu_inline #else -/* A lot of inline functions can cause havoc with function tracing */ -#define inline inline __attribute__((unused)) notrace -#define __inline__ __inline__ __attribute__((unused)) notrace -#define __inline __inline __attribute__((unused)) notrace +#define inline inline __attribute__((unused)) notrace __gnu_inline #endif +#define __inline__ inline +#define __inline inline #define __always_inline inline __attribute__((always_inline)) #define noinline __attribute__((noinline)) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 5e335b6203f49d429c3c81a158d1bed0830c66b7..31c865d1842e88671d7f29534587311c479fb3e7 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -29,7 +29,7 @@ #ifdef CONFIG_TASK_DELAY_ACCT struct task_delay_info { - spinlock_t lock; + raw_spinlock_t lock; unsigned int flags; /* Private per-task flags */ /* For each stat XXX, add following, aligned appropriately @@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void) static inline void delayacct_blkio_end(struct task_struct *p) { - if (current->delays) + if (p->delays) __delayacct_blkio_end(p); delayacct_clear_flag(DELAYACCT_PF_BLKIO); } diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 5d768dd788110ba485c33e88be091d4e9965c07d..93a0e03aa9cff86329e333c907141a347f81a999 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -411,6 +411,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; char *name; + struct timespec ctime; struct module *owner; struct list_head list_node; void *priv; diff 
--git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 92f20832fd28770c16ab9d34c5560efce2a888c6..e8ca5e6542773fea4c54725f360743e3f1407b7c 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -17,6 +17,7 @@ #define __DMA_IOMMU_H #ifdef __KERNEL__ +#include #include #ifdef CONFIG_IOMMU_DMA diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 07f6b63f42401a4015c85bf247bd86506034c5ec..68862592d8754ac5903ea23a2e9812f2365a02d0 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -193,8 +193,13 @@ static inline int fscrypt_using_hardware_encryption(const struct inode *inode) static inline void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun){} +static inline void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) +{ + return; +} + static inline bool fscrypt_mergeable_bio(struct bio *bio, - sector_t iv_block, bool bio_encrypted) + sector_t iv_block, bool bio_encrypted, int bi_crypt_skip) { return true; } diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index 483de436fbb6ed4432f6bef3b7f83cf84cbd0bd5..083f1fcf483ef63d4b4863eaae58748f15ba5812 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h @@ -201,8 +201,9 @@ extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, extern int fscrypt_using_hardware_encryption(const struct inode *inode); extern void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun); -extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted); - +extern void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip); +extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted, + int bi_crypt_skip); /* hooks.c */ extern int fscrypt_file_open(struct inode *inode, struct file *filp); diff --git a/include/linux/ipa.h b/include/linux/ipa.h index 
8239fa822d62d903725f047a5110ef2792443b57..05c14618e171677314a6e7eb97bacd7113e1044f 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -160,6 +160,11 @@ struct ipa_ep_cfg_conn_track { * correctly the length field within the header * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1) * Valid for Output Pipes (IPA Producer) + * Starting IPA4.5, this field in H/W requires more bits + * to support larger range, but no spare bits to use. + * So the MSB part is done through the EXT register. + * When accessing this register, need to access the EXT + * register as well. * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no * length field within the inserted header * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a @@ -170,6 +175,11 @@ struct ipa_ep_cfg_conn_track { * header with the packet length . Assumption is that * header length field size is constant and is 2Bytes * Valid for Output Pipes (IPA Producer) + * Starting IPA4.5, this field in H/W requires more bits + * to support larger range, but no spare bits to use. + * So the MSB part is done through the EXT register. + * When accessing this register, need to access the EXT + * register as well. * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet. * This bit is valid only when Hdr_En=01(Header Insertion) * SW should set this bit for IPA-to-A5 pipes. @@ -182,6 +192,8 @@ struct ipa_ep_cfg_conn_track { * @hdr_metadata_reg_valid: bool switch, metadata from * register INIT_HDR_METADATA_n is valid. * (relevant only for IPA Consumer pipes) + * Starting IPA4.5, this parameter is irrelevant and H/W + * assumes it is always valid. */ struct ipa_ep_cfg_hdr { u32 hdr_len; @@ -213,6 +225,8 @@ struct ipa_ep_cfg_hdr { * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process * TOTAL_LEN_OR_PAD field * @hdr_little_endian: 0-Big Endian, 1-Little Endian + * @hdr: The header structure.
Used starting IPA4.5 where part of the info + * at the header structure is implemented via the EXT register at the H/W */ struct ipa_ep_cfg_hdr_ext { u32 hdr_pad_to_alignment; @@ -221,6 +235,7 @@ struct ipa_ep_cfg_hdr_ext { enum hdr_total_len_or_pad_type hdr_total_len_or_pad; bool hdr_total_len_or_pad_valid; bool hdr_little_endian; + struct ipa_ep_cfg_hdr *hdr; }; /** @@ -791,15 +806,12 @@ struct ipa_rx_data { */ enum ipa_irq_type { IPA_BAD_SNOC_ACCESS_IRQ, - IPA_EOT_COAL_IRQ, IPA_UC_IRQ_0, IPA_UC_IRQ_1, IPA_UC_IRQ_2, IPA_UC_IRQ_3, IPA_UC_IN_Q_NOT_EMPTY_IRQ, IPA_UC_RX_CMD_Q_NOT_FULL_IRQ, - IPA_UC_TX_CMD_Q_NOT_FULL_IRQ, - IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ, IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ, IPA_RX_ERR_IRQ, IPA_DEAGGR_ERR_IRQ, @@ -808,8 +820,17 @@ enum ipa_irq_type { IPA_PROC_ERR_IRQ, IPA_TX_SUSPEND_IRQ, IPA_TX_HOLB_DROP_IRQ, - IPA_BAM_IDLE_IRQ, - IPA_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ, + IPA_BAM_GSI_IDLE_IRQ, + IPA_PIPE_YELLOW_MARKER_BELOW_IRQ, + IPA_PIPE_RED_MARKER_BELOW_IRQ, + IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ, + IPA_PIPE_RED_MARKER_ABOVE_IRQ, + IPA_UCP_IRQ, + IPA_DCMP_IRQ, + IPA_GSI_EE_IRQ, + IPA_GSI_IPA_IF_TLV_RCVD_IRQ, + IPA_GSI_UC_IRQ, + IPA_TLV_LEN_MIN_DSM_IRQ, IPA_IRQ_MAX }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f3765155fa4d9a61266848fc482d467d8693c897..1d793d86d55fa1ba444fd64cc417cdd9071ecaf8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -857,7 +857,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_1a4[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; - u8 eswitch_flow_table[0x1]; + u8 eswitch_manager[0x1]; u8 early_vf_enable[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0cf555098c8234447b5a9cbaa147472ba924a517..f4cc9c95664902dfd34e8766f5a334f594c6a99d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -705,6 +705,8 @@ struct mmc_host { void *cmdq_private; struct mmc_request *err_mrq; + bool 
inlinecrypt_support; /* Inline encryption support */ + atomic_t rpmb_req_pending; struct mutex rpmb_req_mutex; unsigned long private[0] ____cacheline_aligned; diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index cdd66a5fbd5e00905ff5e6c2623e6efaf12bf225..0a7abe8a407ff223bfdbd18890a0b20bac9f4aee 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -35,6 +35,7 @@ #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf diff --git a/include/linux/msm_ep_pcie.h b/include/linux/msm_ep_pcie.h new file mode 100644 index 0000000000000000000000000000000000000000..a1d2a17820e0f8e9deb3ee66d964366f7b90cf2d --- /dev/null +++ b/include/linux/msm_ep_pcie.h @@ -0,0 +1,290 @@ +/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MSM_EP_PCIE_H +#define __MSM_EP_PCIE_H + +#include + +enum ep_pcie_link_status { + EP_PCIE_LINK_DISABLED, + EP_PCIE_LINK_UP, + EP_PCIE_LINK_ENABLED, +}; + +enum ep_pcie_event { + EP_PCIE_EVENT_INVALID = 0, + EP_PCIE_EVENT_PM_D0 = 0x1, + EP_PCIE_EVENT_PM_D3_HOT = 0x2, + EP_PCIE_EVENT_PM_D3_COLD = 0x4, + EP_PCIE_EVENT_PM_RST_DEAST = 0x8, + EP_PCIE_EVENT_LINKDOWN = 0x10, + EP_PCIE_EVENT_LINKUP = 0x20, + EP_PCIE_EVENT_MHI_A7 = 0x40, + EP_PCIE_EVENT_MMIO_WRITE = 0x80, +}; + +enum ep_pcie_irq_event { + EP_PCIE_INT_EVT_LINK_DOWN = 1, + EP_PCIE_INT_EVT_BME, + EP_PCIE_INT_EVT_PM_TURNOFF, + EP_PCIE_INT_EVT_DEBUG, + EP_PCIE_INT_EVT_LTR, + EP_PCIE_INT_EVT_MHI_Q6, + EP_PCIE_INT_EVT_MHI_A7, + EP_PCIE_INT_EVT_DSTATE_CHANGE, + EP_PCIE_INT_EVT_L1SUB_TIMEOUT, + EP_PCIE_INT_EVT_MMIO_WRITE, + EP_PCIE_INT_EVT_CFG_WRITE, + EP_PCIE_INT_EVT_BRIDGE_FLUSH_N, + EP_PCIE_INT_EVT_LINK_UP, + EP_PCIE_INT_EVT_MAX = 13, +}; + +enum ep_pcie_trigger { + EP_PCIE_TRIGGER_CALLBACK, + EP_PCIE_TRIGGER_COMPLETION, +}; + +enum ep_pcie_options { + EP_PCIE_OPT_NULL = 0, + EP_PCIE_OPT_AST_WAKE = 0x1, + EP_PCIE_OPT_POWER_ON = 0x2, + EP_PCIE_OPT_ENUM = 0x4, + EP_PCIE_OPT_ENUM_ASYNC = 0x8, + EP_PCIE_OPT_ALL = 0xFFFFFFFF, +}; + +struct ep_pcie_notify { + enum ep_pcie_event event; + void *user; + void *data; + u32 options; +}; + +struct ep_pcie_register_event { + u32 events; + void *user; + enum ep_pcie_trigger mode; + void (*callback)(struct ep_pcie_notify *notify); + struct ep_pcie_notify notify; + struct completion *completion; + u32 options; +}; + +struct ep_pcie_iatu { + u32 start; + u32 end; + u32 tgt_lower; + u32 tgt_upper; +}; + +struct ep_pcie_msi_config { + u32 lower; + u32 upper; + u32 data; + u32 msg_num; +}; + +struct ep_pcie_db_config { + u8 base; + u8 end; + u32 tgt_addr; +}; + +struct ep_pcie_hw { + struct list_head node; + u32 device_id; + void **private_data; + int (*register_event)(struct ep_pcie_register_event *reg); + int (*deregister_event)(void); + enum ep_pcie_link_status 
(*get_linkstatus)(void); + int (*config_outbound_iatu)(struct ep_pcie_iatu entries[], + u32 num_entries); + int (*get_msi_config)(struct ep_pcie_msi_config *cfg); + int (*trigger_msi)(u32 idx); + int (*wakeup_host)(void); + int (*enable_endpoint)(enum ep_pcie_options opt); + int (*disable_endpoint)(void); + int (*config_db_routing)(struct ep_pcie_db_config chdb_cfg, + struct ep_pcie_db_config erdb_cfg); + int (*mask_irq_event)(enum ep_pcie_irq_event event, + bool enable); +}; + +/* + * ep_pcie_register_drv - register HW driver. + * @phandle: PCIe endpoint HW driver handle + * + * This function registers PCIe HW driver to PCIe endpoint service + * layer. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_register_drv(struct ep_pcie_hw *phandle); + +/* + * ep_pcie_deregister_drv - deregister HW driver. + * @phandle: PCIe endpoint HW driver handle + * + * This function deregisters PCIe HW driver from PCIe endpoint service + * layer. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_deregister_drv(struct ep_pcie_hw *phandle); + +/* + * ep_pcie_get_phandle - get PCIe endpoint HW driver handle. + * @id: PCIe endpoint device ID + * + * This function gets the PCIe HW driver handle from PCIe endpoint service + * layer. + * + * Return: PCIe endpoint HW driver handle + */ +struct ep_pcie_hw *ep_pcie_get_phandle(u32 id); + +/* + * ep_pcie_register_event - register event with PCIe driver. + * @phandle: PCIe endpoint HW driver handle + * @reg: event structure + * + * This function gives PCIe client driver an option to register + * event with PCIe driver. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_register_event(struct ep_pcie_hw *phandle, + struct ep_pcie_register_event *reg); + +/* + * ep_pcie_deregister_event - deregister event with PCIe driver. + * @phandle: PCIe endpoint HW driver handle + * + * This function gives PCIe client driver an option to deregister + * existing event with PCIe driver.
+ * + * Return: 0 on success, negative value on error + */ +int ep_pcie_deregister_event(struct ep_pcie_hw *phandle); + +/* + * ep_pcie_get_linkstatus - indicate the status of PCIe link. + * @phandle: PCIe endpoint HW driver handle + * + * This function tells PCIe client about the status of PCIe link. + * + * Return: status of PCIe link + */ +enum ep_pcie_link_status ep_pcie_get_linkstatus(struct ep_pcie_hw *phandle); + +/* + * ep_pcie_config_outbound_iatu - configure outbound iATU. + * @entries: iatu entries + * @num_entries: number of iatu entries + * + * This function configures the outbound iATU for PCIe + * client's access to the regions in the host memory which + * are specified by the SW on host side. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_config_outbound_iatu(struct ep_pcie_hw *phandle, + struct ep_pcie_iatu entries[], + u32 num_entries); + +/* + * ep_pcie_get_msi_config - get MSI config info. + * @phandle: PCIe endpoint HW driver handle + * @cfg: pointer to MSI config + * + * This function returns MSI config info. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_get_msi_config(struct ep_pcie_hw *phandle, + struct ep_pcie_msi_config *cfg); + +/* + * ep_pcie_trigger_msi - trigger an MSI. + * @phandle: PCIe endpoint HW driver handle + * @idx: MSI index number + * + * This function allows PCIe client to trigger an MSI + * on host side. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx); + +/* + * ep_pcie_wakeup_host - wake up the host. + * @phandle: PCIe endpoint HW driver handle + * + * This function asserts WAKE GPIO to wake up the host. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle); + +/* + * ep_pcie_enable_endpoint - enable PCIe endpoint. 
+ * @phandle: PCIe endpoint HW driver handle + * @opt: endpoint enable options + * + * This function is to enable the PCIe endpoint device. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_enable_endpoint(struct ep_pcie_hw *phandle, + enum ep_pcie_options opt); + +/* + * ep_pcie_disable_endpoint - disable PCIe endpoint. + * @phandle: PCIe endpoint HW driver handle + * + * This function is to disable the PCIe endpoint device. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_disable_endpoint(struct ep_pcie_hw *phandle); + +/* + * ep_pcie_config_db_routing - Configure routing of doorbells to another block. + * @phandle: PCIe endpoint HW driver handle + * @chdb_cfg: channel doorbell config + * @erdb_cfg: event ring doorbell config + * + * This function allows PCIe core to route the doorbells intended + * for another entity via a target address. + * + * Return: 0 on success, negative value on error + */ +int ep_pcie_config_db_routing(struct ep_pcie_hw *phandle, + struct ep_pcie_db_config chdb_cfg, + struct ep_pcie_db_config erdb_cfg); + +/* + * ep_pcie_mask_irq_event - enable and disable IRQ event. + * @phandle: PCIe endpoint HW driver handle + * @event: IRQ event + * @enable: true to enable that IRQ event and false to disable + * + * This function is to enable and disable IRQ event. 
+ * + * Return: 0 on success, negative value on error + */ +int ep_pcie_mask_irq_event(struct ep_pcie_hw *phandle, + enum ep_pcie_irq_event event, + bool enable); +#endif diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h index 67959d2e5e9418b4d248b40637b00acf9cc170ab..fa57290fd530cc24bd50d1fc7d5d7550e59f8d65 100644 --- a/include/linux/msm_gsi.h +++ b/include/linux/msm_gsi.h @@ -659,15 +659,16 @@ struct __packed gsi_wdi_channel_scratch { uint32_t update_ri_moderation_threshold:5; uint32_t update_ri_moderation_counter:6; uint32_t wdi_rx_tre_proc_in_progress:1; + uint32_t resv1:4; uint32_t wdi_rx_vdev_id:8; uint32_t wdi_rx_fw_desc:8; uint32_t endp_metadatareg_offset:16; uint32_t qmap_id:16; uint32_t wdi_rx_pkt_length:16; - uint32_t resv1:2; + uint32_t resv2:2; uint32_t pkt_comp_count:11; uint32_t stop_in_progress_stm:3; - uint32_t resv2:16; + uint32_t resv3:16; uint32_t wdi_rx_qmap_id_internal:16; }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 933259a40671b2a4b873e5f271aeb4d806c6b219..49eb529f546829a4bd06d9d0abcdc63e9fede2ad 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2665,11 +2665,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, if (PTR_ERR(pp) != -EINPROGRESS) NAPI_GRO_CB(skb)->flush |= flush; } +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff **pp, + int flush, + struct gro_remcsum *grc) +{ + if (PTR_ERR(pp) != -EINPROGRESS) { + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; + } +} #else static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) { NAPI_GRO_CB(skb)->flush |= flush; } +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff **pp, + int flush, + struct gro_remcsum *grc) +{ + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; +} #endif static 
inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index bfb3531fd88a4f7811e6ef9fffbaff672dfa6c53..7ad8ddf9ca8a412cd69096c5780a510470b06778 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) static inline u32 ip_set_timeout_get(const unsigned long *timeout) { - return *timeout == IPSET_ELEM_PERMANENT ? 0 : - jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + u32 t; + + if (*timeout == IPSET_ELEM_PERMANENT) + return 0; + + t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + /* Zero value in userspace means no timeout */ + return t == 0 ? 1 : t; } #endif /* __KERNEL__ */ diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 56a81d8112fb2928b7f03b19ee45b550f2bddb3f..6c5dc5d67720d4729ed58450fab98d82dc799386 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -80,6 +80,7 @@ struct regmap; * These modes can be OR'ed together to make up a mask of valid register modes. 
*/ +#define REGULATOR_MODE_INVALID 0x0 #define REGULATOR_MODE_FAST 0x1 #define REGULATOR_MODE_NORMAL 0x2 #define REGULATOR_MODE_IDLE 0x4 diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 289e4d54e3e05e37a620e44199681c8114259bed..5caa062a02b2736b32b90e50783579ff0c6df085 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -160,6 +160,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer); void ring_buffer_record_off(struct ring_buffer *buffer); void ring_buffer_record_on(struct ring_buffer *buffer); int ring_buffer_record_is_on(struct ring_buffer *buffer); +int ring_buffer_record_is_set_on(struct ring_buffer *buffer); void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); diff --git a/include/linux/sched.h b/include/linux/sched.h index 1b69ef2ef6691ec442487067a1a7bcd04bca0af7..40406dffce4442706763c10b09d8d0753ca79e97 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1001,7 +1001,9 @@ struct task_struct { struct sysv_shm sysvshm; #endif #ifdef CONFIG_DETECT_HUNG_TASK + /* hung task detection */ unsigned long last_switch_count; + bool hang_detection_enabled; #endif /* Filesystem information: */ struct fs_struct *fs; @@ -1918,6 +1920,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) # define vcpu_is_preempted(cpu) false #endif +extern long msm_sched_setaffinity(pid_t pid, struct cpumask *new_mask); extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/include/linux/sched/core_ctl.h b/include/linux/sched/core_ctl.h index 98d7cb3e899bec93b6b62c56547bca103ee100f1..359ad874295af38d911ae3d3466dd0b05e81a7d4 100644 --- a/include/linux/sched/core_ctl.h +++ b/include/linux/sched/core_ctl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,14 +14,23 @@ #ifndef __CORE_CTL_H #define __CORE_CTL_H +struct core_ctl_notif_data { + unsigned int nr_big; + unsigned int coloc_load_pct; +}; + #ifdef CONFIG_SCHED_CORE_CTL void core_ctl_check(u64 wallclock); int core_ctl_set_boost(bool boost); +void core_ctl_notifier_register(struct notifier_block *n); +void core_ctl_notifier_unregister(struct notifier_block *n); #else static inline void core_ctl_check(u64 wallclock) {} static inline int core_ctl_set_boost(bool boost) { return 0; } +static inline void core_ctl_notifier_register(struct notifier_block *n) {} +static inline void core_ctl_notifier_unregister(struct notifier_block *n) {} #endif #endif diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 4a059b73eedf891bd013d18b32b832dd859c8b35..d149d9d8f608edac5da96a012a08d1888eba1882 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -11,6 +11,7 @@ extern int sysctl_hung_task_check_count; extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_timeout_secs; extern int sysctl_hung_task_warnings; +extern int sysctl_hung_task_selective_monitoring; extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -129,4 +130,9 @@ extern int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table, size_t *lenp, loff_t *ppos); #endif +#define LIB_PATH_LENGTH 512 +extern char sched_lib_name[LIB_PATH_LENGTH]; +extern unsigned int sched_lib_mask_check; +extern unsigned int sched_lib_mask_force; + #endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 677c0fcee80441043be25bbebb19bbf6b38b4f43..bb2d62b3a15691b8e48ecb72823503bc69305c6e 100644 --- 
a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); -extern long kernel_wait4(pid_t, int *, int, struct rusage *); +extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); extern void free_task(struct task_struct *tsk); diff --git a/include/linux/seemp_instrumentation.h b/include/linux/seemp_instrumentation.h index c24c347554fb1e4ac81832dc0dddf3ae27272445..5d0b0c71ce89ac2ff29988321915ada31b7ac873 100644 --- a/include/linux/seemp_instrumentation.h +++ b/include/linux/seemp_instrumentation.h @@ -70,7 +70,7 @@ static inline void seemp_logk_sendto(int fd, void __user *buff, size_t len, } static inline void seemp_logk_rtic(__u8 type, pid_t pid, __u8 asset_id[0x20], - __u8 asset_category, __u8 response) + __u8 asset_category, __u8 response, __u8 process_name[16]) { char *buf = NULL; void *blck = NULL; @@ -80,8 +80,8 @@ static inline void seemp_logk_rtic(__u8 type, pid_t pid, __u8 asset_id[0x20], return; SEEMP_LOGK_RECORD(SEEMP_API_kernel__rtic, - "app_pid=%d,rtic_type=%u,asset_id=%s,asset_category=%u,response=%u", - pid, type, asset_id, asset_category, response); + "app_pid=%d,rtic_type=%u,asset_id=%s,asset_category=%u,response=%u,process_name=%s", + pid, type, asset_id, asset_category, response, process_name); seemp_logk_kernel_end(blck); } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 9c7dfd1c3bec50cb68c22238c458ab37bdbfd0c4..c9dac4ae91e584316a588b15aa7ef042107df057 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -349,7 +349,8 @@ struct earlycon_device { }; struct earlycon_id { - char name[16]; + char name[15]; + char name_term; /* In case compiler didn't '\0' term name */ char 
compatible[128]; int (*setup)(struct earlycon_device *, const char *options); }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d22d14bda0fc1c62bd5eb0d70084badcb64c3fb9..66220cc6b45c05d50f4a8d7e745745f7cfc82690 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -626,6 +626,7 @@ typedef unsigned char *sk_buff_data_t; * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue + * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -724,7 +725,7 @@ struct sk_buff { peeked:1, head_frag:1, xmit_more:1, - __unused:1; /* one bit hole */ + pfmemalloc:1; /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() @@ -743,31 +744,30 @@ struct sk_buff { __u8 __pkt_type_offset[0]; __u8 pkt_type:3; - __u8 pfmemalloc:1; __u8 ignore_df:1; - __u8 nf_trace:1; __u8 ip_summed:2; __u8 ooo_okay:1; + __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; - __u8 no_fcs:1; /* Indicates the inner headers are valid in the skbuff. 
*/ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; + __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 csum_not_inet:1; - __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif __u8 ipvs_property:1; + __u8 inner_protocol_type:1; __u8 remcsum_offload:1; #ifdef CONFIG_NET_SWITCHDEV diff --git a/include/linux/string.h b/include/linux/string.h index cfd83eb2f926c74622f46ed931bb1c58277df49f..96115bf561b452112f411ecbcd61f5e016b00911 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -28,7 +28,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t); size_t strlcpy(char *, const char *, size_t); #endif #ifndef __HAVE_ARCH_STRSCPY -ssize_t __must_check strscpy(char *, const char *, size_t); +ssize_t strscpy(char *, const char *, size_t); #endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 34f053a150a969bf03805cd56ee7f5487e041642..cf2862bd134a400b99136aaf2ec1357bd79ba3d2 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -43,11 +43,7 @@ enum { #define THREAD_ALIGN THREAD_SIZE #endif -#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) -#else -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) -#endif +#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) /* * flag set/clear/test wrappers diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index 8642f030f05a6bc8f64e5848f987ef0024ae9c0b..0ad1073a881616446bb88b220bea7f287550b7a2 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -164,6 +164,7 @@ struct usb_phy { int (*notify_disconnect)(struct usb_phy *x, enum usb_device_speed speed); int (*link_training)(struct usb_phy *x, bool start); + int (*powerup)(struct usb_phy *x, bool start); /* * Charger detection method can be implemented if you need to @@ -406,6 
+407,24 @@ usb_phy_stop_link_training(struct usb_phy *x) return 0; } +static inline int +usb_phy_powerup(struct usb_phy *x) +{ + if (x && x->powerup) + return x->powerup(x, true); + else + return 0; +} + +static inline int +usb_phy_powerdown(struct usb_phy *x) +{ + if (x && x->powerup) + return x->powerup(x, false); + else + return 0; +} + static inline int usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed) { diff --git a/include/linux/verification.h b/include/linux/verification.h index a10549a6c7cdfa72d805d5509fc9c1286c36324b..5a088dab92e2cbbc93d645893491b3f763d3ab73 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -26,9 +26,13 @@ enum key_being_used_for { }; extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; -#ifdef CONFIG_SYSTEM_DATA_VERIFICATION - struct key; +struct public_key_signature; + +extern int verify_signature_one(const struct public_key_signature *sig, + struct key *trusted_keys, const char *keyid); + +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION extern int verify_pkcs7_signature(const void *data, size_t len, const void *raw_pkcs7, size_t pkcs7_len, diff --git a/include/net/cnss2.h b/include/net/cnss2.h index c5ccee4db262fb73eebe65b3138a61040b62ee49..e95ef8b54094d9eca2a342f9dc5cde1de186658e 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -148,6 +148,7 @@ extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver); extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver); extern void cnss_device_crashed(struct device *dev); extern int cnss_pci_link_down(struct device *dev); +extern int cnss_pci_is_device_down(struct device *dev); extern void cnss_schedule_recovery(struct device *dev, enum cnss_recovery_reason reason); extern int cnss_self_recovery(struct device *dev, diff --git a/include/net/cnss_prealloc.h b/include/net/cnss_prealloc.h index 36de97b9a4ea87355d3aae59ebeeb9c41e05cbea..5d0a612f41dea036df0fc2249de4f44f8b18e3d4 100644 --- 
a/include/net/cnss_prealloc.h +++ b/include/net/cnss_prealloc.h @@ -15,7 +15,7 @@ #define WCNSS_PRE_ALLOC_GET_THRESHOLD (4*1024) -extern void *wcnss_prealloc_get(unsigned int size); +extern void *wcnss_prealloc_get(size_t size); extern int wcnss_prealloc_put(void *ptr); extern int wcnss_pre_alloc_reset(void); void wcnss_prealloc_check_memory_leak(void); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index d9ed0372b205b14e45616abaf5d6d754e62a307f..b70bdc382b1fe3bf32ece1839a01781046b09879 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -796,7 +796,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, * to minimize possbility that any useful information to an * attacker is leaked. Only lower 20 bits are relevant. */ - rol32(hash, 16); + hash = rol32(hash, 16); flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; diff --git a/include/net/tcp.h b/include/net/tcp.h index 71082ba003d7360b7ec2b99c377134fb9ab7a5c8..12d992d978442eba3975ab838a61048f93e4ddb1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -373,6 +373,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@ -561,6 +562,7 @@ void tcp_send_fin(struct sock *sk); void tcp_send_active_reset(struct sock *sk, gfp_t priority); int tcp_send_synack(struct sock *); void tcp_push_one(struct sock *, unsigned int mss_now); +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); void tcp_send_ack(struct sock *sk); void tcp_send_delayed_ack(struct sock *sk); void tcp_send_loss_probe(struct sock *sk); @@ -858,6 +860,11 @@ struct tcp_skb_cb { * as TCP moves IP6CB into a different location in skb->cb[] */ static inline int tcp_v6_iif(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->header.h6.iif; +} + +static inline int 
tcp_v6_iif_l3_slave(const struct sk_buff *skb) { bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h index 127078c0cf21c6553de07efd1a1049a6ff35ccf5..a0c84fedce2abe8c270f489a07ea30648e47ec64 100644 --- a/include/soc/qcom/qmi_rmnet.h +++ b/include/soc/qcom/qmi_rmnet.h @@ -33,8 +33,9 @@ qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) #ifdef CONFIG_QCOM_QMI_DFC void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id); -void qmi_rmnet_qos_exit(struct net_device *dev); -void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb); +void qmi_rmnet_qos_exit(struct net_device *dev, void *qos); +void qmi_rmnet_burst_fc_check(struct net_device *dev, + int ip_type, u32 mark, unsigned int len); #else static inline void * qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) @@ -42,12 +43,13 @@ qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) return NULL; } -static inline void qmi_rmnet_qos_exit(struct net_device *dev) +static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos) { } static inline void -qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb) +qmi_rmnet_burst_fc_check(struct net_device *dev, + int ip_type, u32 mark, unsigned int len) { } #endif diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h index b2d82584a3afdd16922ceab4aefba71f2cf0ee3e..36cd472e3ae0046c0bf50be514bef1f473304667 100644 --- a/include/soc/qcom/sysmon.h +++ b/include/soc/qcom/sysmon.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,6 +23,7 @@ */ enum ssctl_ssr_event_enum_type { SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647, + SSCTL_SSR_EVENT_INVALID = -1, SSCTL_SSR_EVENT_BEFORE_POWERUP = 0, SSCTL_SSR_EVENT_AFTER_POWERUP = 1, SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2, diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index 44202ff897fd93a75e7bc7029d9733ffff2b20ec..f759e0918037baa4286328281cc9efcd416be61d 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -99,6 +99,8 @@ struct tegra_mc_soc { u8 client_id_mask; const struct tegra_smmu_soc *smmu; + + u32 intmask; }; struct tegra_mc { diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index 5017a8829270ac56f5990f4694b317a65898316b..a0773a5f87a58b81af780ec9db42e1602e35f1e4 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -8,7 +8,7 @@ #include #include -TRACE_EVENT(cma_alloc, +DECLARE_EVENT_CLASS(cma_alloc_class, TP_PROTO(unsigned long pfn, const struct page *page, unsigned int count, unsigned int align), @@ -61,6 +61,44 @@ TRACE_EVENT(cma_release, __entry->count) ); +TRACE_EVENT(cma_alloc_start, + + TP_PROTO(unsigned int count, unsigned int align), + + TP_ARGS(count, align), + + TP_STRUCT__entry( + __field(unsigned int, count) + __field(unsigned int, align) + ), + + TP_fast_assign( + __entry->count = count; + __entry->align = align; + ), + + TP_printk("count=%u align=%u", + __entry->count, + __entry->align) +); + +DEFINE_EVENT(cma_alloc_class, cma_alloc, + + TP_PROTO(unsigned long pfn, const struct page *page, + unsigned int count, unsigned int align), + + TP_ARGS(pfn, page, count, align) +); + +DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry, + + TP_PROTO(unsigned long pfn, const struct page *page, + unsigned int count, unsigned int align), + + TP_ARGS(pfn, page, count, align) +); + + #endif /* _TRACE_CMA_H */ /* This part 
must be outside protection */ diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index bcf4daccd6be8e564e8d8dd56c23cc76422d3bcc..028c0811cfc987161e4bb449789243b01a02e240 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -97,6 +97,27 @@ TRACE_EVENT(mm_numa_migrate_ratelimit, __entry->dst_nid, __entry->nr_pages) ); + +TRACE_EVENT(mm_migrate_pages_start, + + TP_PROTO(enum migrate_mode mode, int reason), + + TP_ARGS(mode, reason), + + TP_STRUCT__entry( + __field(enum migrate_mode, mode) + __field(int, reason) + ), + + TP_fast_assign( + __entry->mode = mode; + __entry->reason = reason; + ), + + TP_printk("mode=%s reason=%s", + __print_symbolic(__entry->mode, MIGRATE_MODE), + __print_symbolic(__entry->reason, MIGRATE_REASON)) +); #endif /* _TRACE_MIGRATE_H */ /* This part must be outside protection */ diff --git a/include/trace/events/net.h b/include/trace/events/net.h index f1a300c8ef8517775f9ad286ee1440f1cc3854f8..135141e93a6ea89dd4e591e3e6c76e8ebab462b3 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -124,13 +124,6 @@ DEFINE_EVENT(net_dev_template, net_dev_queue, TP_ARGS(skb) ); -DEFINE_EVENT(net_dev_template, netif_receive_skb, - - TP_PROTO(struct sk_buff *skb), - - TP_ARGS(skb) -); - DEFINE_EVENT(net_dev_template, netif_rx, TP_PROTO(struct sk_buff *skb), @@ -216,6 +209,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry, TP_ARGS(skb) ); +DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry, TP_PROTO(const struct sk_buff *skb), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index d1f6a726a4d2152628303907cf83da0090c0fca2..9c4762a0d48bc845ae1c4b5fed1bbb5d429d996b 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -249,6 +249,7 @@ TRACE_EVENT(sched_migrate_task, /* * 
Tracepoint for load balancing: */ +#ifdef CONFIG_SMP #if NR_CPUS > 32 #error "Unsupported NR_CPUS for lb tracepoint." #endif @@ -257,10 +258,10 @@ TRACE_EVENT(sched_load_balance, TP_PROTO(int cpu, enum cpu_idle_type idle, int balance, unsigned long group_mask, int busiest_nr_running, unsigned long imbalance, unsigned int env_flags, int ld_moved, - unsigned int balance_interval), + unsigned int balance_interval, int active_balance), TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running, - imbalance, env_flags, ld_moved, balance_interval), + imbalance, env_flags, ld_moved, balance_interval, active_balance), TP_STRUCT__entry( __field( int, cpu) @@ -272,6 +273,7 @@ TRACE_EVENT(sched_load_balance, __field( unsigned int, env_flags) __field( int, ld_moved) __field( unsigned int, balance_interval) + __field( int, active_balance) ), TP_fast_assign( @@ -284,18 +286,125 @@ TRACE_EVENT(sched_load_balance, __entry->env_flags = env_flags; __entry->ld_moved = ld_moved; __entry->balance_interval = balance_interval; + __entry->active_balance = active_balance; ), - TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d", + TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d active_balance=%d", __entry->cpu, __entry->idle == CPU_IDLE ? "idle" : (__entry->idle == CPU_NEWLY_IDLE ? 
"newly_idle" : "busy"), __entry->balance, __entry->group_mask, __entry->busiest_nr_running, __entry->imbalance, __entry->env_flags, __entry->ld_moved, - __entry->balance_interval) + __entry->balance_interval, __entry->active_balance) ); +TRACE_EVENT(sched_load_balance_nohz_kick, + + TP_PROTO(int cpu, int kick_cpu), + + TP_ARGS(cpu, kick_cpu), + + TP_STRUCT__entry( + __field(int, cpu ) + __field(unsigned int, cpu_nr ) + __field(unsigned long, misfit_task_load ) + __field(int, cpu_overutil ) + __field(int, kick_cpu ) + __field(unsigned long, nohz_flags ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->cpu_nr = cpu_rq(cpu)->nr_running; + __entry->misfit_task_load = cpu_rq(cpu)->misfit_task_load; + __entry->cpu_overutil = cpu_overutilized(cpu); + __entry->kick_cpu = kick_cpu; + __entry->nohz_flags = *nohz_flags(kick_cpu); + ), + + TP_printk("cpu=%d nr_run=%u misfit_task_load=%lu overutilized=%d kick_cpu=%d nohz_flags=0x%lx", + __entry->cpu, __entry->cpu_nr, __entry->misfit_task_load, __entry->cpu_overutil, + __entry->kick_cpu, __entry->nohz_flags) + +); + +TRACE_EVENT(sched_load_balance_sg_stats, + + TP_PROTO(unsigned long sg_cpus, int group_type, unsigned int idle_cpus, unsigned int sum_nr_running, unsigned long group_load, unsigned long group_capacity, unsigned long group_util, int group_no_capacity, unsigned long load_per_task, unsigned long misfit_load, unsigned long busiest), + + TP_ARGS(sg_cpus, group_type, idle_cpus, sum_nr_running, group_load, group_capacity, group_util, group_no_capacity, load_per_task, misfit_load, busiest), + + TP_STRUCT__entry( + __field(unsigned long, group_mask ) + __field(int, group_type ) + __field(unsigned int, group_idle_cpus ) + __field(unsigned int, sum_nr_running ) + __field(unsigned long, group_load ) + __field(unsigned long, group_capacity ) + __field(unsigned long, group_util ) + __field(int, group_no_capacity ) + __field(unsigned long, load_per_task ) + __field(unsigned long, misfit_task_load ) + __field(unsigned 
long, busiest ) + ), + + TP_fast_assign( + __entry->group_mask = sg_cpus; + __entry->group_type = group_type; + __entry->group_idle_cpus = idle_cpus; + __entry->sum_nr_running = sum_nr_running; + __entry->group_load = group_load; + __entry->group_capacity = group_capacity; + __entry->group_util = group_util; + __entry->group_no_capacity = group_no_capacity; + __entry->load_per_task = load_per_task; + __entry->misfit_task_load = misfit_load; + __entry->busiest = busiest; + ), + + TP_printk("sched_group=%#lx type=%d idle_cpus=%u sum_nr_run=%u group_load=%lu capacity=%lu util=%lu no_capacity=%d lpt=%lu misfit_tload=%lu busiest_group=%#lx", + __entry->group_mask, __entry->group_type, __entry->group_idle_cpus, __entry->sum_nr_running, __entry->group_load, __entry->group_capacity, __entry->group_util, __entry->group_no_capacity, __entry->load_per_task, __entry->misfit_task_load, __entry->busiest) +); + +TRACE_EVENT(sched_load_balance_stats, + + TP_PROTO(unsigned long busiest, int bgroup_type, unsigned long bavg_load, unsigned long bload_per_task, unsigned long local, int lgroup_type, unsigned long lavg_load, unsigned long lload_per_task, unsigned long sds_avg_load, unsigned long imbalance), + + TP_ARGS(busiest, bgroup_type, bavg_load, bload_per_task, local, lgroup_type, lavg_load, lload_per_task, sds_avg_load, imbalance), + + TP_STRUCT__entry( + __field(unsigned long, busiest ) + __field(int, bgp_type ) + __field(unsigned long, bavg_load ) + __field(unsigned long, blpt ) + __field(unsigned long, local ) + __field(int, lgp_type ) + __field(unsigned long, lavg_load ) + __field(unsigned long, llpt ) + __field(unsigned long, sds_avg ) + __field(unsigned long, imbalance ) + ), + + TP_fast_assign( + __entry->busiest = busiest; + __entry->bgp_type = bgroup_type; + __entry->bavg_load = bavg_load; + __entry->blpt = bload_per_task; + __entry->bgp_type = bgroup_type; + __entry->local = local; + __entry->lgp_type = lgroup_type; + __entry->lavg_load = lavg_load; + __entry->llpt = 
lload_per_task; + __entry->sds_avg = sds_avg_load; + __entry->imbalance = imbalance; + ), + + TP_printk("busiest_group=%#lx busiest_type=%d busiest_avg_load=%ld busiest_lpt=%ld local_group=%#lx local_type=%d local_avg_load=%ld local_lpt=%ld domain_avg_load=%ld imbalance=%ld", + __entry->busiest, __entry->bgp_type, __entry->bavg_load, __entry->blpt, __entry->local, __entry->lgp_type, __entry->lavg_load, __entry->llpt, __entry->sds_avg, __entry->imbalance) +); +#endif + DECLARE_EVENT_CLASS(sched_process_template, TP_PROTO(struct task_struct *p), @@ -1020,9 +1129,9 @@ TRACE_EVENT(core_ctl_update_nr_need, TRACE_EVENT(sched_tune_tasks_update, TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx, - int boost, int max_boost), + int boost, int max_boost, u64 group_ts), - TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost), + TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost, group_ts), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) @@ -1032,6 +1141,7 @@ TRACE_EVENT(sched_tune_tasks_update, __field( int, idx ) __field( int, boost ) __field( int, max_boost ) + __field( u64, group_ts ) ), TP_fast_assign( @@ -1042,13 +1152,15 @@ TRACE_EVENT(sched_tune_tasks_update, __entry->idx = idx; __entry->boost = boost; __entry->max_boost = max_boost; + __entry->group_ts = group_ts; ), TP_printk("pid=%d comm=%s " - "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d", + "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu", __entry->pid, __entry->comm, __entry->cpu, __entry->tasks, __entry->idx, - __entry->boost, __entry->max_boost) + __entry->boost, __entry->max_boost, + __entry->group_ts) ); /* diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h index 888c36b9d8b35dc84a1096d3ac3f30dd674f2fab..0dd4f22edf90e9a28ce51ca1cf7a5d8e1d889363 100644 --- a/include/trace/events/walt.h +++ b/include/trace/events/walt.h @@ -442,6 +442,40 @@ TRACE_EVENT(sched_set_boost, TP_printk("type %d", __entry->type) ); +TRACE_EVENT(sched_load_balance_skip_tasks, + + TP_PROTO(int 
scpu, int dcpu, int grp_type, int pid, unsigned long h_load, unsigned long task_util, unsigned long affinity), + + TP_ARGS(scpu, dcpu, grp_type, pid, h_load, task_util, affinity), + + TP_STRUCT__entry( + __field(int, scpu ) + __field(unsigned long, src_util_cum ) + __field(int, grp_type ) + __field(int, dcpu ) + __field(unsigned long, dst_util_cum ) + __field(int, pid ) + __field(unsigned long, affinity ) + __field(unsigned long, task_util ) + __field(unsigned long, h_load ) + ), + + TP_fast_assign( + __entry->scpu = scpu; + __entry->src_util_cum = cpu_rq(scpu)->cum_window_demand_scaled; + __entry->grp_type = grp_type; + __entry->dcpu = dcpu; + __entry->dst_util_cum = cpu_rq(dcpu)->cum_window_demand_scaled; + __entry->pid = pid; + __entry->affinity = affinity; + __entry->task_util = task_util; + __entry->h_load = h_load; + ), + + TP_printk("source_cpu=%d util_cum=%lu group_type=%d dest_cpu=%d util_cum=%lu pid=%d affinity=%#lx task_util=%lu task_h_load=%lu", + __entry->scpu, __entry->src_util_cum, __entry->grp_type, __entry->dcpu, __entry->dst_util_cum, __entry->pid, __entry->affinity, __entry->task_util, __entry->h_load) +); + DECLARE_EVENT_CLASS(sched_cpu_load, TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost), diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index bdf2c90e9f14d3a2d0bdb205fb864cf33a0577e5..68e85c9ef72bc86b8e70aee7d4a3d256fbd2d9b8 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -319,14 +319,18 @@ enum ipa_client_type { /* RESERVERD PROD = 82, */ IPA_CLIENT_ODL_DPL_CONS = 83, + IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD = 84, + IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS = 85, }; -#define IPA_CLIENT_MAX (IPA_CLIENT_ODL_DPL_CONS + 1) +#define IPA_CLIENT_MAX (IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS + 1) #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD #define IPA_CLIENT_Q6_UL_NLO_ACK_CONS IPA_CLIENT_Q6_UL_NLO_ACK_CONS #define IPA_CLIENT_Q6_QBAP_STATUS_CONS 
IPA_CLIENT_Q6_QBAP_STATUS_CONS #define IPA_CLIENT_MHI_DPL_CONS IPA_CLIENT_MHI_DPL_CONS +#define IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD +#define IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS #define IPA_CLIENT_IS_APPS_CONS(client) \ ((client) == IPA_CLIENT_APPS_LAN_CONS || \ @@ -358,7 +362,8 @@ enum ipa_client_type { (client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS || \ (client) == IPA_CLIENT_Q6_UL_NLO_DATA_CONS || \ (client) == IPA_CLIENT_Q6_UL_NLO_ACK_CONS || \ - (client) == IPA_CLIENT_Q6_QBAP_STATUS_CONS) + (client) == IPA_CLIENT_Q6_QBAP_STATUS_CONS || \ + (client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS) #define IPA_CLIENT_IS_Q6_PROD(client) \ ((client) == IPA_CLIENT_Q6_LAN_PROD || \ @@ -366,7 +371,8 @@ enum ipa_client_type { (client) == IPA_CLIENT_Q6_CMD_PROD || \ (client) == IPA_CLIENT_Q6_DECOMP_PROD || \ (client) == IPA_CLIENT_Q6_DECOMP2_PROD || \ - (client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD) + (client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD || \ + (client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD) #define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \ ((client) == IPA_CLIENT_Q6_LAN_CONS || \ @@ -375,7 +381,8 @@ enum ipa_client_type { (client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS || \ (client) == IPA_CLIENT_Q6_UL_NLO_DATA_CONS || \ (client) == IPA_CLIENT_Q6_UL_NLO_ACK_CONS || \ - (client) == IPA_CLIENT_Q6_QBAP_STATUS_CONS) + (client) == IPA_CLIENT_Q6_QBAP_STATUS_CONS || \ + (client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS) #define IPA_CLIENT_IS_Q6_ZIP_CONS(client) \ ((client) == IPA_CLIENT_Q6_DECOMP_CONS || \ @@ -385,7 +392,8 @@ enum ipa_client_type { ((client) == IPA_CLIENT_Q6_LAN_PROD || \ (client) == IPA_CLIENT_Q6_WAN_PROD || \ (client) == IPA_CLIENT_Q6_CMD_PROD || \ - (client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD) + (client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD || \ + (client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD) #define IPA_CLIENT_IS_Q6_ZIP_PROD(client) \ ((client) == IPA_CLIENT_Q6_DECOMP_PROD || \ diff --git a/include/uapi/linux/msm_kgsl.h 
b/include/uapi/linux/msm_kgsl.h index 96e844ab05d89407f8fcf4b04c06869e0d0970c4..03094d5161da8649bb37c64e4cf59f4f106a559a 100644 --- a/include/uapi/linux/msm_kgsl.h +++ b/include/uapi/linux/msm_kgsl.h @@ -332,6 +332,8 @@ enum kgsl_timestamp_type { #define KGSL_PROP_UBWC_MODE 0x1B #define KGSL_PROP_DEVICE_QTIMER 0x20 #define KGSL_PROP_L3_PWR_CONSTRAINT 0x22 +#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23 +#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24 struct kgsl_shadowprop { unsigned long gpuaddr; diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 85bdb5631399f4fc7c888b68a0882918452da2b9..2f226b145f7d86e975c9c2ca0c7296793db0ab09 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -941,6 +941,12 @@ enum v4l2_mpeg_vidc_video_vp9_level { V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41 = 8, V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5 = 9, V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51 = 10, +#define V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6 \ + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6 + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6 = 11, +#define V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61 \ + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61 + V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61 = 12, }; #define V4L2_CID_MPEG_VIDC_VIDEO_ADAPTIVE_B \ diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index fe37ad3ab1b04d5fb8d34a4f6ed3fd3f3bf2387d..f4f6d8423ae307ad8c5b4241834166cc11be58d3 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -1053,7 +1053,6 @@ struct v4l2_buffer { #define V4L2_BUF_FLAG_LAST 0x00100000 /* Vendor extensions */ #define V4L2_QCOM_BUF_FLAG_CODECCONFIG 0x00020000 -#define V4L2_QCOM_BUF_DATA_CORRUPT 0x00400000 #define V4L2_BUF_FLAG_DATA_CORRUPT 0x00400000 #define V4L2_QCOM_BUF_INPUT_UNSUPPORTED 0x01000000 #define V4L2_QCOM_BUF_FLAG_EOS 0x02000000 diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h index 509ec6a9e852e1ae20d5f0cad47c24932deb6525..b52c89381a2ef6535478e4ffca139322e0296b0e 100644 
--- a/include/uapi/media/msm_media_info.h +++ b/include/uapi/media/msm_media_info.h @@ -1451,6 +1451,46 @@ static inline unsigned int VENUS_BUFFER_SIZE( return size; } +static inline unsigned int VENUS_BUFFER_SIZE_USED( + int color_fmt, int width, int height, int interlace) +{ + unsigned int size = 0; + unsigned int y_stride, uv_stride, y_sclines, uv_sclines; + unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0; + unsigned int y_meta_stride = 0, y_meta_scanlines = 0; + unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0; + unsigned int y_meta_plane = 0, uv_meta_plane = 0; + + if (!width || !height) + goto invalid_input; + + if (!interlace && color_fmt == COLOR_FMT_NV12_UBWC) { + y_stride = VENUS_Y_STRIDE(color_fmt, width); + uv_stride = VENUS_UV_STRIDE(color_fmt, width); + y_sclines = VENUS_Y_SCANLINES(color_fmt, height); + y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = VENUS_UV_SCANLINES(color_fmt, height); + uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096); + y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = + VENUS_Y_META_SCANLINES(color_fmt, height); + y_meta_plane = MSM_MEDIA_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = + VENUS_UV_META_SCANLINES(color_fmt, height); + uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane); + size = MSM_MEDIA_ALIGN(size, 4096); + } else { + size = VENUS_BUFFER_SIZE(color_fmt, width, height); + } +invalid_input: + return size; +} + static inline unsigned int VENUS_VIEW2_OFFSET( int color_fmt, int width, int height) { diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 69c37ecbff7ee389cb31b7dc49be4021e9a6fd14..f3c4b46e39d8b081f67b0e0b6b8ddadc64b00652 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -139,6 +139,11 @@ #define 
SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1) #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2) +/* DAI clock gating */ +#define SND_SOC_TPLG_DAI_CLK_GATE_UNDEFINED 0 +#define SND_SOC_TPLG_DAI_CLK_GATE_GATED 1 +#define SND_SOC_TPLG_DAI_CLK_GATE_CONT 2 + /* DAI physical PCM data formats. * Add new formats to the end of the list. */ @@ -160,6 +165,18 @@ #define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2) #define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3) +/* DAI topology BCLK parameter + * For the backwards capability, by default codec is bclk master + */ +#define SND_SOC_TPLG_BCLK_CM 0 /* codec is bclk master */ +#define SND_SOC_TPLG_BCLK_CS 1 /* codec is bclk slave */ + +/* DAI topology FSYNC parameter + * For the backwards capability, by default codec is fsync master + */ +#define SND_SOC_TPLG_FSYNC_CM 0 /* codec is fsync master */ +#define SND_SOC_TPLG_FSYNC_CS 1 /* codec is fsync slave */ + /* * Block Header. * This header precedes all object and object arrays below. 
@@ -312,11 +329,11 @@ struct snd_soc_tplg_hw_config { __le32 size; /* in bytes of this structure */ __le32 id; /* unique ID - - used to match */ __le32 fmt; /* SND_SOC_DAI_FORMAT_ format value */ - __u8 clock_gated; /* 1 if clock can be gated to save power */ + __u8 clock_gated; /* SND_SOC_TPLG_DAI_CLK_GATE_ value */ __u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */ __u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */ - __u8 bclk_master; /* 1 for master of BCLK, 0 for slave */ - __u8 fsync_master; /* 1 for master of FSYNC, 0 for slave */ + __u8 bclk_master; /* SND_SOC_TPLG_BCLK_ value */ + __u8 fsync_master; /* SND_SOC_TPLG_FSYNC_ value */ __u8 mclk_direction; /* 0 for input, 1 for output */ __le16 reserved; /* for 32bit alignment */ __le32 mclk_rate; /* MCLK or SYSCLK freqency in Hz */ diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 0b0aa5854dac1ed41959f9383af95d4097ad4646..8dd4063647c2c7db83a612a3dc307044a83fae4e 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -407,7 +407,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) return -EINVAL; break; case AUDIT_EXE: - if (f->op != Audit_equal) + if (f->op != Audit_not_equal && f->op != Audit_equal) return -EINVAL; if (entry->rule.listnr != AUDIT_FILTER_EXIT) return -EINVAL; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index ecc23e25c9eb2b3aedf5db585905d7b044d6b276..76d789d6cea060a4e7d85901f22e5752764a915b 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -471,6 +471,8 @@ static int audit_filter_rules(struct task_struct *tsk, break; case AUDIT_EXE: result = audit_exe_compare(tsk, rule->exe); + if (f->op == Audit_not_equal) + result = !result; break; case AUDIT_UID: result = audit_uid_comparator(cred->uid, f->op, f->uid); @@ -1272,8 +1274,12 @@ static void show_special(struct audit_context *context, int *call_panic) break; case AUDIT_KERN_MODULE: audit_log_format(ab, "name="); - audit_log_untrustedstring(ab, 
context->module.name); - kfree(context->module.name); + if (context->module.name) { + audit_log_untrustedstring(ab, context->module.name); + kfree(context->module.name); + } else + audit_log_format(ab, "(null)"); + break; } audit_log_end(ab); @@ -2385,8 +2391,9 @@ void __audit_log_kern_module(char *name) { struct audit_context *context = current->audit_context; - context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL); - strcpy(context->module.name, name); + context->module.name = kstrdup(name, GFP_KERNEL); + if (!context->module.name) + audit_log_lost("out of memory in __audit_log_kern_module"); context->type = AUDIT_KERN_MODULE; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3ceb269c0ebd90225fc05436ced1013186091dd2..450e2cd31ed604a6c70e8b7356a1640d2029c38f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4110,7 +4110,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded - * and all maps are released in free_bpf_prog_info() + * and all maps are released in free_used_maps() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { @@ -4623,7 +4623,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) vfree(log_buf); if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release - * them now. Otherwise free_bpf_prog_info() will release them. + * them now. Otherwise free_used_maps() will release them. 
*/ release_maps(env); *prog = env->prog; diff --git a/kernel/compat.c b/kernel/compat.c index 7e83733d4c95c854c1a4acf2b8b7a96bf91c0d59..f3925fca93391b85ab53b524143b1a6ccc3d9610 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -334,7 +334,7 @@ COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid, if (retval) goto out; - retval = sched_setaffinity(pid, new_mask); + retval = msm_sched_setaffinity(pid, new_mask); out: free_cpumask_var(new_mask); return retval; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index e2764d767f186eb7965b514b7d9636659608dcf5..ca8ac2824f0b666d880154602f313db7e4112b42 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -44,23 +44,24 @@ void __delayacct_tsk_init(struct task_struct *tsk) { tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); if (tsk->delays) - spin_lock_init(&tsk->delays->lock); + raw_spin_lock_init(&tsk->delays->lock); } /* * Finish delay accounting for a statistic using its timestamps (@start), * accumalator (@total) and @count */ -static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count) +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, + u32 *count) { s64 ns = ktime_get_ns() - *start; unsigned long flags; if (ns > 0) { - spin_lock_irqsave(lock, flags); + raw_spin_lock_irqsave(lock, flags); *total += ns; (*count)++; - spin_unlock_irqrestore(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } } @@ -127,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ - spin_lock_irqsave(&tsk->delays->lock, flags); + raw_spin_lock_irqsave(&tsk->delays->lock, flags); tmp = d->blkio_delay_total + tsk->delays->blkio_delay; d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 
0 : tmp; tmp = d->swapin_delay_total + tsk->delays->swapin_delay; @@ -137,7 +138,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->blkio_count += tsk->delays->blkio_count; d->swapin_count += tsk->delays->swapin_count; d->freepages_count += tsk->delays->freepages_count; - spin_unlock_irqrestore(&tsk->delays->lock, flags); + raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); return 0; } @@ -147,10 +148,10 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk) __u64 ret; unsigned long flags; - spin_lock_irqsave(&tsk->delays->lock, flags); + raw_spin_lock_irqsave(&tsk->delays->lock, flags); ret = nsec_to_clock_t(tsk->delays->blkio_delay + tsk->delays->swapin_delay); - spin_unlock_irqrestore(&tsk->delays->lock, flags); + raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); return ret; } diff --git a/kernel/fork.c b/kernel/fork.c index 8e94f499277a55ac0f813262cc1fcf8eeeb803a7..88b065b2c2e97e1adaa3a865235d762994a8f329 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) if (!s) continue; -#ifdef CONFIG_DEBUG_KMEMLEAK /* Clear stale pointers from reused stack. */ memset(s->addr, 0, THREAD_SIZE); -#endif + tsk->stack_vm_area = s; return s->addr; } diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 751593ed7c0b0b9cc9bf74735fb9bd5d7e100be2..254f3c158b745d22198b30b8f6dc3f3785113613 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -20,12 +20,21 @@ #include #include +#include /* * The number of tasks checked: */ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; +/* + * Selective monitoring of hung tasks. + * + * if set to 1, khungtaskd skips monitoring tasks, which has + * task_struct->hang_detection_enabled value not set, else monitors all tasks. + */ +int sysctl_hung_task_selective_monitoring = 1; + /* * Limit number of tasks checked in a batch. 
* @@ -44,6 +53,7 @@ int __read_mostly sysctl_hung_task_warnings = 10; static int __read_mostly did_panic; static bool hung_task_show_lock; +static bool hung_task_call_panic; static struct task_struct *watchdog_task; @@ -127,10 +137,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) touch_nmi_watchdog(); if (sysctl_hung_task_panic) { - if (hung_task_show_lock) - debug_show_all_locks(); - trigger_all_cpu_backtrace(); - panic("hung_task: blocked tasks"); + hung_task_show_lock = true; + hung_task_call_panic = true; } } @@ -187,12 +195,19 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) } /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ if (t->state == TASK_UNINTERRUPTIBLE) - check_hung_task(t, timeout); + /* Check for selective monitoring */ + if (!sysctl_hung_task_selective_monitoring || + t->hang_detection_enabled) + check_hung_task(t, timeout); } unlock: rcu_read_unlock(); if (hung_task_show_lock) debug_show_all_locks(); + if (hung_task_call_panic) { + trigger_all_cpu_backtrace(); + panic("hung_task: blocked tasks"); + } } static long hung_timeout_jiffies(unsigned long last_checked, diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6674c7c8844b062763588a3e2b48644da8645159..89c841a0d51d0514f9fd82a0dd519450cd2837d7 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1033,6 +1033,13 @@ static int irq_setup_forced_threading(struct irqaction *new) if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) return 0; + /* + * No further action required for interrupts which are requested as + * threaded interrupts already + */ + if (new->handler == irq_default_primary_handler) + return 0; + new->flags |= IRQF_ONESHOT; /* @@ -1040,7 +1047,7 @@ static int irq_setup_forced_threading(struct irqaction *new) * thread handler. We force thread them as well by creating a * secondary action. 
*/ - if (new->handler != irq_default_primary_handler && new->thread_fn) { + if (new->handler && new->thread_fn) { /* Allocate the secondary action */ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!new->secondary) diff --git a/kernel/kcov.c b/kernel/kcov.c index b11ef6e51f7e762c3cd20ae06637b9c44ad1aa89..f1e060b04ef626188361c924979803f7e937b627 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -108,7 +108,8 @@ static void kcov_put(struct kcov *kcov) void kcov_task_init(struct task_struct *t) { - t->kcov_mode = KCOV_MODE_DISABLED; + WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED); + barrier(); t->kcov_size = 0; t->kcov_area = NULL; t->kcov = NULL; diff --git a/kernel/kthread.c b/kernel/kthread.c index df461383a2a79333f89b781718c4a2fef2fc50a9..6027968af92d37c17b62cbb1970fdc4feb6ce630 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -318,8 +318,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), task = create->result; if (!IS_ERR(task)) { static const struct sched_param param = { .sched_priority = 0 }; + char name[TASK_COMM_LEN]; - vsnprintf(task->comm, sizeof(task->comm), namefmt, args); + /* + * task is already visible to other tasks, so updating + * COMM must be protected. + */ + vsnprintf(name, sizeof(name), namefmt, args); + set_task_comm(task, name); /* * root may have changed our (kthreadd's) priority or CPU mask. * The kernel thread should not inherit these properties. 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index a57b0c869d7eca9bc2006795696c477266d5ee43..fb096c288bb983b08f19a6dcfad9ba78f0c7baa3 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -62,7 +62,7 @@ static const struct platform_s2idle_ops *s2idle_ops; static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head); enum s2idle_states __read_mostly s2idle_state; -static DEFINE_SPINLOCK(s2idle_lock); +static DEFINE_RAW_SPINLOCK(s2idle_lock); void s2idle_set_ops(const struct platform_s2idle_ops *ops) { @@ -80,12 +80,12 @@ static void s2idle_enter(void) { trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true); - spin_lock_irq(&s2idle_lock); + raw_spin_lock_irq(&s2idle_lock); if (pm_wakeup_pending()) goto out; s2idle_state = S2IDLE_STATE_ENTER; - spin_unlock_irq(&s2idle_lock); + raw_spin_unlock_irq(&s2idle_lock); get_online_cpus(); cpuidle_resume(); @@ -99,11 +99,11 @@ static void s2idle_enter(void) cpuidle_pause(); put_online_cpus(); - spin_lock_irq(&s2idle_lock); + raw_spin_lock_irq(&s2idle_lock); out: s2idle_state = S2IDLE_STATE_NONE; - spin_unlock_irq(&s2idle_lock); + raw_spin_unlock_irq(&s2idle_lock); trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false); } @@ -158,12 +158,12 @@ void s2idle_wake(void) { unsigned long flags; - spin_lock_irqsave(&s2idle_lock, flags); + raw_spin_lock_irqsave(&s2idle_lock, flags); if (s2idle_state > S2IDLE_STATE_NONE) { s2idle_state = S2IDLE_STATE_WAKE; wake_up(&s2idle_wait_head); } - spin_unlock_irqrestore(&s2idle_lock, flags); + raw_spin_unlock_irqrestore(&s2idle_lock, flags); } EXPORT_SYMBOL_GPL(s2idle_wake); diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index d989cc2381988e4574b76194bd7bf9e401ef4403..64825b2df3a5fcbf0efab285c931d06527d19da6 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -284,7 +284,7 @@ void printk_safe_flush_on_panic(void) * Make sure that we could access the main ring buffer. 
* Do not risk a double release when more CPUs are up. */ - if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) { + if (raw_spin_is_locked(&logbuf_lock)) { if (num_online_cpus() > 1) return; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f46cf644af3c49cb17a9a5b0b1e0e9186e661724..1e4b7d0d53108a01c36897e75c40544d036ebff7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -785,7 +786,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) p->sched_class->dequeue_task(rq, p, flags); #ifdef CONFIG_SCHED_WALT if (p == rq->ed_task) - early_detection_notify(rq, ktime_get_ns()); + early_detection_notify(rq, sched_ktime_clock()); #endif trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]); } @@ -1447,7 +1448,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * yield - it could be a while. */ if (unlikely(queued)) { - ktime_t to = NSEC_PER_SEC / HZ; + ktime_t to = NSEC_PER_MSEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); schedule_hrtimeout(&to, HRTIMER_MODE_REL); @@ -2041,9 +2042,10 @@ static inline void walt_try_to_wake_up(struct task_struct *p) rq_lock_irqsave(rq, &rf); old_load = task_load(p); - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); + note_task_waking(p, wallclock); rq_unlock_irqrestore(rq, &rf); rcu_read_lock(); @@ -2081,9 +2083,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags, { unsigned long flags; int cpu, success = 0; -#ifdef CONFIG_SMP - u64 wallclock; -#endif /* * If we are going to wake up a thread waiting for CONDITION we @@ -2175,8 +2174,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags, set_task_cpu(p, cpu); } - wallclock = ktime_get_ns(); - note_task_waking(p, wallclock); #else /* CONFIG_SMP */ if (p->in_iowait) { @@ 
-2240,7 +2237,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) trace_sched_waking(p); if (!task_on_rq_queued(p)) { - u64 wallclock = ktime_get_ns(); + u64 wallclock = sched_ktime_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); @@ -3163,7 +3160,7 @@ void scheduler_tick(void) old_load = task_load(curr); set_window_start(rq); - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); update_rq_clock(rq); curr->sched_class->task_tick(rq, curr, 0); @@ -3547,7 +3544,7 @@ static void __sched notrace __schedule(bool preempt) clear_tsk_need_resched(prev); clear_preempt_need_resched(); - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); if (likely(prev != next)) { if (!prev->on_rq) prev->last_sleep_ts = wallclock; @@ -4910,6 +4907,71 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) return retval; } +char sched_lib_name[LIB_PATH_LENGTH]; +unsigned int sched_lib_mask_check; +unsigned int sched_lib_mask_force; +static inline bool is_sched_lib_based_app(pid_t pid) +{ + const char *name = NULL; + struct vm_area_struct *vma; + char path_buf[LIB_PATH_LENGTH]; + bool found = false; + struct task_struct *p; + + if (strnlen(sched_lib_name, LIB_PATH_LENGTH) == 0) + return false; + + rcu_read_lock(); + + p = find_process_by_pid(pid); + if (!p) { + rcu_read_unlock(); + return false; + } + + /* Prevent p going away */ + get_task_struct(p); + rcu_read_unlock(); + + if (!p->mm) + goto put_task_struct; + + down_read(&p->mm->mmap_sem); + for (vma = p->mm->mmap; vma ; vma = vma->vm_next) { + if (vma->vm_file && vma->vm_flags & VM_EXEC) { + name = d_path(&vma->vm_file->f_path, + path_buf, LIB_PATH_LENGTH); + if (IS_ERR(name)) + goto release_sem; + + if (strnstr(name, sched_lib_name, + strnlen(name, LIB_PATH_LENGTH))) { + found = true; + break; + } + } + } + +release_sem: + 
up_read(&p->mm->mmap_sem); +put_task_struct: + put_task_struct(p); + return found; +} + +long msm_sched_setaffinity(pid_t pid, struct cpumask *new_mask) +{ + if (sched_lib_mask_check != 0 && sched_lib_mask_force != 0 && + (cpumask_bits(new_mask)[0] == sched_lib_mask_check) && + is_sched_lib_based_app(pid)) { + + cpumask_t forced_mask = { {sched_lib_mask_force} }; + + cpumask_copy(new_mask, &forced_mask); + } + return sched_setaffinity(pid, new_mask); +} + static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, struct cpumask *new_mask) { @@ -4940,7 +5002,7 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); if (retval == 0) - retval = sched_setaffinity(pid, new_mask); + retval = msm_sched_setaffinity(pid, new_mask); free_cpumask_var(new_mask); return retval; } @@ -5924,6 +5986,22 @@ int sched_isolate_cpu(int cpu) if (++cpu_isolation_vote[cpu] > 1) goto out; + /* + * There is a race between watchdog being enabled by hotplug and + * core isolation disabling the watchdog. When a CPU is hotplugged in + * and the hotplug lock has been released the watchdog thread might + * not have run yet to enable the watchdog. + * We have to wait for the watchdog to be enabled before proceeding. 
+ */ + if (!watchdog_configured(cpu)) { + msleep(20); + if (!watchdog_configured(cpu)) { + --cpu_isolation_vote[cpu]; + ret_code = -EBUSY; + goto out; + } + } + set_cpu_isolated(cpu, true); cpumask_clear_cpu(cpu, &avail_cpus); @@ -7426,7 +7504,7 @@ void sched_exit(struct task_struct *p) rq = task_rq_lock(p, &rf); /* rq->curr == p */ - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); dequeue_task(rq, p, 0); /* diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c index e7787e631def4ec99cce1d5bad33a8df9d0381ab..40734c356c962f09eff99d0e16dfa66159687fec 100644 --- a/kernel/sched/core_ctl.c +++ b/kernel/sched/core_ctl.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "sched.h" @@ -81,6 +82,9 @@ static void apply_need(struct cluster_data *state); static void wake_up_core_ctl_thread(struct cluster_data *state); static bool initialized; +ATOMIC_NOTIFIER_HEAD(core_ctl_notifier); +static unsigned int last_nr_big; + static unsigned int get_active_cpu_count(const struct cluster_data *cluster); static void cpuset_next(struct cluster_data *cluster); @@ -660,6 +664,7 @@ static void update_running_avg(void) } spin_unlock_irqrestore(&state_lock, flags); + last_nr_big = big_avg; walt_rotation_checkpoint(big_avg); } @@ -851,6 +856,38 @@ int core_ctl_set_boost(bool boost) } EXPORT_SYMBOL(core_ctl_set_boost); +void core_ctl_notifier_register(struct notifier_block *n) +{ + atomic_notifier_chain_register(&core_ctl_notifier, n); +} + +void core_ctl_notifier_unregister(struct notifier_block *n) +{ + atomic_notifier_chain_unregister(&core_ctl_notifier, n); +} + +static void core_ctl_call_notifier(void) +{ + struct core_ctl_notif_data ndata; + struct notifier_block *nb; + + /* + * Don't bother querying the stats when the notifier + * chain is empty. 
+ */ + rcu_read_lock(); + nb = rcu_dereference_raw(core_ctl_notifier.head); + rcu_read_unlock(); + + if (!nb) + return; + + ndata.nr_big = last_nr_big; + ndata.coloc_load_pct = walt_get_default_coloc_group_load(); + + atomic_notifier_call_chain(&core_ctl_notifier, 0, &ndata); +} + void core_ctl_check(u64 window_start) { int cpu; @@ -886,6 +923,8 @@ void core_ctl_check(u64 window_start) if (eval_need(cluster)) wake_up_core_ctl_thread(cluster); } + + core_ctl_call_notifier(); } static void move_cpu_lru(struct cpu_data *cpu_data) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 98739c5f55c478458a242c95408c78525721811b..1e23ffb7a0995a157fc1cadab568f1c4a91758c0 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -540,7 +540,7 @@ static void sugov_work(struct kthread_work *work) mutex_lock(&sg_policy->work_lock); raw_spin_lock_irqsave(&sg_policy->update_lock, flags); sugov_track_cycles(sg_policy, sg_policy->policy->cur, - ktime_get_ns()); + sched_ktime_clock()); raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq, CPUFREQ_RELATION_L); @@ -993,7 +993,7 @@ static void sugov_limits(struct cpufreq_policy *policy) mutex_lock(&sg_policy->work_lock); raw_spin_lock_irqsave(&sg_policy->update_lock, flags); sugov_track_cycles(sg_policy, sg_policy->policy->cur, - ktime_get_ns()); + sched_ktime_clock()); raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); cpufreq_policy_apply_limits(policy); mutex_unlock(&sg_policy->work_lock); diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c index 5e7247c8180e8c06a7c2c5d667361fa5effd0163..b4764309dac4a0e6257ab834372a59428c183d8c 100644 --- a/kernel/sched/energy.c +++ b/kernel/sched/energy.c @@ -52,6 +52,20 @@ static void free_resources(void) } static bool sge_ready; +void check_max_cap_vs_cpu_scale(int cpu, struct sched_group_energy *sge) +{ + unsigned long max_cap, cpu_scale; + + 
max_cap = sge->cap_states[sge->nr_cap_states - 1].cap; + cpu_scale = topology_get_cpu_scale(NULL, cpu); + + if (max_cap == cpu_scale) + return; + + pr_debug("CPU%d max energy model capacity=%ld != cpu_scale=%ld\n", cpu, + max_cap, cpu_scale); +} + void init_sched_energy_costs(void) { struct device_node *cn, *cp; @@ -137,6 +151,8 @@ void init_sched_energy_costs(void) sge_array[cpu][sd_level] = sge; } + + check_max_cap_vs_cpu_scale(cpu, sge_array[cpu][SD_LEVEL0]); } sge_ready = true; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a0ac1bc27fe48c18e37bf2ac9f04f5b45945bad4..1ff95e922ac281493da7c8af47f0eb925ba6e7d8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5208,10 +5208,12 @@ static inline void update_overutilized_status(struct rq *rq) rcu_read_lock(); sd = rcu_dereference(rq->sd); - if (sd && (sd->flags & SD_LOAD_BALANCE)) - set_sd_overutilized(sd); - else if (sd && sd->parent) - set_sd_overutilized(sd->parent); + if (cpu_overutilized(rq->cpu)) { + if (sd && (sd->flags & SD_LOAD_BALANCE)) + set_sd_overutilized(sd); + else if (sd && sd->parent) + set_sd_overutilized(sd->parent); + } rcu_read_unlock(); } #else @@ -7578,6 +7580,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (capacity_orig < capacity_orig_of(cpu)) continue; + /* + * Favor CPUs with smaller capacity for non latency + * sensitive tasks. + */ + if (capacity_orig > target_capacity) + continue; + /* * Case B) Non latency sensitive tasks on IDLE CPUs. 
* @@ -7985,7 +7994,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, { int use_fbt = sched_feat(FIND_BEST_TARGET); int cpu_iter, eas_cpu_idx = EAS_CPU_NXT; - int energy_cpu = prev_cpu, delta = 0; + int delta = 0; int target_cpu = -1; struct energy_env *eenv; struct cpumask *rtg_target = find_rtg_target(p); @@ -8005,7 +8014,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, if (sysctl_sched_sync_hint_enable && sync && bias_to_waker_cpu(p, cpu, rtg_target)) { - energy_cpu = cpu; + target_cpu = cpu; fbt_env.fastpath = SYNC_WAKEUP; goto out; } @@ -8070,10 +8079,8 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, boosted, prefer_idle, &fbt_env); /* Immediately return a found idle CPU for a prefer_idle task */ - if (prefer_idle && target_cpu >= 0 && idle_cpu(target_cpu)) { - energy_cpu = target_cpu; + if (prefer_idle && target_cpu >= 0 && idle_cpu(target_cpu)) goto out; - } /* Place target into NEXT slot */ eenv->cpu[EAS_CPU_NXT].cpu_id = target_cpu; @@ -8109,19 +8116,22 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, cpumask_test_cpu(next_cpu, rtg_target))) || __cpu_overutilized(prev_cpu, delta) || !task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu))) { - energy_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id; + target_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id; goto out; } /* find most energy-efficient CPU */ - energy_cpu = select_energy_cpu_idx(eenv) < 0 ? -1 : + target_cpu = select_energy_cpu_idx(eenv) < 0 ? prev_cpu : eenv->cpu[eenv->next_idx].cpu_id; out: - trace_sched_task_util(p, next_cpu, backup_cpu, energy_cpu, sync, + if (target_cpu < 0) + target_cpu = prev_cpu; + + trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync, need_idle, fbt_env.fastpath, placement_boost, rtg_target ? 
cpumask_first(rtg_target) : -1, start_t); - return energy_cpu; + return target_cpu; } static inline bool nohz_kick_needed(struct rq *rq, bool only_update); @@ -9024,6 +9034,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) return 0; } + /* Record that we found atleast one task that could run on dst_cpu */ + env->flags &= ~LBF_ALL_PINNED; + if (energy_aware() && !sd_overutilized(env->sd) && env->idle == CPU_NEWLY_IDLE) { long util_cum_dst, util_cum_src; @@ -9037,9 +9050,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) return 0; } - /* Record that we found atleast one task that could run on dst_cpu */ - env->flags &= ~LBF_ALL_PINNED; - #ifdef CONFIG_SCHED_WALT if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p)) @@ -9142,7 +9152,7 @@ static int detach_tasks(struct lb_env *env) { struct list_head *tasks = &env->src_rq->cfs_tasks; struct task_struct *p; - unsigned long load; + unsigned long load = 0; int detached = 0; int orig_loop = env->loop; @@ -9216,6 +9226,9 @@ static int detach_tasks(struct lb_env *env) continue; next: + trace_sched_load_balance_skip_tasks(env->src_cpu, env->dst_cpu, + env->src_grp_type, p->pid, load, task_util(p), + cpumask_bits(&p->cpus_allowed)[0]); list_move_tail(&p->se.group_node, tasks); } @@ -9554,13 +9567,12 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) int max_cap_cpu; unsigned long flags; - capacity = min(capacity, thermal_cap(cpu)); - - cpu_rq(cpu)->cpu_capacity_orig = capacity; - capacity *= arch_scale_max_freq_capacity(sd, cpu); capacity >>= SCHED_CAPACITY_SHIFT; + capacity = min(capacity, thermal_cap(cpu)); + cpu_rq(cpu)->cpu_capacity_orig = capacity; + mcc = &cpu_rq(cpu)->rd->max_cpu_capacity; raw_spin_lock_irqsave(&mcc->lock, flags); @@ -10137,6 +10149,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd sds->total_capacity += sgs->group_capacity; sds->total_util += 
sgs->group_util; + trace_sched_load_balance_sg_stats(sg->cpumask[0], sgs->group_type, + sgs->idle_cpus, sgs->sum_nr_running, + sgs->group_load, sgs->group_capacity, + sgs->group_util, sgs->group_no_capacity, + sgs->load_per_task, + sgs->group_misfit_task_load, + sds->busiest ? sds->busiest->cpumask[0] : 0); + sg = sg->next; } while (sg != env->sd->groups); @@ -10416,8 +10436,23 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * a think about bumping its value to force at least one task to be * moved */ - if (env->imbalance < busiest->load_per_task) + if (env->imbalance < busiest->load_per_task) { + /* + * The busiest group is overloaded so it could use help + * from the other groups. If the local group has idle CPUs + * and it is not overloaded and has no imbalance with in + * the group, allow the load balance by bumping the + * imbalance. + */ + if (busiest->group_type == group_overloaded && + local->group_type <= group_misfit_task && + env->idle != CPU_NOT_IDLE) { + env->imbalance = busiest->load_per_task; + return; + } + return fix_small_imbalance(env, sds); + } } /******* find_busiest_group() helpers end here *********************/ @@ -10549,6 +10584,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env) /* Looks like there is an imbalance. 
Compute it */ env->src_grp_type = busiest->group_type; calculate_imbalance(env, &sds); + trace_sched_load_balance_stats(sds.busiest->cpumask[0], busiest->group_type, + busiest->avg_load, busiest->load_per_task, + sds.local->cpumask[0], local->group_type, + local->avg_load, local->load_per_task, + sds.avg_load, env->imbalance); return sds.busiest; out_balanced: @@ -10705,8 +10745,7 @@ static int need_active_balance(struct lb_env *env) env->src_rq->misfit_task_load) return 1; - return unlikely(sd->nr_balance_failed > - sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD); + return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); } static int group_balance_cpu_not_isolated(struct sched_group *sg) @@ -11055,7 +11094,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, group ? group->cpumask[0] : 0, busiest ? busiest->nr_running : 0, env.imbalance, env.flags, ld_moved, - sd->balance_interval); + sd->balance_interval, active_balance); return ld_moved; } @@ -11437,6 +11476,7 @@ static void nohz_balancer_kick(bool only_update) * is idle. And the softirq performing nohz idle load balance * will be run before returning from the IPI. */ + trace_sched_load_balance_nohz_kick(smp_processor_id(), ilb_cpu); smp_send_reschedule(ilb_cpu); return; } @@ -12750,7 +12790,7 @@ static void walt_check_for_rotation(struct rq *src_rq) if (!is_min_capacity_cpu(src_cpu)) return; - wc = ktime_get_ns(); + wc = sched_ktime_clock(); for_each_possible_cpu(i) { struct rq *rq = cpu_rq(i); diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 245139741555d87cdeb5bc74a70e8f9ad74a1dbe..68c92b6709e9b30b8f06d5379a9290ad52f356af 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -118,3 +118,14 @@ SCHED_FEAT(ENERGY_AWARE, false) SCHED_FEAT(EAS_PREFER_IDLE, true) SCHED_FEAT(FIND_BEST_TARGET, true) SCHED_FEAT(FBT_STRICT_ORDER, false) + +/* + * Apply schedtune boost hold to tasks of all sched classes. 
+ * If enabled, schedtune will hold the boost applied to a CPU + * for 50ms regardless of task activation - if the task is + * still running 50ms later, the boost hold expires and schedtune + * boost will expire immediately the task stops. + * If disabled, this behaviour will only apply to tasks of the + * RT class. + */ +SCHED_FEAT(SCHEDTUNE_BOOST_HOLD_ALL, false) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 093077acd2d5252ec241d73dce038eb9539391ca..e0d3e14c8df16388a34a474d4dac20238a03a9b3 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -9,6 +9,7 @@ #include #include #include +#include "tune.h" #include "walt.h" @@ -1395,6 +1396,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; + schedtune_enqueue_task(p, cpu_of(rq)); + if (flags & ENQUEUE_WAKEUP) rt_se->timeout = 0; @@ -1409,6 +1412,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; + schedtune_dequeue_task(p, cpu_of(rq)); + update_curr_rt(rq); dequeue_rt_entity(rt_se, flags); walt_dec_cumulative_runnable_avg(rq, p); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d3a72c8fe991950fe863b15616319ac4a53da2ee..21e7d2c54e6bdef6cc858ab8e895c1078ac06d58 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -565,6 +565,9 @@ struct cfs_rq { u64 throttled_clock_task_time; int throttled, throttle_count; struct list_head throttled_list; +#ifdef CONFIG_SCHED_WALT + u64 cumulative_runnable_avg; +#endif /* CONFIG_SCHED_WALT */ #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ }; @@ -1841,6 +1844,15 @@ static inline int hrtick_enabled(struct rq *rq) #endif /* CONFIG_SCHED_HRTICK */ +#ifdef CONFIG_SCHED_WALT +u64 sched_ktime_clock(void); +#else +static inline u64 sched_ktime_clock(void) +{ + return 0; +} +#endif + #ifdef CONFIG_SMP extern void sched_avg_update(struct rq *rq); extern unsigned long sched_get_rt_rq_util(int cpu); @@ 
-2479,7 +2491,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq))); if (data) - data->func(data, ktime_get_ns(), flags); + data->func(data, sched_ktime_clock(), flags); } #else static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 1e9899cf6ed671e0634d1c9b101418bbfd161dea..be0fe4ac752e7bdcd308a3075e9d14505f3372e3 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -307,6 +307,8 @@ static int init_rootdomain(struct root_domain *rd) init_max_cpu_capacity(&rd->max_cpu_capacity); + init_max_cpu_capacity(&rd->max_cpu_capacity); + return 0; free_cpudl: diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index e52a82fe5369436d5fe314cf3ea02f58bd0263d6..dc49ffb4278f16325b97bae8d7bf02e06a8b2e39 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -14,6 +14,9 @@ bool schedtune_initialized = false; extern struct reciprocal_value schedtune_spc_rdiv; +/* We hold schedtune boost in effect for at least this long */ +#define SCHEDTUNE_BOOST_HOLD_NS 50000000ULL + /* * EAS scheduler tunables for task groups. 
*/ @@ -129,11 +132,14 @@ struct boost_groups { /* Maximum boost value for all RUNNABLE tasks on a CPU */ bool idle; int boost_max; + u64 boost_ts; struct { /* The boost for tasks on that boost group */ int boost; /* Count of RUNNABLE tasks on that boost group */ unsigned tasks; + /* Timestamp of boost activation */ + u64 ts; } group[BOOSTGROUPS_COUNT]; /* CPU's boost group locking */ raw_spinlock_t lock; @@ -213,30 +219,53 @@ static int sched_boost_override_write(struct cgroup_subsys_state *css, #endif /* CONFIG_SCHED_WALT */ +static inline bool schedtune_boost_timeout(u64 now, u64 ts) +{ + return ((now - ts) > SCHEDTUNE_BOOST_HOLD_NS); +} + +static inline bool +schedtune_boost_group_active(int idx, struct boost_groups* bg, u64 now) +{ + if (bg->group[idx].tasks) + return true; + + return !schedtune_boost_timeout(now, bg->group[idx].ts); +} + static void -schedtune_cpu_update(int cpu) +schedtune_cpu_update(int cpu, u64 now) { struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu); int boost_max; + u64 boost_ts; int idx; /* The root boost group is always active */ boost_max = bg->group[0].boost; + boost_ts = now; for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) { /* * A boost group affects a CPU only if it has - * RUNNABLE tasks on that CPU + * RUNNABLE tasks on that CPU or it has hold + * in effect from a previous task. */ - if (bg->group[idx].tasks == 0) + if (!schedtune_boost_group_active(idx, bg, now)) + continue; + + /* This boost group is active */ + if (boost_max > bg->group[idx].boost) continue; - boost_max = max(boost_max, bg->group[idx].boost); + boost_max = bg->group[idx].boost; + boost_ts = bg->group[idx].ts; } /* Ensures boost_max is non-negative when all cgroup boost values * are neagtive. 
Avoids under-accounting of cpu capacity which may cause * task stacking and frequency spikes.*/ boost_max = max(boost_max, 0); bg->boost_max = boost_max; + bg->boost_ts = boost_ts; } static int @@ -246,6 +275,7 @@ schedtune_boostgroup_update(int idx, int boost) int cur_boost_max; int old_boost; int cpu; + u64 now; /* Update per CPU boost groups */ for_each_possible_cpu(cpu) { @@ -263,15 +293,19 @@ schedtune_boostgroup_update(int idx, int boost) bg->group[idx].boost = boost; /* Check if this update increase current max */ - if (boost > cur_boost_max && bg->group[idx].tasks) { + now = sched_clock_cpu(cpu); + if (boost > cur_boost_max && + schedtune_boost_group_active(idx, bg, now)) { bg->boost_max = boost; + bg->boost_ts = bg->group[idx].ts; + trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max); continue; } /* Check if this update has decreased current max */ if (cur_boost_max == old_boost && old_boost > boost) { - schedtune_cpu_update(cpu); + schedtune_cpu_update(cpu, now); trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max); continue; } @@ -285,6 +319,15 @@ schedtune_boostgroup_update(int idx, int boost) #define ENQUEUE_TASK 1 #define DEQUEUE_TASK -1 +static inline bool +schedtune_update_timestamp(struct task_struct *p) +{ + if (sched_feat(SCHEDTUNE_BOOST_HOLD_ALL)) + return true; + + return task_has_rt_policy(p); +} + static inline void schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count) { @@ -294,12 +337,21 @@ schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count) /* Update boosted tasks count while avoiding to make it negative */ bg->group[idx].tasks = max(0, tasks); - trace_sched_tune_tasks_update(p, cpu, tasks, idx, - bg->group[idx].boost, bg->boost_max); + /* Update timeout on enqueue */ + if (task_count > 0) { + u64 now = sched_clock_cpu(cpu); + + if (schedtune_update_timestamp(p)) + bg->group[idx].ts = now; - /* Boost group activation or deactivation on that RQ */ - if (tasks == 1 || tasks == 
0) - schedtune_cpu_update(cpu); + /* Boost group activation or deactivation on that RQ */ + if (bg->group[idx].tasks == 1) + schedtune_cpu_update(cpu, now); + } + + trace_sched_tune_tasks_update(p, cpu, tasks, idx, + bg->group[idx].boost, bg->boost_max, + bg->group[idx].ts); } /* @@ -343,6 +395,7 @@ int schedtune_can_attach(struct cgroup_taskset *tset) int src_bg; /* Source boost group index */ int dst_bg; /* Destination boost group index */ int tasks; + u64 now; if (unlikely(!schedtune_initialized)) return 0; @@ -393,13 +446,15 @@ int schedtune_can_attach(struct cgroup_taskset *tset) bg->group[src_bg].tasks = max(0, tasks); bg->group[dst_bg].tasks += 1; - raw_spin_unlock(&bg->lock); - task_rq_unlock(rq, task, &rq_flags); + /* Update boost hold start for this group */ + now = sched_clock_cpu(cpu); + bg->group[dst_bg].ts = now; - /* Update CPU boost group */ - if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1) - schedtune_cpu_update(task_cpu(task)); + /* Force boost group re-evaluation at next boost check */ + bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS; + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); } return 0; @@ -493,8 +548,15 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu) int schedtune_cpu_boost(int cpu) { struct boost_groups *bg; + u64 now; bg = &per_cpu(cpu_boost_groups, cpu); + now = sched_clock_cpu(cpu); + + /* Check to see if we have a hold in effect */ + if (schedtune_boost_timeout(now, bg->boost_ts)) + schedtune_cpu_update(cpu, now); + return bg->boost_max; } @@ -643,6 +705,7 @@ schedtune_boostgroup_init(struct schedtune *st) bg = &per_cpu(cpu_boost_groups, cpu); bg->group[st->idx].boost = 0; bg->group[st->idx].tasks = 0; + bg->group[st->idx].ts = 0; } return 0; diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 1b171077cf24e52b5e91937e15f74780a6c3ae49..082dcf58cc5849031cf608bc07c816608651f053 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -19,6 +19,7 @@ * and Todd Kjos */ 
+#include #include #include #include @@ -42,6 +43,8 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP", #define EARLY_DETECTION_DURATION 9500000 +static ktime_t ktime_last; +static bool sched_ktime_suspended; static struct cpu_cycle_counter_cb cpu_cycle_counter_cb; static bool use_cycle_counter; DEFINE_MUTEX(cluster_lock); @@ -51,6 +54,37 @@ u64 walt_load_reported_window; static struct irq_work walt_cpufreq_irq_work; static struct irq_work walt_migration_irq_work; +u64 sched_ktime_clock(void) +{ + if (unlikely(sched_ktime_suspended)) + return ktime_to_ns(ktime_last); + return ktime_get_ns(); +} + +static void sched_resume(void) +{ + sched_ktime_suspended = false; +} + +static int sched_suspend(void) +{ + ktime_last = ktime_get(); + sched_ktime_suspended = true; + return 0; +} + +static struct syscore_ops sched_syscore_ops = { + .resume = sched_resume, + .suspend = sched_suspend +}; + +static int __init sched_init_ops(void) +{ + register_syscore_ops(&sched_syscore_ops); + return 0; +} +late_initcall(sched_init_ops); + static void acquire_rq_locks_irqsave(const cpumask_t *cpus, unsigned long *flags) { @@ -331,11 +365,12 @@ bool early_detection_notify(struct rq *rq, u64 wallclock) struct task_struct *p; int loop_max = 10; + rq->ed_task = NULL; + if ((!walt_rotation_enabled && sched_boost_policy() == SCHED_BOOST_NONE) || !rq->cfs.h_nr_running) return 0; - rq->ed_task = NULL; list_for_each_entry(p, &rq->cfs_tasks, se.group_node) { if (!loop_max) break; @@ -361,7 +396,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock) if (is_idle_task(curr)) { /* We're here without rq->lock held, IRQ disabled */ raw_spin_lock(&rq->lock); - update_task_cpu_cycles(curr, cpu, ktime_get_ns()); + update_task_cpu_cycles(curr, cpu, sched_ktime_clock()); raw_spin_unlock(&rq->lock); } } @@ -416,7 +451,7 @@ void sched_account_irqtime(int cpu, struct task_struct *curr, cur_jiffies_ts = get_jiffies_64(); if (is_idle_task(curr)) - 
update_task_ravg(curr, rq, IRQ_UPDATE, ktime_get_ns(), + update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(), delta); nr_windows = cur_jiffies_ts - rq->irqload_ts; @@ -756,7 +791,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu) if (sched_disable_window_stats) goto done; - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); update_task_ravg(task_rq(p)->curr, task_rq(p), TASK_UPDATE, @@ -2055,7 +2090,7 @@ void mark_task_starting(struct task_struct *p) return; } - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); p->ravg.mark_start = p->last_wake_ts = wallclock; p->last_enqueued_ts = wallclock; update_task_cpu_cycles(p, cpu_of(rq), wallclock); @@ -2401,7 +2436,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb, raw_spin_lock_irqsave(&rq->lock, flags); update_task_ravg(rq->curr, rq, TASK_UPDATE, - ktime_get_ns(), 0); + sched_ktime_clock(), 0); raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -2561,7 +2596,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp) if (list_empty(&grp->tasks)) return; - wallclock = ktime_get_ns(); + wallclock = sched_ktime_clock(); /* * wakeup of two or more related tasks could race with each other and @@ -2587,7 +2622,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp) grp->preferred_cluster = best_cluster(grp, combined_demand, group_boost); - grp->last_update = ktime_get_ns(); + grp->last_update = sched_ktime_clock(); trace_sched_set_preferred_cluster(grp, combined_demand); } @@ -2611,7 +2646,7 @@ int update_preferred_cluster(struct related_thread_group *grp, * has passed since we last updated preference */ if (abs(new_load - old_load) > sched_ravg_window / 4 || - ktime_get_ns() - grp->last_update > sched_ravg_window) + sched_ktime_clock() - grp->last_update > sched_ravg_window) return 1; return 0; @@ -2994,7 +3029,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp, bool new_task; int i; - wallclock = 
ktime_get_ns(); + wallclock = sched_ktime_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0); @@ -3158,6 +3193,7 @@ void walt_irq_work(struct irq_work *irq_work) int cpu; u64 wc; bool is_migration = false; + u64 total_grp_load = 0; /* Am I the window rollover work or the migration work? */ if (irq_work == &walt_migration_irq_work) @@ -3166,7 +3202,7 @@ void walt_irq_work(struct irq_work *irq_work) for_each_cpu(cpu, cpu_possible_mask) raw_spin_lock(&cpu_rq(cpu)->lock); - wc = ktime_get_ns(); + wc = sched_ktime_clock(); walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws); for_each_sched_cluster(cluster) { u64 aggr_grp_load = 0; @@ -3184,12 +3220,14 @@ void walt_irq_work(struct irq_work *irq_work) } cluster->aggr_grp_load = aggr_grp_load; + total_grp_load += aggr_grp_load; cluster->coloc_boost_load = 0; raw_spin_unlock(&cluster->load_lock); } - walt_update_coloc_boost_load(); + if (total_grp_load) + walt_update_coloc_boost_load(); for_each_sched_cluster(cluster) { cpumask_t cluster_online_cpus; @@ -3240,6 +3278,49 @@ void walt_rotation_checkpoint(int nr_big) walt_rotation_enabled = nr_big >= num_possible_cpus(); } +unsigned int walt_get_default_coloc_group_load(void) +{ + struct related_thread_group *grp; + unsigned long flags; + u64 total_demand = 0, wallclock; + struct task_struct *p; + int min_cap_cpu, scale = 1024; + + grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID); + + raw_spin_lock_irqsave(&grp->lock, flags); + if (list_empty(&grp->tasks)) { + raw_spin_unlock_irqrestore(&grp->lock, flags); + return 0; + } + + wallclock = sched_ktime_clock(); + + list_for_each_entry(p, &grp->tasks, grp_list) { + if (p->ravg.mark_start < wallclock - + (sched_ravg_window * sched_ravg_hist_size)) + continue; + + total_demand += p->ravg.coloc_demand; + } + + raw_spin_unlock_irqrestore(&grp->lock, flags); + + /* + * Scale the total demand to the lowest capacity CPU and + * convert into 
percentage. + * + * P = total_demand/sched_ravg_window * 1024/scale * 100 + */ + + min_cap_cpu = this_rq()->rd->min_cap_orig_cpu; + if (min_cap_cpu != -1) + scale = arch_scale_cpu_capacity(NULL, min_cap_cpu); + + return div64_u64(total_demand * 1024 * 100, + (u64)sched_ravg_window * scale); +} + int walt_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h index e3c1181265bcb67ada3e7a8821fb710d2737d8ec..995efe7cce6d57acde66de31244054d486ea4d40 100644 --- a/kernel/sched/walt.h +++ b/kernel/sched/walt.h @@ -299,11 +299,12 @@ void walt_sched_init_rq(struct rq *rq); static inline void walt_update_last_enqueue(struct task_struct *p) { - p->last_enqueued_ts = ktime_get_ns(); + p->last_enqueued_ts = sched_ktime_clock(); } extern void walt_rotate_work_init(void); extern void walt_rotation_checkpoint(int nr_big); extern unsigned int walt_rotation_enabled; +extern unsigned int walt_get_default_coloc_group_load(void); #else /* CONFIG_SCHED_WALT */ @@ -312,6 +313,10 @@ static inline void walt_sched_init_rq(struct rq *rq) { } static inline void walt_rotate_work_init(void) { } static inline void walt_rotation_checkpoint(int nr_big) { } static inline void walt_update_last_enqueue(struct task_struct *p) { } +static inline unsigned int walt_get_default_coloc_group_load(void) +{ + return 0; +} static inline void update_task_ravg(struct task_struct *p, struct rq *rq, int event, u64 wallclock, u64 irqtime) { } diff --git a/kernel/softirq.c b/kernel/softirq.c index 81740f20ca1344dd7e645760b6f866b552e37ad9..ee4f0e018424dd1f8bdb4cd7cdf2b38d22f006cc 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -390,7 +390,7 @@ static inline void tick_irq_exit(void) /* Make sure that timer wheel updates are propagated */ if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) { - if (!in_interrupt()) + if (!in_irq()) tick_nohz_irq_exit(); } #endif diff --git 
a/kernel/stop_machine.c b/kernel/stop_machine.c index b140029388dbe9233b0ab9f297db05dc9ed0ef86..067cb83f37eae5644fa84e29a614abd6b4d8a3de 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -37,7 +37,7 @@ struct cpu_stop_done { struct cpu_stopper { struct task_struct *thread; - spinlock_t lock; + raw_spinlock_t lock; bool enabled; /* is this stopper enabled? */ struct list_head works; /* list of pending works */ @@ -82,13 +82,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) bool enabled; preempt_disable(); - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) __cpu_stop_queue_work(stopper, work, &wakeq); else if (work->done) cpu_stop_signal_done(work->done); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); wake_up_q(&wakeq); preempt_enable(); @@ -238,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); DEFINE_WAKE_Q(wakeq); int err; + retry: - spin_lock_irq(&stopper1->lock); - spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + /* + * The waking up of stopper threads has to happen in the same + * scheduling context as the queueing. Otherwise, there is a + * possibility of one of the above stoppers being woken up by another + * CPU, and preempting us. This will cause us to not wake up the other + * stopper forever. + */ + preempt_disable(); + raw_spin_lock_irq(&stopper1->lock); + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); - err = -ENOENT; - if (!stopper1->enabled || !stopper2->enabled) + if (!stopper1->enabled || !stopper2->enabled) { + err = -ENOENT; goto unlock; + } + /* * Ensure that if we race with __stop_cpus() the stoppers won't get * queued up in reverse order leading to system deadlock. 
@@ -255,36 +266,30 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, * It can be falsely true but it is safe to spin until it is cleared, * queue_stop_cpus_work() does everything under preempt_disable(). */ - err = -EDEADLK; - if (unlikely(stop_cpus_in_progress)) - goto unlock; + if (unlikely(stop_cpus_in_progress)) { + err = -EDEADLK; + goto unlock; + } err = 0; __cpu_stop_queue_work(stopper1, work1, &wakeq); __cpu_stop_queue_work(stopper2, work2, &wakeq); - /* - * The waking up of stopper threads has to happen - * in the same scheduling context as the queueing. - * Otherwise, there is a possibility of one of the - * above stoppers being woken up by another CPU, - * and preempting us. This will cause us to not - * wake up the other stopper forever. - */ - preempt_disable(); + unlock: - spin_unlock(&stopper2->lock); - spin_unlock_irq(&stopper1->lock); + raw_spin_unlock(&stopper2->lock); + raw_spin_unlock_irq(&stopper1->lock); if (unlikely(err == -EDEADLK)) { + preempt_enable(); + while (stop_cpus_in_progress) cpu_relax(); + goto retry; } - if (!err) { - wake_up_q(&wakeq); - preempt_enable(); - } + wake_up_q(&wakeq); + preempt_enable(); return err; } @@ -471,9 +476,9 @@ static int cpu_stop_should_run(unsigned int cpu) unsigned long flags; int run; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); run = !list_empty(&stopper->works); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); return run; } @@ -484,13 +489,13 @@ static void cpu_stopper_thread(unsigned int cpu) repeat: work = NULL; - spin_lock_irq(&stopper->lock); + raw_spin_lock_irq(&stopper->lock); if (!list_empty(&stopper->works)) { work = list_first_entry(&stopper->works, struct cpu_stop_work, list); list_del_init(&work->list); } - spin_unlock_irq(&stopper->lock); + raw_spin_unlock_irq(&stopper->lock); if (work) { cpu_stop_fn_t fn = work->fn; @@ -564,7 +569,7 @@ static int __init 
cpu_stop_init(void) for_each_possible_cpu(cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - spin_lock_init(&stopper->lock); + raw_spin_lock_init(&stopper->lock); INIT_LIST_HEAD(&stopper->works); } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3a4e11d61f8780127f98d1da464738c4a5514a26..f5297a0ee323c72aa6bd146c5257fcc0dcfcbbb7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -139,6 +139,7 @@ static int ten_thousand = 10000; #ifdef CONFIG_PERF_EVENTS static int six_hundred_forty_kb = 640 * 1024; #endif +static int two_hundred_fifty_five = 255; /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; @@ -578,6 +579,31 @@ static struct ctl_table kern_table[] = { .extra1 = &one, }, #endif + { + .procname = "sched_lib_name", + .data = sched_lib_name, + .maxlen = LIB_PATH_LENGTH, + .mode = 0644, + .proc_handler = proc_dostring, + }, + { + .procname = "sched_lib_mask_check", + .data = &sched_lib_mask_check, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = &zero, + .extra2 = &two_hundred_fifty_five, + }, + { + .procname = "sched_lib_mask_force", + .data = &sched_lib_mask_force, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = &zero, + .extra2 = &two_hundred_fifty_five, + }, #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", @@ -1206,6 +1232,16 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, }, + { + .procname = "hung_task_selective_monitoring", + .data = &sysctl_hung_task_selective_monitoring, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + #endif #ifdef CONFIG_RT_MUTEXES { @@ -3264,7 +3300,7 @@ static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { - if (*negp) + if 
(*negp || *lvalp == 0) return -EINVAL; *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp; } else { diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index b1e5728745ffb3aa50427962b49d8df8e914d6a0..e13d32706b74be029b8eb048cc767e0848ec2559 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -322,6 +322,8 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) { unsigned long flags; + INIT_LIST_HEAD(&cs->wd_list); + spin_lock_irqsave(&watchdog_lock, flags); if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { /* cs is a clocksource to be watched. */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index c2e975ad25cc165ed8bc44236e1f0c2aeff9b553..dc7792f71602ae55cc66d86ff116f7385e15e671 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -946,12 +946,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts) ktime_t now, expires; int cpu = smp_processor_id(); - now = tick_nohz_start_idle(ts); - #ifdef CONFIG_SMP if (check_pending_deferrable_timers(cpu)) raise_softirq_irqoff(TIMER_SOFTIRQ); #endif + now = tick_nohz_start_idle(ts); if (can_stop_idle_tick(cpu, ts)) { int was_stopped = ts->tick_stopped; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 36f018b1539246b63817c20c6c34d56f131a80ae..fd78090042973ceaae2fab46daa10021509bee13 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3109,6 +3109,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer) return !atomic_read(&buffer->record_disabled); } +/** + * ring_buffer_record_is_set_on - return true if the ring buffer is set writable + * @buffer: The ring buffer to see if write is set enabled + * + * Returns true if the ring buffer is set writable by ring_buffer_record_on(). + * Note that this does NOT mean it is in a writable state. 
+ * + * It may return true when the ring buffer has been disabled by + * ring_buffer_record_disable(), as that is a temporary disabling of + * the ring buffer. + */ +int ring_buffer_record_is_set_on(struct ring_buffer *buffer) +{ + return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); +} + /** * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer * @buffer: The ring buffer to stop writes to. diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2f12af99a4baeb72ab70e6127bcd785ca1ae55d3..8de773161495ac40493cb7998e2c559e962738ec 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1367,6 +1367,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) arch_spin_lock(&tr->max_lock); + /* Inherit the recordable setting from trace_buffer */ + if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) + ring_buffer_record_on(tr->max_buffer.buffer); + else + ring_buffer_record_off(tr->max_buffer.buffer); + buf = tr->trace_buffer.buffer; tr->trace_buffer.buffer = tr->max_buffer.buffer; tr->max_buffer.buffer = buf; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index b413fab7d75becb63fec1e9c795e135463ee552c..43254c5e7e1649323d4cfd3e91c75a80c2e2fb19 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -680,6 +680,8 @@ event_trigger_callback(struct event_command *cmd_ops, goto out_free; out_reg: + /* Up the trigger_data count to make sure reg doesn't free it on failure */ + event_trigger_init(trigger_ops, trigger_data); ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); /* * The above returns on success the # of functions enabled, @@ -687,11 +689,13 @@ event_trigger_callback(struct event_command *cmd_ops, * Consider no functions a failure too. 
*/ if (!ret) { + cmd_ops->unreg(glob, trigger_ops, trigger_data, file); ret = -ENOENT; - goto out_free; - } else if (ret < 0) - goto out_free; - ret = 0; + } else if (ret > 0) + ret = 0; + + /* Down the counter of trigger_data or free it if not used anymore */ + event_trigger_free(trigger_ops, trigger_data); out: return ret; @@ -1392,6 +1396,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops, goto out; } + /* Up the trigger_data count to make sure nothing frees it on failure */ + event_trigger_init(trigger_ops, trigger_data); + if (trigger) { number = strsep(&trigger, ":"); @@ -1442,6 +1449,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, goto out_disable; /* Just return zero, not the number of enabled functions */ ret = 0; + event_trigger_free(trigger_ops, trigger_data); out: return ret; @@ -1452,7 +1460,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, out_free: if (cmd_ops->set_filter) cmd_ops->set_filter(NULL, trigger_data, NULL); - kfree(trigger_data); + event_trigger_free(trigger_ops, trigger_data); kfree(enable_data); goto out; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f8d3bd974bcc41032ecc11f958be3f72cfe75863..ea20274a105aed94ce34d96e30ffc2304cc038ae 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -376,11 +376,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, static int enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) { + struct event_file_link *link = NULL; int ret = 0; if (file) { - struct event_file_link *link; - link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) { ret = -ENOMEM; @@ -400,6 +399,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) else ret = enable_kprobe(&tk->rp.kp); } + + if (ret) { + if (file) { + /* Notice the if is true on not WARN() */ + if (!WARN_ON_ONCE(!link)) + list_del_rcu(&link->list); + kfree(link); + tk->tp.flags &= ~TP_FLAG_TRACE; + } 
else { + tk->tp.flags &= ~TP_FLAG_PROFILE; + } + } out: return ret; } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 04865a3f3438ce0a73fd47c68719cb0947bce3b6..7ac002d88308ecca26712cf1944d9322e9fa699f 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -456,17 +456,13 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio) sched_setscheduler(current, policy, ¶m); } -/* Must be called with hotplug lock (lock_device_hotplug()) held. */ void watchdog_enable(unsigned int cpu) { struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); unsigned int *enabled = this_cpu_ptr(&watchdog_en); - lock_device_hotplug_assert(); - if (*enabled) return; - *enabled = 1; /* * Start the timer first to prevent the NMI watchdog triggering @@ -484,19 +480,24 @@ void watchdog_enable(unsigned int cpu) watchdog_nmi_enable(cpu); watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); + + /* + * Need to ensure above operations are observed by other CPUs before + * indicating that timer is enabled. This is to synchronize core + * isolation and hotplug. Core isolation will wait for this flag to be + * set. + */ + mb(); + *enabled = 1; } -/* Must be called with hotplug lock (lock_device_hotplug()) held. 
*/ void watchdog_disable(unsigned int cpu) { struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); unsigned int *enabled = this_cpu_ptr(&watchdog_en); - lock_device_hotplug_assert(); - if (!*enabled) return; - *enabled = 0; watchdog_set_prio(SCHED_NORMAL, 0); /* @@ -506,6 +507,17 @@ void watchdog_disable(unsigned int cpu) */ watchdog_nmi_disable(cpu); hrtimer_cancel(hrtimer); + + /* + * No need for barrier here since disabling the watchdog is + * synchronized with hotplug lock + */ + *enabled = 0; +} + +bool watchdog_configured(unsigned int cpu) +{ + return *per_cpu_ptr(&watchdog_en, cpu); } static void watchdog_cleanup(unsigned int cpu, bool online) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index b734ce731a7a5c6873cac5f931cd44ea8017e7cf..39215c724fc72213a6fa2fda6f07186956300605 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -878,8 +878,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop); static size_t rounded_hashtable_size(const struct rhashtable_params *params) { - return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), - (unsigned long)params->min_size); + size_t retsize; + + if (params->nelem_hint) + retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), + (unsigned long)params->min_size); + else + retsize = max(HASH_DEFAULT_SIZE, + (unsigned long)params->min_size); + + return retsize; } static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) @@ -936,8 +944,6 @@ int rhashtable_init(struct rhashtable *ht, struct bucket_table *tbl; size_t size; - size = HASH_DEFAULT_SIZE; - if ((!params->key_len && !params->obj_hashfn) || (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; @@ -964,8 +970,7 @@ int rhashtable_init(struct rhashtable *ht, ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); - if (params->nelem_hint) - size = rounded_hashtable_size(&ht->p); + size = rounded_hashtable_size(&ht->p); if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); diff --git a/mm/Kconfig b/mm/Kconfig 
index 17ad9d7de090a4cbd5aad68a8c334eae17b47cf0..56574779b02f71f7ab97932d64937a191ce953d9 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -659,6 +659,15 @@ config MAX_STACK_SIZE_MB A sane initial value is 80 MB. +config BALANCE_ANON_FILE_RECLAIM + bool "During reclaim treat anon and file backed pages equally" + depends on SWAP + help + When performing memory reclaim treat anonymous and file backed pages + equally. + Swapping anonymous pages out to memory can be efficient enough to justify + treating anonymous and file backed pages equally. + # For architectures that support deferred memory initialisation config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT bool diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6774e0369ebecf79484aa2f1b55fabb7b89ab042..9386c98dac123bc48d3744eb988bafef69da49c6 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -356,15 +356,8 @@ static void wb_shutdown(struct bdi_writeback *wb) spin_lock_bh(&wb->work_lock); if (!test_and_clear_bit(WB_registered, &wb->state)) { spin_unlock_bh(&wb->work_lock); - /* - * Wait for wb shutdown to finish if someone else is just - * running wb_shutdown(). Otherwise we could proceed to wb / - * bdi destruction before wb_shutdown() is finished. - */ - wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE); return; } - set_bit(WB_shutting_down, &wb->state); spin_unlock_bh(&wb->work_lock); cgwb_remove_from_bdi_list(wb); @@ -376,12 +369,6 @@ static void wb_shutdown(struct bdi_writeback *wb) mod_delayed_work(bdi_wq, &wb->dwork, 0); flush_delayed_work(&wb->dwork); WARN_ON(!list_empty(&wb->work_list)); - /* - * Make sure bit gets cleared after shutdown is finished. Matches with - * the barrier provided by test_and_clear_bit() above. 
- */ - smp_wmb(); - clear_and_wake_up_bit(WB_shutting_down, &wb->state); } static void wb_exit(struct bdi_writeback *wb) @@ -505,10 +492,12 @@ static void cgwb_release_workfn(struct work_struct *work) struct bdi_writeback *wb = container_of(work, struct bdi_writeback, release_work); + mutex_lock(&wb->bdi->cgwb_release_mutex); wb_shutdown(wb); css_put(wb->memcg_css); css_put(wb->blkcg_css); + mutex_unlock(&wb->bdi->cgwb_release_mutex); fprop_local_destroy_percpu(&wb->memcg_completions); percpu_ref_exit(&wb->refcnt); @@ -694,6 +683,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); bdi->cgwb_congested_tree = RB_ROOT; + mutex_init(&bdi->cgwb_release_mutex); ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); if (!ret) { @@ -714,7 +704,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) spin_lock_irq(&cgwb_lock); radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); + spin_unlock_irq(&cgwb_lock); + mutex_lock(&bdi->cgwb_release_mutex); + spin_lock_irq(&cgwb_lock); while (!list_empty(&bdi->wb_list)) { wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, bdi_node); @@ -723,6 +716,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) spin_lock_irq(&cgwb_lock); } spin_unlock_irq(&cgwb_lock); + mutex_unlock(&bdi->cgwb_release_mutex); } /** diff --git a/mm/cma.c b/mm/cma.c index c5ee6148a8a7a8acca0ccd0b5e428d94e1541c8a..a90881891493e9a811246e061eca9fe19889ea71 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -447,6 +447,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, if (!count) return NULL; + trace_cma_alloc_start(count, align); + mask = cma_bitmap_aligned_mask(cma, align); offset = cma_bitmap_aligned_offset(cma, align); bitmap_maxno = cma_bitmap_maxno(cma); @@ -514,6 +516,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, pr_debug("%s(): memory range at %p is busy, retrying\n", __func__, pfn_to_page(pfn)); + + 
trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align); /* try again with a bit different memory target */ start = bitmap_no + mask + 1; } diff --git a/mm/filemap.c b/mm/filemap.c index 3729f2b93052eb80ad9ed141254421a94e050986..e77e15d08670f57c81f2c92d0d4d5214b72f3762 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2409,11 +2409,6 @@ int filemap_fault(struct vm_fault *vmf) do_async_mmap_readahead(vmf->vma, ra, file, page, offset); } else if (!page) { /* No page in the page cache at all */ - struct address_space *mapping = file->f_mapping; - - if (mapping && (mapping->gfp_mask & __GFP_MOVABLE)) - mapping->gfp_mask |= __GFP_CMA; - do_sync_mmap_readahead(vmf->vma, ra, file, offset); count_vm_event(PGMAJFAULT); count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5cb7aee06f761e5b63fc9626004a486b27518995..8d1b8a685ca0d5d7480c04a1860127e1c05c12f9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2069,6 +2069,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, if (vma_is_dax(vma)) return; page = pmd_page(_pmd); + if (!PageDirty(page) && pmd_dirty(_pmd)) + set_page_dirty(page); if (!PageReferenced(page) && pmd_young(_pmd)) SetPageReferenced(page); page_remove_rmap(page, true); diff --git a/mm/internal.h b/mm/internal.h index 7cc2c6e787bf45e02e914419ab48e686b4baba1c..b84f706a6f178b044a0c7963dd73c0156040026c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -557,4 +557,13 @@ static inline bool is_migrate_highatomic_page(struct page *page) } void setup_zone_pageset(struct zone *zone); + +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER +extern bool lmk_kill_possible(void); +#else +static inline bool lmk_kill_possible(void) +{ + return false; +} +#endif #endif /* __MM_INTERNAL_H */ diff --git a/mm/memblock.c b/mm/memblock.c index c464e047a1cb079086ad39d2655f606054221211..0e1b8f2b2aeb4a6746f385e1745ae79ae30ca1d8 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -707,7 +707,8 @@ int 
__init_memblock memblock_free(phys_addr_t base, phys_addr_t size) memblock_dbg(" memblock_free: [%pa-%pa] %pF\n", &base, &end, (void *)_RET_IP_); - kmemleak_free_part_phys(base, size); + if (base < memblock.current_limit) + kmemleak_free_part(__va(base), size); return memblock_remove_range(&memblock.reserved, base, size); } @@ -1148,7 +1149,9 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, * The min_count is set to 0 so that memblock allocations are * never reported as leaks. */ - kmemleak_alloc_phys(found, size, 0, 0); + if (found < memblock.current_limit) + kmemleak_alloc(__va(found), size, 0, 0); + return found; } return 0; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 942d9342b63bcf744aa4c23c278b91e79f7bbf32..db69d938e9ed0a7105241efd78a105d7d77c8e3c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -879,7 +879,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) int nid; int i; - while ((memcg = parent_mem_cgroup(memcg))) { + for (; memcg; memcg = parent_mem_cgroup(memcg)) { for_each_node(nid) { mz = mem_cgroup_nodeinfo(memcg, nid); for (i = 0; i <= DEF_PRIORITY; i++) { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c1ee2d3ddc03e96dae5e3fa153adf85d1ffcc45e..1102c653df7ce6ffe66258cded6257876064fb60 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -692,24 +692,60 @@ static int generic_online_page(struct page *page) return 0; } -static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, - void *arg) +static void __free_pages_hotplug(struct page *page, unsigned int order) { - unsigned long i; + unsigned int nr_pages = 1 << order; + struct page *p = page; + unsigned int loop; + + adjust_managed_page_count(page, nr_pages); + for (loop = 0; loop < nr_pages; loop++, p++) { + __online_page_set_limits(p); + ClearPageReserved(p); + set_page_count(p, 0); + } + + set_page_refcounted(page); + __free_pages(page, order); +} + +static void __free_pages_memory(unsigned long 
start, + unsigned long nr_pages, void *arg) +{ + unsigned long order; unsigned long onlined_pages = *(unsigned long *)arg; struct page *page; - int ret; + unsigned long i; + unsigned long phy_addr; + + for (i = 0; i < nr_pages; i += (1UL << order)) { + order = min(MAX_ORDER - 1UL, __ffs(start + i)); + page = pfn_to_page(start + i); + phy_addr = page_to_phys(page); + + if (phy_addr >= bootloader_memory_limit) + break; + + while ((1UL << order) > nr_pages - i || + phy_addr + ((1UL << order) * PAGE_SIZE) + > bootloader_memory_limit) + order--; + + __free_pages_hotplug(page, order); + onlined_pages += (1UL << order); + } + + *(unsigned long *)arg = onlined_pages; +} + +static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ if (PageReserved(pfn_to_page(start_pfn))) - for (i = 0; i < nr_pages; i++) { - page = pfn_to_page(start_pfn + i); - ret = (*online_page_callback)(page); - if (!ret) - onlined_pages++; - } + __free_pages_memory(start_pfn, nr_pages, arg); online_mem_sections(start_pfn, start_pfn + nr_pages); - *(unsigned long *)arg = onlined_pages; return 0; } diff --git a/mm/migrate.c b/mm/migrate.c index 0ff14c3825d38e4cf774e2be6735613291d7d514..c2a0f3fb1f87c355baa73304e6e87fb11b1fad12 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1388,6 +1388,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, int swapwrite = current->flags & PF_SWAPWRITE; int rc; + trace_mm_migrate_pages_start(mode, reason); + if (!swapwrite) current->flags |= PF_SWAPWRITE; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c4db6573db789f2e071bdc04e4062ddd6b8d6574..038478d375c3afb68acf9efeead6c3742a97c788 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1776,9 +1776,9 @@ inline void post_alloc_hook(struct page *page, unsigned int order, set_page_refcounted(page); arch_alloc_page(page, order); + kasan_alloc_pages(page, order); kernel_map_pages(page, 1 << order, 1); kernel_poison_pages(page, 1 << order, 1); - 
kasan_alloc_pages(page, order); set_page_owner(page, order, gfp_flags); } @@ -3870,8 +3870,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order, * their order will become available due to high fragmentation so * always increment the no progress counter for them */ - if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) || - IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) + if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) *no_progress_loops = 0; else (*no_progress_loops)++; @@ -4172,8 +4171,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, * implementation of the compaction depends on the sufficient amount * of free memory (see __compaction_suitable) */ - if ((did_some_progress > 0 || - IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) && + if ((did_some_progress > 0 || lmk_kill_possible()) && should_compact_retry(ac, order, alloc_flags, compact_result, &compact_priority, &compaction_retries)) diff --git a/mm/slub.c b/mm/slub.c index fbfe6393be1b3c3e62e0abf5da1cca523bd42585..18f5352f5e5c96dd63ff8f9400525cd75f92588f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -718,7 +718,7 @@ void object_err(struct kmem_cache *s, struct page *page, slab_panic(reason); } -static void slab_err(struct kmem_cache *s, struct page *page, +static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...) 
{ va_list args; @@ -1690,6 +1690,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page) if (current->reclaim_state) current->reclaim_state->reclaimed_slab += pages; memcg_uncharge_slab(page, order, s); + kasan_alloc_pages(page, order); __free_pages(page, order); } @@ -3903,6 +3904,7 @@ void kfree(const void *x) if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); kfree_hook(object); + kasan_alloc_pages(page, compound_order(page)); __free_pages(page, compound_order(page)); return; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ebff729cc9562709500353f30121f2c423296cc8..9ff21a12ea009d1b535d1e60eb20bdd2fa795e50 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1519,7 +1519,7 @@ static void __vunmap(const void *addr, int deallocate_pages) addr)) return; - area = remove_vm_area(addr); + area = find_vmap_area((unsigned long)addr)->vm; if (unlikely(!area)) { WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr); @@ -1529,6 +1529,7 @@ static void __vunmap(const void *addr, int deallocate_pages) debug_check_no_locks_freed(addr, get_vm_area_size(area)); debug_check_no_obj_freed(addr, get_vm_area_size(area)); + remove_vm_area(addr); if (deallocate_pages) { int i; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4ce5b4d8778ff6bf66afa4bce9ad5e33c6fcb560..3fcb57226805c5bfe801c32a3432f0e0456a621b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -220,8 +220,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone) nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); - if (get_nr_swap_pages() > 0 - || IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) + if (get_nr_swap_pages() > 0 || lmk_kill_possible()) nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); @@ -332,6 +331,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, long batch_size = shrinker->batch ? 
shrinker->batch : SHRINK_BATCH; long scanned = 0, next_deferred; + long min_cache_size = batch_size; + + if (current_is_kswapd()) + min_cache_size = 0; freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0) @@ -399,7 +402,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, * scanning at high prio and therefore should try to reclaim as much as * possible. */ - while (total_scan >= batch_size || + while (total_scan > min_cache_size || total_scan >= freeable) { unsigned long ret; unsigned long nr_to_scan = min(batch_size, total_scan); @@ -2367,7 +2370,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, * lruvec even if it has plenty of old anonymous pages unless the * system is under heavy pressure. */ - if (!inactive_list_is_low(lruvec, true, memcg, sc, false) && + if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) && + !inactive_list_is_low(lruvec, true, memcg, sc, false) && lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { scan_balance = SCAN_FILE; goto out; diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index cf2e70003a534fa87cdf6bbc34ed29e9e0be0f4e..cf82d970b0e48c7ab7d537e83d1a889dc3d05cd7 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 4e111196f90216488de317efc8a5a792e75603a0..bc21f8e8daf28e22cc2a3226c97ae1341923516b 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc; pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); - refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = atmvcc->atm_options; + 
atm_account_tx(atmvcc, skb); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; diff --git a/net/atm/clip.c b/net/atm/clip.c index 65f706e4344c39f47dc27c4f2b12d742c1dd0547..60920a42f64057d8bb8de21fa54c88347c108ce7 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, memcpy(here, llc_oui, sizeof(llc_oui)); ((__be16 *) here)[3] = skb->protocol; } - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = vcc->atm_options; + atm_account_tx(vcc, skb); entry->vccs->last_use = jiffies; pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ diff --git a/net/atm/common.c b/net/atm/common.c index 8a4f99114cd2b5c2a80fa964b00f52a10160b36f..9e812c782a372d03e30ef5b97f4ef2c69cfa9100 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) goto out; } pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); - refcount_add(skb->truesize, &sk->sk_wmem_alloc); + atm_account_tx(vcc, skb); skb->dev = NULL; /* for paths shared with net_device interfaces */ - ATM_SKB(skb)->atm_options = vcc->atm_options; if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) { kfree_skb(skb); error = -EFAULT; diff --git a/net/atm/lec.c b/net/atm/lec.c index 5741b6474dd996188b5eb64100bbc7be47e3235c..9f2365694ad4a4e76dec293fdcfed6705338f6bc 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb) struct net_device *dev = skb->dev; ATM_SKB(skb)->vcc = vcc; - ATM_SKB(skb)->atm_options = vcc->atm_options; + atm_account_tx(vcc, skb); - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); if (vcc->send(vcc, skb) < 0) { dev->stats.tx_dropped++; return; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 
5677147209e8181ce2e9eff308bc0ed82b548e45..db9a1838687ce1dcc62d1cdd7ab2d5d0b0efa2e2 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc) sizeof(struct llc_snap_hdr)); } - refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = entry->shortcut->atm_options; + atm_account_tx(entry->shortcut, skb); entry->shortcut->send(entry->shortcut, skb); entry->packets_fwded++; mpc->in_ops->put(entry); diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 21d9d341a6199255a017437954e4b688f1ba5bfd..af8c4b38b7463e03bf4b060735ce852b515d526c 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) return 1; } - refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; + atm_account_tx(vcc, skb); pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev); ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) diff --git a/net/atm/raw.c b/net/atm/raw.c index ee10e8d46185173067f459aa5efdf5a77f8f9f06..b3ba44aab0ee6c9425fd278ebf8e2df1590a6d7a 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c @@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb) struct sock *sk = sk_atm(vcc); pr_debug("(%d) %d -= %d\n", - vcc->vci, sk_wmem_alloc_get(sk), skb->truesize); - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)); + vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize); + WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc)); dev_kfree_skb_any(skb); sk->sk_write_space(sk); } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 25738b20676d50d98f573f0c609738d4ee62c103..54c7fe68040f7e060cdd0b1bd56f54da23f630b1 100644 --- a/net/bridge/netfilter/ebtables.c +++ 
b/net/bridge/netfilter/ebtables.c @@ -398,6 +398,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); if (IS_ERR(watcher)) return PTR_ERR(watcher); + + if (watcher->family != NFPROTO_BRIDGE) { + module_put(watcher->me); + return -ENOENT; + } + w->u.watcher = watcher; par->target = watcher; @@ -719,6 +725,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, goto cleanup_watchers; } + /* Reject UNSPEC, xtables verdicts/return values are incompatible */ + if (target->family != NFPROTO_BRIDGE) { + module_put(target->me); + ret = -ENOENT; + goto cleanup_watchers; + } + t->u.target = target; if (t->u.target == &ebt_standard_target) { if (gap < sizeof(struct ebt_standard_target)) { diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 87f28557b3298f30b1cf79dccf3bc6b761cd6623..441c04adedba0d21ede168d1e6c09415b5db07d3 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, d->lock = lock; spin_lock_bh(lock); } - if (d->tail) - return gnet_stats_copy(d, type, NULL, 0, padattr); + if (d->tail) { + int ret = gnet_stats_copy(d, type, NULL, 0, padattr); + + /* The initial attribute added in gnet_stats_copy() may be + * preceded by a padding attribute, in which case d->tail will + * end up pointing at the padding instead of the real attribute. + * Fix this so gnet_stats_finish_copy() adjusts the length of + * the right attribute. 
+ */ + if (ret == 0 && d->tail->nla_type == padattr) + d->tail = (struct nlattr *)((char *)d->tail + + NLA_ALIGN(d->tail->nla_len)); + return ret; + } return 0; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4cfdad08aca04d067a67875f97c13510dc456c0b..efe396cc77b5fa8330a5029e09e5d9ceac754551 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2402,9 +2402,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) return err; } - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; - - __dev_notify_flags(dev, old_flags, ~0U); + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { + __dev_notify_flags(dev, old_flags, 0U); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); + } return 0; } EXPORT_SYMBOL(rtnl_configure_link); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index db973bf72b76ec04e52707d2765452a76347b497..00042186f9426d34e29270d2075fc52d678be741 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) n->cloned = 1; n->nohdr = 0; n->peeked = 0; + C(pfmemalloc); n->destructor = NULL; C(tail); C(end); @@ -3678,6 +3679,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, net_warn_ratelimited( "skb_segment: too many frags: %u %u\n", pos, mss); + err = -EINVAL; goto err; } @@ -3716,11 +3718,10 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, perform_csum_check: if (!csum) { - if (skb_has_shared_frag(nskb)) { - err = __skb_linearize(nskb); - if (err) - goto err; - } + if (skb_has_shared_frag(nskb) && + __skb_linearize(nskb)) + goto err; + if (!nskb->remcsum_offload) nskb->ip_summed = CHECKSUM_NONE; SKB_GSO_CB(nskb)->csum = diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c index e095487a7a96f2d3b7aa15c87467a46b95cae55f..b49f77f4154e4a948b60d8a04cba061363d0ca15 100644 --- a/net/core/sockev_nlmcast.c +++ b/net/core/sockev_nlmcast.c @@ -37,7 
+37,6 @@ static struct netlink_kernel_cfg nlcfg = { static void _sockev_event(unsigned long event, __u8 *evstr, int buflen) { - memset(evstr, 0, buflen); switch (event) { case SOCKEV_SOCKET: @@ -99,6 +98,7 @@ static int sockev_client_cb(struct notifier_block *nb, NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV; smsg = nlmsg_data(nlh); + memset(smsg, 0, sizeof(struct sknlsockevmsg)); smsg->pid = current->pid; _sockev_event(event, smsg->event, sizeof(smsg->event)); smsg->skfamily = sk->sk_family; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 119c04317d48eed4abcb2bbf6071062c8d9784fd..03fcf3ee15346cb450d76f314fd3c137a16ed50f 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); struct dccp_sock *dp = dccp_sk(sk); - ktime_t now = ktime_get_real(); + ktime_t now = ktime_get(); s64 delta = 0; switch (fbtype) { @@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, case CCID3_FBACK_PERIODIC: delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); if (delta <= 0) - DCCP_BUG("delta (%ld) <= 0", (long)delta); - else - hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); + delta = 1; + hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); break; default: return; } - ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, + ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta, hc->rx_x_recv, hc->rx_pinv); hc->rx_tstamp_last_feedback = now; @@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) static u32 ccid3_first_li(struct sock *sk) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); - u32 x_recv, p, delta; + u32 x_recv, p; + s64 delta; u64 fval; if (hc->rx_rtt == 0) { @@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk) hc->rx_rtt = DCCP_FALLBACK_RTT; } - delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); + 
delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback); + if (delta <= 0) + delta = 1; x_recv = scaled_div32(hc->rx_bytes_recv, delta); if (x_recv == 0) { /* would also trigger divide-by-zero */ DCCP_WARN("X_recv==0\n"); diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index f0252768ecf4c39219a58626198f58e5278031ad..5f5d9eafccf5965b3deb23b2449a1392e07e6c61 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) opt++; kdebug("options: '%s'", opt); do { + int opt_len, opt_nlen; const char *eq; - int opt_len, opt_nlen, opt_vlen, tmp; + char optval[128]; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; - if (opt_len <= 0 || opt_len > 128) { + if (opt_len <= 0 || opt_len > sizeof(optval)) { pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", opt_len); return -EINVAL; } - eq = memchr(opt, '=', opt_len) ?: end; - opt_nlen = eq - opt; - eq++; - opt_vlen = next_opt - eq; /* will be -1 if no value */ + eq = memchr(opt, '=', opt_len); + if (eq) { + opt_nlen = eq - opt; + eq++; + memcpy(optval, eq, next_opt - eq); + optval[next_opt - eq] = '\0'; + } else { + opt_nlen = opt_len; + optval[0] = '\0'; + } - tmp = opt_vlen >= 0 ? 
opt_vlen : 0; - kdebug("option '%*.*s' val '%*.*s'", - opt_nlen, opt_nlen, opt, tmp, tmp, eq); + kdebug("option '%*.*s' val '%s'", + opt_nlen, opt_nlen, opt, optval); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); - if (opt_vlen <= 0) - goto bad_option_value; - ret = kstrtoul(eq, 10, &derrno); + ret = kstrtoul(optval, 10, &derrno); if (ret < 0) goto bad_option_value; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 865e29e62bad87e7fede6599ace0645ebbd2d196..242e74b9d4540a65da0a2011eb7663e01fc1c1ce 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1219,6 +1219,9 @@ int dsa_slave_suspend(struct net_device *slave_dev) { struct dsa_slave_priv *p = netdev_priv(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_detach(slave_dev); if (p->phy) { @@ -1236,6 +1239,9 @@ int dsa_slave_resume(struct net_device *slave_dev) { struct dsa_slave_priv *p = netdev_priv(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_attach(slave_dev); if (p->phy) { diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index df8fd3ce713d757c9ea8f227176cf9e4269366f7..5bbdd05d0cd306b276207c9cd3c3b79d263dc638 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -282,18 +282,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) return ip_hdr(skb)->daddr; in_dev = __in_dev_get_rcu(dev); - BUG_ON(!in_dev); net = dev_net(dev); scope = RT_SCOPE_UNIVERSE; if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); struct flowi4 fl4 = { .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_scope = scope, - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, + .flowi4_mark = vmark ? 
skb->mark : 0, }; if (!fib_lookup(net, &fl4, &res, 0)) return FIB_RES_PREFSRC(net, res); diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 1540db65241a6fd4d96b00546f13a3e3d3cd1815..c9ec1603666bffcfb24597b933a05f53b6d83440 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -448,9 +448,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1859c473b21a862b383edebbcf2c1656f9c58b3b..6a7d980105f60514c8180e6333f0a4a53912c3d5 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index fbeb35ad804bd4af4f8092ff190bb07224146760..502aae3e3ab8e9c263ab25a0e9dcf9e38d48a5f4 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1201,8 +1201,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) if (pmc) { im->interface = pmc->interface; im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; - im->sfmode = pmc->sfmode; - if (pmc->sfmode == MCAST_INCLUDE) { + if (im->sfmode == MCAST_INCLUDE) { im->tomb = pmc->tomb; im->sources = pmc->sources; for (psf = im->sources; psf; psf = psf->sf_next) diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index e691705f0a85a1a43a331063bb4ff571651af759..ba4454ecdf0f64708ae19cfd88d2e22230f60d41 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -356,11 +356,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, { struct inet_frag_queue *q; - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) { - 
inet_frag_schedule_worker(f); - return NULL; - } - q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); if (!q) return NULL; @@ -397,6 +392,11 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, struct inet_frag_queue *q; int depth = 0; + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) { + inet_frag_schedule_worker(f); + return NULL; + } + if (frag_mem_limit(nf) > nf->low_thresh) inet_frag_schedule_worker(f); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e7d15fb0d94d9790675356d3144d204b91eab984..24b066c32e06f62a8041c305f80e7e3c3c691816 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -188,9 +188,9 @@ static inline int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score += 4; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index df8fe0503de0e5050699be822ce04b383a677aa9..4cb1befc39494fcec05f6a93f949bb6eefc4e0a3 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -447,11 +447,16 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ if (i < next->len) { + int delta = -next->truesize; + /* Eat head of the next overlapped fragment * and leave the loop. The next ones cannot overlap. 
*/ if (!pskb_pull(next, i)) goto err; + delta += next->truesize; + if (delta) + add_frag_mem_limit(qp->q.net, delta); FRAG_CB(next)->offset += i; qp->q.meat -= i; if (next->ip_summed != CHECKSUM_UNNECESSARY) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 20a0ca768f7a618a9d52bdeaa8573cc272d88d06..a6b403a2f791d74f655cb9e80f74598b750d91c8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + /* Copy the flags to each fragment. */ IPCB(to)->flags = IPCB(from)->flags; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index d07ba4d5917b4559ea1f97a81095904d759dbe50..048d5f6dd320e1f5cbf32438b572b02630387666 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -148,15 +148,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { struct sockaddr_in sin; const struct iphdr *iph = ip_hdr(skb); - __be16 *ports = (__be16 *)skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 > (int)skb->len) + end = skb_transport_offset(skb) + 4; + if (end > 0 && !pskb_may_pull(skb, end)) return; /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. 
*/ + ports = (__be16 *)skb_transport_header(skb); sin.sin_family = AF_INET; sin.sin_addr.s_addr = iph->daddr; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index abdebca848c977dc3c3c7c0a8fd862100373209e..f0782c91514ce28bd62a37740d8cfa0f55895106 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -781,6 +781,11 @@ static void __init ic_bootp_init_ext(u8 *e) */ static inline void __init ic_bootp_init(void) { + /* Re-initialise all name servers to NONE, in case any were set via the + * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses + * specified there will already have been decoded but are no longer + * needed + */ ic_nameservers_predef(); dev_add_pack(&bootp_packet_type); @@ -1402,6 +1407,13 @@ static int __init ip_auto_config(void) int err; unsigned int i; + /* Initialise all name servers to NONE (but only if the "ip=" or + * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise + * we'll overwrite the IP addresses specified there) + */ + if (ic_set_manually == 0) + ic_nameservers_predef(); + #ifdef CONFIG_PROC_FS proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops); #endif /* CONFIG_PROC_FS */ @@ -1622,6 +1634,7 @@ static int __init ip_auto_config_setup(char *addrs) return 1; } + /* Initialise all name servers to NONE */ ic_nameservers_predef(); /* Parse string for static IP assignment. 
*/ diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index c5de100e614b26022b3b49a77e08defa7db4776f..2171324cdb5c95667d01a2edb98651c09128f569 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -186,8 +186,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, if (write && ret == 0) { low = make_kgid(user_ns, urange[0]); high = make_kgid(user_ns, urange[1]); - if (!gid_valid(low) || !gid_valid(high) || - (urange[1] < urange[0]) || gid_lt(high, low)) { + if (!gid_valid(low) || !gid_valid(high)) + return -EINVAL; + if (urange[1] < urange[0] || gid_lt(high, low)) { low = make_kgid(&init_user_ns, 1); high = make_kgid(&init_user_ns, 0); } @@ -273,8 +274,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, { struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; struct tcp_fastopen_context *ctxt; - int ret; u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ + __le32 key[4]; + int ret, i; tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) @@ -283,11 +285,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, rcu_read_lock(); ctxt = rcu_dereference(tcp_fastopen_ctx); if (ctxt) - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); else - memset(user_key, 0, sizeof(user_key)); + memset(key, 0, sizeof(key)); rcu_read_unlock(); + for (i = 0; i < ARRAY_SIZE(key); i++) + user_key[i] = le32_to_cpu(key[i]); + snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", user_key[0], user_key[1], user_key[2], user_key[3]); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); @@ -303,12 +308,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, * first invocation of tcp_fastopen_cookie_gen */ tcp_fastopen_init_key_once(false); - tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH); + + for (i = 0; i < ARRAY_SIZE(user_key); i++) + key[i] = cpu_to_le32(user_key[i]); + + 
tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH); } bad_key: pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", - user_key[0], user_key[1], user_key[2], user_key[3], + user_key[0], user_key[1], user_key[2], user_key[3], (char *)tbl.data, ret); kfree(tbl.data); return ret; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 9a0b952dd09b5d380a66fe519ba8c230e43044fc..06f247ca9197e3e34728ef688be74d26b465edc7 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -353,6 +353,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ cwnd = (cwnd + 1) & ~1U; + /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ + if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT) + cwnd += 2; + return cwnd; } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 5f5e5936760e65739859d0d8d9717b3204482a43..1a9b88c8cf7263c016277163636eb361f57960d3 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=0 to CE=1 and delayed - * ACK has not sent yet. - */ - if (!ca->ce_state && ca->delayed_ack_reserved) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=0. */ - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (!ca->ce_state) { + /* State has changed from CE=0 to CE=1, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. 
+ */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; @@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=1 to CE=0 and delayed - * ACK has not sent yet. - */ - if (ca->ce_state && ca->delayed_ack_reserved) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=1. */ - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (ca->ce_state) { + /* State has changed from CE=1 to CE=0, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 59a1fb811c00ee04e6827abbd689fbcc1cc35e39..9466531062039d0242ece30654a6a8ff2277cbf0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -199,24 +199,27 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) } } -static void tcp_incr_quickack(struct sock *sk) +static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; + quickacks = min(quickacks, max_quickacks); if (quickacks > icsk->icsk_ack.quick) - icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); + icsk->icsk_ack.quick = quickacks; } -static void tcp_enter_quickack_mode(struct sock *sk) +void tcp_enter_quickack_mode(struct sock *sk, unsigned int 
max_quickacks) { struct inet_connection_sock *icsk = inet_csk(sk); - tcp_incr_quickack(sk); + + tcp_incr_quickack(sk, max_quickacks); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } +EXPORT_SYMBOL(tcp_enter_quickack_mode); /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. @@ -248,8 +251,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } -static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) +static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); + switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, @@ -257,31 +262,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) * it is probably a retransmit. */ if (tp->ecn_flags & TCP_ECN_SEEN) - tcp_enter_quickack_mode((struct sock *)tp); + tcp_enter_quickack_mode(sk, 2); break; case INET_ECN_CE: - if (tcp_ca_needs_ecn((struct sock *)tp)) - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); + if (tcp_ca_needs_ecn(sk)) + tcp_ca_event(sk, CA_EVENT_ECN_IS_CE); if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ - tcp_enter_quickack_mode((struct sock *)tp); + tcp_enter_quickack_mode(sk, 2); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; break; default: - if (tcp_ca_needs_ecn((struct sock *)tp)) - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); + if (tcp_ca_needs_ecn(sk)) + tcp_ca_event(sk, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; break; } } -static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) +static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) { - if (tp->ecn_flags & TCP_ECN_OK) - __tcp_ecn_check_ce(tp, skb); + if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) + __tcp_ecn_check_ce(sk, skb); } static 
void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) @@ -686,7 +691,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) /* The _first_ data packet received, initialize * delayed ACK engine. */ - tcp_incr_quickack(sk); + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; @@ -702,13 +707,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ - tcp_incr_quickack(sk); + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; - tcp_ecn_check_ce(tp, skb); + tcp_ecn_check_ce(sk, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); @@ -3195,6 +3200,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); + + /* If any of the cumulatively ACKed segments was + * retransmitted, non-SACK case cannot confirm that + * progress was due to original transmission due to + * lack of TCPCB_SACKED_ACKED bits even if some of + * the packets may have been never retransmitted. 
+ */ + if (flag & FLAG_RETRANS_DATA_ACKED) + flag &= ~FLAG_ORIG_SACK_ACKED; } else { int delta; @@ -4151,7 +4165,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); - tcp_enter_quickack_mode(sk); + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; @@ -4432,7 +4446,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) u32 seq, end_seq; bool fragstolen; - tcp_ecn_check_ce(tp, skb); + tcp_ecn_check_ce(sk, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); @@ -4701,7 +4715,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: - tcp_enter_quickack_mode(sk); + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); inet_csk_schedule_ack(sk); drop: tcp_drop(sk, skb); @@ -4712,8 +4726,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) goto out_of_window; - tcp_enter_quickack_mode(sk); - if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", @@ -5784,7 +5796,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); - tcp_enter_quickack_mode(sk); + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9e8a95a555dfd18e8184772cdd322227f64c66a1..635714a9e9c62a38c1ee8885b80ca45caef60d1e 100644 --- a/net/ipv4/tcp_output.c +++ 
b/net/ipv4/tcp_output.c @@ -175,8 +175,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp, } /* Account for an ACK we sent. */ -static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) +static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, + u32 rcv_nxt) { + struct tcp_sock *tp = tcp_sk(sk); + + if (unlikely(rcv_nxt != tp->rcv_nxt)) + return; /* Special ACK sent by DCTCP to reflect ECN */ tcp_dec_quickack_mode(sk, pkts); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } @@ -984,8 +989,8 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) * We are working here with either a clone of the original * SKB, or a fresh unique copy made by the retransmit engine. */ -static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, - gfp_t gfp_mask) +static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, + int clone_it, gfp_t gfp_mask, u32 rcv_nxt) { const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet; @@ -1057,7 +1062,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, th->source = inet->inet_sport; th->dest = inet->inet_dport; th->seq = htonl(tcb->seq); - th->ack_seq = htonl(tp->rcv_nxt); + th->ack_seq = htonl(rcv_nxt); *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->tcp_flags); @@ -1098,7 +1103,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, icsk->icsk_af_ops->send_check(sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) - tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); if (skb->len != tcp_header_size) { tcp_event_data_sent(tp, sk); @@ -1135,6 +1140,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, return err; } +static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask) +{ + return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, + 
tcp_sk(sk)->rcv_nxt); +} + /* This routine just queues the buffer for sending. * * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, @@ -3551,7 +3563,7 @@ void tcp_send_delayed_ack(struct sock *sk) } /* This routine sends an ack and also updates the window. */ -void tcp_send_ack(struct sock *sk) +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) { struct sk_buff *buff; @@ -3586,9 +3598,14 @@ void tcp_send_ack(struct sock *sk) skb_set_tcp_pure_ack(buff); /* Send it off, this clears delayed acks for us. */ - tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); + __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); +} +EXPORT_SYMBOL_GPL(__tcp_send_ack); + +void tcp_send_ack(struct sock *sk) +{ + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); } -EXPORT_SYMBOL_GPL(tcp_send_ack); /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index c4724f2a2f3132113acd5e68dd9f1117ea4a5be7..8721164375c0f7f43b01a7f7311a875ddefc3232 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -360,7 +360,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } EXPORT_SYMBOL(udp_gro_receive); diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ea71e4b0ab7aea80fc6b564fddeea7a6b01feaeb..2d36fd09729907c6d1a32fde8bef71d757345efe 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -109,6 +109,7 @@ config IPV6_MIP6 config IPV6_ILA tristate "IPv6: Identifier Locator Addressing (ILA)" depends on NETFILTER + select DST_CACHE select LWTUNNEL ---help--- Support for IPv6 Identifier Locator Addressing (ILA). 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index e438c709706a1ff19de7566d6f9df7839b7d6b63..b17210fe3ae3e87fd787d943cc7498d24b0ed625 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -708,13 +708,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, } if (np->rxopt.bits.rxorigdstaddr) { struct sockaddr_in6 sin6; - __be16 *ports = (__be16 *) skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 <= (int)skb->len) { + end = skb_transport_offset(skb) + 4; + if (end <= 0 || pskb_may_pull(skb, end)) { /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. */ + ports = (__be16 *)skb_transport_header(skb); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ipv6_hdr(skb)->daddr; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 5acb54405b10b637962a7c59c9badb6b4f4f17bd..c5f2b17b7ee1a70d9acb13931de0c1e4e1a4c96b 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -405,9 +405,10 @@ static int icmp6_iif(const struct sk_buff *skb) /* for local traffic to local address, skb dev is the loopback * device. Check if there is a dst attached to the skb and if so - * get the real device index. + * get the real device index. 
Same is needed for replies to a link + * local address on a device enslaved to an L3 master device */ - if (unlikely(iif == LOOPBACK_IFINDEX)) { + if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { const struct rt6_info *rt6 = skb_rt6_info(skb); if (rt6) diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index b01858f5deb1711f24c0c38cba0a3e61d43b390c..6dc93ac2826102b54a6c109fe9953744aaa28062 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score++; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 345e643134bc5680d57de24606fab19e7be90282..48003bc3976fbdbee6fb863d2beda6b2f7820ba6 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -595,6 +595,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9a38a2c641facdf54f8e94f3e7e64496ea31304a..6fd913d63835761a43e7cf6e0dd348df44732a9f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -771,8 +771,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) if (pmc) { im->idev = pmc->idev; im->mca_crcount = idev->mc_qrv; - im->mca_sfmode = pmc->mca_sfmode; - if (pmc->mca_sfmode == MCAST_INCLUDE) { + if (im->mca_sfmode == MCAST_INCLUDE) { im->mca_tomb = pmc->mca_tomb; im->mca_sources = pmc->mca_sources; for (psf = im->mca_sources; psf; psf = psf->sf_next) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 
d081db125905225c269bf17803bd26d576138a43..528218460bc5973aec0a38f63fd64a35ce2ded84 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -803,7 +803,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) return; } } - if (ndopts.nd_opts_nonce) + if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1) memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6); inc = ipv6_addr_is_multicast(daddr); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 64ec23388450c7bd9b24a7d04d4a96b011573ae9..722a9db8c6a7b786500f29cc8bfe732b1e3e23c5 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -618,6 +618,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; + else + skb_dst_drop(skb); out_unlock: spin_unlock_bh(&fq->q.lock); diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 33fb35cbfac132b1a85cd2c9ce62b4344cbe8afe..558fe8cc6d43858ca828cbd8dc8ea65e63bc6602 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void) return -ENOMEM; for_each_possible_cpu(cpu) { - tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL); + tfm = crypto_alloc_shash(algo->name, 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); p_tfm = per_cpu_ptr(algo->tfms, cpu); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 00b67c2adfbee5c59502c216b26e6b601c945bb1..4d2bdcbe806c07c1a6897e184f0f8c4a83d2a2dc 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -918,7 +918,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) &tcp_hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, - ntohs(th->source), tcp_v6_iif(skb), + ntohs(th->source), + tcp_v6_iif_l3_slave(skb), tcp_v6_sdif(skb)); if (!sk1) goto out; @@ -1573,7 +1574,8 @@ static int tcp_v6_rcv(struct sk_buff *skb) skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, 
th->source, &ipv6_hdr(skb)->daddr, - ntohs(th->dest), tcp_v6_iif(skb), + ntohs(th->dest), + tcp_v6_iif_l3_slave(skb), sdif); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 51063d9ed0f75d53be7a59738bf859570cd67d9b..dfd268166e427ebe60cd3927391816d9fe7a19cc 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -1241,7 +1241,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, pr_debug("Create set %s with family %s\n", set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); -#ifndef IP_SET_PROTO_UNDEF +#ifdef IP_SET_PROTO_UNDEF + if (set->family != NFPROTO_UNSPEC) + return -IPSET_ERR_INVALID_FAMILY; +#else if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; #endif diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 85b549e84104a1f260dddf7dbf66256021ac3fd6..9a945024a0b6379fc83d7dc6f49a9625314dd1e7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2710,12 +2710,13 @@ static struct nft_set *nf_tables_set_lookup_byid(const struct net *net, u32 id = ntohl(nla_get_be32(nla)); list_for_each_entry(trans, &net->nft.commit_list, list) { - struct nft_set *set = nft_trans_set(trans); + if (trans->msg_type == NFT_MSG_NEWSET) { + struct nft_set *set = nft_trans_set(trans); - if (trans->msg_type == NFT_MSG_NEWSET && - id == nft_trans_set_id(trans) && - nft_active_genmask(set, genmask)) - return set; + if (id == nft_trans_set_id(trans) && + nft_active_genmask(set, genmask)) + return set; + } } return ERR_PTR(-ENOENT); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index b3932846f6c4661b5e2f31794ee8fde1ca152218..c67abda5d6391b3a208ab949913a8d2b9df9fc30 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -63,6 +63,7 @@ #include #include #include +#include #include 
#include @@ -647,6 +648,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= MAX_LINKS) return -EPROTONOSUPPORT; + protocol = array_index_nospec(protocol, MAX_LINKS); netlink_lock_table(); #ifdef CONFIG_MODULES @@ -977,6 +979,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } + if (nlk->ngroups == 0) + groups = 0; + else if (nlk->ngroups < 8*sizeof(groups)) + groups &= (1UL << nlk->ngroups) - 1; + bound = nlk->bound; if (bound) { /* Ensure nlk->portid is up-to-date. */ diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 2ceefa183ceed6ba3d06f2aae958104a514f2146..6a196e438b6c03d4c86e0a8a78af1c496a7e599b 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); - pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, + pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0, frag_len + LLCP_HEADER_SIZE, &err); if (pdu == NULL) { - pr_err("Could not allocate PDU\n"); - continue; + pr_err("Could not allocate PDU (error=%d)\n", err); + len -= remaining_len; + if (len == 0) + len = err; + break; } pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index 6df6f58a810388af5a1e69fcba99702894041c08..5647905c88d664a7bc1b418aadd271a47c754662 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -42,7 +42,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, __skb_pull(skb, nsh_len); skb_reset_mac_header(skb); - skb_reset_mac_len(skb); + skb->mac_len = proto == htons(ETH_P_TEB) ? 
ETH_HLEN : 0; skb->protocol = proto; features &= NETIF_F_SG; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4fe2e34522d6c1512d3235aa7ab8199fc5f8138f..27dafe36f29c0876a383852ccfe32424f1ab50a7 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2303,6 +2303,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } + + if (do_vnet && + virtio_net_hdr_from_skb(skb, h.raw + macoff - + sizeof(struct virtio_net_hdr), + vio_le(), true, 0)) + goto drop_n_account; + po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; @@ -2310,15 +2317,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } spin_unlock(&sk->sk_receive_queue.lock); - if (do_vnet) { - if (virtio_net_hdr_from_skb(skb, h.raw + macoff - - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) { - spin_lock(&sk->sk_receive_queue.lock); - goto drop_n_account; - } - } - skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 7d264321e05a6988f8e934e65bceed36ccc44fe7..c956997f96739b0424df49901a77a285ffc5ebb0 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -38,6 +39,8 @@ #define QRTR_STATE_MULTI -2 #define QRTR_STATE_INIT -1 +#define AID_VENDOR_QRTR KGIDT_INIT(2906) + /** * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1 * @version: protocol version @@ -133,6 +136,7 @@ static DEFINE_MUTEX(qrtr_port_lock); * @ep: endpoint * @ref: reference count for node * @nid: node id + * @hello_sent: hello packet sent to endpoint * @qrtr_tx_flow: remote port tx flow control list * @resume_tx: wait until remote port acks control flag * @qrtr_tx_lock: lock for qrtr_tx_flow @@ -146,6 +150,7 @@ struct qrtr_node { struct qrtr_endpoint *ep; struct kref ref; unsigned int nid; + atomic_t hello_sent; 
struct radix_tree_root qrtr_tx_flow; struct wait_queue_head resume_tx; @@ -447,18 +452,8 @@ static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to, timeo); if (ret < 0) return ret; - if (!ret) { - waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); - if (!waiter) - return -ENOMEM; - waiter->sk = sk; - sock_hold(sk); - - mutex_lock(&node->qrtr_tx_lock); - list_add_tail(&waiter->node, &flow->waiters); - mutex_unlock(&node->qrtr_tx_lock); + if (!ret) return -EAGAIN; - } if (!node->ep) return -EPIPE; @@ -473,6 +468,17 @@ static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to, mutex_unlock(&node->qrtr_tx_lock); } + if (confirm_rx) { + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) + return -ENOMEM; + waiter->sk = sk; + sock_hold(sk); + + mutex_lock(&node->qrtr_tx_lock); + list_add_tail(&waiter->node, &flow->waiters); + mutex_unlock(&node->qrtr_tx_lock); + } return confirm_rx; } @@ -486,6 +492,9 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, size_t len = skb->len; int rc = -ENODEV; + if (!atomic_read(&node->hello_sent) && type != QRTR_TYPE_HELLO) + return rc; + confirm_rx = qrtr_tx_wait(node, to, skb->sk, type, flags); if (confirm_rx < 0) { kfree_skb(skb); @@ -518,6 +527,9 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, kfree_skb(skb); mutex_unlock(&node->ep_lock); + if (!rc && type == QRTR_TYPE_HELLO) + atomic_inc(&node->hello_sent); + return rc; } @@ -544,21 +556,31 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid) */ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid) { + struct qrtr_node *tnode = NULL; char name[32] = {0,}; if (nid == QRTR_EP_NID_AUTO) return; + if (nid == node->nid) + return; + + down_read(&qrtr_node_lock); + tnode = radix_tree_lookup(&qrtr_nodes, nid); + up_read(&qrtr_node_lock); + if (tnode) + return; down_write(&qrtr_node_lock); - if (!radix_tree_lookup(&qrtr_nodes, nid)) - radix_tree_insert(&qrtr_nodes, nid, 
node); + radix_tree_insert(&qrtr_nodes, nid, node); if (node->nid == QRTR_EP_NID_AUTO) node->nid = nid; up_write(&qrtr_node_lock); - snprintf(name, sizeof(name), "qrtr_%d", nid); - node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0); + if (!node->ilc) { + snprintf(name, sizeof(name), "qrtr_%d", nid); + node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0); + } } /** @@ -743,6 +765,7 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid) skb_queue_head_init(&node->rx_queue); node->nid = QRTR_EP_NID_AUTO; node->ep = ep; + atomic_set(&node->hello_sent, 0); mutex_init(&node->qrtr_tx_lock); INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL); @@ -906,17 +929,18 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) mutex_lock(&qrtr_port_lock); if (!*port) { - rc = idr_alloc(&qrtr_ports, ipc, - QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1, - GFP_ATOMIC); + rc = idr_alloc_cyclic(&qrtr_ports, ipc, QRTR_MIN_EPH_SOCKET, + QRTR_MAX_EPH_SOCKET + 1, GFP_ATOMIC); if (rc >= 0) *port = rc; - } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { + } else if (*port < QRTR_MIN_EPH_SOCKET && + !(capable(CAP_NET_ADMIN) || in_egroup_p(AID_VENDOR_QRTR))) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); } else { - rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC); + rc = idr_alloc_cyclic(&qrtr_ports, ipc, *port, *port + 1, + GFP_ATOMIC); if (rc >= 0) *port = rc; } @@ -1484,8 +1508,7 @@ static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct ifaddrmsg *ifm; int rc; - if (!netlink_capable(skb, CAP_NET_ADMIN) && - !netlink_capable(skb, CAP_NET_BIND_SERVICE)) + if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; ASSERT_RTNL(); diff --git a/net/rds/loop.c b/net/rds/loop.c index f2bf78de5688a3ee44862fe158bffee4ca94d91a..dac6218a460ed4d4a5b7b03ad4f6056a68784a16 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -193,4 +193,5 @@ struct 
rds_transport rds_loop_transport = { .inc_copy_to_user = rds_message_inc_copy_to_user, .inc_free = rds_loop_inc_free, .t_name = "loopback", + .t_type = RDS_TRANS_LOOP, }; diff --git a/net/rds/rds.h b/net/rds/rds.h index d09f6c1facb4a83c5d47340e5ae7134230089388..f685d8b514e553ceaba724918489717cba46bfc2 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -454,6 +454,11 @@ struct rds_notifier { int n_status; }; +/* Available as part of RDS core, so doesn't need to participate + * in get_preferred transport etc + */ +#define RDS_TRANS_LOOP 3 + /** * struct rds_transport - transport specific behavioural hooks * diff --git a/net/rds/recv.c b/net/rds/recv.c index 555f07ccf0dc60d480adb0495091a748dca9b3ba..c27cceae52e1f118ef9c5a4b19831d7654327f96 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, rds_stats_add(s_recv_bytes_added_to_socket, delta); else rds_stats_add(s_recv_bytes_removed_from_socket, -delta); + + /* loop transport doesn't send/recv congestion updates */ + if (rs->rs_transport->t_type == RDS_TRANS_LOOP) + return; + now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 3028298ca56134e86b1ef60c9987b37490e12f19..62b1581d44a5af8c66a5c3cdeef2b7fb631558a1 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -115,9 +115,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, while (*pp) { parent = *pp; xcall = rb_entry(parent, struct rxrpc_call, sock_node); - if (user_call_ID < call->user_call_ID) + if (user_call_ID < xcall->user_call_ID) pp = &(*pp)->rb_left; - else if (user_call_ID > call->user_call_ID) + else if (user_call_ID > xcall->user_call_ID) pp = &(*pp)->rb_right; else goto id_in_use; diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index 
c98a61e980baa68931f7e974582eb1c43ed60cf5..9c4c2bb547d7ea1da26e956a77b23592d467365b 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { qdisc_drop(skb, sch, to_free); - return NET_XMIT_SUCCESS; + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } static struct sk_buff *blackhole_dequeue(struct Qdisc *sch) diff --git a/net/socket.c b/net/socket.c index 84faf10210e8d43dbb5f8aa64d117ae4c6f8cfd3..bc9619ee0b9659001a9066c48fa6b5e4267ebe7d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include @@ -2468,6 +2469,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; + call = array_index_nospec(call, SYS_SENDMMSG + 1); len = nargs[call]; if (len > sizeof(a)) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index c741365f77daf9e92b6485d212c50ab6be3f6430..a68c754e84ea639bf30e0086732b5138d454ce0f 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -35,7 +35,6 @@ struct _strp_msg { */ struct strp_msg strp; int accum_len; - int early_eaten; }; static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) @@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, head = strp->skb_head; if (head) { /* Message already in progress */ - - stm = _strp_msg(head); - if (unlikely(stm->early_eaten)) { - /* Already some number of bytes on the receive sock - * data saved in skb_head, just indicate they - * are consumed. - */ - eaten = orig_len <= stm->early_eaten ? - orig_len : stm->early_eaten; - stm->early_eaten -= eaten; - - return eaten; - } - if (unlikely(orig_offset)) { /* Getting data with a non-zero offset when a message is * in progress is not expected. 
If it does happen, we @@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, } stm->accum_len += cand_len; + eaten += cand_len; strp->need_bytes = stm->strp.full_len - stm->accum_len; - stm->early_eaten = cand_len; STRP_STATS_ADD(strp->stats.bytes, cand_len); desc->count = 0; /* Stop reading socket */ break; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 97b9d4f671ac3a639399dd0e1bfd698672c5d68f..2aaf46599126e55eee01e819f06cf0ff1b61d858 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -270,7 +270,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) wait_for_completion(&ia->ri_remove_done); ia->ri_id = NULL; - ia->ri_pd = NULL; ia->ri_device = NULL; /* Return 1 to ensure the core destroys the id. */ return 1; @@ -464,7 +463,9 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia) ia->ri_id->qp = NULL; } ib_free_cq(ep->rep_attr.recv_cq); + ep->rep_attr.recv_cq = NULL; ib_free_cq(ep->rep_attr.send_cq); + ep->rep_attr.send_cq = NULL; /* The ULP is responsible for ensuring all DMA * mappings and MRs are gone. @@ -477,6 +478,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia) rpcrdma_dma_unmap_regbuf(req->rl_recvbuf); } rpcrdma_destroy_mrs(buf); + ib_dealloc_pd(ia->ri_pd); + ia->ri_pd = NULL; /* Allow waiters to continue */ complete(&ia->ri_remove_done); @@ -650,14 +653,16 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) cancel_delayed_work_sync(&ep->rep_connect_worker); - if (ia->ri_id->qp) { + if (ia->ri_id && ia->ri_id->qp) { rpcrdma_ep_disconnect(ep, ia); rdma_destroy_qp(ia->ri_id); ia->ri_id->qp = NULL; } - ib_free_cq(ep->rep_attr.recv_cq); - ib_free_cq(ep->rep_attr.send_cq); + if (ep->rep_attr.recv_cq) + ib_free_cq(ep->rep_attr.recv_cq); + if (ep->rep_attr.send_cq) + ib_free_cq(ep->rep_attr.send_cq); } /* Re-establish a connection after a device removal event. 
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 3c86614462f6d31c3c54caa7809fc82f92834e5f..fb79caf56d0e8fe4ec0a8baf08b3067c54666315 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -135,9 +135,10 @@ static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg, pfrag->offset += use; sge = sg + num_elem - 1; - if (num_elem > first_coalesce && sg_page(sg) == pfrag->page && - sg->offset + sg->length == orig_offset) { - sg->length += use; + + if (num_elem > first_coalesce && sg_page(sge) == pfrag->page && + sge->offset + sge->length == orig_offset) { + sge->length += use; } else { sge++; sg_unmark_end(sge); @@ -449,7 +450,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) ret = tls_push_record(sk, msg->msg_flags, record_type); if (!ret) continue; - if (ret == -EAGAIN) + if (ret < 0) goto send_end; copied -= try_to_copy; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 403d86e80162e7796fd75249b1ae876d1eee1e6a..fdb294441682b0a3249b9270540bf9f6c8d0c0c6 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) return -ENODEV; } - if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) + if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) return virtio_transport_send_pkt_loopback(vsock, pkt); if (pkt->reply) diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 7e334fd31c154380f83c88fb0b6bd9c73d50efcf..f8553179bdd78f670dfb911c07bb1df47c9c12da 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -379,6 +379,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id) static int read_idmap[READING_MAX_ID] = { [READING_FIRMWARE] = FIRMWARE_CHECK, + [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK, [READING_MODULE] = MODULE_CHECK, [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK, [READING_KEXEC_INITRAMFS] = 
KEXEC_INITRAMFS_CHECK, diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c index 27a2ede619464d05146aedcd91d43720f4062be3..f66577307573c8c8071f8e257cc38575c95f9cdb 100644 --- a/security/pfe/pfk.c +++ b/security/pfe/pfk.c @@ -288,7 +288,7 @@ static int pfk_get_key_for_bio(const struct bio *bio, { const struct inode *inode; enum pfe_type which_pfe; - const struct blk_encryption_key *key; + const struct blk_encryption_key *key = NULL; inode = pfk_bio_get_inode(bio); which_pfe = pfk_get_pfe_type(inode); @@ -305,7 +305,9 @@ static int pfk_get_key_for_bio(const struct bio *bio, * bio is not for an encrypted file. Use ->bi_crypt_key if it was set. * Otherwise, don't encrypt/decrypt the bio. */ +#ifdef CONFIG_DM_DEFAULT_KEY key = bio->bi_crypt_key; +#endif if (!key) { *is_pfe = false; return -EINVAL; @@ -464,13 +466,18 @@ int pfk_load_key_end(const struct bio *bio, bool *is_pfe) */ bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2) { - const struct blk_encryption_key *key1; - const struct blk_encryption_key *key2; + const struct blk_encryption_key *key1 = NULL; + const struct blk_encryption_key *key2 = NULL; const struct inode *inode1; const struct inode *inode2; enum pfe_type which_pfe1; enum pfe_type which_pfe2; +#ifdef CONFIG_DM_DEFAULT_KEY + key1 = bio1->bi_crypt_key; + key2 = bio2->bi_crypt_key; +#endif + if (!pfk_is_ready()) return false; diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index 2683b971721532acd8867308e57fc4fcd6a331be..56be1630bd3e6cbb52eb321d3ec64b8e9d728c18 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c @@ -1850,7 +1850,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device) if (!kctl) return -ENOMEM; kctl->id.device = device; - snd_ctl_add(emu->card, kctl); + err = snd_ctl_add(emu->card, kctl); + if (err < 0) + return err; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024); diff --git a/sound/pci/emu10k1/memory.c 
b/sound/pci/emu10k1/memory.c index 4f1f69be18651b7c692f9feb812c239e8f911386..8c778fa33031733d9c30c1782c991101e50ed462 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c @@ -237,13 +237,13 @@ search_empty(struct snd_emu10k1 *emu, int size) static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) { if (addr & ~emu->dma_mask) { - dev_err(emu->card->dev, + dev_err_ratelimited(emu->card->dev, "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr); return 0; } if (addr & (EMUPAGESIZE-1)) { - dev_err(emu->card->dev, "page is not aligned\n"); + dev_err_ratelimited(emu->card->dev, "page is not aligned\n"); return 0; } return 1; @@ -334,7 +334,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst else addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! is_valid_page(emu, addr)) { - dev_err(emu->card->dev, + dev_err_ratelimited(emu->card->dev, "emu: failure page = %d\n", idx); mutex_unlock(&hdr->block_mutex); return NULL; diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c index 73a67bc3586bcd55e6e0387093fcda8917e6c2e7..e3fb9c61017c6535e3d8fb4cb4f9694bce2e2bb1 100644 --- a/sound/pci/fm801.c +++ b/sound/pci/fm801.c @@ -1068,11 +1068,19 @@ static int snd_fm801_mixer(struct fm801 *chip) if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0) return err; } - for (i = 0; i < FM801_CONTROLS; i++) - snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip)); + for (i = 0; i < FM801_CONTROLS; i++) { + err = snd_ctl_add(chip->card, + snd_ctl_new1(&snd_fm801_controls[i], chip)); + if (err < 0) + return err; + } if (chip->multichannel) { - for (i = 0; i < FM801_CONTROLS_MULTI; i++) - snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); + for (i = 0; i < FM801_CONTROLS_MULTI; i++) { + err = snd_ctl_add(chip->card, + snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); + if (err < 0) + return err; + } } return 0; } diff --git 
a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 3e73d5c6ccfc95155d79557d0a5292f865afe777..119f3b504765ef994c02917b7b80b10ae4063aa9 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -38,6 +38,10 @@ /* Enable this to see controls for tuning purpose. */ /*#define ENABLE_TUNING_CONTROLS*/ +#ifdef ENABLE_TUNING_CONTROLS +#include +#endif + #define FLOAT_ZERO 0x00000000 #define FLOAT_ONE 0x3f800000 #define FLOAT_TWO 0x40000000 @@ -3067,8 +3071,8 @@ static int equalizer_ctl_put(struct snd_kcontrol *kcontrol, return 1; } -static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); -static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0); +static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); +static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0); static int add_tuning_control(struct hda_codec *codec, hda_nid_t pnid, hda_nid_t nid, diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index ba9a7e552183392b1d07d535a91bdd9206cea65d..88ce2f1022e1a5d5d8d551e17ee6e44b87545b03 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -965,6 +965,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index bf7737fc3b28fefdf5389012601953f170141348..dcc9e6551b51eac7eb4e3821e458ce37deacf91b 100644 --- 
a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6402,6 +6402,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 6383b95416ee87af02698354d234754b7dab9ad0..174da18a0e454389ee23f67e3681968e4dd700b3 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1896,8 +1896,10 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream) continue; if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) - continue; + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) { + soc_pcm_hw_free(be_substream); + be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; + } dev_dbg(be->dev, "ASoC: close BE %s\n", be->dai_link->name); diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 30cdad2eab7f2b19d43879a9219f647c79ac415a..c1619860a5de1e964a0045e126b0dcf1ec53020a 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1997,6 +1997,13 @@ static void set_link_hw_format(struct snd_soc_dai_link *link, link->dai_fmt = hw_config->fmt & SND_SOC_DAIFMT_FORMAT_MASK; + /* clock gating */ + if (hw_config->clock_gated == SND_SOC_TPLG_DAI_CLK_GATE_GATED) + link->dai_fmt |= SND_SOC_DAIFMT_GATED; + else if (hw_config->clock_gated == + SND_SOC_TPLG_DAI_CLK_GATE_CONT) + link->dai_fmt |= SND_SOC_DAIFMT_CONT; + /* clock 
signal polarity */ invert_bclk = hw_config->invert_bclk; invert_fsync = hw_config->invert_fsync; @@ -2010,13 +2017,15 @@ static void set_link_hw_format(struct snd_soc_dai_link *link, link->dai_fmt |= SND_SOC_DAIFMT_IB_IF; /* clock masters */ - bclk_master = hw_config->bclk_master; - fsync_master = hw_config->fsync_master; - if (!bclk_master && !fsync_master) + bclk_master = (hw_config->bclk_master == + SND_SOC_TPLG_BCLK_CM); + fsync_master = (hw_config->fsync_master == + SND_SOC_TPLG_FSYNC_CM); + if (bclk_master && fsync_master) link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; - else if (bclk_master && !fsync_master) - link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM; else if (!bclk_master && fsync_master) + link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM; + else if (bclk_master && !fsync_master) link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFS; else link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index e30ec91cd681054bad6de5f0219b916eeaf15d04..f97f309d94b8a33cb24ddc6d3a91b87d44bf19bd 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -1441,7 +1441,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs, if (bytes % (runtime->sample_bits >> 3) != 0) { int oldbytes = bytes; bytes = frames * stride; - dev_warn(&subs->dev->dev, + dev_warn_ratelimited(&subs->dev->dev, "Corrected urb data len. 
%d->%d\n", oldbytes, bytes); } diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c index c300d4642a6308df6de78f6a42072cdc90063478..abe3be30cf5bba83baaaeea965981e1a61aede24 100644 --- a/sound/usb/usb_audio_qmi_svc.c +++ b/sound/usb/usb_audio_qmi_svc.c @@ -447,7 +447,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, struct uac_format_type_i_discrete_descriptor *fmt_v1; struct uac_format_type_i_ext_descriptor *fmt_v2; struct uac1_as_header_descriptor *as; - int ret = -ENODEV; + int ret; int protocol, card_num, pcm_dev_num; void *hdr_ptr; u8 *xfer_buf; @@ -462,6 +462,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, if (!iface) { pr_err("%s: interface # %d does not exist\n", __func__, subs->interface); + ret = -ENODEV; goto err; } @@ -480,12 +481,14 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, if (!fmt) { pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__, subs->interface, subs->altset_idx); + ret = -ENODEV; goto err; } } if (!uadev[card_num].ctrl_intf) { pr_err("%s: audio ctrl intf info not cached\n", __func__); + ret = -ENODEV; goto err; } @@ -496,6 +499,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, NULL, UAC_HEADER); if (!hdr_ptr) { pr_err("%s: no UAC_HEADER desc\n", __func__); + ret = -ENODEV; goto err; } } @@ -506,6 +510,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, if (!as) { pr_err("%s: %u:%d : no UAC_AS_GENERAL desc\n", __func__, subs->interface, subs->altset_idx); + ret = -ENODEV; goto err; } resp->data_path_delay = as->bDelay; @@ -552,6 +557,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, resp->usb_audio_subslot_size_valid = 1; } else { pr_err("%s: unknown protocol version %x\n", __func__, protocol); + ret = -ENODEV; goto err; } @@ -565,6 +571,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, if (!ep) { pr_err("%s: data ep # %d context is null\n", __func__, subs->data_endpoint->ep_num); 
+ ret = -ENODEV; goto err; } data_ep_pipe = subs->data_endpoint->pipe; @@ -574,6 +581,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, tr_data_pa = usb_get_xfer_ring_phys_addr(subs->dev, ep, &dma); if (!tr_data_pa) { pr_err("%s:failed to get data ep ring dma address\n", __func__); + ret = -ENODEV; goto err; } @@ -593,6 +601,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, if (!tr_sync_pa) { pr_err("%s:failed to get sync ep ring dma address\n", __func__); + ret = -ENODEV; goto err; } resp->xhci_mem_info.tr_sync.pa = dma; @@ -618,17 +627,21 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, ret); goto err; } + xhci_pa = usb_get_sec_event_ring_phys_addr(subs->dev, resp->interrupter_num, &dma); if (!xhci_pa) { pr_err("%s: failed to get sec event ring dma address\n", __func__); + ret = -ENODEV; goto err; } va = uaudio_iommu_map(MEM_EVENT_RING, xhci_pa, PAGE_SIZE, NULL); - if (!va) + if (!va) { + ret = -ENOMEM; goto err; + } resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va, uaudio_qdev->sid); @@ -637,15 +650,19 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, uaudio_qdev->er_mapped = true; resp->speed_info = get_speed_info(subs->dev->speed); - if (resp->speed_info == USB_AUDIO_DEVICE_SPEED_INVALID_V01) + if (resp->speed_info == USB_AUDIO_DEVICE_SPEED_INVALID_V01) { + ret = -ENODEV; goto unmap_er; + } resp->speed_info_valid = 1; /* data transfer ring */ va = uaudio_iommu_map(MEM_XFER_RING, tr_data_pa, PAGE_SIZE, NULL); - if (!va) + if (!va) { + ret = -ENOMEM; goto unmap_er; + } tr_data_va = va; resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va, @@ -658,8 +675,10 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, xhci_pa = resp->xhci_mem_info.tr_sync.pa; va = uaudio_iommu_map(MEM_XFER_RING, tr_sync_pa, PAGE_SIZE, NULL); - if (!va) + if (!va) { + ret = -ENOMEM; goto unmap_data; + } tr_sync_va = va; resp->xhci_mem_info.tr_sync.va = PREPEND_SID_TO_IOVA(va, @@ -683,14 
+702,18 @@ static int prepare_qmi_response(struct snd_usb_substream *subs, } xfer_buf = usb_alloc_coherent(subs->dev, len, GFP_KERNEL, &xfer_buf_pa); - if (!xfer_buf) + if (!xfer_buf) { + ret = -ENOMEM; goto unmap_sync; + } dma_get_sgtable(subs->dev->bus->sysdev, &sgt, xfer_buf, xfer_buf_pa, len); va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len, &sgt); - if (!va) + if (!va) { + ret = -ENOMEM; goto unmap_sync; + } resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa; resp->xhci_mem_info.xfer_buff.size = len; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index e81a20ea8d7df1d2c7d8313279daa834eda88415..988310cd3049cba330a16ebc07b67994ce3ba973 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -72,6 +72,7 @@ static void inc_group_count(struct list_head *list, %type value_sym %type event_config %type opt_event_config +%type opt_pmu_config %type event_term %type event_pmu %type event_legacy_symbol @@ -223,7 +224,7 @@ event_def: event_pmu | event_bpf_file event_pmu: -PE_NAME opt_event_config +PE_NAME opt_pmu_config { struct list_head *list, *orig_terms, *terms; @@ -486,6 +487,17 @@ opt_event_config: $$ = NULL; } +opt_pmu_config: +'/' event_config '/' +{ + $$ = $2; +} +| +'/' '/' +{ + $$ = NULL; +} + start_terms: event_config { struct parse_events_state *parse_state = _parse_state; diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh index c670359becc6c2d0d6f0ba750a40cc6aad9a9e27..92897880434241fe08c36580997b42a18a4f2546 100755 --- a/tools/testing/selftests/intel_pstate/run.sh +++ b/tools/testing/selftests/intel_pstate/run.sh @@ -30,9 +30,12 @@ EVALUATE_ONLY=0 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + if ! uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ | grep -q x86; then echo "$0 # Skipped: Test can only run on x86 architectures." 
- exit 0 + exit $ksft_skip fi max_cpus=$(($(nproc)-1)) @@ -48,11 +51,12 @@ function run_test () { echo "sleeping for 5 seconds" sleep 5 - num_freqs=$(cat /proc/cpuinfo | grep MHz | sort -u | wc -l) - if [ $num_freqs -le 2 ]; then - cat /proc/cpuinfo | grep MHz | sort -u | tail -1 > /tmp/result.$1 + grep MHz /proc/cpuinfo | sort -u > /tmp/result.freqs + num_freqs=$(wc -l /tmp/result.freqs | awk ' { print $1 } ') + if [ $num_freqs -ge 2 ]; then + tail -n 1 /tmp/result.freqs > /tmp/result.$1 else - cat /proc/cpuinfo | grep MHz | sort -u > /tmp/result.$1 + cp /tmp/result.freqs /tmp/result.$1 fi ./msr 0 >> /tmp/result.$1 @@ -82,21 +86,20 @@ _max_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $2 } ') max_freq=$(($_max_freq / 1000)) -for freq in `seq $max_freq -100 $min_freq` +[ $EVALUATE_ONLY -eq 0 ] && for freq in `seq $max_freq -100 $min_freq` do echo "Setting maximum frequency to $freq" cpupower frequency-set -g powersave --max=${freq}MHz >& /dev/null - [ $EVALUATE_ONLY -eq 0 ] && run_test $freq + run_test $freq done -echo "==============================================================================" +[ $EVALUATE_ONLY -eq 0 ] && cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null +echo "==============================================================================" echo "The marketing frequency of the cpu is $mkt_freq MHz" echo "The maximum frequency of the cpu is $max_freq MHz" echo "The minimum frequency of the cpu is $min_freq MHz" -cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null - # make a pretty table echo "Target Actual Difference MSR(0x199) max_perf_pct" for freq in `seq $max_freq -100 $min_freq` @@ -104,10 +107,6 @@ do result_freq=$(cat /tmp/result.${freq} | grep "cpu MHz" | awk ' { print $4 } ' | awk -F "." 
' { print $1 } ') msr=$(cat /tmp/result.${freq} | grep "msr" | awk ' { print $3 } ') max_perf_pct=$(cat /tmp/result.${freq} | grep "max_perf_pct" | awk ' { print $2 } ' ) - if [ $result_freq -eq $freq ]; then - echo " $freq $result_freq 0 $msr $(($max_perf_pct*3300))" - else - echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))" - fi + echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))" done exit 0 diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh index daabb350697c6ac83bd06016062ad8e66fc641fe..bf83db61013ab533e10f07688af024649294ee35 100755 --- a/tools/testing/selftests/memfd/run_tests.sh +++ b/tools/testing/selftests/memfd/run_tests.sh @@ -1,6 +1,9 @@ #!/bin/bash # please run as root +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + # # Normal tests requiring no special resources # @@ -29,12 +32,13 @@ if [ -n "$freepgs" ] && [ $freepgs -lt $hpages_test ]; then nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` hpages_needed=`expr $hpages_test - $freepgs` + if [ $UID != 0 ]; then + echo "Please run memfd with hugetlbfs test as root" + exit $ksft_skip + fi + echo 3 > /proc/sys/vm/drop_caches echo $(( $hpages_needed + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages - if [ $? 
-ne 0 ]; then - echo "Please run this test as root" - exit 1 - fi while read name size unit; do if [ "$name" = "HugePages_Free:" ]; then freepgs=$size @@ -53,7 +57,7 @@ if [ $freepgs -lt $hpages_test ]; then fi printf "Not enough huge pages available (%d < %d)\n" \ $freepgs $needpgs - exit 1 + exit $ksft_skip fi # diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index d1fc0f9f00fb8cb50242e83e20234fdffd3b610f..ed8c9d360c0f8e80cf65b564728db0bec52af168 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -135,11 +135,11 @@ static int refresh_imported_device_list(void) return 0; } -static int get_nports(void) +static int get_nports(struct udev_device *hc_device) { const char *attr_nports; - attr_nports = udev_device_get_sysattr_value(vhci_driver->hc_device, "nports"); + attr_nports = udev_device_get_sysattr_value(hc_device, "nports"); if (!attr_nports) { err("udev_device_get_sysattr_value nports failed"); return -1; @@ -242,35 +242,41 @@ static int read_record(int rhport, char *host, unsigned long host_len, int usbip_vhci_driver_open(void) { + int nports; + struct udev_device *hc_device; + udev_context = udev_new(); if (!udev_context) { err("udev_new failed"); return -1; } - vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver)); - /* will be freed in usbip_driver_close() */ - vhci_driver->hc_device = + hc_device = udev_device_new_from_subsystem_sysname(udev_context, USBIP_VHCI_BUS_TYPE, USBIP_VHCI_DEVICE_NAME); - if (!vhci_driver->hc_device) { + if (!hc_device) { err("udev_device_new_from_subsystem_sysname failed"); goto err; } - vhci_driver->nports = get_nports(); - dbg("available ports: %d", vhci_driver->nports); - - if (vhci_driver->nports <= 0) { + nports = get_nports(hc_device); + if (nports <= 0) { err("no available ports"); goto err; - } else if (vhci_driver->nports > MAXNPORT) { - err("port number exceeds %d", MAXNPORT); + } + dbg("available ports: %d", nports); + + 
vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver) + + nports * sizeof(struct usbip_imported_device)); + if (!vhci_driver) { + err("vhci_driver allocation failed"); goto err; } + vhci_driver->nports = nports; + vhci_driver->hc_device = hc_device; vhci_driver->ncontrollers = get_ncontrollers(); dbg("available controllers: %d", vhci_driver->ncontrollers); @@ -285,7 +291,7 @@ int usbip_vhci_driver_open(void) return 0; err: - udev_device_unref(vhci_driver->hc_device); + udev_device_unref(hc_device); if (vhci_driver) free(vhci_driver); diff --git a/tools/usb/usbip/libsrc/vhci_driver.h b/tools/usb/usbip/libsrc/vhci_driver.h index 418b404d5121079837aade8ba692c977e441ef52..6c9aca2167051cd5ee15674092c5aee31e0f895d 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.h +++ b/tools/usb/usbip/libsrc/vhci_driver.h @@ -13,7 +13,6 @@ #define USBIP_VHCI_BUS_TYPE "platform" #define USBIP_VHCI_DEVICE_NAME "vhci_hcd.0" -#define MAXNPORT 128 enum hub_speed { HUB_SPEED_HIGH = 0, @@ -41,7 +40,7 @@ struct usbip_vhci_driver { int ncontrollers; int nports; - struct usbip_imported_device idev[MAXNPORT]; + struct usbip_imported_device idev[]; }; diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c index 9db9d21bb2ecee75c89c027ccb07cfdeb21062de..6a8db858caa5f3d3f908185afdc09b43d6db774e 100644 --- a/tools/usb/usbip/src/usbip_detach.c +++ b/tools/usb/usbip/src/usbip_detach.c @@ -43,7 +43,7 @@ void usbip_detach_usage(void) static int detach_port(char *port) { - int ret; + int ret = 0; uint8_t portnum; char path[PATH_MAX+1]; @@ -73,9 +73,12 @@ static int detach_port(char *port) } ret = usbip_vhci_detach_device(portnum); - if (ret < 0) - return -1; + if (ret < 0) { + ret = -1; + goto call_driver_close; + } +call_driver_close: usbip_vhci_driver_close(); return ret; diff --git a/verity_dev_keys.x509 b/verity_dev_keys.x509 new file mode 100644 index 0000000000000000000000000000000000000000..86399c3c1dd7d5ae8dcee2c0b68b3f12a7237f28 --- /dev/null +++ 
b/verity_dev_keys.x509 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g +VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE +AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe +Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET +MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G +A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p +ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi +6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o +fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3 +T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r +AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP +jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w +HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7 +oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix +NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6 +JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N +dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg +UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA +yttuAJAEAymk1mipd9+zp38= +-----END CERTIFICATE----- diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9bee849db682d26f1f7969f0ff6b4dd911e1a4f7..d5f1d83645716e37d70136a29644f54ecb353512 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -51,8 +51,8 @@ __asm__(".arch_extension virt"); #endif +DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); -static kvm_cpu_context_t __percpu *kvm_host_cpu_state; /* Per-CPU variable containing the currently running vcpu. 
*/ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); @@ -351,7 +351,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } vcpu->cpu = cpu; - vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); + vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state); kvm_arm_set_running_vcpu(vcpu); @@ -1259,19 +1259,8 @@ static inline void hyp_cpu_pm_exit(void) } #endif -static void teardown_common_resources(void) -{ - free_percpu(kvm_host_cpu_state); -} - static int init_common_resources(void) { - kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); - if (!kvm_host_cpu_state) { - kvm_err("Cannot allocate host CPU state\n"); - return -ENOMEM; - } - /* set size of VMID supported by CPU */ kvm_vmid_bits = kvm_get_vmid_bits(); kvm_info("%d-bit VMID\n", kvm_vmid_bits); @@ -1413,7 +1402,7 @@ static int init_hyp_mode(void) for_each_possible_cpu(cpu) { kvm_cpu_context_t *cpu_ctxt; - cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); + cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu); err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP); if (err) { @@ -1422,6 +1411,10 @@ static int init_hyp_mode(void) } } + err = hyp_map_aux_data(); + if (err) + kvm_err("Cannot map host auxilary data: %d\n", err); + return 0; out_err: @@ -1497,7 +1490,6 @@ int kvm_arch_init(void *opaque) if (!in_hyp_mode) teardown_hyp_mode(); out_err: - teardown_common_resources(); return err; } diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index d7fd46fe9efb35ca28a0685b333f8c68a61b2d64..4b4221b0d4ba07ade93319ea9b685ce9ad5a79a7 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -139,7 +139,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) return -1; rd = kvm_vcpu_dabt_get_rd(vcpu); - addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va); + addr = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va); addr += fault_ipa - vgic->vgic_cpu_base; if 
(kvm_vcpu_dabt_iswrite(vcpu)) { diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index c4762bef13c6d389ff0c1e4f656322d66bb167b0..c95ab4c5a47516067737b1612d7545fa9543b3b1 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu) int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) { u32 func_id = smccc_get_function(vcpu); - u32 val = PSCI_RET_NOT_SUPPORTED; + u32 val = SMCCC_RET_NOT_SUPPORTED; u32 feature; switch (func_id) { @@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) switch(feature) { case ARM_SMCCC_ARCH_WORKAROUND_1: if (kvm_arm_harden_branch_predictor()) - val = 0; + val = SMCCC_RET_SUCCESS; + break; + case ARM_SMCCC_ARCH_WORKAROUND_2: + switch (kvm_arm_have_ssbd()) { + case KVM_SSBD_FORCE_DISABLE: + case KVM_SSBD_UNKNOWN: + break; + case KVM_SSBD_KERNEL: + val = SMCCC_RET_SUCCESS; + break; + case KVM_SSBD_FORCE_ENABLE: + case KVM_SSBD_MITIGATED: + val = SMCCC_RET_NOT_REQUIRED; + break; + } break; } break; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index f2ac53ab82438f0b473ecd8ed91b1e2548af7ca2..58a9b31b0dd54192e35e1821ba2399d0c780fce5 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work) { struct kvm_kernel_irqfd *irqfd = container_of(work, struct kvm_kernel_irqfd, shutdown); + struct kvm *kvm = irqfd->kvm; u64 cnt; + /* Make sure irqfd has been initalized in assign path. */ + synchronize_srcu(&kvm->irq_srcu); + /* * Synchronize with the wait-queue and unhook ourselves to prevent * further events. @@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) idx = srcu_read_lock(&kvm->irq_srcu); irqfd_update(kvm, irqfd); - srcu_read_unlock(&kvm->irq_srcu, idx); list_add_tail(&irqfd->list, &kvm->irqfds.items); @@ -421,6 +424,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) } #endif + srcu_read_unlock(&kvm->irq_srcu, idx); return 0; fail: